Dataset columns:
  query            string  (lengths 9 to 9.05k)
  document         string  (lengths 10 to 222k)
  metadata         dict
  negatives        list    (length 30)
  negative_scores  list    (length 30)
  document_score   string  (lengths 4 to 10)
  document_rank    string  (2 distinct values)
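Each row below pairs a natural-language query with a positive code document, 30 hard-negative documents, their retrieval scores, the positive's score, and its rank. A minimal loading sketch follows; the repository id is a placeholder and the field access assumes the schema above.

from datasets import load_dataset

# Placeholder dataset path; substitute the actual repository id.
ds = load_dataset("org/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])                 # natural-language description of the target code
print(row["document"])              # positive code snippet
print(len(row["negatives"]))        # 30 hard-negative snippets
print(len(row["negative_scores"]))  # 30 matching similarity scores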
Test Shoppinglist creation without a user fails
def test_create_shoplist_without_user_fails(self):
    User.users = {}
    result = self.app.create_shoplist()
    expected = {1: {'user_id': 1, 'name': 'Apple', 'description': 'Fresh Green Apples'}}
    self.assertNotEqual(expected, result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_successful_shoplist_creation(self):\n result = self.app.create_shoplist()\n expected = {5: {'user_id': 0, 'name': 'apples', 'description': 'Fresh Green Apples'}}\n self.assertEqual(expected, result)", "def test_create_new_shopping_list_correct_user(create_user, create_shopping_list): # noqa\n shopping_list = create_shopping_list\n owner = create_user\n assert shopping_list.owner == owner", "def test_app_can_add_list(self):\n add_list=self.client.post('/addshoppinglists/?user='+self.user['user'], \n data=self.shopllist, \n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertEqual(add_list.status_code,200)", "def test_shoppinglist_creation_with_error(self):\n res = self.app.post(\n '/shoppinglist', data={'name': 'Easter!'})\n self.assertEqual(res.status_code, 200)\n response = self.shopping_class_obj.create_list(\n 'Easter!', '[email protected]')\n self.assertIn(\"No special characters\", response)", "def test_shoppinglist_creation(self):\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n res = self.app.post(\n '/shoppinglist', data={'list-name': 'Easter'})\n self.assertEqual(res.status_code, 200)\n response = self.shopping_class_obj.create_list(\n 'Easter', '[email protected]')\n self.assertIsInstance(response, list)\n self.assertIn(\"Easter\", str(res.data))", "def test_create_shoplist(self):\n new_shoplist = self.app\n new_shoplist.create_shoplist()\n self.assertEqual(len(new_shoplist.shoplists), 1)", "def test_create_new_shopping_list(create_shopping_list):\n shopping_list = create_shopping_list\n assert shopping_list.items.values_list().count() == 0\n assert shopping_list.budget == 0", "def test_shoppingitems_creation(self):\n # register and login a user\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n # create a shopping list\n self.shopping_class_obj.create_list(\n 'Easter', '[email protected]')\n # make a post request to add an item\n res = self.app.post(\n '/shoppingitems/Easter', data={'item-name': 'Bread'})\n self.assertEqual(res.status_code, 200)\n response = self.item_class_obj.add_item(\n 'Easter', 'Bread', '[email protected]')\n self.assertIsInstance(response, list)\n # check if item was successfully created\n self.assertIn(\"Bread\", str(res.data))", "def test_creating_shopping_item(create_shopping_item, create_user):\n owner = create_user\n shopping_item = create_shopping_item\n assert shopping_item.owner == owner", "def test_shoppingitems_creation_with_error(self):\n # register and login a user\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n # create a shopping list\n self.shopping_class_obj.create_list(\n 'Easter', '[email protected]')\n # make a post request to add an item\n res = self.app.post(\n '/shoppingitems/Easter', data={'item-name': 'Bread-'})\n self.assertEqual(res.status_code, 200)\n response = self.item_class_obj.add_item(\n 'Easter', 'Bread-', '[email protected]')\n # test response from shoppingitems class\n self.assertIn(\"No special characters\", response)\n # check if item was successfully created\n self.assertIn(\"No special characters\", str(res.data))", "def test_user_create_list(self):\n c = Client()\n c.login(username= 'testuser', password = 'password')\n c.post('/inventory/list/add/', {'name': 'testname'})\n\n self.assertEqual(List.objects.get(name = 'testname'), List.objects.get(users__username = 
'testuser'))", "def test_if_app_can_search_for_existing_list_without_products(self):\n add_list=self.client.post('/shoppinglists/', \n data=self.shopllist,\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n searchforlists=self.client.get('/search/?q=shoes',\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertEqual(searchforlists.status_code,200) \n self.assertIn(\"No list found\",str(searchforlists.data))", "def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)", "def test_creating_supply_user(self):\n request = self.factory.post(\n '/api/supplies/', {'name': '3d printer 2', 'state': 'good state', 'description': 'prints 3d objects'})\n force_authenticate(request, user=self.testuser1)\n response = SupplyListView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n try:\n Supply.objects.get(name='3d printer')\n self.fail()\n except Supply.DoesNotExist:\n pass", "def test_create_product_as_customer_fails(self):\n customer = get_user_model().objects.create_user(\n '[email protected]',\n 'Customer',\n 'user123'\n )\n self.client.force_authenticate(customer)\n res = self.client.post(PRODUCTS_URL, PRODUCT_PAYLOAD)\n\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def test_shoplists_dictionary(self):\n new_shoplist = self.app\n self.assertEqual(len(new_shoplist.shoplists), 0)\n new_shoplist.create_shoplist()\n self.assertIsInstance(new_shoplist, Shoppinglist)\n self.assertEqual(len(new_shoplist.shoplists), 1)", "def test_shoplist_id(self):\n new_shoplist = self.app\n self.assertTrue(new_shoplist.shop_id, 0)\n new_shoplist.create_shoplist()\n self.assertTrue(new_shoplist.shop_id, 1)\n for key in new_shoplist.shoplists:\n self.assertEqual(new_shoplist.shop_id, key)", "def test_user_id_in_shoplist(self):\n new_shoplist = self.app\n new_shoplist.create_shoplist()\n for value in Shoppinglist.shoplists.values():\n for key in User.users:\n self.assertEqual(value['user_id']+1, key)\n new_shoplist.create_shoplist()\n for value in Shoppinglist.shoplists.values():\n for key in User.users:\n self.assertEqual(value['user_id']+1, key)", "def test_create(self):\n pass", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_create_shopping_cart(self):\n client = APIClient()\n # First create a user\n Customer.objects.create_user(name=\"kevin\", email=\"[email protected]\", password=\"secret_pass\",\n shipping_region_id=1)\n\n # Then force login with that user\n url = reverse('login')\n data = {'email': \"[email protected]\", 'password': \"secret_pass\"}\n response = client.post(url, data, format='json')\n access_token = response.data['access']\n\n # Then add products to the shopping cart\n url = reverse('shopping_cart_add_product')\n data = {'cart_id': \"\", 'product_id': 1, 'attributes': \"Blue, XL\"}\n 
client.credentials(HTTP_AUTHORIZATION='Bearer ' + access_token)\n response = client.post(url, data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[0]['item_id'], 1)\n self.assertEqual(ShoppingCart.objects.count(), 1)", "def test_08_create_user_not_exists(self):\n\n _, user = self.get_random_item(models.User)\n utils.create_user(user, session=self.session)\n success, error = utils.create_user(user, session=self.session)\n self.assertFalse(success)\n self.assertTrue(error)", "def test_createUser_single(self):\n #TODO: this and other tests", "def test_items_create_empty_user(patch_mongo):\n item = {\n \"content\": \"lorem ipsum\",\n \"priority\": \"high\",\n \"status\": \"backlog\",\n \"users\": [],\n }\n\n response = client.post(\"/item\", json=item)\n assert response.status_code == status.HTTP_400_BAD_REQUEST", "def test_cannot_make_sale_with_missing_fields(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'One of the fields is empty!')\n self.assertEqual(resp.status_code, 400)", "def create_item(self, user: User, **kwargs) -> None:", "def test_add_user(self):\n pass", "def test_shoppingcart_create(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the shoppingcart\n data = self.shoppingcart_data\n data[\"customer_id\"] = id\n data[\"product_id\"] = id\n self._create_model(\"shoppingcart\", data, [ \"quantity\", \"discount_value\", \"is_closed\" ])\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id)" ]
[ "0.79067975", "0.7724857", "0.74877584", "0.74201185", "0.7388282", "0.73585427", "0.7208618", "0.7151508", "0.70583916", "0.7045453", "0.6953688", "0.68764377", "0.6793433", "0.6745461", "0.6735887", "0.67269695", "0.66694874", "0.6653867", "0.66537887", "0.6624037", "0.6624037", "0.6624037", "0.66155535", "0.65512323", "0.6549287", "0.65478766", "0.65456784", "0.65368503", "0.6498335", "0.64973503" ]
0.81097955
0
Test Shoppinglist creation is successful
def test_successful_shoplist_creation(self):
    result = self.app.create_shoplist()
    expected = {5: {'user_id': 0, 'name': 'apples', 'description': 'Fresh Green Apples'}}
    self.assertEqual(expected, result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_shoplist(self):\n new_shoplist = self.app\n new_shoplist.create_shoplist()\n self.assertEqual(len(new_shoplist.shoplists), 1)", "def test_shoppinglist_creation(self):\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n res = self.app.post(\n '/shoppinglist', data={'list-name': 'Easter'})\n self.assertEqual(res.status_code, 200)\n response = self.shopping_class_obj.create_list(\n 'Easter', '[email protected]')\n self.assertIsInstance(response, list)\n self.assertIn(\"Easter\", str(res.data))", "def test_app_can_add_list(self):\n add_list=self.client.post('/addshoppinglists/?user='+self.user['user'], \n data=self.shopllist, \n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertEqual(add_list.status_code,200)", "def test_shoppingitems_creation(self):\n # register and login a user\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n # create a shopping list\n self.shopping_class_obj.create_list(\n 'Easter', '[email protected]')\n # make a post request to add an item\n res = self.app.post(\n '/shoppingitems/Easter', data={'item-name': 'Bread'})\n self.assertEqual(res.status_code, 200)\n response = self.item_class_obj.add_item(\n 'Easter', 'Bread', '[email protected]')\n self.assertIsInstance(response, list)\n # check if item was successfully created\n self.assertIn(\"Bread\", str(res.data))", "def test_create_new_shopping_list(create_shopping_list):\n shopping_list = create_shopping_list\n assert shopping_list.items.values_list().count() == 0\n assert shopping_list.budget == 0", "def test_shoplists_dictionary(self):\n new_shoplist = self.app\n self.assertEqual(len(new_shoplist.shoplists), 0)\n new_shoplist.create_shoplist()\n self.assertIsInstance(new_shoplist, Shoppinglist)\n self.assertEqual(len(new_shoplist.shoplists), 1)", "def test_shoppinglist_creation_with_error(self):\n res = self.app.post(\n '/shoppinglist', data={'name': 'Easter!'})\n self.assertEqual(res.status_code, 200)\n response = self.shopping_class_obj.create_list(\n 'Easter!', '[email protected]')\n self.assertIn(\"No special characters\", response)", "def test_shoplist_id(self):\n new_shoplist = self.app\n self.assertTrue(new_shoplist.shop_id, 0)\n new_shoplist.create_shoplist()\n self.assertTrue(new_shoplist.shop_id, 1)\n for key in new_shoplist.shoplists:\n self.assertEqual(new_shoplist.shop_id, key)", "def test_create_order_list(self):\n self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)", "def test_if_app_can_search_for_existing_lists_with_products(self):\n product_to_add = {'product':'nikes', 'Quantity':3, 'Amountspent':5000}\n jsonproduct_to_add = json.dumps(product_to_add)\n add_list = self.client.post('/shoppinglists/',\n data = self.shopllist, \n headers = {\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n add_product=self.client.post('/shoppinglist/shoes/items/',\n data=jsonproduct_to_add,\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n searchforlists=self.client.get('/search/?q=shoes',\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n searchforproducts=self.client.get('/searchProduct/?q=nike',\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertIn(\"Success\",str(searchforlists.data))\n self.assertIn(\"Success\",str(searchforproducts.data))\n self.assertEqual(searchforproducts.status_code,200)\n 
self.assertEqual(searchforlists.status_code,200)", "def test_create_product_success(self):\n res = self.client.post(PRODUCTS_URL, PRODUCT_PAYLOAD)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n self.assertEqual(res.data['supplier_id'], self.user.id)\n self.assertEqual(res.data['name'], PRODUCT_PAYLOAD['name'])\n self.assertEqual(res.data['price'], PRODUCT_PAYLOAD['price'])", "def test_add_product(self):\n view = ProductCreateListView.as_view({'post': 'create'})\n uri = reverse('products:create/list-products')\n data = {\n \"name\": \"Iphone 7\",\n \"description\": \"Mobile phone\",\n \"price\": 200,\n \"is_available\": True\n }\n request = self.factory.post(uri, data, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request)\n self.assertEqual(response.status_code, 201,\n f'Expected Response Code 201, received {response.status_code} instead.')", "def test_adding_item_to_list(create_shopping_item, create_shopping_list):\n shopping_list = create_shopping_list\n items_before = shopping_list.items.values_list().count()\n new_item = create_shopping_item\n shopping_list.items.add(new_item)\n items_after = shopping_list.items.values_list().count()\n assert items_after > items_before\n assert items_before == 0\n assert items_after == 1", "def test_shoppingcart_create(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the shoppingcart\n data = self.shoppingcart_data\n data[\"customer_id\"] = id\n data[\"product_id\"] = id\n self._create_model(\"shoppingcart\", data, [ \"quantity\", \"discount_value\", \"is_closed\" ])\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id)", "def test_create(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.post(\n '/api/products/', data=payload, content_type='application/json', **headers)\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response['Content-Type'], 'application/json')\n self.assertEqual(Product.objects.count(), 3)\n\n product = Product.objects.get(name='New product')\n self.assertEqual(product.name, 'New product')\n self.assertEqual(product.category, self.category_1)\n self.assertEqual(product.sku, '11111111')\n self.assertEqual(product.description, 'New product description')\n self.assertEqual(float(product.price), 39.99)", "def test_shoppingitems_creation_with_error(self):\n # register and login a user\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n # create a shopping list\n self.shopping_class_obj.create_list(\n 'Easter', '[email protected]')\n # make a post request to add an item\n res = self.app.post(\n '/shoppingitems/Easter', data={'item-name': 'Bread-'})\n self.assertEqual(res.status_code, 200)\n response = self.item_class_obj.add_item(\n 'Easter', 'Bread-', '[email protected]')\n # test response from shoppingitems class\n self.assertIn(\"No special characters\", response)\n # check if item was successfully created\n self.assertIn(\"No special 
characters\", str(res.data))", "def test_create(self):\n pass", "def test_if_app_can_search_for_existing_list_without_products(self):\n add_list=self.client.post('/shoppinglists/', \n data=self.shopllist,\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n searchforlists=self.client.get('/search/?q=shoes',\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertEqual(searchforlists.status_code,200) \n self.assertIn(\"No list found\",str(searchforlists.data))", "def test_add_new_product(self):\n response=self.add_new_product()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(response.status_code, 201, result['New Product'])", "def test_successful_creation_of_service(self):\n create_store = self.client.post(create_store_url, data=json.dumps(self.shop_zero), headers=self.my_header)\n store_id = json.loads(create_store.data)\n store_id = json.loads(store_id['store_id'])\n store_id = store_id['$oid']\n response2 = self.client.post(store_url + store_id + '/service/',\n data=json.dumps(self.service_zero),\n headers=self.my_header)\n self.assertEqual(response2.status, \"201 CREATED\")\n self.assertIn(\"Success. You have added a new Service Live at the yard to the store.\", str(response2.data))", "def test_create_product(self):\n url = reverse('products:list')\n data = {\n 'name': 'Banana',\n 'description': '''\n Bananas are one of the most widely consumed fruits in the\n world for good reason. Eating them could help lower blood\n pressure and reduce the risks of cancer and asthma.\n '''\n }\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(models.Product.objects.filter(name=data['name']).count(), 1)", "def test_wish_list(self):\n data = {\"name\": \"test list 1\"}\n response = self.client.post(\"/wish_list/\", data, format='json')\n self.assertEqual(response.status_code, 200)\n response = self.client.get(\"/wish_list/\")\n self.assertEqual(response.status_code, 200)\n # item = Item.objects.get(name=\"New Item\")\n # self.assertEqual(item.name(), \"New Item\")", "def test_perform_create(self):\n\n response = self.client.post(reverse('action-list'), data=self.data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(response.data['name'], self.data['name'])\n self.assertTrue(len(response.data['institution']), self.data['institution'])", "def test_form_submition_and_product_creation(user_company, client, authenticated_user):\n add_product_url = reverse('add-product')\n response = client.post(add_product_url, {\n 'name': 'Test_product_name',\n 'serial_number': 'XZ001', \n 'manufacturer': 'Test company',\n 'price_net': 415.26,\n 'description': fake.paragraph(),\n 'stock': 16\n })\n assert response.status_code == 302\n product = Product.objects.get(name='Test_product_name')\n assert response.url == reverse('product-detail',kwargs={'pk': product.pk}) \n assert product.user == authenticated_user\n assert product in Product.objects.all()", "def test_create_new_shopping_list_correct_user(create_user, create_shopping_list): # noqa\n shopping_list = create_shopping_list\n owner = create_user\n assert shopping_list.owner == owner", "def testcreatelist(self):\n rv = self.app.get('/createcategory')\n self.assertEqual(rv.status_code, 302, \"createlist page should not load unless signed in\")", "def test_create_shoplist_without_user_fails(self):\n User.users = {}\n result = self.app.create_shoplist()\n 
expected = {1: {'user_id': 1, 'name': 'Apple', 'description': 'Fresh Green Apples'}}\n self.assertNotEqual(expected, result)", "def test_create_shopping_cart(self):\n client = APIClient()\n # First create a user\n Customer.objects.create_user(name=\"kevin\", email=\"[email protected]\", password=\"secret_pass\",\n shipping_region_id=1)\n\n # Then force login with that user\n url = reverse('login')\n data = {'email': \"[email protected]\", 'password': \"secret_pass\"}\n response = client.post(url, data, format='json')\n access_token = response.data['access']\n\n # Then add products to the shopping cart\n url = reverse('shopping_cart_add_product')\n data = {'cart_id': \"\", 'product_id': 1, 'attributes': \"Blue, XL\"}\n client.credentials(HTTP_AUTHORIZATION='Bearer ' + access_token)\n response = client.post(url, data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[0]['item_id'], 1)\n self.assertEqual(ShoppingCart.objects.count(), 1)", "def test_add_bucketlist_items(self):\r\n email = \"[email protected]\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n response = self.add_bucketlist_item(email, _pword, bucketlist.id, \"bucketlist item name\")\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '201 CREATED')\r\n self.assertEqual(result['message'], 'Bucket list item added')\r\n new_item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n self.assertLess(item_no, new_item_no)", "def test_creating_shopping_item(create_shopping_item, create_user):\n owner = create_user\n shopping_item = create_shopping_item\n assert shopping_item.owner == owner" ]
[ "0.81821024", "0.7965354", "0.784286", "0.77807367", "0.7667245", "0.74447703", "0.73277265", "0.73256046", "0.7202339", "0.7178579", "0.7118226", "0.7112633", "0.70901626", "0.7072848", "0.7063823", "0.7033362", "0.69682866", "0.6960603", "0.6932682", "0.69283473", "0.6846909", "0.6846613", "0.6827464", "0.6801517", "0.6731905", "0.6730138", "0.6728181", "0.66881037", "0.66824454", "0.66765213" ]
0.85448605
0
Test activity's dict is empty at first
def test_activity_dictionary(self):
    new_activity = self.app
    self.assertEqual(len(new_activity.activities), 0)
    new_activity.create_activity(1)
    self.assertIsInstance(new_activity, Activity)
    self.assertEqual(len(new_activity.activities), 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_builddict_empty(self):\r\n # We don't care _that_ much that execution be silent. Nice if at least\r\n # one test executes the task and doesn't explode, tho.\r\n self.assertEqual('', self.execute_task())", "def init(self, activity, session):\n return {}", "def test_cache_null():\n cache = CacheDict()\n assert cache.__len__() == 0", "def test_empty_feed_to_dict(self):\n expected = dict(feed=[])\n feed = Feed()\n assert feed.to_dict() == expected", "def test_get_activity(self):\n pass", "def test_get_activity(self):\n pass", "def test_no_extra_fields():\n t_task = Task()\n t_dict = t_task._asdict()\n assert len(t_dict) <= 4", "def test_empty_optionals(self):\n data = self.valid_payload\n data[\"telephone\"] = \"\"\n data[\"cellphone\"] = \"\"\n data[\"activity_description\"] = \"\"\n data[\"about\"] = \"\"\n data[\"institute\"] = \"\"\n response = self.client.post(\n reverse('contacts'),\n data=json.dumps(data),\n content_type='application/json'\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def test_key_no_data(self):\n key = Key({})\n\n assert key.warning is None\n assert key.in_car is None", "def test_empty_optionals(self):\n data = self.valid_payload\n # data[\"telephone\"] = \"\"\n # data[\"cellphone\"] = \"\"\n data[\"activity_description\"] = \"\"\n # data[\"about\"] = \"\"\n response = self.client.post(\n reverse('contacts'),\n data=json.dumps(data),\n content_type='application/json'\n )\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def test_empty_dict(self):\n read_on_template = field_template_read({})\n self.assertFalse(read_on_template)\n self.assertEqual(read_on_template, {})", "def test_no_optionals(self):\n data = self.valid_payload\n del data[\"telephone\"]\n del data[\"cellphone\"]\n del data[\"activity_description\"]\n del data[\"about\"]\n del data[\"institute\"]\n response = self.client.post(\n reverse('contacts'),\n data=json.dumps(data),\n content_type='application/json'\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def test_lights_no_data(self):\n lights = Lights({})\n\n assert lights.warning is None\n assert isinstance(lights.front, Light)\n assert isinstance(lights.back, Light)\n assert isinstance(lights.hazard, Light)\n\n dictionary = lights.as_dict()\n\n assert isinstance(dictionary, dict)\n assert dictionary == {\n \"warning\": None,\n \"front\": {\"warning\": None, \"off\": None},\n \"back\": {\"warning\": None, \"off\": None},\n \"hazard\": {\"warning\": None, \"off\": None},\n }", "def test_activity_id(self):\n new_activity = self.app\n self.assertTrue(Activity.activity_id, 0)\n new_activity.create_activity(1)\n self.assertTrue(new_activity.activity_id, 1)\n for key in new_activity.activities:\n self.assertEqual(new_activity.activity_id, key)", "def test_empty_dict():\n\n @type_checked\n def _run_test(thing:{}):\n assert isinstance(thing, dict)\n\n _run_test({\"foo\": \"bar\"})", "def test_issue_74():\n patient = Patient(active=True, address=[])\n assert \"address\" not in patient.dict()\n assert patient.dict(exclude_none=False)[\"address\"] == []", "def empty(self):\n return False if self.items else True", "def empty(self):\n return False if self.items else True", "def empty(self):\n return False if self.items else True", "def test_activity_attr(self):\n user = User()\n user_details = {\"student_id\": user.id, \"first_name\": \"Joe\"}\n student = Student(**user_details)\n self.assertTrue(hasattr(student, \"activity\"))\n if models.storage_t == 'db':\n 
self.assertEqual(student.activity, None)\n else:\n self.assertEqual(student.activity, \"\")", "def test_successful_empty(self):\n\n url = '/%s/jobs/%i/' % (self.api, self.job.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(result['max_tries'], 3)\n self.assertEqual(result['job_type']['name'], self.job.job_type.name)\n self.assertEqual(result['job_type_rev']['job_type']['name'], self.job.job_type.name)\n\n if self.recipe:\n self.assertEqual(result['recipe']['recipe_type']['name'], self.recipe.recipe_type.name)\n else:\n self.assertEqual(len(result['recipe']), 0)", "def test_no_optionals(self):\n data = self.valid_payload\n # del data[\"telephone\"]\n # del data[\"cellphone\"]\n del data[\"activity_description\"]\n # del data[\"about\"]\n response = self.client.post(\n reverse('contacts'),\n data=json.dumps(data),\n content_type='application/json'\n )\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def test_successful_activity_creation(self):\n result = self.app.create_activity(1)\n expected = {4: {'shoplist_id': 1, 'title': 'apples', 'description': 'Fresh Green Apples', 'status': True}}\n self.assertEqual(expected, result)", "def test_set_empty_1(self):\n data_dict = {\"type\":\"add\",\"cluster\":None}\n tickets.set_empty(data_dict)\n with self.subTest():\n self.assertEqual(data_dict[\"type\"], \"add\")\n with self.subTest():\n self.assertEqual(data_dict[\"cluster\"], \"\")", "def test_create_activity_default_fields(self):\n from .mockers import user_status as activity\n username = 'messi'\n self.create_user(username)\n res = self.testapp.post('/people/%s/activities' % username, json.dumps(activity), oauth2Header(username), status=201)\n self.assertIn('replies', res.json)\n self.assertIn('generator', res.json)\n self.assertIn('objectType', res.json)\n self.assertEqual(res.json['objectType'], 'activity')", "def test_get_activities(self):\n pass", "def test_from_empty_dict(self):\n from sosbeacon.event.message import Message\n\n self.assertRaisesRegexp(\n Exception, 'key is required', Message.from_dict, {})", "def test_no_events(self):\n result = self.client.get(BASE_URL, **headers)\n expected_result = {\n 'count': 0,\n 'next': None,\n 'previous': None,\n 'results': [],\n }\n self.assertDictEqual(result.data, expected_result)", "def test_metrics_empty(self):\n skill_map = SkillMap.load(self.course)\n sm_metrics = SkillMapMetrics(skill_map)\n self.assertEqual(sm_metrics.simple_cycles(), [])\n self.assertEqual(sm_metrics.singletons(), [])\n self.assertEqual(sm_metrics.long_chains(), [])\n expected = {'cycles': [], 'singletons': [], 'long_chains': []}\n self.assertEqual(sm_metrics.diagnose(), expected)", "def test_ws_getItemInfosShowEmptyValues(self):\n self.changeUser('pmCreator1')\n item = self.create('MeetingItem')\n resp = self._getItemInfos(item.UID(), toBeDeserialized=False)\n # empty values are returned\n self.assertTrue('_decision' in resp._itemInfo[0].__dict__)\n self.assertEqual(resp._itemInfo[0]._decision, '')\n resp = self._getItemInfos(item.UID(), showEmptyValues=False, toBeDeserialized=False)\n # empty values are no more returned\n self.assertFalse('_decision' in resp._itemInfo[0].__dict__)\n self.assertEqual(\n len(resp._itemInfo[0].__dict__),\n len([k for k, v in resp._itemInfo[0].__dict__.items() if v]))" ]
[ "0.6620643", "0.63007736", "0.62150484", "0.6162532", "0.606681", "0.606681", "0.604985", "0.59537953", "0.5953121", "0.5932054", "0.58168036", "0.581649", "0.58036715", "0.5788242", "0.5766684", "0.5758893", "0.5755703", "0.5755703", "0.5755703", "0.5739308", "0.5731855", "0.5728935", "0.57267946", "0.57217354", "0.5713528", "0.57093066", "0.56927496", "0.56853473", "0.566422", "0.5658131" ]
0.67997086
0
Test activity_id starts from one and increments by one
def test_activity_id(self):
    new_activity = self.app
    self.assertTrue(Activity.activity_id, 0)
    new_activity.create_activity(1)
    self.assertTrue(new_activity.activity_id, 1)
    for key in new_activity.activities:
        self.assertEqual(new_activity.activity_id, key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_crm_activity_next_action(self):\n # Add the next activity (like we set it from a form view)\n lead_model_id = self.env['ir.model']._get('crm.lead').id\n activity = self.env['mail.activity'].with_user(self.user_sales_manager).create({\n 'activity_type_id': self.activity_type_1.id,\n 'summary': 'My Own Summary',\n 'res_id': self.lead_1.id,\n 'res_model_id': lead_model_id,\n })\n activity._onchange_activity_type_id()\n\n # Check the next activity is correct\n self.assertEqual(self.lead_1.activity_summary, activity.summary)\n self.assertEqual(self.lead_1.activity_type_id, activity.activity_type_id)\n # self.assertEqual(fields.Datetime.from_string(self.lead.activity_date_deadline), datetime.now() + timedelta(days=activity.activity_type_id.days))\n\n activity.write({\n 'activity_type_id': self.activity_type_2.id,\n 'summary': '',\n 'note': 'Content of the activity to log',\n })\n activity._onchange_activity_type_id()\n\n self.assertEqual(self.lead_1.activity_summary, activity.activity_type_id.summary)\n self.assertEqual(self.lead_1.activity_type_id, activity.activity_type_id)\n # self.assertEqual(fields.Datetime.from_string(self.lead.activity_date_deadline), datetime.now() + timedelta(days=activity.activity_type_id.days))\n\n activity.action_done()\n\n # Check the next activity on the lead has been removed\n self.assertFalse(self.lead_1.activity_type_id)", "def test_create_activity_occurrence(self):\n pass", "def test_login_10(self):\n player = self._get_player()\n for i in range(14):\n timestamp = datetime.now() + timedelta(days=-i)\n Activity.objects.create(timestamp=timestamp, user_from=player, action='seen', public=False)\n\n self.assertEqual(consecutive_days_seen(player, datetime.now()), 14)", "def test_create_activity_check_not_duplicate_activity(self):\n from .mockers import user_status as activity\n username = 'messi'\n self.create_user(username)\n self.testapp.post('/people/%s/activities' % username, json.dumps(activity), oauth2Header(test_manager), status=201)\n self.testapp.post('/people/%s/activities' % username, json.dumps(activity), oauth2Header(test_manager), status=200)", "def increment_counter(self) -> None:", "def id(self):\n _id = super(ScheduleVisit, self).id\n return _id + 1", "def test_create_activity(self):\n pass", "def test_create_activity(self):\n contact = Contact.objects.first()\n sales_cycle = contact.sales_cycles.first()\n\n data = {\n \"owner\": self.user.id,\n \"sales_cycle_id\": sales_cycle.id,\n \"description\": \"test text\",\n }\n\n url, parsed = self.prepare_urls('v1:activity-list', subdomain=self.company.subdomain)\n \n response = self.client.post(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.post(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n content = json.loads(response.content)\n self.assertTrue(content.has_key('owner'))\n self.assertNotEqual(content['owner'], None)\n self.assertTrue(content.has_key('company_id'))\n self.assertNotEqual(content['company_id'], None)\n\n url, parsed = self.prepare_urls('v1:activity-list', subdomain=self.company.subdomain)\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n content = json.loads(response.content)\n self.assertEqual(self.activities_count+1, content['count']) # added 1 activity", "def test_get_activities(self):\n pass", "def update_next_id(cls):\n cls.next_id += 1", "def 
testspecincrement(self):\n global idct\n a = Base()\n idct += 1\n self.assertEqual(a.id, idct)\n b = Base(19)\n self.assertEqual(b.id, 19)\n c = Base()\n idct += 1\n self.assertEqual(c.id, idct)\n d = Base()\n idct += 1\n self.assertEqual(d.id, idct)", "def next_id(self):\n self.id_counter += 1\n return self.id_counter - 1", "def test_update_activity_occurrence_status(self):\n pass", "def test_get_activity(self):\n pass", "def test_get_activity(self):\n pass", "def test_update_activity(self):\n pass", "def _get_next_event_id():\n VenueCrawler._event_id += 1\n return VenueCrawler._event_id", "def test_get_next_sequence():\n app = create_app()\n seq = utils.get_next_sequence(\"test_name123\")\n unique_string = uuid.uuid4().hex[:6].upper()\n\n #Check if the next sequence a valid sequence. \n\n # It must start with number 0.\n assert utils.get_next_sequence(unique_string) == 0\n\n # It must return allways next sequence.\n assert utils.get_next_sequence(\"test_name123\") == seq + 1", "def test_login_10_less(self):\n\n player = self._get_player()\n for i in range(20):\n timestamp = datetime.now() - timedelta(hours=i*7)\n Activity.objects.create(timestamp=timestamp, user_from=player, action='seen', public=False)\n self.assertLess(consecutive_days_seen(player, datetime.now()), 14)", "def test_create_activity_as_context_check_not_duplicated_activity(self):\n from .mockers import user_status_as_context\n from .mockers import create_context\n from hashlib import sha1\n self.create_context(create_context)\n url_hash = sha1(create_context['url']).hexdigest()\n self.testapp.post('/contexts/%s/activities' % url_hash, json.dumps(user_status_as_context), oauth2Header(test_manager), status=201)\n self.testapp.post('/contexts/%s/activities' % url_hash, json.dumps(user_status_as_context), oauth2Header(test_manager), status=200)", "def testExerciseModActivity(self):\n attr = self.session.create_visit_attr()\n\n # mod_activity_days\n self.util.intTypeTest(self, attr, \"mod_activity_days\")\n\n self.util.intPropertyTest(self, attr, \"mod_activity_days\")\n\n # mod_activity_hours\n self.util.intTypeTest(self, attr, \"mod_activity_hours\")\n\n self.util.intPropertyTest(self, attr, \"mod_activity_hours\")\n\n # mod_activity_minutes\n self.util.intTypeTest(self, attr, \"mod_activity_minutes\")\n\n self.util.intPropertyTest(self, attr, \"mod_activity_minutes\")", "def test_api_get_activity_by_id(self):\n # create a bucket\n res = self.register_login_get_token()\n self.assertEqual(res.status_code, 201)\n\n # create a activity\n res = self.client().post('/bucketlist/1/activities',\n headers=dict(\n Authorization=\"Bearer \" + self.access_token),\n data=self.activity)\n self.assertEqual(res.status_code, 201)\n # get activity created\n activity_created = json.loads(res.data.decode())\n # get activity by its ID\n res = self.client().get('/bucketlist/1/activities/{}'.format(activity_created['id']),\n headers=dict(\n Authorization=\"Bearer \" + self.access_token))\n self.assertEqual(res.status_code, 200)\n self.assertIn('Shop in', str(res.data))", "def test_list_activity_occurrences(self):\n pass", "def test_start_ids_1(self):\n\t\t\n\t\tdetails = self.watcher.describe()\n\t\tprint(details, len(details))\n\t\tactual_num_layers = len(details)\n\t\texpected_num_layers = 11\n\t\texpected_ids = details.layer_id.to_numpy().tolist()\n\t\texpected_ids = [x+1 for x in expected_ids]\n\n\t\tself.assertEqual(actual_num_layers, expected_num_layers)\n\t\tself.assertEqual(len(expected_ids), expected_num_layers)\n\n\n\t\t# test 
decribe\n\t\tdetails = self.watcher.describe(start_ids=1)\n\t\tprint(details)\n\t\tactual_ids = details.layer_id.to_numpy().tolist()\n\t\tself.assertEqual(actual_ids,expected_ids)\n\n\t\t# test analyze: very slow\n\t\t# details = self.watcher.analyze(start_ids=1)\n\t\t# actual_ids = details.layer_id.to_numpy().tolist()\n\t\t# self.assertEqual(actual_ids,expected_ids)\n\n\t\tparams = DEFAULT_PARAMS.copy()\n\t\tparams[START_IDS]=1\n\t\tparams[MIN_EVALS]=1 # there may be a side effect that resets this\n\t\t\n\t\t# test iterator\n\t\titerator = self.watcher.make_layer_iterator(model=self.model, params=params)\n\t\tnum = 0\n\t\tactual_ids = []\n\t\tfor ww_layer in iterator:\n\t\t\tself.assertGreater(ww_layer.layer_id,0)\n\t\t\tactual_ids.append(ww_layer.layer_id)\n\t\t\tnum += 1\n\t\t\tprint(num, ww_layer.layer_id)\n\t\tself.assertEqual(num,11)\n\t\tself.assertEqual(actual_ids,expected_ids)", "def test_login_with_multiple_seens(self):\n player = self._get_player()\n for i in range(100):\n timestamp = datetime.now() - timedelta(hours=i*16)\n Activity.objects.create(timestamp=timestamp, user_from=player, action='seen', public=False)\n self.assertGreaterEqual(consecutive_days_seen(player, datetime.now()), 14)", "def test_create_planned_activity(self):\n contact = Contact.objects.first()\n sales_cycle = contact.sales_cycles.first()\n\n data = {\n \"owner\": self.user.id,\n \"sales_cycle_id\": sales_cycle.id,\n \"description\": \"test text\",\n \"deadline\": timezone.now(),\n }\n\n url, parsed = self.prepare_urls('v1:activity-list', subdomain=self.company.subdomain)\n \n response = self.client.post(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.post(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n content = json.loads(response.content)\n self.assertTrue(content.has_key('owner'))\n self.assertNotEqual(content['owner'], None)\n self.assertTrue(content.has_key('company_id'))\n self.assertNotEqual(content['company_id'], None)\n\n url, parsed = self.prepare_urls('v1:activity-list', subdomain=self.company.subdomain)\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n content = json.loads(response.content)\n self.assertEqual(self.activities_count+1, content['count']) # added 1 activity", "def test_set_activity_occurrence_results(self):\n pass", "def collection_activity(user_id, activity_id):\n\n result = db_session.query(Collection_Activity).filter(and_(\n Collection_Activity.activity_id == activity_id, Collection_Activity.user_id == user_id)).all()\n if len(result) > 0:\n return 'already'\n else:\n collection_activity = Collection_Activity(\n user_id=user_id, activity_id=activity_id, time=datetime.now())\n db_session.add(collection_activity)\n db_session.commit()\n return 'success'", "def test_saving_name_increments_base_ticketing_id(self):\n # Create an initial Name so we can be confident that\n # a BaseTicketing Object will exist\n Name.objects.create(name=\"Test Name\", name_type=Name.ORGANIZATION)\n first_id = BaseTicketing.objects.all().last().id\n Name.objects.create(name=\"Test Name\", name_type=Name.ORGANIZATION)\n second_id = BaseTicketing.objects.all().last().id\n assert second_id == first_id + 1", "def get_next_id():\n with open(WORK_LOG_FILENAME, 'r') as work_log:\n work_log_reader = csv.DictReader(work_log)\n entry_id = 0\n for entry in work_log_reader:\n if int(entry['id']) > entry_id:\n 
entry_id = int(entry['id'])\n entry_id += 1\n return entry_id" ]
[ "0.62740666", "0.609006", "0.59693253", "0.59664935", "0.5893623", "0.586038", "0.583556", "0.5795325", "0.5753249", "0.572466", "0.5705445", "0.57026875", "0.56695217", "0.56664497", "0.56664497", "0.56583816", "0.5605769", "0.5542561", "0.55349314", "0.5509826", "0.55011", "0.5498097", "0.54874355", "0.54815996", "0.54654944", "0.5461292", "0.5459448", "0.543649", "0.5419084", "0.5417692" ]
0.74851114
0
Generate a segmented array of variable-length, contiguous ranges between pairs of start and end points.
def gen_ranges(starts, ends):
    if starts.size != ends.size:
        raise ValueError("starts and ends must be same size")
    if not ((ends - starts) > 0).all():
        raise ValueError("all ends must be greater than starts")
    lengths = ends - starts
    segs = ak.cumsum(lengths) - lengths
    totlen = lengths.sum()
    slices = ak.ones(totlen, dtype=ak.int64)
    diffs = ak.concatenate((ak.array([starts[0]]), starts[1:] - starts[:-1] - lengths[:-1] + 1))
    slices[segs] = diffs
    return segs, ak.cumsum(slices)
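A minimal usage sketch for the arkouda snippet above, assuming a running arkouda server reachable via ak.connect(); the start/end values are illustrative only.

import arkouda as ak

ak.connect()  # assumes an arkouda server is already running

starts = ak.array([0, 10, 20])
ends = ak.array([3, 12, 25])
segs, ranges = gen_ranges(starts, ends)
# segs   -> [0, 3, 5]   (offset of each sub-range within the flat output)
# ranges -> [0, 1, 2, 10, 11, 20, 21, 22, 23, 24]

The trick is that each segment start gets the jump needed to reach the next range's first value, so a single cumulative sum over the ones array materializes all ranges at once.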
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sa_range(start: int, end: int) -> StaticArray:\n forward = True # Declares variable for direction\n # Sets the number of elements to create\n if end > start:\n length = abs((end - start) + 1)\n else:\n length = abs((start - end) + 1)\n forward = False\n arr = StaticArray(length) # Creates a length n array\n\n # Fills array with consecutive integers\n for index in range(length):\n arr.set(index, start)\n if forward:\n start += 1\n else:\n start -= 1\n\n return arr", "def rangeArray(first, last):\n \n return np.arange(first, last+1)", "def linspace(start, stop, n, istart=True, istop=True):\r\n n = n-1\r\n arr = [start + ((stop-start)/n) * i for i in range(n+1)]\r\n return arr", "def buildSegments(array, segLength: int):\n s = math.floor(len(array)/segLength)\n segments = []\n for i in range(s+1):\n segments.append(array[i*segLength:(i+1)*segLength])\n return np.array(segments)", "def concat_ranges_1d_nb(a, start_idxs, end_idxs):\n out = np.empty((end_idxs[0] - start_idxs[0], start_idxs.shape[0]), dtype=a.dtype)\n for idx in range(start_idxs.shape[0]):\n out[:, idx] = a[start_idxs[idx]:end_idxs[idx]]\n return out", "def make_segments(x, y):\n\n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n return segments", "def make_segments(x, y):\n\n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n return segments", "def make_segments(x, y):\n\n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n return segments", "def make_segments(x, y):\n\n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n return segments", "def vrange(starts, stops):\n stops = np.asarray(stops)\n l = stops - starts # Lengths of each range.\n return np.repeat(stops - l.cumsum(), l) + np.arange(l.sum()), l.cumsum()", "def segments(self):\n return (self._subset((i,i+1)) for i in range(len(self)-1))", "def _segment(data, segment_length=200,\n seq_length=None,\n stride=None,\n input_type='trials'):\n x_out = []\n if input_type == 'trials':\n seq_length = 1\n\n if not stride:\n stride = segment_length\n\n for jj, xx in enumerate(data):\n\n n_ch, n_t = xx.shape\n last_segment_start = n_t - segment_length\n\n starts = np.arange(0, last_segment_start+1, stride)\n\n segments = [xx[..., s:s+segment_length] for s in starts]\n\n if input_type == 'seq':\n if not seq_length:\n seq_length = len(segments)\n seq_bins = np.arange(seq_length, len(segments)+1, seq_length)\n segments = np.split(segments, seq_bins, axis=0)[:-1]\n x_new = np.array(segments)\n else:\n x_new = np.stack(segments, axis=0)\n# if not events:\n# x_new = np.expand_dims(x_new, 1)\n\n x_out.append(x_new)\n if len(x_out) > 1:\n X = np.concatenate(x_out)\n else:\n X = x_out[0]\n print(\"Segmented as: {}\".format(input_type), X.shape)\n return X", "def _segments2slices(array_size, grid_segments, patch_segments):\n patch_slices = [slice(start, stop) for start, stop in patch_segments]\n array_slices = []\n\n for start, stop in grid_segments:\n segment_size = max(abs(start), abs(stop))\n k = int(ceil(float(segment_size) / array_size) + (-1 if start >= 0 else 0))\n cell_mirrored = k % 2\n \n step = 1\n if start < 0:\n start = k * array_size + start\n stop = k * array_size + stop\n else:\n start = start - k * array_size\n stop = stop - k * array_size\n\n if cell_mirrored:\n start = array_size - start - 1\n stop = array_size - stop - 1\n step = 
-1\n\n if stop < 0:\n stop = None\n\n array_slices.append(slice(start, stop, step))\n \n return array_slices, patch_slices", "def buildIntervalSegs(array, interval: int):\n interSegs = []\n for i in range(interval):\n interSegs.append(array[i::interval])\n return np.array(interSegs)", "def split_range(r, n):\n \n step = int(r / n)\n segments = []\n for i in range(n):\n new_segment = [step * i, step * (i + 1)]\n segments.append(new_segment)\n # correct the gap in the missing index due to the truncated step\n segments[-1][-1] = r\n return segments", "def _gen_segments(message):\n max_size = constants.UDP_SAFE_SEGMENT_SIZE\n count = (len(message) + max_size - 1) // max_size\n segments = (\n (count - i - 1, message[i * max_size: (i + 1) * max_size])\n for i in range(count)\n )\n return segments", "def concat_ranges_nb(a, start_idxs, end_idxs):\n out = np.empty((end_idxs[0] - start_idxs[0], start_idxs.shape[0] * a.shape[1]), dtype=a.dtype)\n for col in range(a.shape[1]):\n out[:, col * start_idxs.shape[0]:(col + 1) * start_idxs.shape[0]] = \\\n concat_ranges_1d_nb(a[:, col], start_idxs, end_idxs)\n return out", "def open_range(start, stop, step):\n return np.arange(start, stop+step/2, step)", "def build_slices(start: Sequence[int], stop: Sequence[int] = None) -> Tuple[slice, ...]:\n if stop is not None:\n check_len(start, stop)\n return tuple(map(slice, start, stop))\n\n return tuple(map(slice, start))", "def _get_slice(segments, shape):\n\n if not (1 <= len(shape) <= 2):\n raise ValueError('Cannot segment array of shape: %s' % str(shape))\n else:\n size = shape[0]\n slice_length = np.ceil(float(size) / segments)\n start_idx = 0\n end_idx = slice_length\n while start_idx < size:\n if len(shape) == 1:\n yield slice(start_idx, end_idx)\n else:\n yield (slice(start_idx, end_idx), slice(None))\n start_idx = end_idx\n end_idx = min(start_idx + slice_length, size)", "def _get_sharded_ranges(\n begin,\n end,\n max_length,\n):\n if max_length <= 0:\n raise ValueError(\"max_length <= 0.\")\n length = end - begin\n if length <= max_length:\n return [(begin, end)]\n pivot = begin + length // 2\n return (_get_sharded_ranges(begin, pivot, max_length) +\n _get_sharded_ranges(pivot, end, max_length))", "def make_segments(date_start=None, date_end=None, date_freq=None,\n n_series=3, n_segments=2, seg_sep=None,\n means=None, stds=None, trends=None,\n amplitudes=None, phases=None):\n assert isinstance(n_series, int)\n assert isinstance(n_segments, int) and n_segments >= 1\n # TODO\n pass", "def segment_array(a):\n\n l = [array(a.typecode) for chaff in range(16)]\n index = 0\n\n for i in range(0, len(a), 16):\n l[index].extend(a[i:i + 16])\n index = (index + 1) % 16\n\n return l", "def startEndPoints(start, end, num):\n ll = np.linspace(0,1,num)\n xxs = start[0]*(1-ll)+end[0]*ll\n tts = start[1]*(1-ll)+end[1]*ll\n return( np.array([xxs, tts]) )", "def segments(self):\n L = len(self.vertices)\n return itertools.chain((self._subset((i,i+1)) for i in range(len(self)-1)),\n (self._subset((L-1,0)),))", "def range_maker(low, hi, step, lst=None):\n return numpy.arange(low, hi, step)", "def arange(\n start,\n /,\n stop=None,\n step=1,\n *,\n dtype=None,\n device=None,\n usm_type=\"device\",\n sycl_queue=None,\n):\n dpu.validate_usm_type(usm_type, allow_none=False)\n sycl_queue_normalized = dpnp.get_normalized_queue_device(\n sycl_queue=sycl_queue, device=device\n )\n\n array_obj = dpt.arange(\n start,\n stop=stop,\n step=step,\n dtype=dtype,\n usm_type=usm_type,\n sycl_queue=sycl_queue_normalized,\n )\n\n return 
dpnp_array(array_obj.shape, buffer=array_obj)", "def arange(start=0, stop=None, step=None):\n raise NotImplementedError", "def segment_outer_range(segment_lengths, out_idx=tf.int32):\n max_length = tf.reduce_max(segment_lengths)\n tiled_range = tf.tile(tf.expand_dims(tf.range(tf.size(segment_lengths, out_type=out_idx)), 1), [1, max_length])\n return tf.boolean_mask(\n tiled_range, tf.sequence_mask(segment_lengths, max_length))", "def rangeLin(min, max, n):\n\n return np.arange( min, max, (max-min)/n )" ]
[ "0.7105989", "0.67178863", "0.6715174", "0.66361505", "0.6512398", "0.6505254", "0.6505254", "0.6505254", "0.6505254", "0.6452789", "0.63758105", "0.6338202", "0.6336955", "0.63071066", "0.6295646", "0.6254592", "0.6251713", "0.6232607", "0.6185326", "0.61489207", "0.61197853", "0.6099788", "0.60915685", "0.60672015", "0.6012338", "0.59994495", "0.5980601", "0.5972628", "0.5972498", "0.5972372" ]
0.6791115
1
Replace characters that are ok for the filesystem but have special meaning in the shell. It is assumed file_path is already passed in double quotes.
def sanitize_file_path_for_shell(file_path):
    file_path_sanitized = file_path.replace('\\', '\\\\')
    file_path_sanitized = file_path_sanitized.replace('$', '\\$')
    file_path_sanitized = file_path_sanitized.replace('"', '\\"')
    file_path_sanitized = file_path_sanitized.replace('`', '\\`')
    return file_path_sanitized
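A small usage sketch of the function above; the path value is made up, and the sanitized result is meant to be interpolated inside double quotes, as the docstring assumes.

unsafe_path = 'data/$HOME/`id`.txt'
command = 'ls "{}"'.format(sanitize_file_path_for_shell(unsafe_path))
print(command)  # ls "data/\$HOME/\`id\`.txt"

Backslashes are escaped first so that the escapes added for $, ", and ` are not themselves doubled.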
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _escape_path(path):\n path = path.strip()\n return '\"{0}\"'.format(path) if _platform_windows else path.replace(\" \", \"\\ \")", "def sanitize_file_path(file_path, replacement_text=\"\"):\n\n return __RE_INVALID_PATH.sub(replacement_text, file_path.strip())", "def _escape_filename(self, filename):\n return filename.replace('\\\\', '\\\\\\\\').replace(' ', '\\\\ ')", "def escape_path(path):\n if ' ' in path and not (path.startswith('\"') and path.endswith('\"')):\n return '\"' + path + '\"'\n else:\n return path", "def escape_filename(fn):\n return ''.join(filter(valid_chars.__contains__, fn))", "def _safe(text):\n return text.replace(\"'\", \"''\").replace(\"\\\\\", \"\\\\\\\\\")", "def quote_path(path):\n return '\"' + re.sub(r'([\\\\$\"[])', r\"\\\\\\1\", path) + '\"'", "def _escape_filename(filename):\n #Is adding the following helpful\n #if os.path.isfile(filename):\n # #On Windows, if the file exists, we can ask for\n # #its alternative short name (DOS style 8.3 format)\n # #which has no spaces in it. Note that this name\n # #is not portable between machines, or even folder!\n # try:\n # import win32api\n # short = win32api.GetShortPathName(filename)\n # assert os.path.isfile(short)\n # return short\n # except ImportError:\n # pass\n if \" \" not in filename:\n return filename\n #We'll just quote it - works on Windows, Mac OS X etc\n if filename.startswith('\"') and filename.endswith('\"'):\n #Its already quoted\n return filename\n else:\n return '\"%s\"' % filename", "def wipe_bad_chars(filename):\n return multi_replace(filename, {'(': '', ' ': '_', ')': '', '/': '_'})", "def escapeForPath(s):\n return s.replace(os.sep, \"\")", "def processFilename(filename):\n\n badchars = [\" \", \",\", \"+\", \"$\", \"_\", \"{\", \"}\", \"/\", \"&\"]\n fn = filename\n for bc in badchars:\n fn = fn.replace(bc, \"\")\n return fn", "def remove_quotes(fname_string):\n return fname_string.replace('\"', '').replace(\"'\", '')", "def cleanse_filename(fname):\n fname = os.path.split(fname)[1]\n INVALID = u\"\\\"*/:<>?\\\\|\"\n VALID_RANGE = range(128)\n result = []\n for c in fname:\n val = ord(c)\n if not c in INVALID and val in VALID_RANGE:\n result.append(c)\n else:\n result.append(u\"_\")\n result = u\"\".join(result)\n return result.replace(u\" \", u\"_\")", "def cleanFilename(filename):\n badChars = {ord('?'): None, ord('*'): None, ord('/'): None,\n ord('\\\\'): None, ord(':'): None, ord('\"'): \"''\",\n ord('<'): None, ord('>'): None, ord('|'): None}\n return filename.translate(badChars)", "def _strip_quotes(file_arg):\n return re.sub(\"^[\\'\\\"]|[\\'\\\"]$\", \"\", file_arg)", "def sanitize_filename(filename, replacement_text=\"\"):\n\n return __RE_INVALID_FILENAME.sub(replacement_text, filename.strip())", "def sanitize_filename(file_path: str) -> str:\n file_name = file_path.lower().replace(\" \", \"_\").replace(\".\", \"_\")\n file_name = \"\".join(\n [\n i if i in (string.ascii_letters + string.digits + \"_\") else \"\"\n for i in file_name\n ]\n )\n return file_name", "def sanitize_filename(f):\n keepchars = (\" \", \".\", \"_\")\n return \"\".join(c for c in f if c.isalnum() or c in keepchars).rstrip()", "def clean_slashes(path):\n return path.strip(\"/\")", "def clean(path):\n if os.sep == \"\\\\\":\n return path.replace(os.sep, \"\\\\\\\\\")\n return path", "def clean(line):\n line = line.strip('\\n').strip()\n line = line.replace('\\xe2\\x80\\x93', '-')\n line = line.replace('\\xe2\\x80\\x99', '\\'')\n\n return line", "def fix(text):\n\n text = text.replace(\"\\\\\", 
\"\\\\\\\\\")\n text = text.replace(\"{\", \"\\\\{\").replace(\"}\", \"\\\\}\")\n text = _nonAsciiPattern.sub(_replace, text)\n return text", "def escape_filename(input: str) -> str:\n\n output = re.sub(r\"[^\\w\\-_\\.]\", \"_\", input)\n return output", "def sanitize_filename(filename):\n sanitized_filename = re.sub(r'[/\\\\:*?\"<>|]', '-', filename)\n sanitized_filename = sanitized_filename.replace('&', 'and')\n sanitized_filename = sanitized_filename.replace('\"', '')\n sanitized_filename = sanitized_filename.replace(\"'\", '')\n sanitized_filename = sanitized_filename.replace(\"/\", '')\n sanitized_filename = sanitized_filename.replace(\"\\\\\", '')\n\n # Annoying.\n if sanitized_filename[0] == '.':\n sanitized_filename = u'dot' + sanitized_filename[1:]\n\n return sanitized_filename", "def shellquote(s):\n return '\"' + s.replace(\"'\", \"'\\\\''\") + '\"'", "def _EscapeFilename(self, filename):\r\n if \"@\" in filename and not filename.endswith(\"@\"):\r\n filename = \"%s@\" % filename\r\n return filename", "def escape(orig):\n return '\"{}\"'.format(orig.replace('\\\\', '\\\\\\\\').replace('\"', '\\\\\"'))", "def _quote(self, arg):\n arg = arg.replace('\\\\', '\\\\\\\\')\n arg = arg.replace('\"', '\\\\\"')\n return '\"%s\"' % arg", "def test_escape_argument_path_with_space():\n encoded = win_functions.escape_argument(\"C:\\\\Some Path\\\\With Spaces\")\n assert encoded == '^\"C:\\\\Some Path\\\\With Spaces^\"'", "def replace_special(text):\r\n text = text.replace('\\r\\n', ' ')\r\n text = text.replace('\\n', ' ')\r\n text = text.replace('``', \"''\")\r\n text = text.replace('`', \"'\")\r\n text = text.replace('“', '\"')\r\n text = text.replace('”', '\"')\r\n text = text.replace('’', \"'\")\r\n text = text.replace('‘', \"'\")\r\n text = text.replace(\"'\", \"'\")\r\n text = text.replace('–', \"-\")\r\n text = text.replace('\\\"', '\"')\r\n text = text.replace(\"\\'\", \"'\")\r\n return text" ]
[ "0.71783984", "0.71233505", "0.7065954", "0.70276165", "0.6792996", "0.667642", "0.6659069", "0.65962875", "0.6565966", "0.65407276", "0.6515731", "0.649737", "0.647577", "0.64607286", "0.6425371", "0.6413269", "0.6343448", "0.62579924", "0.6134818", "0.61246663", "0.61156386", "0.6078656", "0.60750306", "0.6059993", "0.6027538", "0.60232466", "0.60154223", "0.5995328", "0.59790343", "0.59701896" ]
0.7965993
0
Returns all parent regions of this region.
def all_parents(self) -> Iterable[Region]: if self.parent is not None: yield self.parent yield from self.parent.all_parents()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def children(self) -> List[Region]:\n return []", "def getParents(self):\n return self.parents[:]", "def parent_ids(self):\n return self._parent_ids", "def region(self):\n return [node.region for node in self]", "def children(self) -> List[Region]:\n return self._children", "def parents(self):\n return self._parents", "def parents(self):\n p = self\n result = []\n while p:\n result.append(p)\n p = p.parent\n return result", "def regions(self):\n return self._regions", "def parents(self):\n\n return self._parents", "def get_parents(self):\n return NodeList(self._my_map['parentNodes'])", "def _get_parents(self):\n parents = []\n parent = self.parent\n while(parent):\n parents.append(parent)\n parent = parent.parent\n parents.reverse()\n return parents", "def get_parents(self):\r\n\r\n raise NotImplementedError()", "def parent_resources(cls):\n parent = cls.parent_resource\n parents = [parent]\n\n try:\n while True:\n parent = parent.parent_resource\n parents.append(parent)\n except AttributeError:\n pass\n\n parents.reverse()\n return parents", "def ancestors(self):\n try:\n return self._ancestors\n except AttributeError:\n self._ancestors = [self, ]\n if self.parent is not None:\n parent = self.parent\n while parent is not None:\n self._ancestors.append(parent)\n try:\n parent = parent.parent\n except NoResultFound:\n parent = None\n return self._ancestors", "def get_parent_resource_nodes(self):\n raise errors.Unimplemented()", "def ancestors(self):\n for a in self._related(set(), 'parents'):\n yield a", "def get_parents_list(self):\n return []", "def parents(self):\n if self._pedigree is None:\n raise Exception(\"Pedigree is not defined\")\n return [self._pedigree.individual(pid) for pid in self._parent_ids]", "def get_ancestors(self):\n if self._parent is None:\n return list()\n ancestors = self._parent.get_ancestors()\n ancestors.append(self._parent)\n return ancestors", "def get_all_parent_edges(self):\n all_parent_edges = set()\n for parent in self.parents:\n all_parent_edges.add((self.id, parent.id))\n all_parent_edges |= parent.get_all_parent_edges()\n return all_parent_edges", "def get_ancestors(self):\r\n if self.parent:\r\n return self.parent.get_ancestors()+[self]\r\n else:\r\n return [self]", "def get_regions(self):\n return self._regions", "def GetAncestors(self):\n if self.parent is not None:\n return self.parent.GetAncestors() + [self.parent]\n return []", "def ancestors(self, include_self=False):\n c = self.parent\n a = [] if c is None else c.ancestors(include_self=True)\n if include_self:\n a.append(self)\n return a", "def ancestors(self):\r\n ret = []\r\n workunit = self\r\n while workunit is not None:\r\n ret.append(workunit)\r\n workunit = workunit.parent\r\n return ret", "def ancestors(self):\n return self._ancestors", "def ancestors(self):\n return self._ancestors", "def hierarchy(self):\n return [self] + (self.parent.hierarchy if self.parent else [])", "def get_all_parents(self):\n all_parents = set()\n for parent in self.parents:\n all_parents.add(parent.id)\n all_parents |= parent.get_all_parents()\n return all_parents", "def get_parents(self, table_name):\n return self._parent_map[table_name]" ]
[ "0.7434008", "0.7339606", "0.7157085", "0.7153343", "0.7056081", "0.6984813", "0.6946143", "0.6928434", "0.6888119", "0.6858729", "0.6857094", "0.6851539", "0.68390924", "0.6822237", "0.682013", "0.67900497", "0.676651", "0.6735071", "0.6715634", "0.6683919", "0.668044", "0.6676444", "0.6616762", "0.6599128", "0.6581652", "0.65814495", "0.65814495", "0.6568463", "0.653471", "0.6532503" ]
0.8541655
0
Initialize the battery's attributes.
def __init__(self, battery_size=40): self.battery_size = battery_size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, battery_size=70):\r\n\t\tself.battery_size = battery_size", "def __init__(self, battery_size=70):\n self.battery_size = battery_size", "def __init__(self, battery_size=70):\n self.battery_size = battery_size", "def __init__(self, battery_size=70):\n self.battery_size = battery_size", "def __init__(self, battery_size=75):\n self.battery_size = battery_size", "def __init__(self, battery_size=75):\n self.battery_size = battery_size", "def __init__(self, battery_size= 75):\n self.battery_size = battery_size", "def set_battery(self, init_batt, min_batt, batt_rate):\n rospy.loginfo('%s is setting up battery requirements...' %\n self.namespace)\n self.INIT_VOLTAGE = init_batt\n self.MINIMUM_VOLTAGE = min_batt\n self.battery_voltages = [self.INIT_VOLTAGE for _ in range(20)]\n self.battery_rate_mean, self.battery_rate_std = batt_rate\n self.low_battery = False", "def __init__(self, battery_size=75): #Note that battery_size is optional parameter if no value is provided.\n self.battery_size = battery_size", "def __init__(self, battery_size=70):\n self.battery_size = battery_size\n self._range = 0", "def __init__(self,battery_size=85):\n self.battery_size = battery_size", "def __init__(self, manufacturer, model, year):\n super().__init__(manufacturer, model, year)\n self.battery = Battery()", "def __init__(self,make,model,year):\n super().__init__(make,model,year)\n self.battery = Battery()", "def __init__(self,make,model,year):\r\n\t\tsuper().__init__(make,model,year)\r\n\t\tself.battery = Battery()", "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n self.battery = Battery()", "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n self.battery = Battery()", "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n self.battery = Battery()", "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n self.battery = Battery()", "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n self.battery = Battery()", "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n self.battery = Battery()", "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n self.battery = Battery()", "def set_attributes(self):\n for i, battery in enumerate(sorted(self.batteries.values(),\n key=operator.attrgetter(\"weight\"))):\n setattr(battery, \"cap\", self.caps[self.big_iterations][i])\n if self.caps[self.big_iterations][i] is 450:\n cost = 900\n elif self.caps[self.big_iterations][i] is 900:\n cost = 1350\n else:\n cost = 1800\n setattr(battery, \"cost\", cost)\n battery.capacity = self.caps[self.big_iterations][i]", "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n # Attributes specific to electric cars.\n self.battery = Battery()", "def _initFields(self):\n\n self.windowType = \"backlight\"\n\n global BACKLIGHT_DIR, MAXIMUM_KEY\n\n self.brightness = 0\n self.max = int(open(BACKLIGHT_DIR + MAXIMUM_KEY).read())", "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n self.battery_size = 70", "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n self.battery_size = 70", "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n self.battery_size = 70", "def __init__(self,id,batteryCapacity,rechargeTime,range):\n \n self.id=id\n self.capacity = batteryCapacity # J \n self.chargeRate= batteryCapacity/rechargeTime # J/sec \n self.range = 
range # meters", "def __init__(self):\n self.swagger_types = {\n 'maximum_over_capacity': 'float',\n 'minimum_health_capacity': 'float'\n }\n\n self.attribute_map = {\n 'maximum_over_capacity': 'maximumOverCapacity',\n 'minimum_health_capacity': 'minimumHealthCapacity'\n }\n\n self._maximum_over_capacity = None\n self._minimum_health_capacity = None", "def __init__(self, make,model,year):\n super().__init__(make,model,year) # calling the constructor of parent class\n self.battery_size =80" ]
[ "0.7373256", "0.73601586", "0.73601586", "0.73601586", "0.734221", "0.734221", "0.7319005", "0.7316006", "0.72850525", "0.7227408", "0.72071785", "0.7196292", "0.6998248", "0.696277", "0.69499767", "0.6907041", "0.6907041", "0.6907041", "0.6907041", "0.6907041", "0.6907041", "0.6626512", "0.66143054", "0.6593376", "0.6580727", "0.6580727", "0.6580727", "0.6530372", "0.6444245", "0.63928854" ]
0.7383211
0
Function to generate ground truth labels as specified by int_limit.
def generate_true_labels(int_limit, n_obs): if int_limit > 0: if int_limit > n_obs: raise ValueError(f"""Invalid value of int_limit {int_limit}: greater than the number of sequences""") else: true_labels = [1 if idx <= int_limit else 0 for idx in range(n_obs)] else: # Allows test cases where all sequence pairs are non-interacting true_labels = [0 for item in range(n_obs)] return true_labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_cut_labels(var, bin_edges, bottom_inclusive=True):\n incl = '=' if bottom_inclusive else ''\n return ['{low:g} <{incl} {var} < {high:g}'.format(var=var, low=bin_low,\n high=bin_high, incl=incl)\n for (bin_low, bin_high) in bin_edges]", "def generate_labels(n_samples):\n return np.ones([n_samples, 1]), np.zeros([n_samples, 1])", "def make_fixed_labels(self):\n fixed_labels = []\n for dim in range(self.opt.c_dim):\n t = [0] * self.opt.c_dim\n t[dim] = 1\n t = torch.FloatTensor(t).expand([self.opt.batch_size, self.opt.c_dim])\n fixed_labels.append(t)\n return fixed_labels", "def gen_labels(self, nidxs=None, condense_labels=False):\n\n if nidxs is None:\n nidxs = self.nidx_train\n\n y = []\n\n for r in nidxs:\n y.append(self.node_labels[r])\n\n if condense_labels:\n # This should be improved, since this will fail if there are labels with exactly the same number of samples\n # Current solution use a bit of noise to minimize conflicts/favors\n y = self.encode_labels(y)\n lab_weights = 1. - np.mean(y, axis=0)\n noise = np.random.normal(loc=0, scale=0.0001, size=np.shape(y))\n y_condensed = np.argmax(minmax_scale(y * lab_weights + noise, axis=1), axis=1)\n return y_condensed\n\n return self.encode_labels(y)", "def createlabel(q, n):\n # When using dec2base function make sure to pad the string with the right number of zeros e.g for base 3 dec2base\n # gives 1 rather than 01 if we were dealing with 2 qubits.\n # The number of kraus matrices or labels is n^q\n\n label = []\n for i in range(pow(n, q)):\n label.append(dec2base(i, n))\n\n # Next we make sure that each element in the label list has length the number of qubits if not add a zero\n for x in range(len(label)):\n if len(label[x]) < q:\n label[x] = label[x].zfill(q)\n else:\n break\n return label", "def range_to_label(arange):\r\n # pass\r\n C = arange.size - 1\r\n label = np.ones((arange[-1], ), dtype=np.int)\r\n for i in xrange(1, C):\r\n label[arange[i]: arange[i+1]] *= (i+1)\r\n return label", "def generate_labels(pics):\r\n return []", "def generateLabelsTicks(posns):\n if len(posns) <= 10:\n return labelsTicks(posns, 1)\n elif len(posns) <= 50:\n return labelsTicks(posns, 5)\n else:\n return labelsTicks(posns, 10)", "def make_labels(self, ilines):\n\n llist = []\n for lind, lstr in enumerate(ilines):\n # get label and value list\n rv, label, vals = self.get_label_vals(lstr)\n if rv < 1: continue\n\n nvals = len(vals)\n\n # label = self.find_parent_label(label)\n\n if self.verb > 2: print('++ label: %s, %d val(s)' % (label, nvals))\n\n llist.append(label)\n self.maxcounts[label] = nvals\n self.subjcounts[label] = 0\n\n if not UTIL.vals_are_unique(llist):\n print('** warning: labels are not unique, will use only last values')\n llist = UTIL.get_unique_sublist(llist)\n\n return 0, llist", "def generate_labels_fri(train_i, test_i, labels):\n train = labels[train_i]\n test = labels[test_i]\n\n train_y = train == 1\n test_y = test == 1\n\n return train_y, test_y", "def create_label(image_name,number):\r\n\r\n target=[]\r\n for i in range(0,number):\r\n target.append(0)\r\n target[image_name]=1\r\n\r\n return target", "def generatetensorstring(n, *args):\n out = ''\n label = 0\n arg = array(args) - 1\n\n for i in range(0, n):\n if i in arg:\n label += 1\n out += str(label)\n else:\n out += '0'\n return out", "def cond_int2str(cond_int=0):\n try:\n return {\n 0: '晴',\n 1: '多云',\n 2: '阴',\n 3: '阵雨',\n 4: '雷阵雨',\n 5: '雷阵雨伴有冰雹',\n 6: '雨夹雪',\n 7: '小雨',\n 8: '中雨',\n 9: '大雨',\n 10: '暴雨',\n 11: '大暴雨',\n 12: '特大暴雨',\n 13: '阵雪',\n 14: 
'小雪',\n 15: '中雪',\n 16: '大雪',\n 17: '暴雪',\n 18: '雾',\n 19: '冻雨',\n 20: '沙尘暴',\n 21: '小到中雨',\n 22: '中到大雨',\n 23: '大到暴雨',\n 24: '暴雨到大暴雨',\n 25: '大暴雨到特大暴雨25',\n 26: '小到中雪',\n 27: '中到大雪',\n 28: '大到暴雪',\n 29: '浮尘',\n 30: '扬沙',\n 31: '强沙尘暴',\n 53: '霾',\n 99: '无'\n }[cond_int]\n except KeyError as e:\n logging.warning(e)\n return \"-\"", "def setIntegerLabels():\n dislin.intax()", "def category_string_of_label_int(self, label_integer):\n if len(self.ints_to_strings) < len(self.label_vocab):\n self.ints_to_strings = {index: label for (label, index) in self.label_vocab.items()}\n return self.ints_to_strings[int(label_integer)]", "def create_slice_labels(dataset, base_task_name, slice_name, verbose=False):\n # TODO: break this out into more modular pieces oncee we have multiple slices\n slice_fn = globals()[slice_name]\n slice_indicators = torch.tensor(\n [slice_fn(dataset, idx) for idx in range(len(dataset))], dtype=torch.uint8\n ).view(-1, 1)\n\n Y_base = dataset.labels[f\"{base_task_name}_gold\"]\n Y_slice = Y_base.clone().masked_fill_(slice_indicators == 0, 0)\n\n if verbose:\n if not any(Y_slice):\n warnings.warn(f\"No examples were found to belong to slice {slice_name}\")\n else:\n print(f\"Found {sum(slice_indicators)} examples in slice {slice_name}.\")\n\n # NOTE: we assume here that all slice labels are for sentence-level tasks only\n return Y_slice", "def int_to_text(self, labels):\n string = []\n for i in labels:\n string.append(self.index_map[i])\n return ''.join(string).replace('', ' ')", "def generate_labels_frii(train_i, test_i, labels):\n train = labels[train_i]\n test = labels[test_i]\n\n train_y = train == 2\n test_y = test == 2\n\n return train_y, test_y", "def get_predefined_labels(self):\n raise NotImplementedError", "def neural_net_label_input(self, n_classes):\n labels = tf.placeholder(tf.int32, [None, n_classes], 'y')\n return labels", "def preprocessing_labels1(y,c = 1.,m = 0.6, f = 0.2 ,dataset = 'mnist'):\n perm_mnist = [3,5,8,6,0,4,7,9,2,1]\n perm_fmnist = [0,2,6,3,4,5,7,9,1,8]\n perm = [0,1,2,3,4,5,6,7,8,9]\n perm_cifar10 = [0,8,1,9,2,6,3,5,4,7]\n n = y.shape[0]\n y_res1 = np.zeros((int(c*n),2))\n print(int(c*n))\n y_res3 = np.zeros((int(f*n),10))\n if dataset == 'cifar10':\n perm = perm_cifar10\n elif dataset == 'mnist':\n perm = perm_mnist\n elif dataset == 'fashion_mnist':\n perm = perm_fmnist\n if dataset == 'cifar10':\n y_res2= np.zeros((int(m*n),5))\n for i in range(n):\n if i< int(c*n):\n if np.argmax(y[i]) in [0,1,8,9]:\n y_res1[i,0] = 1\n else :\n y_res1[i,1] = 1\n if i<int(m*n):\n if np.argmax(y[i]) in [0,8]:\n y_res2[i,0] = 1\n elif np.argmax(y[i]) in [1,9]:\n y_res2[i,1] = 1\n elif np.argmax(y[i]) in [2,6]:\n y_res2[i,2] = 1\n elif np.argmax(y[i]) in [3,5]:\n y_res2[i,3] = 1\n elif np.argmax(y[i]) in [4,7]:\n y_res2[i,4] = 1\n if i<int(f*n):\n y_res3[i,np.argmax(y[i])] = 1\n return(y_res1,y_res2,y_res3)\n else :\n y_res2= np.zeros((int(m*n),4))\n for i in range(n):\n if i< int(c*n):\n if np.argmax(y[i]) in perm[0:5]:\n y_res1[i,0] = 1\n else :\n y_res1[i,1] = 1\n if i<int(m*n):\n if np.argmax(y[i]) in perm[0:3]:\n y_res2[i,0] = 1\n elif np.argmax(y[i]) in perm[3:5]:\n y_res2[i,1] = 1\n elif np.argmax(y[i]) in perm[5:8]:\n y_res2[i,2] = 1\n elif np.argmax(y[i]) in perm[8:]:\n y_res2[i,3] = 1\n if i<int(f*n):\n y_res3[i,np.argmax(y[i])] = 1\n return(y_res1,y_res2,y_res3)", "def _onehot(integer_labels):\r\n n_rows = len(integer_labels)\r\n n_cols = integer_labels.max() + 1\r\n onehot = np.zeros((n_rows, n_cols), dtype='uint8')\r\n onehot[np.arange(n_rows), 
integer_labels] = 1\r\n return onehot", "def label_encoder(\n text_length: int, entities: List[Entity], label_to_int: Dict[str, int],\n) -> List[int]:\n # note 0 means negative labels\n code = [0] * text_length\n\n for span in entities:\n label_name = span.entity_type\n try:\n label_code = label_to_int[label_name]\n except KeyError as err:\n raise Exception(f\"Missing label {str(err)} in 'label_to_int' mapping.\")\n\n if label_to_int[label_name] == 0:\n continue\n s = span.start\n e = span.end\n\n if e > text_length:\n raise ValueError(\n f\"Entity span index is out of range: text length is \"\n f\"{text_length} but got span index {e}.\"\n )\n code[s:e] = [label_code] * (e - s)\n\n return code", "def reformat_labels(label, bin_limits=[2]):\n# num_labels = y_batch.max() + 1\n label = np.array([label], dtype=np.float32)\n num_labels = 2\n label = np.digitize(label, bins=[2])\n label = (np.arange(num_labels) == label[:, None]).astype(np.float32)[0]\n return label", "def compute_labels(pos, neg):\n labels = np.zeros(len(pos) + len(neg), dtype=np.int8)\n labels[:len(pos)] = 1\n labels[len(pos):] = 0\n return labels", "def cvpr2018_labels():\n\n return {\n 0: 'others',\n 33: 'car',\n 34: 'motorcycle',\n 35: 'bicycle',\n 36: 'pedestrian',\n 38: 'truck',\n 39: 'bus',\n 40: 'tricycle'\n }", "def preprocess_labels(label, number_slices):\n labels = [[] for i in range(np.array(label).shape[0])]\n\n for j in range(np.array(label).shape[0]):\n if type(label) is not np.ndarray:\n for i in range(number_slices):\n labels[j].append(np.array(Image.open(label[0][i]), dtype=np.uint8))\n\n label = np.array(labels[0])\n label = label.transpose((1, 2, 0))\n max_mask = np.max(label) * 0.5\n label = np.greater(label, max_mask)\n label = np.expand_dims(label, axis=0)\n\n return label", "def create_TargetLabel(dataset):\n label_Array = dataset['close_-1_r'].shift(-1)\n label_Array = label_Array.apply(lambda x:1 if x>0.0000 else 0)\n return label_Array", "def encode_labels(labels, nclass=5):\n Y = np.zeros((len(labels), nclass)).astype('float32')\n for j, y in enumerate(labels):\n for i in range(nclass):\n if i+1 == np.floor(y) + 1:\n Y[j,i] = y - np.floor(y)\n if i+1 == np.floor(y):\n Y[j,i] = np.floor(y) - y + 1\n return Y", "def main():\n num_label = 1\n print('Printing ten random integers...')\n for i in range(NUM_RANDOM):\n print(str(num_label) + '):' + ' ' + str(random.randint(MIN_RANDOM, MAX_RANDOM)))\n num_label += 1" ]
[ "0.58891654", "0.58660847", "0.58303446", "0.5694453", "0.5692392", "0.5666728", "0.56513286", "0.5643508", "0.5582945", "0.54308045", "0.5425018", "0.5418894", "0.5386328", "0.5373158", "0.5323061", "0.53213376", "0.53181946", "0.53151214", "0.5308631", "0.52794737", "0.5237495", "0.5228083", "0.5225577", "0.52242506", "0.51969385", "0.5196484", "0.5173365", "0.5153748", "0.5151106", "0.5148759" ]
0.75520366
0
Create the bucket where all datasets will be stored
def create_bucket() -> None: try: client.make_bucket(DATASETS_BUCKET) except BucketAlreadyOwnedByYou: logger.debug(f"Not creating bucket {DATASETS_BUCKET}: Bucket already exists") pass else: logger.debug(f"Successfully created bucket {DATASETS_BUCKET}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_buckets():\n s3 = boto.connect_s3()\n s3.create_bucket('mls_data.mls.angerilli.ca')", "def create_and_fill_bucket(self):\n EmrProcessing.bucket = \\\n self.s3_handle.create_bucket(EmrProcessing.bucket_name)\n key = EmrProcessing.bucket.new_key('input/test.csv')\n input_file_path = '../data/test.csv'\n key.set_contents_from_filename(input_file_path)\n key.set_acl('public-read')\n\n key = EmrProcessing.bucket.new_key('mapper/mapper.py')\n input_file_path = '../src/mapper/mapper.py'\n key.set_contents_from_filename(input_file_path)\n key.set_acl('public-read')", "def create_buckets(self):\n\n # 1. Create bucket\n for name in [BUCKET_1_SRC, BUCKET_1_DST, BUCKET_2_SRC, BUCKET_2_DST, BUCKET_3_SRC, BUCKET_3_DST]:\n self.create_gcs_bucket(name)\n\n # 2. Prepare parents\n first_parent = f\"gs://{BUCKET_1_SRC}/parent-1.bin\"\n second_parent = f\"gs://{BUCKET_1_SRC}/parent-2.bin\"\n\n self.execute_with_ctx(\n [\n \"bash\",\n \"-c\",\n f\"cat /dev/urandom | head -c $((1 * 1024 * 1024)) | gsutil cp - {first_parent}\",\n ],\n key=GCP_GCS_KEY,\n )\n\n self.execute_with_ctx(\n [\n \"bash\",\n \"-c\",\n f\"cat /dev/urandom | head -c $((1 * 1024 * 1024)) | gsutil cp - {second_parent}\",\n ],\n key=GCP_GCS_KEY,\n )\n\n self.upload_to_gcs(first_parent, f\"gs://{BUCKET_1_SRC}/file.bin\")\n self.upload_to_gcs(first_parent, f\"gs://{BUCKET_1_SRC}/subdir/file.bin\")\n self.upload_to_gcs(first_parent, f\"gs://{BUCKET_2_SRC}/file.bin\")\n self.upload_to_gcs(first_parent, f\"gs://{BUCKET_2_SRC}/subdir/file.bin\")\n self.upload_to_gcs(second_parent, f\"gs://{BUCKET_2_DST}/file.bin\")\n self.upload_to_gcs(second_parent, f\"gs://{BUCKET_2_DST}/subdir/file.bin\")\n self.upload_to_gcs(second_parent, f\"gs://{BUCKET_3_DST}/file.bin\")\n self.upload_to_gcs(second_parent, f\"gs://{BUCKET_3_DST}/subdir/file.bin\")\n\n self.delete_gcs_bucket(first_parent)\n self.delete_gcs_bucket(second_parent)", "def create_bucket(self, name):\n return", "def create_infrastructure():\n\n create_bucket_if_not_exists(BUCKET)", "def test_create_bucket(self):\n pass", "def __init__(self):\n self.bucket = 1000\n self.bucketItem = 1000\n \n self.hashset = [None] * self.bucket", "def s3_create_bucket(self):\n self.conn.create_bucket(DEFAULT_BUCKET_NAME)", "def get_bucket():\n return FileBucket(os.path.join(context.site.data_path, 'buckets'))", "def create_bucket(self,bucket_name):\n \n bucket = self.storage_cl.create_bucket(bucket_name)\n print('Bucket {} created'.format(bucket.name))", "def __init__(self, bucket):\n self.bucket = bucket", "def __init__(self):\n self.size = 1000\n self.bucket = [None] * self.size", "def makeBucket():\n name = \"-\".join(parser_arguments().classes)\n name = name.lower()\n if(not minioClient.bucket_exists(name)):\n minioClient.make_bucket(name)\n print('The bucket {} have been created'.format(parser_arguments().classes))\n else:\n print('The bucket {} already exists.'.format(parser_arguments().classes))\n pass", "def __init__(self):\n self.bucket_length = 997\n self.bucket_array = [Bucket() for i in range(self.bucket_length)]", "def create_bucket(self, schema, dataset_id, operation=\"Replace\"):\n url = self.prism_endpoint + \"/wBuckets\"\n\n headers = {\n \"Authorization\": \"Bearer \" + self.bearer_token,\n \"Content-Type\": \"application/json\",\n }\n\n data = {\n \"name\": \"bucket_\" + str(random.randint(100000, 999999)),\n \"operation\": {\"id\": \"Operation_Type=\" + operation},\n \"targetDataset\": {\"id\": dataset_id},\n \"schema\": schema,\n }\n\n r = requests.post(url, headers=headers, 
data=json.dumps(data))\n\n if r.status_code == 201:\n logging.info(\"Successfully created a new wBucket\")\n return r.json()\n elif r.status_code == 400:\n logging.warning(r.json()[\"errors\"][0][\"error\"])\n else:\n logging.warning(\"HTTP Error {}\".format(r.status_code))", "def _create_keys(bucket_name, keys=[]):\n bucket = connection.create_bucket(bucket_name)\n\n for s in keys:\n key = bucket.new_key(s)\n key.set_contents_from_string(s)\n\n return bucket", "def get_bucketlist():\n pass", "def add_bucket(bucket_name):\n pass", "def single_bucket(empty_bucket): # pylint: disable=redefined-outer-name\n empty_bucket.insert(\"key 1\", \"value 1\")\n return empty_bucket", "def create_bucket(bucket_name):\r\n\r\n # initialize client & get bucket\r\n storage_client, bucket, _ = create_client(bucket_name)\r\n\r\n # set storage class, by default STANDARD\r\n bucket.storage_class = \"COLDLINE\"\r\n\r\n # create new bucket\r\n new_bucket = storage_client.create_bucket(bucket, location='us-central1')\r\n\r\n # print new bucket detail\r\n print(vars(bucket))\r\n\r\n return None", "def test_creating_a_bucket(self):\n with self.client:\n self.create_bucket(self.get_user_token())", "def blobs(self):\n if not self._blobs:\n workspace = self.attributes.workspace\n # Instantiates a google client, & get all blobs in bucket\n storage_client = storage.Client(project=self._user_project)\n bucket = storage_client.bucket(workspace['bucketName'], user_project=self._user_project)\n # get subset of data\n _blobs = {}\n try:\n for b in bucket.list_blobs(fields='items(size, etag, crc32c, name, timeCreated),nextPageToken'):\n name = f\"gs://{workspace['bucketName']}/{b.name}\"\n # cache.put(name, {'size': b.size, 'etag': b.etag, 'crc32c': b.crc32c, 'time_created': b.time_created, 'name': name})\n _blobs[name] = AttrDict({'size': b.size, 'etag': b.etag, 'crc32c': b.crc32c, 'time_created': b.time_created, 'name': name})\n self._blobs = _blobs\n except Exception as e:\n print(f\"{self.id} {workspace['bucketName']} {e}\")\n self._blobs = _blobs\n return self._blobs", "def multiple_bucket(single_bucket): # pylint: disable=redefined-outer-name\n single_bucket.insert(\"key 2\", \"value 2\")\n return single_bucket", "def MakeBucketsCommand(self, args, unused_sub_opts=None, headers=None,\n debug=0):\n for bucket_uri_str in args:\n bucket_uri = self.StorageUri(bucket_uri_str, debug=debug)\n print 'Creating %s...' 
% bucket_uri\n bucket_uri.create_bucket(headers)", "def create_bucket(bucket_name):\n storage_client = storage.Client()\n bucket = storage_client.create_bucket(bucket_name)\n print('Bucket {} created'.format(bucket.name))", "def __init__(self, buckets = 200):\n self.data = [None] * buckets\n self.slot = [None] * buckets\n self.size = buckets", "def empty_bucket(self):\n self.s3_handle = boto.connect_s3()\n EmrProcessing.bucket_name = self.generate_unique_name()\n EmrProcessing.bucket = \\\n self.s3_handle.create_bucket(EmrProcessing.bucket_name)\n EmrProcessing.bucket.delete_keys([key.name \\\n for key in EmrProcessing.bucket])", "def create_bucket(request: Dict) -> Dict:\n global config\n\n body = {\n \"user_name\": request.get(\"user_name\"),\n \"prefix\": request.get(\"bucket_name\")[0:5],\n \"bucket_name\": request.get(\"bucket_name\"),\n \"region\": request.get(\"region\")\n }\n\n response = requests.post(url=config.api_url('bucket'),\n data=json.dumps(body),\n headers={'content-type': 'application/json'})\n\n if response.status_code == HTTPStatus.OK:\n return response.json()", "def manipulate_bucketlist():\n pass", "def initialize(self):\n self.keys = [None] * BUCKET_SIZE\n self.values = [None] * BUCKET_SIZE" ]
[ "0.71161413", "0.6791009", "0.67162263", "0.6680957", "0.6674802", "0.6673578", "0.64413446", "0.63897455", "0.63669753", "0.63237906", "0.631078", "0.6286531", "0.62792015", "0.6254688", "0.62064546", "0.62060267", "0.61797833", "0.61677337", "0.61318856", "0.61285365", "0.61131907", "0.6074992", "0.60019785", "0.5988522", "0.59864753", "0.5985336", "0.59772885", "0.5969289", "0.5946221", "0.59389675" ]
0.692468
1
Fail if the two objects are unequal as determined by their difference rounded to the given number of decimal places (default 7) and comparing to zero, or by comparing that the difference between the two objects is more than the given delta. Note that decimal places (from zero) are usually not the same as significant digits (measured from the most significant digit). If the two objects compare equal then they will automatically compare almost equal.
def almost_equal(first, second, places=None, delta=0.1): if first == second: # shortcut return True if delta is not None and places is not None: raise TypeError("specify delta or places not both") if delta is not None: if abs(first - second) <= delta: return True else: if places is None: places = 2 if round(abs(second-first), places) == 0: return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def almost_equals(self, other, decimal=...): # -> bool:\n ...", "def assert_almost_equal(self, val1, val2, delta):\n return self.assertTrue(\n 0 <= abs(val1 - val2) <= delta,\n \"Absolute difference of {} and {} ({}) is not within {}\".format(\n val1,\n val2,\n abs(val1-val2),\n delta,\n ),\n )", "def almost_equal_values(x, y, precision):\n return round(x - y, precision) == 0", "def assertAlmostEqual(self, first, second, places=None, msg=None, delta=None):\r\n if first == second:\r\n # shortcut\r\n return\r\n if delta is not None and places is not None:\r\n raise TypeError(\"specify delta or places not both\")\r\n \r\n if delta is not None:\r\n if abs(first - second) <= delta:\r\n return\r\n \r\n standardMsg = '%s != %s within %s delta' % (safe_repr(first), \r\n safe_repr(second), \r\n safe_repr(delta))\r\n else:\r\n if places is None:\r\n places = 7\r\n \r\n if round(abs(second-first), places) == 0:\r\n return\r\n \r\n standardMsg = '%s != %s within %r places' % (safe_repr(first), \r\n safe_repr(second), \r\n places)\r\n msg = self._formatMessage(msg, standardMsg)\r\n raise self.failureException(msg)", "def assertNotAlmostEqual(self, first, second, places=None, msg=None, delta=None):\r\n if delta is not None and places is not None:\r\n raise TypeError(\"specify delta or places not both\")\r\n if delta is not None:\r\n if not (first == second) and abs(first - second) > delta:\r\n return\r\n standardMsg = '%s == %s within %s delta' % (safe_repr(first), \r\n safe_repr(second),\r\n safe_repr(delta))\r\n else:\r\n if places is None:\r\n places = 7\r\n if not (first == second) and round(abs(second-first), places) != 0:\r\n return\r\n standardMsg = '%s == %s within %r places' % (safe_repr(first), \r\n safe_repr(second),\r\n places)\r\n\r\n msg = self._formatMessage(msg, standardMsg)\r\n raise self.failureException(msg)", "def check_dict_almost_equal(dict_a: Dict[Any, float],\n dict_b: Dict[Any, float],\n decimal: int = 7) -> bool:\n if set(dict_a.keys()) != set(dict_b.keys()):\n return False\n for key in dict_a.keys():\n # Same test as np.testing.assert_almost_equal\n if abs(dict_a[key] - dict_b[key]) >= (1.5 * 10**(-decimal)):\n return False\n return True", "def test_delta_val2(self):\n d = Delta(\"+2.5-1.5\")\n self.assertEqual(d.cmp(0, 1), False)\n self.assertEqual(d.cmp(1, 3), False)\n self.assertEqual(d.cmp(3, 1), True)", "def nearly_equal(a, b, sig_fig=5):\n return a == b or int(a*10**sig_fig) == int(b*10**sig_fig)", "def assert_almost_equal(actual, desired, decimal=7):\n actual, desired = check_and_drop_units(actual, desired)\n numpy.testing.assert_almost_equal(actual, desired, decimal)", "def test_gt_2():\n a = FixedPoint(1, 'Q2.8')\n b = FixedPoint(1.1, 'Q2.8')\n assert b > a", "def assertDeepAlmostEqual(self, expected, actual, *args, **kwargs):\n kwargs.pop(\"__trace\", \"ROOT\")\n if (\n hasattr(expected, \"__geo_interface__\")\n and hasattr(actual, \"__geo_interface__\")\n and expected.__geo_interface__[\"type\"] == actual.__geo_interface__[\"type\"]\n and expected.__geo_interface__[\"type\"]\n not in [\"Feature\", \"FeatureCollection\"]\n ):\n shape_expected = shape(expected)\n shape_actual = shape(actual)\n assert shape_expected.equals(shape_actual)\n elif isinstance(expected, (int, float, complex)):\n self.assertAlmostEqual(expected, actual, *args, **kwargs)\n elif isinstance(expected, (list, tuple)):\n self.assertEqual(len(expected), len(actual))\n for index in range(len(expected)):\n v1, v2 = expected[index], actual[index]\n self.assertDeepAlmostEqual(v1, v2, 
__trace=repr(index), *args, **kwargs)\n elif isinstance(expected, dict):\n self.assertEqual(set(expected), set(actual))\n for key in expected:\n self.assertDeepAlmostEqual(\n expected[key], actual[key], __trace=repr(key), *args, **kwargs\n )\n else:\n self.assertEqual(expected, actual)", "def test_gt(self):\n f12: Fraction = Fraction(1, 2)\n f34: Fraction = Fraction(3, 4)\n f105: Fraction = Fraction(10, 5)\n self.assertTrue(f34 > f12)\n self.assertFalse(f12 > f105)\n self.assertFalse(f12 > f12)", "def almost_eq(e1,e2) :\n\treturn round(e1-e2,4) == 0.0", "def test_ge_2():\n a = FixedPoint(1, 'Q2.8')\n b = FixedPoint(1.1, 'Q2.8')\n assert b > a", "def test_delta_val6(self):\n d = Delta(\"+50-25=%\")\n self.assertEqual(d.cmp(0, 1), False)\n self.assertEqual(d.cmp(8, 4), True)\n self.assertEqual(d.cmp(8, 6), True)\n self.assertEqual(d.cmp(6, 8), False)\n self.assertEqual(d.cmp(6, 9), True)", "def test_GreaterThanorEqualto(self):\n self.assertTrue(Fraction(7,10)>=Fraction(7,10))", "def test_delta_val4(self):\n d = Delta(\"+-25%\")\n self.assertEqual(d.cmp(0, 1), False)\n self.assertEqual(d.cmp(8, 4), True)\n self.assertEqual(d.cmp(8, 6), False)", "def assert_almost_equal_verbose(actual, desired, verbose=False, **kwargs):\n err = np.abs(actual - desired).max()\n dec = -np.ceil(np.log10(err))\n\n if not (np.isfinite(dec)):\n dec = 18.0\n\n m = \"\\n>>>>>The actual precision is: \" + str(float(dec))\n\n if verbose:\n print(m)\n\n desired2 = np.broadcast_to(desired, actual.shape)\n np.testing.assert_almost_equal(actual, desired2, err_msg=m, **kwargs)\n pass", "def check_compare(change, reference_value):\n rounded_change = round(change, 2)\n compare_values(reference_value, rounded_change)", "def almost_equal(x, y):\n return abs(x-y) < FP_PREC", "def almost_equals(self, other):\n import math\n ox, oy = other\n dx = self[0] - ox\n dy = self[1] - oy\n return (dx*dx + dy*dy) < pygonal.EPSILON2", "def check_compare_decrease(out_fields):\n change = out_fields[CHANGE_FLD]\n expected_change = -25.00\n check_compare(change, expected_change)\n check_float_value(change, CHANGE_FLD)", "def is_almost_equal(self, other, places: int = 7) -> bool:\n if isinstance(other, numbers.Number):\n return not (self.num_variables or round(self.offset - other, places))\n\n def eq(a, b):\n return not round(a - b, places)\n\n try:\n if callable(other.vartype):\n vartype_eq = all(self.vartype(v) == other.vartype(v) for v in self.variables)\n else:\n vartype_eq = all(self.vartype(v) == other.vartype for v in self.variables)\n\n return (vartype_eq\n and self.shape == other.shape\n and eq(self.offset, other.offset)\n and all(eq(self.get_linear(v), other.get_linear(v))\n for v in self.variables)\n and all(eq(bias, other.get_quadratic(u, v))\n for u, v, bias in self.iter_quadratic())\n )\n except (AttributeError, ValueError):\n # it's not a BQM or variables/interactions don't match\n return False", "def test_numprops_different_pct(self):\n # Perform diff.\n minus, plus = 10, 20\n df = Differ(\n key=\"name\", deltas={\"energy\": Delta(\"+{}-{}=%\".format(plus, minus))}\n )\n d = df.diff(*self.engines)\n\n # Calculate expected results.\n def is_different(a, b):\n pct = 100.0 * (b - a) / a\n return pct <= -minus or pct >= plus\n\n changed = sum((int(is_different(e[0], e[1])) for e in self.energies))\n\n # Check results.\n if len(d[Differ.CHANGED]) != changed:\n result = d[Differ.CHANGED]\n msg = \"Values:\\n\"\n for i, e in enumerate(self.energies):\n if not is_different(*e):\n continue\n msg += \"{:d}) {:f} {:f}\\n\".format(i, e[0], 
e[1])\n msg += \"Result:\\n\"\n for i, r in enumerate(result):\n msg += \"{:d}) {} {}\\n\".format(i, r[\"old\"], r[\"new\"])\n self.assertEqual(len(d[Differ.CHANGED]), changed, msg=msg)", "def compare_float(geometry_x, geometry_y, geometry_z, precision_error):\n\n value_x = float(geometry_x)\n value_y = float(geometry_y)\n value_z = float(geometry_z)\n return abs((value_x - value_y)) <= precision_error and \\\n abs((value_x - value_z)) <= precision_error and \\\n abs((value_y - value_z)) <= precision_error", "def check_compare_grow(out_fields):\n change = out_fields[CHANGE_FLD]\n expected_change = 50.00\n check_compare(change, expected_change)\n check_float_value(change, CHANGE_FLD)", "def assertEqualEpsilon(self, first, second, msg=None):\n\n def epsilonCompare(value):\n return abs(value) <= epsilon\n\n comparison = map(epsilonCompare, (first - second))\n return self.assertTrue(all(comparison), msg)", "def assert_floats_are_equal(a, b, tol=1e-5):\r\n assert floats_are_equal(a, b, tol), (a,b)", "def __eq__(self, other):\n return abs(self - other) < 10e-10", "def compare_almost_equal(self, df1, df2, name):\n\n\t\tcomp_df = pd.DataFrame()\n\t\tcomp_df['left'] = df1[name].round(SIG_DIG)\n\t\tcomp_df['right'] = df2[name].round(SIG_DIG)\n\t\tcomp_df['diff'] = comp_df['left'] - comp_df['right']\n\t\tcomp_df['diff'] = comp_df['diff'].abs().round(SIG_DIG)\n\t\t# print(comp_df.query('diff > 0.0000'))\n\t\treturn comp_df.query('diff > .0001').empty" ]
[ "0.68236005", "0.6749057", "0.6739666", "0.670881", "0.6466249", "0.6378507", "0.6307229", "0.62476826", "0.62318397", "0.62042546", "0.6183346", "0.6175197", "0.61698675", "0.6129581", "0.61233366", "0.61050105", "0.60785776", "0.6067823", "0.60575235", "0.6055449", "0.6054629", "0.6049413", "0.60304177", "0.600526", "0.60029805", "0.59704006", "0.5963895", "0.592796", "0.59227276", "0.590295" ]
0.67849344
1
Rude check if uuid is in correct uuid1 format.
def validate_uuid(self, uuid): match = re.match( r'([a-z0-9]+)-([a-z0-9]+)-([a-z0-9]+)-([a-z0-9]+)-([a-z0-9]+)', uuid ) if match: return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_uuid(uuid):\n try:\n converted = UUID(uuid, version=4)\n except ValueError:\n return False\n\n return str(converted) == uuid", "def validate_uuid(uuid_string):\n try:\n UUID(uuid_string, version=4)\n return True\n except:\n return False", "def is_valid_uuid(uuid_to_test, version=4):\n\n try:\n uuid_obj = UUID(uuid_to_test, version=version)\n except ValueError:\n return False\n return str(uuid_obj) == uuid_to_test", "def validate_uuid(self, uuid_to_check):\r\n if re.fullmatch(BASE62_REGEX, uuid_to_check):\r\n return 20 <= len(uuid_to_check) <= 22\r\n else:\r\n return False", "def is_valid_uuid(uuid_to_test, version=4):\n\ttry:\n\t\tuuid_obj = UUID(uuid_to_test, version=version)\n\t\treturn True\n\texcept:\n\t\treturn False", "def check_int_uuid(uuid):\n try:\n converted = UUID(int=uuid, version=4)\n except ValueError:\n return False\n\n return converted.int == uuid", "def is_uuid_v4(uuid_or_name):\n # Based on https://gist.github.com/ShawnMilo/7777304\n try:\n uuid = UUID(uuid_or_name, version=4)\n except Exception:\n return False\n\n return uuid.hex == uuid_or_name.replace(\"-\", \"\")", "def is_uuid_like(val):\n try:\n return str(uuid.UUID(val)) == val\n except (TypeError, ValueError, AttributeError):\n return False", "def is_uuid_like(val):\n try:\n return str(uuid.UUID(val)) == val\n except (TypeError, ValueError, AttributeError):\n return False", "def validate_uuid(data):\n\n if not uuidutils.is_uuid_like(data):\n raise exceptions.DiagnoseException(\n \"'%s' is not a valid UUID\" % data)", "def _verify_uuid(given_uuid):\n\n\t\tif isinstance(given_uuid, str) or isinstance(given_uuid, unicode):\n\t\t\t# Verify the given string is well-formed\n\t\t\tuuid.UUID(given_uuid)\n\t\t\treturn given_uuid\n\n\t\tif isinstance(given_uuid, uuid.UUID):\n\t\t\treturn given_uuid.__str__()\n\n\t\traise ValueError(\"Given object is neither a string nor a UUID object.\")", "def is_uuid4(uuid_string):\n try:\n UUID(uuid_string, version=4)\n except (ValueError, TypeError):\n # If it's a value error, then the string\n # is not a valid hex code for a UUID.\n # None will raise TypeError.\n return False\n return True", "def is_uuid(value: str | UUID) -> bool:\n if isinstance(value, str):\n return _uuid.match(str(value)) and True or False\n\n return isinstance(value, UUID)", "def is_valid_uuid_string(uuid_str):\n return isinstance(uuid_str, str) and VALID_UUID_REGEX.match(uuid_str)", "def test_get_shortuuid_uuid(self):\n id = get_shortuuid()\n self.assertTrue(len(id) == 22)", "def test_good_uuid_lowercase():\n good_uuid_lowercase = \"7cfb2470-b600-4eb3-a2cd-c1439e45b91f\"\n m = CannedRe.UUID.match(good_uuid_lowercase)\n # print getmembers(m)\n assert m is not None, \"Canned RegEx uuid test failed for %s\" % good_uuid_lowercase\n assert m.string == good_uuid_lowercase", "def checkuuidsyntax(uuidtxt):\n score = 0\n if uuidtxt != None:\n if len(uuidtxt) < 10:\n score = 0\n elif uuidtxt.find(\"{\") > -1 or uuidtxt.find(\"}\") > -1 or uuidtxt.lower() != uuidtxt:\n score = 1\n else:\n score = 2\n return score", "def is_uuid(my_object):\n try:\n my_uuid = uuid.UUID(my_object, version=4)\n except ValueError:\n return False\n return str(my_uuid) == my_object", "def test_bad_uuid_lowercase():\n bad_uiid_lower = \"7cfb2470-b600-4eb3-a2cd-c1439e45b91g\"\n m = CannedRe.UUID.match(bad_uiid_lower)\n assert m is None, \"Canned RegEx uiid test succeeded for %s while it should not\" % bad_uiid_lower", "def from_uuid(self):\n reason = \"[!] 
UUID's are in the format 00000000-0000-0000-0000-000000000000\"\n ts_type = self.ts_types['uu']\n try:\n uuid_lower = self.uu.lower()\n UUID_REGEX = re.compile('[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}')\n if not bool(UUID_REGEX.match(uuid_lower)):\n self.in_uuid = indiv_output = combined_output = False\n pass\n else:\n u = uuid.UUID(uuid_lower)\n if u.version == 1:\n unix_ts = int((u.time / 10000) - 12219292800000)\n self.in_uuid = dt.utcfromtimestamp(float(unix_ts) /1000.0).strftime('%Y-%m-%d %H:%M:%S.%f')\n else:\n pass\n indiv_output = str(\"{} {}\".format(ts_type, self.in_uuid))\n combined_output = str(\"{}{}\\t\\t\\t{} UTC{}\".format(self.left_color, ts_type, self.in_uuid, self.right_color))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_uuid = indiv_output = combined_output = False\n return self.in_uuid, indiv_output, combined_output, reason", "def validate_uuid(value: Any, none_allowed: bool, display_name: str) -> None:\n if none_allowed and value is None:\n return\n\n if not isinstance(value, UUID) or value.version != 4:\n raise TypeError(f\"{display_name} must be a UUID version 4\")", "def test_good_uuid_uppercase():\n good_uuid_uppercase = \"7CFB2470-B600-4EB3-A2CD-C1439E45B91F\"\n m = CannedRe.UUID.match(good_uuid_uppercase)\n # print getmembers(m)\n assert m is not None, \"Canned RegEx UUID test failed for %s\" % good_uuid_uppercase\n assert m.string == good_uuid_uppercase", "def is_uuid(self) -> bool:\n ua = self.user_agent.strip('({})')\n if len(ua) >= 2 and ua[1] == ':':\n ua = self.user_agent[2:]\n\n return uuid_like_name(ua)", "def test_get_shortuuid_name(self):\n id1 = get_shortuuid(name='mytesturl.com')\n id2 = get_shortuuid(name='mytesturl.com')\n self.assertEqual(id1, id2)", "def test_bad_uuid_uppercase():\n bad_uiid_upper = \"7CFB2470-B600-4EB3-A2CD-C1439E45B91G\"\n m = CannedRe.UUID.match(bad_uiid_upper)\n assert m is None, \"Canned RegEx uiid test succeeded for %s while it should not\" % bad_uiid_upper", "def test_uuid():\n for _ in range(1000):\n uuid = uuid_generator()\n assert len(uuid) == 36\n assert uuid.count('-') == 4", "def test_invlalid_uuid_load():\n schema = UUIDSchema()\n result = schema.load({\n \"uuid_str\": INVALID_UUID_STR,\n \"uuid_uuid\": UUID(UUID_STR),\n })\n\n assert_that(result.errors[\"uuid_str\"], contains('Not a valid UUID.'))", "def test_get_shortuuid_specific_length(self):\n id = get_shortuuid(length=10)\n self.assertTrue(len(id) == 10)", "def test_uuid_adapter(self):\n with self.assertRaises(TypeError):\n adapter = UUIDAdapter('01234567-0123-0123-0123-0123456789ab')", "def test_hardware_uuid_type(self):\n \n boot_session_uuid = get_uuids()[2]\n \n # Check to make sure the returned value is a string\n self.assertEqual(type(boot_session_uuid), str)" ]
[ "0.7841857", "0.756394", "0.7472721", "0.739947", "0.73708713", "0.73455393", "0.7301181", "0.71633416", "0.71633416", "0.7073445", "0.7024794", "0.6966057", "0.69361615", "0.69247895", "0.6830562", "0.67718047", "0.6766432", "0.67584354", "0.65963656", "0.65020233", "0.6492053", "0.64873976", "0.6448111", "0.62902796", "0.623041", "0.62248313", "0.6218724", "0.6144012", "0.6068356", "0.59890896" ]
0.75939476
1
Checks if the CSV file contains all required columns.
def validate_column_names(self, cols): self.stdout.write('Verifying CSV header') csv_cols = set(cols) if self.required_csv_columns <= csv_cols: return True else: missing_cols = set(self.required_csv_columns).difference(csv_cols) raise ValidationError( "These columns '{0}' are required, but missing in the CSV " "file.".format( ', '.join(missing_cols) ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_csv(filename, header, cols, rows):\n\n # open file\n data = pd.read_csv(filename, delimiter='|')\n\n # validate header\n assert header == '|'.join(list(data.columns.values))\n\n # validate column count\n assert data.shape[1] == cols\n\n # validate row count\n assert data.shape[0] == rows\n\n # return (header_result == column_result == row_result) is True", "def cols_valid(self,\n df: pd.DataFrame,\n req_cols: set) -> bool:\n missing_cols = req_cols.difference(df.columns)\n\n if len(missing_cols) > 0:\n logging.error(f\"{missing_cols} columns required but missing\")\n return False\n\n return True", "def _check_missing_columns(self, df: pd.DataFrame) -> None:\n if any([c not in df.columns for c in REQUIRED_COLUMNS]):\n raise ValueError(\"Missing columns in dataset.\"\n f\"Columns: {df.columns}\"\n f\"Required: {REQUIRED_COLUMNS}\")", "def _check_columns(cdm_column_names, csv_columns, result):\n columns_valid = True\n\n # if len(csv_columns) != len(cdm_column_names):\n\n # check all column headers in the file\n for col in csv_columns:\n if col not in cdm_column_names:\n e = dict(message=MSG_INCORRECT_HEADER, column_name=col, actual=col)\n result['errors'].append(e)\n columns_valid = False\n\n # check cdm table headers against headers in file\n for col in cdm_column_names:\n if col not in csv_columns:\n e = dict(message=MSG_MISSING_HEADER, column_name=col, expected=col)\n result['errors'].append(e)\n columns_valid = False\n\n # check order of cdm table headers against headers in file\n for idx, col in enumerate(cdm_column_names):\n if idx < len(csv_columns) and csv_columns[idx] != col:\n e = dict(message=MSG_INCORRECT_ORDER,\n column_name=csv_columns[idx],\n actual=csv_columns[idx],\n expected=col)\n result['errors'].append(e)\n columns_valid = False\n break\n\n return columns_valid", "def valid(self):\r\n if self.file_exists and len(self.missing_columns) == 0 and len(self.veg_columns) > 0 and \\\r\n len(self.lat_errors) == 0 and len(self.lon_errors) == 0 and len(self.time_errors) == 0 and len(self.date_errors) == 0:\r\n return True\r\n else:\r\n return False", "def validate_columns(self, fieldnames, dao):\n unstored_columns = ['blank']\n expected_columns = dao.model_type.__table__.columns.keys() + unstored_columns\n for column_name in fieldnames:\n if column_name not in expected_columns:\n raise AttributeError(f\"{self.file_path}: {column_name} column mismatch for \"\n f\"expected file type: {self.file_type.name}\")", "def check_column_count(cls, line):\n\n # MAGIC n_cols = n_delim + 1 (no trailing delimiter)\n cols = line.count(cls.DELIMITER) + 1\n expected = 7 # MAGIC USAA convention, not all are populated though\n return cols == expected", "def test_missing_columns(self):\n file = SimpleUploadedFile(\n \"test.csv\",\n b\"msisdn,messaging consent,edd year,edd month,baby dob year,\"\n b\"baby dob month,baby dob day\\n\",\n )\n form = MomConnectImportForm(\n data={\"source\": \"MomConnect Import\"}, files={\"file\": file}\n )\n self.assertTrue(form.is_valid())\n instance = form.save()\n self.assertEqual(instance.status, MomConnectImport.Status.ERROR)\n [error] = instance.errors.all()\n self.assertEqual(\n error.error, \"Fields edd_day facility_code id_type not found in header\"\n )", "def are_there_available_columns_to_play(self):\n available_columns = self.get_available_columns()\n return self._state.n_neutral_markers != 3 and len(available_columns) > 0", "def verify_columns_in_dataset(self, columns):\n all_cols = self.train.columns\n for col in columns:\n if not col in 
all_cols:\n raise KeyError(\"column '%s' not in dataset\" % col)", "def check_valid_csvformat(self, csv_path):\n with open(self.csv_path, \"rb+\") as file_obj:\n reader = csv.reader(file_obj, delimiter=',') # CSV DictReader object\n self.check_valid_csv_header(reader.next())\n self.check_valid_csv_data(reader.next())", "def validate(self):\n super().validate()\n frame = getattr(self, 'frame', None)\n if frame is None:\n raise ValueError('Missing columns %s since no frame' % ', '.join(\n self.required_cols))\n cols = set(list(self.frame))\n missing = sorted(self.required_cols - cols)\n if missing:\n raise ValueError('Missing columns: [%s]' % ', '.join(missing))", "def verify_columns_in_dataset(self, columns):\n all_cols = self.dataset.columns\n for col in columns:\n if not col in all_cols:\n raise KeyError(\"column '%s' not in dataset\" % col)", "def check_schema_definition(schema_definition: pd.DataFrame) -> bool:\n\n if required_headers.issubset(set(list(schema_definition.columns))):\n return\n elif \"Requires\" in list(schema_definition.columns) or \"Requires Component\" in list(\n schema_definition.columns\n ):\n raise ValueError(\n \"The input CSV schema file contains the 'Requires' and/or the 'Requires \"\n \"Component' column headers. These columns were renamed to 'DependsOn' and \"\n \"'DependsOn Component', respectively. Switch to the new column names.\"\n )", "def verify(self):\n for col in self.columns:\n if col not in self.table_obj.columns.keys():\n raise Exception('{} column not found in {}'.format(\n col, self.table_obj))", "def check_column_values(self, values):\n none_keys = sorted(list(self._necessary_input_columns.intersection(set([elem for elem in self._columns if values[self.column_id[elem]] in [None, 'None']]))))\n if len(none_keys) > 0:\n raise Exception('missing_keys in ForcingOnMesh_DBManager add function parameter file_info:\\n%s\\n'%('\\n'.join([' - %s'%elem for elem in none_keys])))", "def validate_csv(filename: str) -> bool:\n # From: https://stackoverflow.com/questions/2984888/check-if-file-has-a-csv-format-with-python\n try:\n with open(filename, newline='') as csvfile:\n start = csvfile.read(4096)\n\n # isprintable does not allow newlines, printable does not allow umlauts...\n if not all([c in string.printable or c.isprintable() for c in start]):\n return False\n dialect = csv.Sniffer().sniff(start)\n return True\n except csv.Error:\n # Could not get a csv dialect -> probably not a csv.\n return False\n except UnicodeError:\n return False", "def has_headers(self):\n for column in self.columns:\n if column.header:\n return True\n return False", "def verify(self):\n for col in self._columns:\n if col not in self._table_obj.columns.keys():\n raise GaiaException('{} column not found in {}'.format(\n col, self._table_obj))", "def _check_headers(cursor, headers):\n all_columns = set(chain.from_iterable(_columns(cursor, table) for table in DATA_TABLES))\n for header in headers:\n if header not in all_columns:\n raise ValueError('column {} not recognized'.format(header))", "def test_column_presence(self):\n\n columns = [\"feature_is_filtered\", \"feature_biotype\"]\n\n for component_name in [\"var\", \"raw.var\"]:\n for column in columns:\n if column == \"feature_is_filtered\" and component_name == \"raw.var\":\n continue\n with self.subTest(component_name=component_name, column=column):\n\n # Resetting validator\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n\n component = Validator.getattr_anndata(\n self.validator.adata, 
component_name\n )\n component.drop(column, axis=1, inplace=True)\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: Dataframe '{component_name}' is missing \"\n f\"column '{column}'.\"\n ],\n )", "def _assert_columns_exist(self, columns):\n if not nonstringiter(columns):\n columns = (columns,)\n self_cols = self.columns()\n is_missing = lambda col: col not in self_cols\n missing = [c for c in columns if is_missing(c)]\n if missing:\n missing = ', '.join(repr(x) for x in missing)\n msg = '{0} not in {1}'.format(missing, self.__repr__())\n raise LookupError(msg)", "def areAllFieldsIncluded(ldata, columns):\n\treturn list(range(len(ldata))) == columns", "def all_columns(self):\r\n try:\r\n csv_file = open(self.file_path,'rbU')\r\n csv_rows = csv.DictReader(csv_file)\r\n _all_columns = csv_rows.fieldnames\r\n csv_file.close()\r\n return _all_columns\r\n except:\r\n return []", "def _check_columns_with_table(table: Table, columns: Sequence[str]) -> Optional[bool]:\n for column in columns:\n if column not in table.c.keys():\n raise TypeError(f\"Specified column {column} did not exist on table {table}\")\n return True", "def check_valid_csv_header(self, row):\n obj = re.match(re.compile('^Year\\,Month\\,.'),\n ','.join(row))\n if not obj:\n raise Exception(\"Invalid Headers must be `Year` `Month` Check Sample file\")", "def check_ingress_required_columns(self, col_names):\n if not set(col_names).issuperset(REQUIRED_COLUMNS):\n if not set(col_names).issuperset(REQUIRED_ALT_COLUMNS):\n missing_columns = [x for x in REQUIRED_ALT_COLUMNS if x not in col_names]\n return missing_columns\n return None", "def check_csv_headers(csvfile, headers):\n\n with open(csvfile, 'rb') as f:\n csv_header = f.readline()\n\n # Check the lower ones\n if headers[1][0] not in csv_header.lower():\n return False\n\n return True", "def test_wrong_number_of_columns(self):\n self.prepare()\n self.session.execute(\"\"\"\n CREATE TABLE testcolumns (\n a int PRIMARY KEY,\n b int\n )\"\"\")\n\n data = [[1, 2, 3]]\n tempfile = self.get_temp_file()\n write_rows_to_csv(tempfile.name, data)\n\n logger.debug('Importing from csv file: {name}'.format(name=tempfile.name))\n out, err, _ = self.run_cqlsh(\"COPY ks.testcolumns FROM '{name}'\".format(name=tempfile.name))\n\n assert not self.session.execute(\"SELECT * FROM testcolumns\")\n assert 'Failed to import' in err", "def check_cols(self):\n if self.ad_tab is not None and 'date' not in self.ad_cols:\n raise DataException(\"\"\"date column not found in adServer table.\"\"\")\n if self.ad_tab is not None and 'impressions' not in self.ad_cols:\n raise DataException(\"\"\"impressions column not found in adServer table.\"\"\")\n if 'timestamp' not in self.log_cols and 'date' not in self.log_cols:\n raise DataException(\"\"\"Both timestamp and date column missing from {t}\nCannot do dailyQA\"\"\".format(t=self.log_tab))\n if self.configs['hourshift'] != 0 or 'date' not in self.log_cols:\n if 'timestamp' not in self.log_cols:\n raise DataException(\"\"\"Time shift requested \\\nbut no timestamp column in {t}.\"\"\".format(t=self.log_tab))\n else:\n check_timestamp(self.configs['schema'], self.log_tab)" ]
[ "0.74219674", "0.71559626", "0.7064051", "0.7044676", "0.6933204", "0.6880624", "0.6842778", "0.67009544", "0.6593976", "0.6554927", "0.65337193", "0.64992315", "0.6493306", "0.64672685", "0.6462007", "0.64533734", "0.64333785", "0.6407367", "0.63730824", "0.63719225", "0.6365781", "0.6361085", "0.63599116", "0.6344191", "0.6339794", "0.63381624", "0.63355535", "0.63276404", "0.63036066", "0.6302704" ]
0.77545106
0
Concatenate observation data into one string.
def concatenate_observation_data( self, compartment, date, measurementmethod, orig_srid, origx, origy, parameter, property, quality, sampledevice, samplemethod, unit, value, ): # Convert to string before joining data = map(str, [ compartment, date, measurementmethod, orig_srid, origx, origy, parameter, property, quality, sampledevice, samplemethod, unit, value ]) return ''.join(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def concatenate_data():", "def __str__(self):\n return ', '.join(str(item) for item in self._data)", "def obs_to_string(observations):\n str_obs = []\n for obs in observations:\n str_obs.append(obs.reshape(-1).tostring())\n return str_obs", "def __str__(self):\n lst = [str(i) for i in self.data]\n if self.column:\n return '[' + ', '.join(lst) + ']\\''\n else:\n return '[' + ', '.join(lst) + ']'", "def __str__(self):\n rows = ['[' + ', '.join([str(i) for i in row]) + ']' for row in self.data]\n return '\\n'.join(rows)", "def __str__(self):\n return '\\n\\n'.join(str(item) for item in self._data)", "def dataAsString(self):\n\n # Force generation of .array\n d = self.asArray()\n slist = []\n for l in self.array:\n s = \"%s %s\" % (self.name, self.rowAsString(l))\n slist.append(s)\n return '\\n'.join(slist)", "def concatenate_series_to_str(row: Series) -> str:\n return \"__by__\".join([str(c) for c in row])", "def updateString(olddata,newdata,concater):\r\n\r\n if olddata==\"\":\r\n return str(newdata)\r\n else:\r\n return str(olddata + concater + newdata)", "def exportAsString(self):\n dataString = \"\"\n data = self.exportAsArray()\n content = \"\"\n for field in data:\n if isinstance(field, types.StringTypes):\n content = field\n else:\n content = '&&'.join(field)\n dataString = '|'.join([dataString, content])\n\n return dataString[1:] # Remove the first '|' character from the output.", "def __str__(self):\t\t\n\t\tcadena = []\n\t\tactual = self.prim\t\t\n\t\twhile actual:\n\t\t\tif type(actual.dato) == str:\n\t\t\t\tcadena.append(\"'\" + str(actual.dato) + \"'\")\n\t\t\telse:\t\n\t\t\t\tcadena.append(str(actual.dato))\n\t\t\tactual = actual.prox\n\t\treturn \"[\" + \", \".join(cadena) + \"]\"", "def __str__(self):\n str_list = ['[']\n for i in self.data_list:\n str_list.append(str(i))\n str_list.append(', ')\n str_list.pop() # remove trailing space\n str_list.append(\"]\")\n\n return ''.join(str_list)", "def get_log_string(self):\n\n\t\tresult = json.dumps(self.data, sort_keys=True)\n\n\t\tif self.intrusion is not None and self.intrusion != \"\":\n\t\t\tresult += \",{}\".format(self.intrusion)\n\n\t\treturn result", "def get_atril_string(self):\n return \", \".join(str(item) for item in self.atril)", "def concatenate(row, fields):\n print row\n str = None\n for field in fields:\n if str == None:\n str = row[field]\n else:\n str += ' ' + row[field]\n return str", "def __str__ (self):\n return \", \".join(str(row) for row in self.rows()).join(\"()\")", "def concatenate_processed_text(self):\n\n\n\t\tconcatenated_text = \"\"\n\t\tfor line in self.processed_text:\n\t\t\tconcatenated_text += \" \".join(line) + \" \"\n\n\n\t\t# Remove the trailing space character from the concatenated string\n\t\t# of words.\n\t\tconcatenated_text = concatenated_text[:-1]\n\n\t\tself.concatenated_text = concatenated_text", "def array_to_concatenated_string(array):\r\n return \",\".join(str(x) for x in array)", "def getString(self):\n return \"\".join(self.data)", "def str(self) -> str:\n return \"\".join(self)", "def _concatenate_instance(\n self,\n emotion: str,\n target_utterance: str,\n evidence_utterance: str,\n conversation_history: str,\n ) -> str:\n concatenated_text = (\n \" \"\n + emotion\n + \" <SEP> \"\n + target_utterance\n + \" <SEP> \"\n + evidence_utterance\n + \" <SEP> \"\n + conversation_history\n )\n\n return concatenated_text", "def concat(values, sep=', '):\n concat_str = None\n try:\n concat_str = sep.join([str(v) for v in values if not is_empty(v)])\n except Exception as e:\n pass\n 
return concat_str", "def array_to_concatenated_string(array):\n return \",\".join(str(x) for x in array)", "def __str__(self):\n list_string = \"\"\n for item in self._data:\n list_string += item + \", \"\n return list_string", "def f_val_to_str(self):\n\n resstrlist = []\n strlen = 0\n\n for key in self._data:\n val = self._data[key]\n resstr = \"%s=%s, \" % (key, repr(val))\n resstrlist.append(resstr)\n\n strlen += len(resstr)\n if strlen > pypetconstants.HDF5_STRCOL_MAX_VALUE_LENGTH:\n break\n\n return_string = \"\".join(resstrlist)\n if len(return_string) > pypetconstants.HDF5_STRCOL_MAX_VALUE_LENGTH:\n return_string = (\n return_string[0 : pypetconstants.HDF5_STRCOL_MAX_VALUE_LENGTH - 3]\n + \"...\"\n )\n else:\n return_string = return_string[0:-2] # Delete the last `, `\n\n return return_string", "def concat_obs_and_action(obs, action):\n return F.concat((obs, action), axis=-1)", "def concat_obs_and_action(obs, action):\n return F.concat((obs, action), axis=-1)", "def concatena(*args):\n linea = ''\n for l in args:\n linea += str(l if l else '')\n return linea", "def __str__(self):\n s =\"\"\n if self.data is 0:\n return \"[]\"\n else:\n for i in range(len(self.data)):\n s += str(self.data[i])\n if i != len(self.data)-1:\n s += \", \"\n return \"[\" + s + \"]\"", "def __str__(self):\n outbuffer = []\n outbuffer.append(\"%d keys in dataset\" % len(self.__quantile))\n outbuffer.append(self.head())\n outbuffer.append(\"...\")\n outbuffer.append(self.tail())\n return \"\\n\".join(outbuffer)" ]
[ "0.724462", "0.6176857", "0.6093485", "0.60458475", "0.5983507", "0.59311306", "0.59206545", "0.58791316", "0.5874102", "0.5709245", "0.57053846", "0.5704484", "0.5695554", "0.5685938", "0.5673281", "0.565494", "0.56044185", "0.55893093", "0.5557514", "0.55474746", "0.5525495", "0.551103", "0.5498077", "0.54717296", "0.5470549", "0.54683214", "0.54683214", "0.54615366", "0.5460523", "0.54571426" ]
0.78445834
0
Removes Observations or removes related Environments.
def remove_or_deref_observations(self, processing_job): cursor = connection.cursor() env_hash = self.make_env_hash(processing_job) # Update observations which have the same environment setup. # Removes these items from the array. self.stdout.write('Dereference environment {0} in observations.'.format(env_hash)) cursor.execute(""" UPDATE script_execution_manager_observation SET processing_environments=array_remove(processing_environments, '{env_hash}') WHERE processing_environments @> ARRAY['{env_hash}'] """.format(env_hash=env_hash)) # Remove observations without related environments self.stdout.write('Removing observations without related processing_environments') Observation.objects.filter(processing_environments=[]).delete()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def env_remove(args):\n read_envs = []\n for env_name in args.rm_env:\n env = ev.read(env_name)\n read_envs.append(env)\n\n if not args.yes_to_all:\n answer = tty.get_yes_or_no(\n \"Really remove %s %s?\"\n % (\n string.plural(len(args.rm_env), \"environment\", show_n=False),\n string.comma_and(args.rm_env),\n ),\n default=False,\n )\n if not answer:\n tty.die(\"Will not remove any environments\")\n\n for env in read_envs:\n if env.active:\n tty.die(\"Environment %s can't be removed while activated.\" % env.name)\n\n env.destroy()\n tty.msg(\"Successfully removed environment '%s'\" % env.name)", "def teardown_test_env():\n if not keep_tmp_dirs:\n print('\\nCleaning up temporary directories...')\n shutil.rmtree(tmp_elm_dpath, ignore_errors=True)\n shutil.rmtree(tmp_elm_examples_dpath, ignore_errors=True)\n\n print('Removing conda environment used for testing...')\n sp.call('conda env remove -y -q -n {}'.format(test_env_name), shell=True, executable='/bin/bash', stdout=sp.DEVNULL)", "def env_cleanup(self):\n pass", "def cleanUp(self):\r\n # All intermediates should be removed by app controller\r\n pass", "def pop(self):\n\t\tif self.old_vars is None:\n\t\t\treturn\n\n\t\tfor k, v in self.old_vars.items():\n\t\t\tif v is None:\n\t\t\t\tif k in os.environ:\n\t\t\t\t\tdel os.environ[k]\n\t\t\telse:\n\t\t\t\tos.environ[k] = v\n\n\t\tself.old_vars = None", "def clear(self) -> None:\n self._REGISTERED_ENVS.clear()\n self._manifests = []\n self._sync = True", "def remove(self):\n if self.exists():\n try:\n utils.run_in_bash(\n f'{CONDA_BIN} env remove -q -y -n {self.name}')\n except CalledProcessError as err:\n err_message = err.output.strip().decode('ascii')\n if 'CondaEnvironmentError:' in err_message:\n inform.info('deactivating and retry')\n utils.run_in_bash(\n 'source deactivate && '\n f'{CONDA_BIN} env remove -q -y -n {self.name}')\n else:\n inform.error('Couldn\\'t remove environment. 
'\n 'Following error occured:')\n print(err_message)\n inform.critical()", "def setup():\n if os.path.exists(\"observations.p\"):\n os.remove(\"observations.p\")\n else:\n pass", "def envDeleteAll(self, key):\n while self.envExists(key):\n self.envDelete(key)", "def EndEnv(self,EnvironmentName):\n\n subprocess.call(['rmdir','/Q','/S',f'{EnvironmentName}'], shell=True, cwd=r\"C:\\Users\\caspe\\Envs\")\n\n with open(\"Envs.json\") as delete_file:\n elements = json.load(delete_file)\n\n if EnvironmentName in elements:\n del elements[EnvironmentName]\n\n with open(\"Envs.json\", \"w\") as add_file:\n json.dump(elements, add_file, indent=4)", "def clean_env():\n for key in ['FOO', 'THOR', 'IRON', 'NAME', 'PERSONAL_DIR']:\n os.environ.pop(key, None)", "def destroy(self):\r\n for node in self._nodes.copy():\r\n node.destroy()\r\n\r\n for parameter in self._parameters.copy():\r\n parameter.destroy()\r\n\r\n assert len(self._nodes) == 0\r\n assert len(self._parameters) == 0\r\n\r\n super(Environment, self).destroy()", "def unsetEnv(self, checked):\n\n # resolve circular dependencies\n if( self.name in checked ):\n return\n else:\n checked.append( self.name )\n\n # delete environment variables\n for k, v in self.env.iteritems():\n trydelenv(k)\n\n # restore path variables (only need to do this at the root module, skip recursivity!)\n if( len( checked ) == 1 ):\n for k, v in self.parent.envpathbak.iteritems():\n os.environ[k] = v\n\n # delete environment for dependencies\n mods = self.optmodules + self.reqmodules + self.reqmodules_buildonly + self.reqmodules_external\n for modname in mods:\n if( self.parent.module(modname) != None ):\n self.parent.module(modname).unsetEnv(checked)", "def tearDown(self):\n tests.utils.cleanup_environment()", "def tearDown(self):\n tests.utils.cleanup_environment()", "def remove(self):\n \n dbpath, config = self._start() \n desc_file = check_file(config.model_descriptions, dbpath,\n \"model_descriptions\", allow_none=False) \n self.logger.msg1(\"Reading model ids\")\n ids = values_in_column(desc_file, \"id\")\n self.logger.msg1(\"Deleting models: \"+str(len(ids)))\n delete_models(dbpath, ids)\n self._end()", "def _clean_up_experiment(self):\n if self.module_name == \"keras\":\n K.clear_session()", "def selenium_teardown():\n families_to_delete, visits_to_delete, responses_to_delete = [], [], []\n\n families_to_delete.extend(Family.objects.filter(study_id_number=59638))\n families_to_delete.extend(Family.objects.filter(study_id_number=83695))\n for f in families_to_delete:\n visits_to_delete.extend(f.visit_set.all())\n for v in visits_to_delete:\n responses_to_delete.extend(v.response_set.all())\n\n for r in responses_to_delete:\n r.delete()\n for v in visits_to_delete:\n v.delete()\n for f in families_to_delete:\n f.delete()", "def teardown_scripts(test=None):\n for key, value in original_environ.iteritems():\n if value is None:\n del os.environ[key]\n else:\n os.environ[key] = value\n original_environ.clear()\n\n for path in tmp_paths:\n if os.path.isdir(path):\n shutil.rmtree(path)\n else:\n os.remove(path)\n del tmp_paths[:]", "def cleanUp(self):\n import evoware.fileutil as F\n F.tryRemove(self.f_project, verbose=(self.VERBOSITY>1), tree=1)", "def _unload(apps, schema_editor):\n for modelname in models:\n model = apps.get_model(appname, modelname)\n model.objects.all().delete()", "def update(self, env):\n del env\n return", "def cleanup():\n if len(env.releases) > 3:\n directories = env.releases\n directories.reverse()\n del directories[:3]\n env.directories = ' 
'.join([ '%(releases_path)s/%(release)s' % { 'releases_path':env.releases_path, 'release':release } for release in directories ])\n run('rm -rf %(directories)s' % env)", "def modified_environ(self, *remove, **update):\n env = os.environ\n update = update or {}\n remove = remove or []\n\n # List of environment variables being updated or removed.\n stomped = (set(update.keys()) | set(remove)) & set(env.keys())\n # Environment variables and values to restore on exit.\n update_after = {k: env[k] for k in stomped}\n # Environment variables and values to remove on exit.\n remove_after = frozenset(k for k in update if k not in env)\n\n try:\n env.update(update)\n [env.pop(k, None) for k in remove]\n yield\n finally:\n env.update(update_after)\n [env.pop(k) for k in remove_after]", "def shutdown(self):\n del self.model\n del self.train_dataset\n del self.test_dataset", "def clearmodels(self):\n \n dbpath, config = self._start() \n ModelDescriptionTable(dbpath).empty()\n ModelPhenotypeTable(dbpath).empty()\n ModelScoreTable(dbpath).empty() \n self._end()", "def clear_db_env():\n global _FACADE\n _FACADE = None", "def remove_environment():\n modulepaths = os.environ.get(\"MAYA_MODULE_PATH\", \"\").split(os.pathsep)\n modulepaths.reverse()\n for p in modulepaths:\n modfile = os.path.join(p, \"AzureBatch.mod\")\n if os.path.exists(modfile):\n try:\n print(\"Removing mod file from {0}\".format(modfile))\n os.remove(modfile)\n except:\n print(\"Found AzureBatch mod file {0}, but \"\n \"couldn't delete.\".format(modfile))\n message = \"Remove installed Python dependencies?\"\n del_python = cmds.confirmDialog(\n title='Azure Batch', message=message, button=['Yes','No'],\n defaultButton='No', dismissString='No')\n\n if del_python == 'Yes':\n try:\n print(\"Removing Python dependencies: {0}\".format(INSTALL_DIR))\n shutil.rmtree(INSTALL_DIR)\n except:\n print(\"Couldn't remove {0}\".format(INSTALL_DIR))", "def tearDown(self):\n for model in MODELS:\n for obj in model.objects.all():\n obj.delete()", "def tearDown(self):\n for model in MODELS:\n for obj in model.objects.all():\n obj.delete()" ]
[ "0.62257844", "0.6080496", "0.5872701", "0.58601445", "0.5766776", "0.57624966", "0.5742403", "0.5738756", "0.56853753", "0.5683202", "0.5669383", "0.56654215", "0.5665408", "0.5649904", "0.5649904", "0.5643665", "0.56427264", "0.5626603", "0.5625567", "0.55980545", "0.559113", "0.5567895", "0.5480101", "0.54610735", "0.54607344", "0.5460536", "0.5412011", "0.53987163", "0.5393423", "0.5393423" ]
0.7031861
0
Check if concatenated_observation_data is in self.observations
def observation_exists_locally(self, concatenated_observation_data): local_observation = self.observations.get( concatenated_observation_data, None ) if local_observation: return local_observation['observation']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def observation_exists(self, concatenated_observation_data):\n local_observation = self.observation_exists_locally(\n concatenated_observation_data\n )\n if local_observation:\n return local_observation\n\n try:\n return Observation.objects.get(\n concatenated_data=concatenated_observation_data\n )\n except ObjectDoesNotExist:\n return None", "def checkObservation(self):\n if (self.independentVariable is not None \n and self.observation is not None \n and self.observationError is not None):\n l = len(self.independentVariable)\n if (l == len(self.observation) and l == len(self.observationError)):\n return True\n return False", "def is_in(batch, data):\n _id = batch[-1]\n for d in data:\n if d[-1] == _id:\n return True\n return False", "def _test_obsdup(t):\n return t.shape[0] != len(set(t.ids(axis='observation')))", "def __contains__(self, rq):\n return rq in self._data", "def __contains__(self, idx):\n return idx in self._data", "def _check_already_present(self, new_da):\n for da in self:\n self._id_of_DataArrays_equal(da, new_da)", "def _has_processed_data(self):\n return \\\n os.path.exists(\n os.path.join(self._data_root_path, self._processed_train_data_file_name)) and \\\n os.path.exists(\n os.path.join(self._data_root_path, self._processed_dev_data_file_name)) and \\\n os.path.exists(\n os.path.join(self._data_root_path, self._word_vocab_file_name)) and \\\n os.path.exists(\n os.path.join(self._data_root_path, self._char_vocab_file_name))", "def _is_legal_state(self, observation):\n servers_used_mem = np.zeros(len(self.servers_mem))\n for i, _ in enumerate(servers_used_mem):\n servers_used_mem[i] = np.sum(self.services_mem[observation==i])\n return np.alltrue(np.array(self.servers_mem) >= servers_used_mem)", "def __contains__(self, item):\n return item in self._data", "def has_data(self):\n return ([0] != self.__contexts) and ([0] != self.__weights)", "def __contains__(self, essid):\n return essid in self.essids", "def __contains__(self, key):\n\t\treturn key in self.__dStore", "def __contains__(self, item):\n return item in self.default_dataset", "def __eq__(self, other):\n if not isinstance(other, MultiConcatInfo):\n return False\n\n return self.__dict__ == other.__dict__", "def _are_features_already_extracted(self, output_path: str, subset: str) -> bool:\n file_path = join(output_path, subset + '.npy')\n return os.path.exists(file_path)", "def is_same_set(self, item1, item2):\n res = False\n for s in self._data:\n if item1 in s and item2 in s:\n res = True\n break\n return res", "def __contains__(self, identifier):\n # following breaks some tests, what is the expected behaviour?\n # return any(m.unique_id.endswith(identifier) for m in self)\n return any(m.unique_id == identifier for m in self)", "def contains(self, word: Iterable[Terminal]) -> bool:\n return self._get_final_state(word) is not None", "def obs_with_data(x):\n num_toks = np.sum(x,axis=1)\n has_data = num_toks > 0\n return has_data", "def classify(self, tokenized_record):\n\n return bool(set(tokenized_record).intersection(self.bo_markers))", "def __contains__(self, i):\n return i in self._ar", "def does_contain(self, other):\n if len(self.reactants) != len(other.retrons):\n return False\n for mols in itertools.permutations(self.reactants):\n if all(m.HasSubstructMatch(p)\n for m, p in zip(mols, other.retrons)):\n return True\n return False", "def _is_done_illegal_state(self, observation):\n servers_used_mem = np.zeros(len(self.servers_mem))\n for i, _ in enumerate(servers_used_mem):\n servers_used_mem[i] = 
np.sum(self.services_mem[observation==i])\n return np.alltrue(np.array(self.servers_mem) < servers_used_mem)", "def __contains__(self, key):\n keys = list(self._indexer(key))\n if len(keys) == 1:\n return keys[0] in self._data\n return [k in self._data for k in keys]", "def _is_done(self, observations):\n raise NotImplementedError()", "def _is_done(self, observations):\n raise NotImplementedError()", "def _is_done(self, observations):\n raise NotImplementedError()", "def _is_done(self, observations):\n raise NotImplementedError()", "def _is_done(self, observations):\n raise NotImplementedError()" ]
[ "0.65428954", "0.626618", "0.5845177", "0.5799288", "0.57642674", "0.5685769", "0.55044246", "0.5491718", "0.54879206", "0.5462496", "0.5368831", "0.5367507", "0.5330643", "0.5304856", "0.530262", "0.5287393", "0.52781326", "0.52779424", "0.52746606", "0.5272336", "0.52582705", "0.52546626", "0.52432734", "0.52410185", "0.5237098", "0.52211356", "0.52211356", "0.52211356", "0.52211356", "0.52211356" ]
0.70318484
0
Returns False if the point does not exist, else returns the LocationPoint id
def point_exists(self, point): qs = LocationPoint.objects.raw(""" SELECT * FROM script_execution_manager_locationpoint WHERE st_dwithin( thegeometry, st_transform( st_setsrid( st_point({point.x}, {point.y}), {point.srid}), 4326 ), -- This should be approximately one meter. -- See: http://stackoverflow.com/a/8477438/198050 -- 0.00001 -- Gerrit Hendriksen ([email protected]) says -- 8*10e-6 is approximately one meter. 8.181818181818181e-06 ) """.format(point=point) ) res = sum(1 for result in qs) return qs[0] if res else False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isSetId(self):\n return _libsbml.Point_isSetId(self)", "def check_point(point,points):\n if point in points:\n return True\n else:\n return False", "def find_point(self, point: Point):\n for internal_point in self.points:\n if internal_point == point:\n return internal_point\n return None", "def get_location_by_id(self, location_id):", "def _check_data_point(cube, metadata):\n point_index = []\n\n for dim_length in cube.shape:\n point_index.append(int(random.random() * dim_length))\n\n point_index = tuple(point_index)\n\n try:\n point_cube = cube[point_index]\n _data_point = point_cube.data\n except Exception:\n msg = 'Unable to extract data point {} from file: {}'.format(\n point_index, metadata['basename'])\n raise FileValidationError(msg)\n else:\n return True", "def _get_location_id(self, location):\r\n loc_svc = self.client['Location_Datacenter']\r\n datacenters = loc_svc.getDatacenters(mask='mask[longName,id,name]')\r\n for datacenter in datacenters:\r\n if datacenter['name'] == location:\r\n location = datacenter['id']\r\n return location\r\n raise ValueError('Invalid datacenter name specified.')", "def test_points_exists(self):\n self.assertEqual(Destination.objects.filter(name='testWithin')[0].point,\n self.test_point_inside)\n self.assertEqual(Destination.objects.filter(name='testWithout')[0].point,\n self.test_point_outside)", "def add_pnp_point(self, point, latlonalt):\n ind, exists = inside_circle(point, self.known_image_points)\n \n if exists:\n # Remove the point if it exists.\n del self.known_image_points[ind]\n self.known_geo_points_tbl.removeRow(ind)\n else:\n # Or add the point.\n self.known_image_points.append(point)\n\n # Update the table.\n row_pos = self.known_geo_points_tbl.rowCount()\n self.known_geo_points_tbl.insertRow(row_pos)\n\n if latlonalt:\n self.known_geo_points_tbl.setItem(\n row_pos, 0, QtWidgets.QTableWidgetItem(str(latlonalt[0])))\n self.known_geo_points_tbl.setItem(\n row_pos, 1, QtWidgets.QTableWidgetItem(str(latlonalt[1])))\n self.known_geo_points_tbl.setItem(\n row_pos, 2, QtWidgets.QTableWidgetItem(str(latlonalt[2])))", "def add_point(self, point):\n\n if point.uuid is None:\n point.uuid = self._generate_uuid()\n\n if point.uuid in self._points:\n error_str = \"Trying to add an already existing point with uuid: \"\\\n + str(point.uuid)\n raise KeyError(error_str)\n\n self._points[point.uuid] = Point.from_point(point)\n\n return point.uuid", "def does_location_exist(usage_key):\r\n try:\r\n search.path_to_location(modulestore(), usage_key)\r\n return True\r\n except ItemNotFoundError:\r\n # If the problem cannot be found at the location received from the grading controller server,\r\n # it has been deleted by the course author.\r\n return False\r\n except NoPathToItem:\r\n # If the problem can be found, but there is no path to it, then we assume it is a draft.\r\n # Log a warning in any case.\r\n log.warn(\"Got an unexpected NoPathToItem error in staff grading with location %s. 
\"\r\n \"This is ok if it is a draft; ensure that the location is valid.\", usage_key)\r\n return False", "def IsInsertedPoint(self, ):\n ...", "def getId(self):\n return _libsbml.Point_getId(self)", "def __find_make_pt(xy_tup, points_dict):\n point = points_dict.get(xy_tup)\n if point is not None:\n return point\n xy_point = geometry.Point(xy_tup[0], xy_tup[1])\n for realpoint in points_dict.values():\n dist = (xy_point - realpoint).length()\n if dist < geometry.ACC:\n return realpoint\n points_dict[xy_tup] = xy_point\n return xy_point", "def find_list_for_new_point(self, point):\n hash_code = get_parent_hash(point)\n for i, _ in enumerate(self._points):\n for point_move in self._points[i]:\n if hash_graphics_point(point_move) == hash_code:\n return i\n\n return None", "def test_location_ids_and_noise(self, example_staypoints):\n sp = example_staypoints\n sp2, _ = sp.as_staypoints.generate_locations(\n method=\"dbscan\", epsilon=10, num_samples=2, distance_metric=\"haversine\", agg_level=\"user\"\n )\n assert sp2.loc[1, \"location_id\"] == sp2.loc[15, \"location_id\"]\n assert sp2.loc[5, \"location_id\"] == sp2.loc[6, \"location_id\"]\n assert sp2.loc[80, \"location_id\"] == sp2.loc[3, \"location_id\"]\n assert sp2.loc[1, \"location_id\"] != sp2.loc[6, \"location_id\"]\n assert sp2.loc[1, \"location_id\"] != sp2.loc[80, \"location_id\"]\n\n assert sp2.loc[[2, 7], \"location_id\"].isnull().all()", "def test_missing_link(self):\n pfs_file = os.path.join(\"tests\", \"data\", \"positionfixes.csv\")\n pfs = ti.read_positionfixes_csv(pfs_file, sep=\";\", tz=\"utc\", index_col=\"id\", crs=\"epsg:4326\")\n _, sp = pfs.as_positionfixes.generate_staypoints(\n method=\"sliding\", gap_threshold=1e6, dist_threshold=0, time_threshold=0\n )\n warn_string = \"No locations can be generated, returning empty locs.\"\n with pytest.warns(UserWarning, match=warn_string):\n sp, _ = sp.as_staypoints.generate_locations(\n method=\"dbscan\", epsilon=1e18, num_samples=1000, agg_level=\"user\"\n )\n\n assert pd.isna(sp[\"location_id\"]).any()", "def key_by_point( self,point ):\n lons,lats,keys = self.lons_lats_keys\n for i, key in enumerate(keys):\n if in_polygon(point = point, poly = (lons[i],lats[i])):\n return key\n return None", "def _get_location_name(lat, lon):\n for shapeRecords in sf.iterShapeRecords():\n shape, record = shapeRecords.shape, shapeRecords.record\n bbox, points = shape.bbox, shape.points\n if point_inside_polygon((lon, lat), [(bbox[0], bbox[1]), (bbox[2], bbox[1]), (bbox[2], bbox[3]), (bbox[0], bbox[3])]):\n if point_inside_polygon((lon, lat), points):\n return record[13]", "def location():\n return _locate_or_create()", "def m_location_get(self) -> Point:\n pass", "def try_to_point(s):\n target_graph = s.store.get_if_already_have( s.url )\n if target_graph==None:\n s.pointing_at = None\n return False\n start_pt = s.start_pt or \"START\"\n if sortof_type_str_of(start_pt) == \"STR\":\n reach = target_graph.flags[ start_pt ]\n elif sortof_type_str_of(start_pt) == \"INT\":\n reach = target_graph.nodes[start_pt]\n else:\n raise \"I can't figure out what s.start_pt is: %s\" % str(start_pt)\n if s.path == None or s.path == []:\n s.pointing_at = reach\n return True\n\n # for now, we'll just not worry about indexing beyond reference nodes.\n # this'll work just fine,\n # if you're only indexing within the graph\n for index in s.path:\n try:\n reach = reach[index]\n except TypeError:\n s.pointing_at = None\n return False\n s.pointing_at = reach\n return True", "def is_point_exist(point, a_value, 
b_value, field):\n\n return (\n (point.y_crd ** 2 -\n (point.x_crd ** 3 + a_value *\n point.x_crd + b_value)) % field == 0 and\n 0 <= point.x_crd < field and 0 <= point.y_crd < field)", "def locate(self, point: Point[Scalar]) -> Location:\n return (Location.BOUNDARY\n if point in self._points_set\n else Location.EXTERIOR)", "def test_location_ids_and_noise_dataset(self, example_staypoints):\n sp = example_staypoints\n sp2, _ = sp.as_staypoints.generate_locations(\n method=\"dbscan\", epsilon=10, num_samples=2, distance_metric=\"haversine\", agg_level=\"dataset\"\n )\n\n assert (sp2.loc[[5, 6, 80, 3], \"location_id\"] == sp2.loc[5, \"location_id\"]).all()\n assert sp2.loc[1, \"location_id\"] == sp2.loc[15, \"location_id\"]\n assert sp2.loc[1, \"location_id\"] != sp2.loc[5, \"location_id\"]\n\n assert sp2.loc[[2, 7], \"location_id\"].isnull().all()", "def exists(self):\n try:\n self.world.find(self.ehandle)\n except KeyError:\n return False\n else:\n return True", "def has_current_location(self):\n return self.location_set.current_location is not None", "def locate(self):\n if self.location == '':\n return None\n if self.coords is not None:\n return self.coords\n\n loc = urlencode({'address': self.location})\n urldoc = urlopen(User._GMAP_URL.format(query=loc))\n jsObj = json.loads(urldoc.readall().decode('utf-8'))\n if len(jsObj['results']) > 0:\n # discard commercial results\n locTypes = jsObj['results'][0]['address_components'][0]['types']\n if not 'premise' in locTypes and not 'route' in locTypes and not 'establishment' in locTypes and not 'subpremise' in locTypes:\n self.coords = jsObj['results'][0]['geometry']['location']\n return self.coords\n # still here? it's all rubbish\n return None", "def get_point(self, uuid):\n\n try:\n return Point.from_point(self._points[uuid])\n except KeyError:\n error_str = \"Trying to get an non-existing point with uuid: {}\"\n raise ValueError(error_str.format(uuid))", "def point(x, y):\n return test(Point(x,y))", "def location_info(self, location_pk: int) -> Location:\n try:\n location = self.location_info_a1(location_pk)\n except Exception:\n # Users do not understand the output of such information and create bug reports\n # such this - https://github.com/adw0rd/instagrapi/issues/364\n # if not isinstance(e, ClientError):\n # self.logger.exception(e)\n location = self.location_info_v1(location_pk)\n return location" ]
[ "0.65851754", "0.60588527", "0.59962666", "0.5944725", "0.58938813", "0.5892437", "0.58379346", "0.5795499", "0.57793", "0.576884", "0.5735127", "0.57340264", "0.5658171", "0.56532735", "0.5652695", "0.56121695", "0.5558224", "0.5546083", "0.5536352", "0.5525766", "0.5519552", "0.5513254", "0.5509465", "0.5509119", "0.5505239", "0.55046463", "0.55023694", "0.5499846", "0.54930425", "0.5486255" ]
0.73598516
0
Make a CarlaSettings object with the settings we need.
def make_carla_settings(args): settings = CarlaSettings() settings.set( SynchronousMode=True, SendNonPlayerAgentsInfo=True, NumberOfVehicles=0, NumberOfPedestrians=0, SeedVehicles = '00000', WeatherId=1, QualityLevel=args.quality_level) settings.randomize_seeds() return settings
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_carla_settings(args):\n settings = CarlaSettings()\n settings.set(\n SynchronousMode=False,\n SendNonPlayerAgentsInfo=True,\n NumberOfVehicles=NUM_VEHICLES,\n NumberOfPedestrians=NUM_PEDESTRIANS,\n WeatherId=random.choice([1, 3, 7, 8, 14]),\n QualityLevel=args.quality_level)\n settings.randomize_seeds()\n camera0 = sensor.Camera('CameraRGB')\n camera0.set_image_size(WINDOW_WIDTH, WINDOW_HEIGHT)\n camera0.set_position(0, 0.0, CAMERA_HEIGHT_POS)\n camera0.set_rotation(0.0, 0.0, 0.0)\n settings.add_sensor(camera0)\n\n lidar = sensor.Lidar('Lidar32')\n lidar.set_position(0, 0.0, LIDAR_HEIGHT_POS)\n lidar.set_rotation(0, 0, 0)\n lidar.set(\n Channels=40,\n Range=MAX_RENDER_DEPTH_IN_METERS,\n PointsPerSecond=720000,\n RotationFrequency=10,\n UpperFovLimit=7,\n LowerFovLimit=-16)\n settings.add_sensor(lidar)\n \"\"\" Depth camera for filtering out occluded vehicles \"\"\"\n depth_camera = sensor.Camera('DepthCamera', PostProcessing='Depth')\n depth_camera.set(FOV=90.0)\n depth_camera.set_image_size(WINDOW_WIDTH, WINDOW_HEIGHT)\n depth_camera.set_position(0, 0, CAMERA_HEIGHT_POS)\n depth_camera.set_rotation(0, 0, 0)\n settings.add_sensor(depth_camera)\n # (Intrinsic) K Matrix\n # | f 0 Cu\n # | 0 f Cv\n # | 0 0 1\n # (Cu, Cv) is center of image\n k = np.identity(3)\n k[0, 2] = WINDOW_WIDTH_HALF\n k[1, 2] = WINDOW_HEIGHT_HALF\n f = WINDOW_WIDTH / \\\n (2.0 * math.tan(90.0 * math.pi / 360.0))\n k[0, 0] = k[1, 1] = f\n camera_to_car_transform = camera0.get_unreal_transform()\n lidar_to_car_transform = lidar.get_transform(\n ) * Transform(Rotation(yaw=90), Scale(z=-1))\n return settings, k, camera_to_car_transform, lidar_to_car_transform", "def settings() -> Settings:\n return Settings()", "def create_settings():\n\n settings = {}\n\n settings['induction'] = {'type': 'DT'}\n\n settings['selection'] = {'type': 'Base',\n 'its': 1,\n 'param': 1}\n\n settings['prediction'] = {'type': 'MI',\n 'its': 0.1,\n 'param': 0.95}\n\n settings['queries'] = {}\n\n settings['metadata'] = {}\n\n settings['model_data'] = {}\n\n return settings", "def __init__( settings={} ):", "def settings(self):\n from hubspot3.settings import SettingsClient\n\n return SettingsClient(**self.auth, **self.options)", "def initialize(cls, settings: Settings) -> Settings:\n\n settings_obj = SettingsService.load_game_conf()\n\n for entry in SettingsService.GAME_SETTINGS:\n value = settings_obj.get(SettingsService.GAME_SETTINGS_ROOT, {}).get(\n entry, None\n )\n if value is None:\n raise RuntimeError(f\"Entry {entry} is missing in settings.\")\n\n setattr(settings, entry, value)\n\n for entry in SettingsService.INITIALS:\n value = settings_obj.get(SettingsService.INITIALS_ROOT, {}).get(entry, None)\n if value is None:\n raise RuntimeError(f\"Entry {entry} is missing in settings.\")\n\n settings.initials[entry] = value\n\n return settings", "def get_settings():\n return SettingCollection.build()", "def settings():\n return SettingsMock.instance()", "def from_settings(settings):", "def find_settings():\n return Setting()", "def __init__(self, settings):\n\n # store settings\n self.settings = settings", "def __init__(self, settings):\n \n # storing otmbs settings\n self.settings = settings", "def build_mail_settings():\n mail_settings = MailSettings()\n mail_settings.bcc_settings = BCCSettings(True, Email(\"[email protected]\"))\n mail_settings.bypass_list_management = BypassListManagement(True)\n mail_settings.footer_settings = FooterSettings(True, \"Footer Text\",\n (\"<html><body>Footer \"\n \"Text</body></html>\"))\n 
mail_settings.sandbox_mode = SandBoxMode(True)\n mail_settings.spam_check = SpamCheck(True, 1,\n \"https://spamcatcher.sendgrid.com\")\n return mail_settings", "def build_settings(self, settings):\n \n settings.add_json_panel(\"Network\", self.config, data=network_json)\n settings.add_json_panel(\"Camera\", self.config, data=camera_json)\n settings.add_json_panel(\"CV\", self.config, data=cv_json)\n settings.add_json_panel(\"Admin\", self.config, data=admin_json)", "def setup_settings():\n settings = DEFAULT_SETTINGS\n if os.environ.get(\"MUTALYZER_SETTINGS\"):\n configuration_path = os.environ[\"MUTALYZER_SETTINGS\"]\n with open(configuration_path) as f:\n configuration_content = \"[config]\\n\" + f.read()\n loaded_settings = configparser.ConfigParser()\n loaded_settings.optionxform = str\n loaded_settings.read_string(configuration_content)\n loaded_settings = {\n sect: dict(loaded_settings.items(sect))\n for sect in loaded_settings.sections()\n }[\"config\"]\n for k in loaded_settings:\n if loaded_settings[k] in {\"yes\", \"true\", \"1\"}:\n loaded_settings[k] = True\n elif loaded_settings[k] in {\"no\", \"false\", \"0\"}:\n loaded_settings[k] = False\n elif loaded_settings[k].isnumeric():\n loaded_settings[k] = int(loaded_settings[k])\n settings.update(loaded_settings)\n\n return settings", "def get_settings(dataset: DS):\n if dataset == DS.ARTIFICIAL_BBOX:\n project_path = Path('data/artificial/')\n project_file = project_path / 'annotations.json'\n image_dir = 'images'\n _, annotations = create_color_classification(path=project_path, n_samples=50,\n size=(500, 500))\n\n anno = {str(project_path / image_dir / k): [f'{v}.jpg'] for k, v in annotations.items()}\n\n with open(project_file, 'w') as f:\n json.dump(anno, f)\n\n return Settings(project_path=project_path,\n project_file=project_file,\n image_dir=image_dir,\n label_dir='class_images',\n # used on create step - should be empty!\n result_dir='create_results',\n im_width=50, im_height=50,\n label_width=30, label_height=30,\n n_cols=3)\n elif dataset == DS.ARTIFICIAL_VIDEO:\n project_path = Path('data/artificial/')\n project_file = project_path / 'annotations.json'\n image_dir = 'images'\n create_mot_ds(project_path, image_dir, 20, True)\n return Settings(\n project_path=project_path,\n project_file=project_file,\n image_dir=image_dir,\n im_width=200,\n im_height=200,\n result_dir='create_results',\n )\n elif dataset == DS.CIFAR10:\n cifar_train_p, cifar_test_p = get_cifar10(Path('data'))\n\n return Settings(project_path=Path('data/cifar10/'),\n project_file=cifar_test_p,\n image_dir='test',\n label_dir=None,\n # used on create step - should be empty!\n result_dir='create_results',\n im_width=50, im_height=50,\n label_width=140, label_height=30,\n n_cols=2)\n\n elif dataset == DS.OXFORD102:\n flowers102_train_p, flowers102_test_p = get_oxford_102_flowers(Path('data'))\n\n return Settings(project_path=Path('data/oxford-102-flowers'),\n project_file=flowers102_test_p,\n image_dir='jpg',\n label_dir=None,\n # used on create step - should be empty!\n result_dir='create_results',\n im_width=50, im_height=50,\n label_width=40, label_height=30,\n n_cols=7)\n\n elif dataset == DS.CUB200:\n cub200_train_p, cub200_test_p = get_cub_200_2011(Path('data'))\n\n return Settings(project_path=Path('data/CUB_200_2011'),\n project_file=cub200_test_p,\n image_dir='images',\n label_dir=None,\n # used on create step - should be empty!\n result_dir='create_results',\n im_width=50, im_height=50,\n label_width=50, label_height=50,\n n_cols=7)\n else:\n 
raise UserWarning(f\"Dataset {dataset} is not supported!\")", "def settings(self):\r\n return SettingResource(self)", "def settings(self):\r\n return settings.Settings(self)", "def build(self) -> cern.lsa.domain.settings.ContextSettings:\n ...", "def _define_settings(self):\n\n self.settings = {}\n\n ##### ORIGINALLY IN THE DOMAIN FILE #######\n\n # Maximum input in the C-Space : no constituent can be more than 100% present\n self.settings['maxInp'] = 1\n\n #### ORIGINALLY IN THE SETTINGS FILE #####\n self.settings[\"epochs\"] = 3 # Training epochs\n self.settings[\"tgtStd\"] = 12e-6\n self.settings['TInit'] = 1e-6\n self.settings[\"TMin\"] = 0\n self.settings[\"TDecayRate\"] = 0.05\n self.settings[\"lambdaInit\"] = 0.011387\n self.settings['lambdaMin'] = 0.0001\n self.settings[\"lambdaDecayRate\"] = 0.60\n self.settings[\"maxSteps\"] = 300000\n self.settings[\"emaSpeedTol\"] = 0.009\n self.settings[\"emaFactor\"] = .005\n self.settings[\"printInterval\"] = 3000\n self.settings[\"summary_file\"] = \"data/summary.txt\"\n mean = torch.ones(self.grammar.bind.nF,\n self.grammar.bind.nR)/self.grammar.bind.nF\n self.settings[\"initStateMean\"] = mean\n self.settings[\"initStateStdev\"] = .025\n self.settings['clamp'] = False\n\n if self.custom_settings is not None:\n for key, value in self.custom_settings.items():\n if key in self.settings:\n self.settings[key] = value", "def cont_settings_(request):\n \n return {\"settings\": settings}", "def __init__(self, settings):\n self._settings = settings", "def make_settings(pypirc):\n default_pypirc = \"\"\"\n [pypi]\n username:foo\n password:bar\n \"\"\"\n\n def _settings(pypirc_text=default_pypirc, **settings_kwargs):\n pypirc.write(textwrap.dedent(pypirc_text))\n\n settings_kwargs.setdefault(\"sign_with\", None)\n settings_kwargs.setdefault(\"config_file\", str(pypirc))\n\n return settings.Settings(**settings_kwargs)\n\n return _settings", "def load_settings(self):\n\n self.std = settings.settings", "def build_settings(self, settings):\n settings.add_json_panel('Makesmith Settings', self.config, data=self.json)", "def config(settings):\n\n #T = current.T\n\n # PrePopulate data\n settings.base.prepopulate += (\"SHARE/LK\",)\n settings.base.prepopulate_demo += (\"SHARE/Demo\",)\n\n # Finance settings\n settings.fin.currencies = {\n #\"EUR\" : \"Euros\",\n #\"GBP\" : \"Great British Pounds\",\n \"LKR\" : \"Sri Lanka Rupees\",\n \"USD\" : \"United States Dollars\",\n }\n settings.fin.currency_default = \"USD\"", "def make_setting(\n cls,\n app_name,\n name,\n setting_type,\n value,\n value_json={},\n user_modifiable=True,\n project=None,\n user=None,\n sodar_uuid=None,\n ):\n values = {\n 'app_plugin': None\n if app_name == 'projectroles'\n else get_app_plugin(app_name).get_model(),\n 'project': project,\n 'name': name,\n 'type': setting_type,\n 'value': value,\n 'value_json': value_json,\n 'user_modifiable': user_modifiable,\n 'user': user,\n }\n if sodar_uuid:\n values['sodar_uuid'] = sodar_uuid\n setting = AppSetting(**values)\n setting.save()\n return setting", "def _get_initial_config(self):\r\n config = GeneralConfiguration()\r\n caching_config = CacheBaseyearConfiguration()\r\n config.merge(caching_config)\r\n return config", "def from_mcuSettings(cls, mcuSettings: MCUSettings):\n\n settings = cls.fromConfig()\n settings.start = mcuSettings.start\n settings.peep = mcuSettings.peep\n settings.freq = mcuSettings.freq\n settings.ratio = mcuSettings.ratio\n settings.pressure = mcuSettings.pressure\n settings.oxygen = mcuSettings.oxygen\n\n return 
settings", "def set_config(self): # called from button_set object \n self.settings['lights_on'] = self.lights_on.get()\n self.settings['lights_off'] = self.lights_off.get()\n self.settings['ambient_min'] = self.ambient_min.get()\n self.settings['soil_1'] = self.smc1.get()\n self.settings['soil_2'] = self.smc2.get()\n self.settings['soil_3'] = self.smc3.get()\n self.settings['soil_4'] = self.smc4.get()\n self.settings['overhead_level'] = self.overhead_level.get()\n\n # Save settings to config file in case of reboot / power-loss\n print \"UPDATING SETTINGS FILE\"\n with open(self.settings_path, 'w') as jsonfile:\n jsonfile.write(json.dumps(self.settings, indent=4))\n self.active_changes = True # (flag) changes are active!" ]
[ "0.7046542", "0.64021003", "0.6321816", "0.59555054", "0.58940154", "0.5859983", "0.58227974", "0.5814349", "0.5743213", "0.5712618", "0.56494814", "0.563011", "0.56196254", "0.56128407", "0.55752116", "0.5572938", "0.5552482", "0.5540732", "0.55223477", "0.5518125", "0.54987264", "0.5486225", "0.5468102", "0.5462582", "0.54525614", "0.544642", "0.5426187", "0.54152817", "0.53828895", "0.53633475" ]
0.8063497
0
Check whether a color is 'dark'. Currently, this is simply whether the luminance is <50%
def dark_color(color): rgb = hex_to_rgb(color) if rgb: return rgb_to_hls(*rgb)[1] < 128 else: # default to False return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_dark(self):\n\n return self.red() < 125 and self.green() < 125 and self.blue() < 125", "def dark(r, d):\n return d * 1.0 / (r + d) + d * r * 1.0 / ((r + d) ** 2)", "def is_monochromatic(self):\n return equal(s.color for s in self.iter_states())", "def ensureBrightOrDark( nColor, bBright = True ):\n #~ print( \"ensureBrightOrDark: nColor: 0x%x, bBright: %s\" % (nColor, bBright) ); \n rB = ( ( nColor & 0xFF ) );\n rG = ( ( nColor & 0xFF00 ) >> 8 );\n rR = ( ( nColor & 0xFF0000 ) >> 16 );\n \n #~ print( \"ensureBrightOrDark: comp: r=%s, g=%s, b=%s\" % (rR, rG, rB) );\n\n nMed = 0x7F+0xFF;\n if( bBright ):\n if( rB + rG + rR < nMed ):\n nColor = interpolateColor( nColor, 0xFFFFFF, 0.4 );\n else:\n if( rB + rG + rR >= nMed ):\n nColor = interpolateColor( nColor, 0x000000, 0.4 );\n #~ print( \"ensureBrightOrDark: => nColor: 0x%x\" % (nColor) );\n return nColor;", "def lcd_backlight_color(self, color='off'):\n colorcode = _lcd_colors.index(color)\n if self._request('FB', str(colorcode))[0]:\n return True\n\n raise EvseError", "def darken(color):\n hue, saturation, value = rgb_to_hsv(color.red, color.green, color.blue)\n value /= 1.5\n saturation /= 1.25\n return hsv_to_rgb(hue, saturation, value) + (color.alpha,)", "def is_black(self):\n return \"black\" == self.color", "def lightness(color):\n\n strongest = max(color.red, color.green, color.blue)\n weakest = min(color.red, color.green, color.blue)\n return 0.5 * (strongest + weakest) / 255", "def lights_are_on(image_path):\n _brightness = get_image_brightness(image_path)\n if _brightness > 10:\n return True\n return False", "def dark_square(self, square):\n\n logger.debug(u'dark_square({})'.format(square))\n\n row, column = square\n return (row + column) % 2 != 0", "def is_colour(self, im):\n hsl = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)\n h, s, v = np.mean(hsl, (0, 1))\n if s < 100:\n self.log.info(\n \"Grayscale scan detected (hsv %s, %s, %s), converting...\", h, s, v\n )\n return False\n return True", "def is_blurry_colorful(image):\n b, _, _ = cv2.split(image)\n a = variance_of_laplacian(b)\n return (variance_of_laplacian(b) < 100)", "def colored(img: np.array):\n # Check if image is colored or black and white\n r, g, b = [normalize(img[..., i]) for i in range(3)]\n color_factor = sum([np.mean(np.square(c1 - c2)) for c1, c2 in ((r, g), (r, b), (b, r))])\n return color_factor >= 0.04", "def is_on(self):\n return self._brightness > 0 or self._white_value > 0", "def darken_color(color, amount=0.5):\n import matplotlib.colors as mc\n import colorsys\n try:\n c = mc.cnames[color]\n except:\n c = color\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], amount * c[1], c[2])", "def recognize_color(self):\n x = (self.x + DIRECTIONS[(self.facing_direction - self.config) % 8][0]) % (self.image.shape[0] - 1)\n y = (self.y + DIRECTIONS[(self.facing_direction - self.config) % 8][1]) % (self.image.shape[1] - 1)\n color_left = self.image[x, y]\n if abs(self.luminance(color_left) - self.luminance_fcolor) <= self.lum_threshold:\n return self.turn_left\n x = (self.x + DIRECTIONS[self.facing_direction][0]) % (self.image.shape[0] - 1)\n y = (self.y + DIRECTIONS[self.facing_direction][1]) % (self.image.shape[1] - 1)\n color_forward = self.image[x, y]\n if abs(self.luminance(color_forward) - self.luminance_fcolor) <= self.lum_threshold:\n return self.move_forward\n x = (self.x + DIRECTIONS[(self.facing_direction + self.config) % 8][0]) % (self.image.shape[0] - 1)\n y = (self.y + DIRECTIONS[(self.facing_direction + self.config) % 
8][1]) % (self.image.shape[1] - 1)\n color_right = self.image[x, y]\n if abs(self.luminance(color_right) - self.luminance_fcolor) <= self.lum_threshold:\n return self.turn_right\n return None", "def find_dark_states(excited_state, ground_states):", "def darkText(img):\n kernel = np.ones((30, 30), np.uint8) \n img_orig = cv2.morphologyEx(img, cv2.MORPH_BLACKHAT, kernel)\n \n TH = 150\n img_orig[(img_orig[:,:,0] < TH) | (img_orig[:,:,1] < TH) | (img_orig[:,:,2] < TH)] = (0,0,0)\n \n img_orig = closing(img_orig, size=(1, int(img.shape[1] / 8)))\n \n return (cv2.cvtColor(img_orig, cv2.COLOR_BGR2GRAY) != 0).astype(np.uint8)", "def color_mode(self) -> ColorMode:\n if self._dimmable:\n return ColorMode.BRIGHTNESS\n return ColorMode.ONOFF", "def detect(frame: numpy.ndarray) -> bool:\n color = frame[:20, 1100:1150].mean(axis=(0, 1))\n return numpy.linalg.norm(color - BG_COLOR) < 5", "def dark_cloud(self):\n self.data['dark_cloud'] = ((self.data['Close'].shift(1) > self.data['Open'].shift(1)) & \\\n (((self.data['Close'].shift(1) + self.data['Open'].shift(1)) / 2) > self.data['Close']) & \\\n (self.data['Open'] > self.data['Close']) & (self.data['Open'] > self.data['Close'].shift(1)) &\\\n (self.data['Close'] > self.data['Open'].shift(1)) & \\\n ((self.data['Open'] - self.data['Close']) / (.001 + (self.data['High'] - self.data['Low'])) > .6))", "def is_on(self):\n return self._brightness_pct != 0", "def contains_black(image):\n extrema = ImageStat.Stat(image).extrema\n r = extrema[0][0]\n g = extrema[1][0]\n b = extrema[2][0]\n\n if r == 0 and g == 0 and b == 0:\n return True\n\n return False", "def is_black(self):\n return self.color == Color.BLACK", "def check_for_white(img):\n return white_percentage(img, 220, 0.8)", "def check_for_white(img):\n return white_percentage(img, 220, 0.8)", "def dark(config):\n\n # -----------DARK CURRENT----------------------\n files = sorted(glob.glob(config['raw_dir'] + '/radiance/{}/dark/dark_*.txt'.format(config['date'])))\n # Import the data from the file\n dark_file = np.genfromtxt(files[0], delimiter='', skip_header=11)\n\n # Create array to save data\n dark = np.zeros(list(dark_file.shape))\n\n print('Calculating...')\n cnt = 0\n # Calculate mean of dark files\n for i in np.arange(len(files)):\n dark += np.genfromtxt(files[i], delimiter='', skip_header=11)\n cnt += 1\n dark = dark / (cnt + 1)\n\n # create the radiance matrix\n dark_current = np.zeros([113, 992])\n alignment = add_align()\n\n for i in np.arange(113):\n if str(alignment.iloc[i][3]) == 'nan':\n dark_current[i] = np.nan\n else:\n dark_current[i] = dark[:, int(alignment.iloc[i][3]) +\n config['channel_pixel_adj']]\n print('Complete')\n\n return dark_current", "def get_dark_squares_color(self) -> ColorTuple:\n return self._dark_squares_color", "def LightContrastColour(c):\r\n\r\n amount = 120\r\n\r\n # if the colour is especially dark, then\r\n # make the contrast even lighter\r\n if c.Red() < 128 and c.Green() < 128 and c.Blue() < 128:\r\n amount = 160\r\n\r\n return StepColour(c, amount)", "def contrast_from_bg(cls, col=\"#000000\", dark_default=\"000000\", light_default=\"FFFFFF\", hashed=\"#\"):\n trigger = float(0.45) #Values greater than this result in black text\n if not col:\n return \"#000000\" #Default to black\n if col in (\"Transparent\",\"transparent\"):\n return \"#000000\" #Default to black\n if not hashed:\n hashed = \"\"\n elif hashed is True:\n hashed = \"#\"\n try:\n col_out = cls.colour_to_rgb_tuple(col)\n r,g,b = col_out\n div = 255.0 #Produces a value between 0-1 as a float\n 
lum = float(0.2126*pow(r/div, 2.2)) + float(0.7152*pow(g/div, 2.2)) + float(0.0722*pow(b/div, 2.2))\n except (TypeError, ValueError):\n return dark_default\n #logging.info (\"Luminosity: %s\" % lum)\n #Decision gate:\n if lum >= trigger: #Light background, need dark text\n return \"%s%s\" % (hashed, dark_default)\n else: #Dark background, need light text\n return \"%s%s\" % (hashed, light_default)" ]
[ "0.8725131", "0.65368", "0.65338826", "0.6522034", "0.6357333", "0.63260835", "0.62616885", "0.62494576", "0.62354577", "0.6234573", "0.6193287", "0.6178857", "0.6167206", "0.6154868", "0.6118449", "0.6047277", "0.6014609", "0.59861344", "0.5977451", "0.59697956", "0.5940999", "0.5917745", "0.589651", "0.5882892", "0.5881247", "0.5881247", "0.58786166", "0.58707124", "0.58465457", "0.58441323" ]
0.81742746
1
Guess whether the background of the style with name 'stylename' counts as 'dark'.
def dark_style(stylename): return dark_color(get_style_by_name(stylename).background_color)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_color(style):\n for kw in list(cc.keys()):\n m = re.search(kw, style)\n if m:\n return m.group()\n\n # Return 'b' if nothing has found\n return 'b'", "def is_dark(self):\n\n return self.red() < 125 and self.green() < 125 and self.blue() < 125", "def check_theme(theme: str) -> bool:\n themes = [\"dark\", \"light\"]\n\n return theme in themes", "def HasBackgroundColour(self):\r\n \r\n return self._colBack != wx.NullColour", "def style_exists(stylename, u):\n stat, ds_request = u.request(method = 'GET',\n path = 'rest/styles/' + \\\n stylename + '.json',\n payload = None,\n mime = 'application/json')\n return stat == 200", "def black_or_white(bgcolor):\n ary_bgcolors = re.findall(r\"[\\w']+\", bgcolor)\n R = int(ary_bgcolors[1])\n G = int(ary_bgcolors[2])\n B = int(ary_bgcolors[3])\n Lumi = (sum([R,G,B])/3)\n\n if Lumi > 125:\n colorfont = 'rgb(0,0,0)'\n else:\n colorfont = 'rgb(255,255,255)'\n\n return colorfont", "def get_color(cls, string_color: str) -> Union['Color', bool]:\n r = False\n for color in cls:\n # if color == cls.CSI:\n # continue\n if str(color) == string_color:\n return color\n if not r:\n r = str(color).startswith(string_color)\n\n return r", "def dark_color(color):\n rgb = hex_to_rgb(color)\n if rgb:\n return rgb_to_hls(*rgb)[1] < 128\n else: # default to False\n return False", "def is_red(self):\n return \"red\" == self.color", "def is_color(s):\n def in_range(i): return 0 <= i <= int('0xFFFFFF', 0)\n\n try:\n if type(s) == int:\n return in_range(s)\n elif type(s) not in (str, bytes):\n return False\n elif s in webcolors.css3_names_to_hex:\n return True\n elif s[0] == '#':\n return in_range(int('0x' + s[1:], 0))\n elif s[0:2] == '0x':\n return in_range(int(s, 0))\n elif len(s) == 6:\n return in_range(int('0x' + s, 0))\n except ValueError:\n return False", "def check_highlight_style(highlight_style: str) -> bool:\n\n valid_highlight_styles = [\n 'pygments',\n 'tango',\n 'espresso',\n 'zenburn',\n 'kate',\n 'monochrome',\n 'breezedark',\n 'haddock'\n ]\n\n if highlight_style in valid_highlight_styles:\n return True\n\n beauty_styles = \", \".join(valid_highlight_styles)\n print(f'Invalid highlight style {highlight_style} given. 
Must be one of: {beauty_styles}')\n sys.exit(1)", "def check_colormap(cmap):\n names = set(['BrBG', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral',\n 'Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu',\n 'PuBuGn', 'PuRd', 'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd',\n 'Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3', 'Lightning'])\n if cmap not in names:\n raise Exception(\"Invalid cmap '%s', must be one of %s\" % (cmap, names))\n else:\n return cmap", "def guess_colour(dbo, s):\n s = str(s).lower()\n guess = db.query_int(dbo, \"SELECT ID FROM basecolour WHERE LOWER(BaseColour) LIKE '%\" + db.escape(s) + \"%'\")\n if guess != 0: return guess\n return configuration.default_colour(dbo)", "def is_black(self):\n return \"black\" == self.color", "def getColorFlag(color):\n if color == 0: # MONO\n return 0\n elif color == 1: # BAYER\n return -1\n elif color == 2: # AS IS RBG\n return 1", "def is_win(self, color):\n win = self.n\n # check y-strips\n for y in range(self.n):\n count = 0\n for x in range(self.n):\n if self[x][y] == color:\n count += 1\n if count == win:\n return True\n # check x-strips\n for x in range(self.n):\n count = 0\n for y in range(self.n):\n if self[x][y] == color:\n count += 1\n if count == win:\n return True\n # check two diagonal strips\n count = 0\n for d in range(self.n):\n if self[d][d] == color:\n count += 1\n if count == win:\n return True\n count = 0\n for d in range(self.n):\n if self[d][self.n - d - 1] == color:\n count += 1\n if count == win:\n return True\n\n return False", "def dark_mode(grid: bool = False) -> sns.set_theme:\n if grid:\n return sns.set_theme(style=\"darkgrid\")\n return sns.set_theme(style=\"dark\")", "def has_theme(cards, theme):\n for card in cards:\n if card.CARDTYPE == theme:\n return True\n\n return False", "def update_known_styles_state(app: sphinx.application.Sphinx) -> None:\n global _KNOWN_STYLES_IN_USE\n\n _KNOWN_STYLES_IN_USE = {\n \"light\": _get_light_style(app),\n \"dark\": _get_dark_style(app),\n }", "def lookup_bg_color(self, bg_color: str) -> int:\n # Background.\n if bg_color in BG_ANSI_COLORS:\n return BG_ANSI_COLORS[bg_color]\n else:\n return self._color_indexes(bg_color)[1]", "def contains_color(self, target: str = \"shiny gold\") -> bool:\n for bag in self.bags:\n if bag.name == target or bag.contains_color(target):\n return True\n\n return False", "def _iscolorstring(self, color):\n try:\n rgb = self.cv.winfo_rgb(color)\n ok = True\n except TK.TclError:\n ok = False\n return ok", "def get_current_mode() -> ThemeMode:\n try:\n # test:\n assert False\n # todo: Check platform and add more platforms.\n with winreg.OpenKey(\n winreg.HKEY_CURRENT_USER,\n (r'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Themes'\n r'\\Personalize'),\n access=winreg.KEY_READ) as hkey:\n return (ThemeMode.light\n if winreg.QueryValueEx(hkey, 'AppsUseLightTheme')[0]\n else ThemeMode.dark)\n except Exception:\n return get_current_app_mode()", "def contrast_from_bg(cls, col=\"#000000\", dark_default=\"000000\", light_default=\"FFFFFF\", hashed=\"#\"):\n trigger = float(0.45) #Values greater than this result in black text\n if not col:\n return \"#000000\" #Default to black\n if col in (\"Transparent\",\"transparent\"):\n return \"#000000\" #Default to black\n if not hashed:\n hashed = \"\"\n elif hashed is True:\n hashed = \"#\"\n try:\n col_out = cls.colour_to_rgb_tuple(col)\n r,g,b = col_out\n div = 255.0 #Produces a value between 0-1 as a float\n lum = 
float(0.2126*pow(r/div, 2.2)) + float(0.7152*pow(g/div, 2.2)) + float(0.0722*pow(b/div, 2.2))\n except (TypeError, ValueError):\n return dark_default\n #logging.info (\"Luminosity: %s\" % lum)\n #Decision gate:\n if lum >= trigger: #Light background, need dark text\n return \"%s%s\" % (hashed, dark_default)\n else: #Dark background, need light text\n return \"%s%s\" % (hashed, light_default)", "def get_pygments_style_colors(\n style: Style, *, fallbacks: Dict[str, str]\n) -> Dict[str, str]:\n background = style.background_color\n text_colors = style.style_for_token(Text)\n foreground = text_colors[\"color\"]\n\n if not background:\n background = fallbacks[\"background\"]\n\n if not foreground:\n foreground = fallbacks[\"foreground\"]\n else:\n foreground = f\"#{foreground}\"\n\n return {\"background\": background, \"foreground\": foreground}", "def getSquareColor(file: int, rank: int) -> str:\r\n if (rank % 2 == file % 2):\r\n return 'light'\r\n else:\r\n return 'dark'", "def colorOK(colorStr):\n tkWdg = _getTkWdg()\n\n try:\n tkWdg.winfo_rgb(colorStr)\n except tkinter.TclError:\n return False\n return True", "def has_winner(self):\n if self.color_check_mate(ChessGame.BLACK):\n return ChessGame.WHITE\n elif self.color_check_mate(ChessGame.WHITE):\n return ChessGame.BLACK\n else:\n return None", "def sub_brightbg(self, ansimatch):\n return self.ansi_xterm256_bright_bg_map_dict.get(ansimatch.group(), \"\")", "def EyeColorTest(str):\n\n\tvalidcolors = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n\treturn str in validcolors" ]
[ "0.66091275", "0.63446206", "0.6265952", "0.561835", "0.5487273", "0.5460873", "0.54280055", "0.5422753", "0.5419636", "0.53896165", "0.5383962", "0.5319423", "0.5317914", "0.5317887", "0.53108865", "0.52508605", "0.5241777", "0.5228757", "0.5217556", "0.52019304", "0.51880467", "0.51641494", "0.51630795", "0.51499957", "0.51474243", "0.5131287", "0.5127544", "0.5093552", "0.5091321", "0.5081813" ]
0.69731295
0
Preempt all contained states.
def request_preempt(self): # Set preempt flag smach.State.request_preempt(self) # Notify concurrence that it should preempt running states and terminate with self._done_cond: self._done_cond.notify_all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preempt(self):\n rospy.logwarn(\"Preempting scan...\")\n self.preempted = True", "def skip_all_animations(self):\n for child in self.children:\n child.skip_all_animations()\n \n # remove unskippable animations from queue\n unskippables = [anim for anim in self.queued_animations if not anim.skippable]\n self.queued_animations = list(filter(lambda anim: anim.skippable, self.queued_animations))\n while len(self.queued_animations) > 0:\n self.update(100)\n self.queued_animations = unskippables", "def update_states(self, dt):\n\n for state in self.active_states:\n state.update(dt)\n\n current_state = self.current_state\n\n # handle case where the top state has been dismissed\n if current_state is None:\n self.done = True\n\n if current_state in self._state_resume_set:\n current_state.resume()\n self._state_resume_set.remove(current_state)", "def enable_irq(state:int):", "def update_step(self) -> NoReturn:\n to_remove = []\n for s in self._states_queue:\n current_lifetime = s.lifetime\n\n if current_lifetime == INFINITE_LIFETIME:\n continue\n\n new_lifetime = current_lifetime - 1\n\n if new_lifetime < 0:\n log.debug(f\"State exceeded lifetime: {s}\")\n to_remove.append(s)\n else:\n # Update to new lifetime\n s.lifetime = new_lifetime\n\n for s in to_remove:\n self._states_queue.remove(s)\n\n if isinstance(self._states_queue, SyncableList):\n self._states_queue.sync()", "def _run_state(self):\n main_log.debug(\"Running state \" + self.state)\n\n if not self.get_state_info(\"condition\"):\n self._run_next_state()\n return\n\n try:\n self._pre()\n except StateSwitchException as e:\n self.state = e.next_state\n self._run_state()\n return\n\n if self.get_state_info(\"wake_players\"):\n self._waiting_for_players = True\n self._wake_players()\n else:\n self._players_are_done()", "def _reinit(self):\n # If there are ready states still then it was a paused execution\n assert not self._ready_states\n assert not self._busy_states\n\n with self.locked_context(\"wasm.saved_states\", list) as saved_states:\n while saved_states:\n state_id = saved_states.pop()\n self._revive_state(state_id)", "def rollout_algorithm(self):\n states, actions = [], []\n done = False\n state = self.env._reset()\n while not done:\n supervisor_label = self.supervisor.eval_policy(self.env)\n action_packed = [supervisor_label, 1]\n next_state, _, done, _ = self.env._step(action_packed)\n\n states.append(state)\n actions.append(supervisor_label)\n state = next_state\n\n return states, actions", "def revert_state(self):\n if self.previous_states > 0: # checks for empty\n self.update_status(self.previous_states.pop())", "def reset_states(self):\n K.batch_set_value([(v, 0) for v in self.variables])", "def lock_gate(self):\n self.fsm_gate.clear()", "def startMovementAll(self):\n self.startMovementX()\n self.startMovementY()\n self.startMovementZ()", "def _changeTemporarily(self, enable_states=None,\n disable_states=None, other=None):\n if enable_states == None:\n enable_states = []\n if disable_states == None:\n disable_states = []\n self.__storedTemporarily.append(self.enabled)\n other_ = other\n if isinstance(other, ClassWithCollections):\n other = other.states\n\n if not other is None:\n # lets take states which are enabled in other but not in\n # self\n add_enable_states = list(Set(other.enabled).difference(\n Set(enable_states)).intersection(self.names))\n if len(add_enable_states)>0:\n if __debug__:\n debug(\"ST\",\n \"Adding states %s from %s to be enabled temporarily\" %\n (add_enable_states, other_) +\n \" since they are 
not enabled in %s\" %\n (self))\n enable_states += add_enable_states\n\n # Lets go one by one enabling only disabled once... but could be as\n # simple as\n self.enable(enable_states)\n self.disable(disable_states)", "def reset_states(self):\n self.mean_makespan_baseline.assign(0)\n self.mean_makespan_train.assign(0)\n self.step.assign(0)", "def shush(self):\n cancel_all()", "def pre_rebalance(self):\n logger.info('Sleeping for {} seconds before taking actions'\n .format(self.rebalance_settings.start_after))\n time.sleep(self.rebalance_settings.start_after)", "def start(self):\n try:\n self._unsafe_run()\n except Exception:\n _logger.exception(\"Failure while managing state machine.\")\n for shutdown_flag in self._shutdown_flags:\n shutdown_flag.set_value()", "def enable_irq(state: bool = True, /) -> None:", "def expunge_all(self) -> None:\n\n all_states = self.identity_map.all_states() + list(self._new)\n self.identity_map._kill()\n self.identity_map = identity.WeakInstanceDict()\n self._new = {}\n self._deleted = {}\n\n statelib.InstanceState._detach_states(all_states, self)", "def reset():\n for cpu_id in POSSIBLE_CPUS:\n set_cpu(cpu_id, True)", "def _reset_traversal_state(self):\n for n in self.nodes.values():\n n.reset_traversal_state()", "def _pre_submit(self):\n if self._uuid is not None:\n return self._state\n for rdisk in self.resources:\n rdisk.flush()", "def wait_for_everyone():\n PartialState().wait_for_everyone()", "def sync(self) -> None: #\n self.__target.load_state_dict(self.__policy.state_dict())", "def updateState(self):\n self.state = self.microgridPolicy.computeState();", "def interrupt(self):\n if self.stack_reevaluate:\n # wir waren gerade dabei vorbedingungen zu prüfen\n # wir höhren natürlich damit auf...\n self.stack_reevaluate = False\n # damit merkt update() das es aufhöhren muss\n self.stack = [self._init_module(self.start_module,\n self.start_module_data)]", "def _reset_for_new_walk(self):\n # Starting State\n self.state = State('start', 0, 1, 0, 0, self.state_space_parameters.input_size, 0, 0, False)\n\n # Architecture String\n self.state_list = [self.state.copy()]", "def updateState(self):\n\n if ('cutting' in self.step_ops) and (self.cut_state.user_cutting):\n self.step_ops['cutting'] = True\n \n if ('cooking' in self.step_ops) and (self.cut_state.user_cooking):\n self.step_ops['cooking'] = True\n\n # TODO: add the rest of the operations\n\n advance = True\n\n # Check if ALL operations are complete\n for op in self.step_ops:\n if self.step_ops[op] == False:\n advance = False\n break\n\n if advance:\n self.nextStep()", "async def resume_behaviors(self) -> None:", "def swarm(self) -> None:\n self.state[:, :, Boids.Attr.ACC] *= 0\n self.update_acc_by_rules()\n self._update_vel()\n self._update_loc()" ]
[ "0.554885", "0.5414512", "0.5326902", "0.529442", "0.5274952", "0.5222493", "0.5195873", "0.51523536", "0.51299024", "0.50996476", "0.5081874", "0.5050064", "0.50393015", "0.50298834", "0.5028418", "0.50032896", "0.49937183", "0.49687582", "0.49533314", "0.49426508", "0.4942377", "0.49346018", "0.492713", "0.4923774", "0.49061704", "0.48875713", "0.48845991", "0.48712775", "0.48655674", "0.48385385" ]
0.7223714
0
We are going to generate addrs and labels for 10 unique characters
def generateUniqueAddrs(saveImagePath,numUnique,trainType,addrs_labels): print("Saving only {} unique characters for {}".format(numUnique,trainType)) train_addrs = addrs_labels[0] train_labels = addrs_labels[1] test_addrs = addrs_labels[2] test_labels = addrs_labels[3] generic_addrs = addrs_labels[4] generic_labels = addrs_labels[5] startNum = 171 #skip the alphanumeric characters print('Type of data is: {}'.format(trainType)) if trainType == 'train': labels = train_labels addrs = train_addrs elif trainType == 'test': labels = test_labels addrs = test_addrs elif trainType == 'generic': labels = generic_labels addrs = generic_addrs else: raise ValueError("Error, not running train, test, or generic labels, exiting") exit() unique_labels = list(set(labels))[startNum:numUnique+startNum] #skip non-Chinese chars unique_addrs = [] for i in unique_labels: tempString = '\\{}_copy'.format(i) #generate the string containing unique_label unique_addrs.append([i for i in addrs if tempString in i]) #find the addresses #unique_addrs is currently a list of lists, turn it into a flat list... unique_addrs = [item for sublist in unique_addrs for item in sublist] unique_labels = convertToTFRecord.convLabels(saveImagePath,trainType,unique_addrs) print("Number of samples for " + trainType + ": {}".format(len(unique_addrs))) return unique_addrs, unique_labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_id() -> str:\n return \"\".join(sample(\"abcdefghjkmopqrstuvqxyz\", 16))", "def generate_uuids():\n uuid_start = str(uuid())\n while uuid_start.startswith(\"zzzzzzzz\"):\n uuid_start = str(uuid())\n uuid_end = list(deepcopy(uuid_start))\n \n char_pool = list(string.digits) + \\\n list(string.ascii_uppercase) + \\\n list(string.ascii_lowercase) \n # print(f\"char_pool: {char_pool}\")\n substitute_char = ''\n i = 0\n while i < 8:\n char_from_start_uuid = uuid_start[i]\n if char_from_start_uuid == \"z\":\n i += 1\n continue\n else:\n next_index_in_pool = char_pool.index(char_from_start_uuid) + 1\n substitute_char = char_pool[next_index_in_pool]\n break\n uuid_end[i] = substitute_char\n uuid_end = ''.join(uuid_end)\n print(f\"generated uuids: {uuid_start}, {uuid_end}\")\n return uuid_start, str(uuid_end)", "def _generate_new_address(self) -> str:\n while True:\n address = \"0x\" + \"\".join([str(hex(randint(0, 16)))[-1] for _ in range(20)])\n if address not in self.accounts.keys():\n return address", "def generate_anki_guid() -> str:\n\n def base62(num: int, extra: str = \"\") -> str:\n s = string\n table = s.ascii_letters + s.digits + extra\n buf = \"\"\n while num:\n num, i = divmod(num, len(table))\n buf = table[i] + buf\n return buf\n\n _base91_extra_chars = \"!#$%&()*+,-./:;<=>?@[]^_`{|}~\"\n\n def base91(num: int) -> str:\n # all printable characters minus quotes, backslash and separators\n return base62(num, _base91_extra_chars)\n\n return base91(random.randint(0, 2 ** 64 - 1))", "def locID_generator(self, str_size, chars=string.ascii_uppercase + string.digits):\n \n return ''.join([random.choice(chars) for _ in range(str_size)])", "def _unique_id():\n id = \"\"\n for i in xrange(0,8):\n id += choice(ascii_letters)\n return id", "def unique_label(orig_label: str) -> str:\n return orig_label[0] + \"l\" + uuid4().hex\n # TODO: check for meteors.", "def hash_gen(n):\n domain = \"abcdefghijklmnopqrstuvwxyz\"\n temp = \"\"\n for i in range(0, n):\n temp += domain[random.randrange(0, 26)]\n return temp", "def create_uid():\n return random_string(5, string.hexdigits.lower())\n # return (\"%x\" % (int(time.time()) * 0x10 % 0x1000000000)\n # + random_string(7, string.hexdigits.lower()))", "def genLowCaseID(size):\n\tid = \"\"\n\tfor i in range(size):\n\t\tid = id + selectRandomFromList(loCaseChars)\n\treturn id", "def generate_mac():\n rand_str = generate_name(choices=\"0123456789abcdef\", length=12)\n return \":\".join(re.findall(\"..\", rand_str))", "def generate_mac():\n rand_str = generate_name(choices=\"0123456789abcdef\", length=12)\n return \":\".join(re.findall(\"..\", rand_str))", "def generate_unique_name():\n return 'titanic-' + str(get_mac())", "def random_name_maker():\n new_out = ''\n for i in range(10):\n random_letter_or_number = random.randint(1, 2)\n if random_letter_or_number is 1:\n new_out += random_letter(letters)\n if random_letter_or_number is 2:\n new_out += str(random.randint(0, 9))\n if new_out not in names_generated: # it's unique\n names_generated.append(new_out)\n return new_out", "def _random_id(n):\n ''.join(choice(alphanums) for i in range(n))", "def _create_id(length=40):\n\n numbers = map(str, range(10))\n letters = string.ascii_lowercase\n options = [*letters[:letters.index('f') + 1], *numbers]\n\n return ''.join(random.choice(options) for _ in range(length))", "def make_address(pubkeys, n):\n return (str(len(pubkeys)) + str(n) +\n base58_encode(det_hash({str(n): pubkeys}))[0:29])", "def unique_str():\n return hex(random.randint(0, 256 * 256 * 
256 * 256 - 1))[2:]", "def id_generator(size=15, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for x in range(size))", "def generate_unique_name(base):\n random_length = 10\n random_string = ''.join(random.choices(string.ascii_lowercase,\n k=random_length))\n return \"%s-%s\" % (base, random_string)", "def fill(l):\n s = ''\n for _ in range(0, l):\n s += \"ACGT\"[randint(0,3)]\n return s", "def gen_code():\n return ''.join([random.choice(string.ascii_uppercase + string.digits) for _ in range(10)])", "def id_generator(size=6, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for x in range(size))", "def genIp():\n ip = \".\".join(str(random.randint(0, 255)) for _ in range(4))\n return ip", "def randid(length=12):\n\timport random\n\treturn ''.join(random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789') for x in range(length))", "def generate_id():\n return str(uuid.uuid4())[:5].replace('e','a')", "def idGenerator(size=16, chars=string.digits + string.ascii_letters + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))", "def id_generator(size=7, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))", "def uniqid(prefix = None, more_entropy = False):\n out = ''\n num = (23 if more_entropy else 13)\n if prefix != None:\n random.seed(prefix)\n for i in range(0, num):\n out += random.choice(\\\n 'abcdefghijklmnopqrstuvwxyz' + \\\n 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' + \\\n '0123456789')\n return out", "def __generate_random_string():\n return uuid4().hex[:6].upper()" ]
[ "0.640046", "0.6369727", "0.63013107", "0.6263165", "0.62521905", "0.6217594", "0.62151855", "0.6057115", "0.6023755", "0.6016253", "0.59843117", "0.59843117", "0.5947065", "0.5907196", "0.5886632", "0.5849489", "0.58435", "0.58254504", "0.5823662", "0.58192736", "0.5805219", "0.5804603", "0.5789245", "0.57475954", "0.5746451", "0.5740534", "0.5733578", "0.57305104", "0.5722157", "0.57215226" ]
0.7429078
0
Check if there is a modal on the page and close it
def check_modal(client): modal_close_btn_xpath = "/html/body/div[9]/div[3]/div/button[1]" try: modal_close_btn = wait(client, 20).until( EC.visibility_of_element_located((By.XPATH, modal_close_btn_xpath)) ).click() except TimeoutException: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wait_until_modal_is_closed(self):\n self.selenium.wait_until_page_does_not_contain_element(\n lex_locators[\"modal\"][\"is_open\"], timeout=15\n )", "def close_modal(self):\n locator = lex_locators[\"modal\"][\"close\"]\n self._jsclick(locator)", "def isModal(self) -> bool:\n ...", "def isModal(self) -> bool:\n ...", "def isModal(self) -> bool:\n ...", "def isModal(self) -> bool:\n ...", "def is_modal(self) -> bool:\n return False", "def handle_alert_unload() -> bool:\n current_alerts = _S(\".modal\")\n for alert_modal in current_alerts:\n alert_modal = _S(alert_modal)\n data = alert_modal.data(\"bs.modal\")\n if data is None:\n continue\n elif not data.isShown:\n continue\n elif data.options and data.options.backdrop != \"static\":\n # bootstrap alerts have a backdrop of static when not dismissible\n alert_modal.modal(\"hide\")\n else:\n from . import _navigation\n\n _navigation.stopUnload()\n return True\n return False", "def click_close_modal_content_button(self):\n self._basket.click_close_modal_content_button()", "def wait_until_modal_is_open(self):\n self.selenium.wait_until_page_contains_element(\n lex_locators[\"modal\"][\"is_open\"],\n timeout=15,\n error=\"Expected to see a modal window, but didn't\",\n )", "def is_shown(self):\n return self.page.q(css=self.MODAL_SELECTOR).present", "def _close(self, event):\n self.EndModal(wx.ID_OK)", "def web_view_close(self):\n self.webWindow.close()\n return", "def close_sign_up_prompt(self):\n try:\n self.driver.find_element_by_class_name('modal_closeIcon').click()\n except NoSuchElementException:\n logger.info(\"No Element Found to Close\")", "def try_dismiss_popup(self):\n try:\n self._driver.switch_to.alert.accept\n logger.warning(\"Javascript alert found, dismissing.\")\n return True\n except NoAlertPresentException:\n # There is no alert box.\n try:\n popup_keywords = {\"Modal\", \"Popup\", \"Overlay\"}\n # See if there is some sort of close button we can click.\n popup_xpath = [f\"\"\"contains(., \"{keyword}\") or contains(., \"{keyword.lower()}\")\"\"\" for keyword in popup_keywords]\n popup_xpath = \"\"\"//*[@*[\"\"\" + \" or \".join(popup_xpath) + \"\"\"]]\"\"\"\n # for keyword in popup_keywords:\n # modal_xpath += f\"\"\"//*[@*[contains(., \"{keyword}\") or contains(., \"{keyword.lower()}\")\"\"\" + \\\n # \"\"\" or contains(., \"popup\") or contains(., \"Popup\")\"\"\" + \\\n # \"\"\" or contains(., \"overlay\") or contains(., \"Overlay\")]]\"\"\"\n # The close button can either be a button or something with role=button.\n close_button_xpaths = {\n \"\"\"//*[@role=\"button\"][@demod_reachable=\"true\"][@*[contains(., \"close\") or contains(., \"Close\")]]\"\"\",\n \"\"\"//button[@demod_reachable=\"true\"][@*[contains(., \"close\") or contains(., \"Close\")]]\"\"\"\n }\n close_button_xpaths = {popup_xpath + close_button_xpath for close_button_xpath in close_button_xpaths}\n close_button_xpath = \"|\".join(close_button_xpaths)\n close_button = self._driver.find_element_by_xpath(close_button_xpath)\n logger.warning(\"Popup found, dismissing.\")\n close_button.click()\n return True\n except NoSuchElementException:\n return False", "def exit(self):\n if self.window:\n self.window.close()", "def close_notification(self, level, check=True):\n self.app.page_base.modal.wait_for_absence()\n with self.app.page_base.notification(level) as popup:\n popup.button_close.click()\n if check:\n popup.wait_for_absence()", "def onBtnCloseClicked(self):\n self.close()", "def close_pop_up_windows(self):\n self.button_click(self.DECLINE_BUTTON)\n 
self.button_click(self.CLOSE_POPUP_BUTTON)", "def __window_close(self):\n pass", "def verifyTeamOpenClose(self):\n element = self.getElement(locator=self._userProfile_membersTable)\n value = self.getAttribute(attribute=\"class\", element=element)\n if \"show\" in value:\n self.log.info(\"Verify Team details window result: \" + str(True))\n return True\n else:\n self.log.info(\"Verify Team details window result: \" + str(False))\n return False", "def on_close(self, evt):\n wx.Dialog.Show(self, False)\n evt.Skip()", "def close_UI(self):", "def if_quit(self):\n answer = helper.quit_popup()\n if answer:\n self.parent.destroy()", "def closeEvent(self, event) -> None:\n global dialog\n dialog = None", "def closing_plugin(self, cancelable=False):\n return True", "def closing_plugin(self, cancelable=False):\n return True", "def close_1(self):\n self.pop_up_del.destroy()", "def dismiss_extra(self):\n return True", "def _close_dialog(*args):\n global _dialog\n if _dialog is not None:\n _dialog.destroy()\n _dialog = None" ]
[ "0.7223902", "0.6937706", "0.6832686", "0.6832686", "0.6832686", "0.6832686", "0.6668967", "0.6477973", "0.6467144", "0.64522135", "0.6235597", "0.6161681", "0.609987", "0.6045032", "0.60414016", "0.6033631", "0.5973035", "0.5968023", "0.5936488", "0.5881599", "0.5880967", "0.5868204", "0.5835525", "0.58259994", "0.5801232", "0.57501936", "0.57501936", "0.56975466", "0.5673252", "0.5666284" ]
0.6974228
1
Takes in a list of column headers and the Data object and returns a list of the mean values for each column. Use the builtin numpy functions to execute this calculation.
def mean(headers, data): column_matrix=data.get_data(headers) mean_values=column_matrix.mean(0) return mean_values
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mean_list(data):\n return sum(data) / len(data)", "def calculate_mean(data_dir):\n data = ([each for each in os.listdir(data_dir)\n if each.endswith('.h5')])\n all_data = []\n for num_data in data:\n processed_data = os.path.join(data_dir, num_data)\n file = h5py.File(processed_data, 'r') \n data = file.get('Processed_data') \n all_data.append(data)\n all_data = np.array(all_data)\n all_data = np.mean(all_data, axis=0)\n return all_data", "def mean_calc(data, col):\n\tm = sum([row[col] for row in data]) / len(data)\n\treturn m", "def column_means(self):\n return list(self._scala.columnMeans())", "def get_mean(self):\r\n for i in range(1,len(self.data[0])):\r\n self.prom.append(np.mean(self.data[:,i]))", "def col_means(\n x: DataFrame,\n na_rm: bool = False,\n # dims: int = 1,\n # weights = None,\n # freq = None,\n # n = None\n) -> Iterable[NumericType]:\n return x.agg(mean, na_rm=na_rm)", "def _arrays_mean(array_list):\n dims = array_list[0].shape[2]\n out = np.zeros(array_list[0].shape)\n var_out = out.copy()\n\n# i = 1\n for i in range(dims):\n temp = [j[:, :, i] for j in array_list]\n\n # calculate mean\n means_out = np.zeros(temp[0].shape)\n for k in temp:\n means_out += k # sum\n\n out[:, :, i] = means_out / len(array_list) # mean\n\n return(out)", "def Mean(data):\n return data.mean()", "def column_mean(column_values):\n\n try:\n mean = sum(column_values)/len(column_values)\n except ZeroDivisionError:\n print(\"Column is empty, cannot perform calculation\",\n file=sys.stderr)\n sys.exit(1)\n return mean", "def to_np_arr_and_then_mean(list_of_lists):\n # print(list_of_lists)\n np_arr = np.array(list_of_lists)\n return np_arr.mean(axis=0)", "def average_data(directory, columns=['CA', 'CV']):\n k = 0\n \n l = corrLib.readdata(directory)\n for num, i in l.iterrows():\n data = pd.read_csv(i.Dir)\n # check if given label exists in data\n for label in columns:\n if label not in data:\n raise IndexError('Column \\'{0}\\' does not exist in given data'.format(label))\n if k == 0:\n temp = data[columns]\n else:\n temp += data[columns]\n k += 1 \n \n # finally, append all other columns (in data but not columns) to averaged\n other_cols = []\n for label in data.columns:\n if label not in columns:\n other_cols.append(label) \n \n averaged = pd.concat([temp / k, data[other_cols]], axis=1) \n \n return averaged", "def compute_column_means_with_incomplete_data(df):\n X = np.array(df)\n return np.nanmean(X, axis = 0)", "def compute_column_means_with_incomplete_data(df):\n X = np.array(df)\n return np.nanmean(X, axis = 0)", "def means_of_variables(dataset, name_of_variable):\r\n list_of_means = []\r\n for num in range(len(dataset)):\r\n sum_of_variables = 0\r\n for row in dataset[num].items():\r\n if name_of_variable in row[0]:\r\n if row[1].strip() != \"\":\r\n sum_of_variables += int(row[1])\r\n if sum_of_variables != 0:\r\n list_of_means.append(float(sum_of_variables) / number_of_variables(dataset, name_of_variable)) \r\n return list_of_means", "def compute_averaged_values(filenames, cols, skip=0, stop=-1, column_major=False, separator='[\\t ]'):\n\n # Read columns\n all_cols = read_file_lines(filenames, cols, skip, stop, column_major, separator)\n\n # Initialise results array\n res = [np.zeros(len(i)) for i in all_cols[0]]\n\n # For each set of columns\n for file_data in all_cols:\n # For each column of the file\n for num, col in enumerate(file_data):\n # Add the value divided by the amount of files considered (to get the average)\n res[num] += col/len(all_cols)\n\n return res", "def 
row_means(\n x: DataFrame,\n na_rm: bool = False,\n # dims: int = 1,\n # weights = None,\n # freq = None,\n # n = None\n) -> Iterable[NumericType]:\n return x.agg(mean, axis=1, na_rm=na_rm)", "def mean(data):\n n = len(data)\n return sum(data)/float(n)", "def calc_mean(data: list) -> float:\n if len(data) == 0:\n return 0.0\n acc = 0.0\n for n in data:\n acc += n\n return acc / len(data)", "def to_np_arr_and_then_mean(list_of_lists):\n np_arr = np.array(list_of_lists)\n return np_arr.mean(axis=0)", "def rowMean(mtx):\n try:\n for i in range(0, len(mtx)):\n assert len(mtx[i]) == len(mtx[i-1]) # check whether each list has the same length.\n \n res = list()\n for j in range(0, len(mtx[0])): \n tmp = 0\n for i in range(0, len(mtx)): \n tmp = tmp + mtx[i][j]\n res.append(tmp/len(mtx))\n return(res)\n \n except AssertionError as detail:\n return ('Length of lists is irregular or input format is wrong.')\n except TypeError as detail:\n return ('Undefined operand type')", "def average(data):\n return np.average(data)", "def meanOf(classObj):\r\n return np.mean(classObj.dataSet, axis=0)", "def get_mean_difference(self, data):\n # Create a temporary blank list.\n temp = []\n\n # Get the number of columns in the DataFrame.\n col = data.shape[1]\n\n # Iterate the number of columns and only select the column having\n # the data for means. Since there is only two groups, the subtraction\n # will be hardcoded. There are two possible scenarios where the first\n # mean is larger than the second mean or vise versa. When the difference\n # is acquired, add it to the temporary list.\n for x in range(col):\n if x % 2 == 0:\n if data.loc[0][x] >= data.loc[1][x]:\n diff = data.loc[0][x] - data.loc[1][x]\n temp.append(diff)\n elif data.loc[0][x] < data.loc[1][x]: \n diff = data.loc[1][x] - data.loc[0][x]\n temp.append(diff)\n\n # Convert the list to a Series.\n means = pd.Series(temp)\n\n return means", "def out_mean_value(infile,column_num):\n\n \n column_list = read_file(infile,column_num)\n \n np_array = array(column_list)\n mean_value = mean(np_array)\n\n return mean_value", "def get_sp_mean(self):\n best_values = open(os.path.join(self.path, 'mean.txt'),'r').readlines()\n \n headers = best_values[0].rstrip('\\n').rstrip('\\t').split('\\t')\n values = best_values[1].rstrip('\\n').rstrip('\\t').split('\\t')\n# print values\n \n output = []\n \n for i in range(len(headers)):\n output.append((headers[i], values[i]))\n\n\n return output", "def get_mean(self):\n mean = np.array(np.zeros((4,8)))\n for i,c in enumerate(self.cellLines):\n for j,l in enumerate(self.ligands):\n mean[i][j] = self.aucs[c][l]['mean']\n return mean", "def get_mean(self):\n average = self.df[self.col_name].mean()\n return average", "def calculate_mean_dark(data_dir):\n\n data = ([each for each in os.listdir(data_dir)\n if each.endswith('.h5')])\n \n all_data = []\n for num_data in data:\n #print(num_data)\n processed_data = os.path.join(data_dir, num_data)\n file = h5py.File(processed_data, 'r') \n data = file.get('Processed_data')\n all_data.append(data)\n #print\n\n all_data = np.array(all_data)\n all_data = np.mean(all_data, axis=0)\n return all_data", "def get_mean(numlist):\n return np.mean(numlist)", "def mean(data_matrix):\n return np.asmatrix(np.mean(data_matrix, axis=0))" ]
[ "0.688118", "0.6788073", "0.67306453", "0.6660681", "0.65742046", "0.6569169", "0.6488681", "0.64012986", "0.6394433", "0.6307643", "0.6307385", "0.6297519", "0.6297519", "0.6287866", "0.6264603", "0.6255944", "0.62483", "0.6204587", "0.6190885", "0.61622494", "0.6139135", "0.6095428", "0.6092405", "0.6090372", "0.6079464", "0.60746264", "0.6072023", "0.6061065", "0.60570896", "0.6045627" ]
0.81816053
0
stdev: Takes in a list of column headers and the Data object and returns a list of the standard deviations for each specified column. Use the builtin numpy functions to execute this calculation.
def stdev(headers, data): column_matrix=data.get_data(headers) mean_values=column_matrix.std(0) std_values=mean_values.tolist() return std_values
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_std_dev(self, data):\n mean = 0\n data_arr = []\n for i in data:\n data_arr.append(i[1])\n return statistics.stdev(data_arr)", "def column_stdev(column_values, mean):\n\n try:\n stdev = math.sqrt(\n sum([(mean-x)**2 for x in column_values]) / len(column_values))\n except ZeroDivisionError:\n print(\"Column is empty, cannot perform calculation\",\n file=sys.stderr)\n sys.exit(1)\n\n return stdev", "def get_stddev(self):\r\n for i in range(1,len(self.data[0])):\r\n self.stddev.append(np.std(self.data[:,i]))", "def stdev(items):\n return Series.std(Series(items))", "def stdev(values):\n mean = avg(values)\n diffs = [(value - mean) ** 2 for value in values]\n return avg(diffs) ** 0.5", "def stdDev(data):\r\n sum = 0\r\n ave = average(data)\r\n for i in data:\r\n sum += (i-ave)**2\r\n return math.sqrt(sum/len(data))", "def stdev(data, xbar=None):\n return math.sqrt(variance(data, xbar))", "def get_stdev(cls, data: tuple or list, is_population=False) -> float:\n cls._data_validation(data)\n from math import sqrt\n return sqrt(cls.get_var(data, is_population))", "def get_std_dev(data, n = -1):\n mean = get_mean(data, n =n)\n\n deviations = []\n\n for i in range(0,n):\n deviations.append( (data[i] - mean)**2 )\n\n std_dev = sqrt( sum(deviations)/n )\n\n return std_dev", "def test_stdev_from_mean(self):\r\n x = [2.1, 4.2, 5.9, 8.4, 9.6]\r\n result = stdev_from_mean(x)\r\n self.assertFloatEqual(\r\n result,\r\n [-1.292463399014413,\r\n -0.60358696806764478,\r\n -0.045925095396451399,\r\n 0.77416589382589174,\r\n 1.1678095686526162])", "def calc_stdev(a, b, c, d, e):\n mean_of_num = (a + b + c + d + e) / 5\n return (((a - mean_of_num)**2 + (b - mean_of_num)**2 + (c - mean_of_num)**2\n + (d - mean_of_num)**2 + (e - mean_of_num)**2) / 5) ** 0.5", "def standard_deviation(data):\n\n return np.sqrt(variance(data))", "def standard_deviation(data):\n\n return np.sqrt(variance(data))", "def MeanAndStandardDeviation(data):\n n = len(data)\n if n == 0:\n return 0.0, 0.0\n mean = float(sum(data)) / n\n variance = sum([(element - mean)**2 for element in data]) / n\n return mean, math.sqrt(variance)", "def col_sds(\n x: DataFrame,\n na_rm: bool = False,\n # dims: int = 1,\n # weights = None,\n # freq = None,\n # n = None\n) -> Iterable[NumericType]:\n from ..stats import sd\n return x.agg(sd, na_rm=na_rm)", "def zstddev(list) -> float:\n\n var = zvariance.zvariance(list)\n std_dev = math.sqrt(var)\n return std_dev", "def stdev_from_mean(x):\r\n x = array(x)\r\n return (x - mean(x)) / std(x)", "def std_deviation(array):\n if not array or len(array) == 1:\n return 0\n\n average = AGGREGATES['mean_arithmetic'](array)\n variance = map(lambda x: (x-average)**2,array)\n stdev = AGGREGATES['mean_arithmetic'](variance)\n return math.sqrt(stdev)", "def stddev(r):\n avg = average(r)\n sdsq = sum([(i - avg) ** 2 for i in r])\n return (sdsq / (len(r) - 1 or 1)) ** 0.5", "def _std(self, data):\n var = stats.var(data)\n if var>0.0:\n sd = math.sqrt(var)\n else:\n sd = 0.0\n return sd", "def stddev(std_numbers):\n mean = sum(std_numbers) / float(len(std_numbers))\n sum_std = 0.0\n\n for x in std_numbers:\n sum_std += (mean - x) * (mean - x)\n\n variance = sum_std / float(len(std_numbers))\n stddev = math.sqrt(variance)\n\n return stddev", "def stddevSeries(requestContext, *seriesLists):\n (seriesList,start,end,step) = normalize(seriesLists)\n name = \"stddevSeries(%s)\" % formatPathExpressions(seriesList)\n values = ( safeStdDev(row) for row in izip(*seriesList) )\n series = TimeSeries(name,start,end,step,values)\n 
series.pathExpression = name\n return [series]", "def calc_standard_deviation(data: list) -> float:\n mean = calc_mean(data)\n acc = 0.0\n for n in data:\n acc += (n - mean) ** 2\n acc /= len(data) - 1\n return math.sqrt(acc)", "def standard_deviation(list):\n num_items = len(list)\n mean = sum(list) / num_items\n differences = [x - mean for x in list]\n sq_differences = [d ** 2 for d in differences]\n ssd = sum(sq_differences)\n\n\n variance = ssd / num_items\n\n sd = sqrt(variance)\n\n return sd", "def StandardDeviation(numlist):\n\tv = Variance(numlist)\n\t#print v\n\treturn math.sqrt(v)", "def standard_deviation(lst):\n\tnum_items = len(lst)\n\tif num_items == 0:\n\t\treturn -1\n\tmean = sum(lst) / num_items\n\tdifferences = [x - mean for x in lst]\n\tsq_differences = [d ** 2 for d in differences]\n\tssd = sum(sq_differences)\n\treturn ssd", "def stdev(requestContext, seriesList, points, windowTolerance=0.1):\n\n # For this we take the standard deviation in terms of the moving average\n # and the moving average of series squares.\n for (seriesIndex,series) in enumerate(seriesList):\n stddevSeries = TimeSeries(\"stddev(%s,%d)\" % (series.name, int(points)), series.start, series.end, series.step, [])\n stddevSeries.pathExpression = \"stddev(%s,%d)\" % (series.name, int(points))\n\n validPoints = 0\n currentSum = 0\n currentSumOfSquares = 0\n for (index, newValue) in enumerate(series):\n # Mark whether we've reached our window size - dont drop points out otherwise\n if index < points:\n bootstrapping = True\n droppedValue = None\n else:\n bootstrapping = False\n droppedValue = series[index - points]\n\n # Track non-None points in window\n if not bootstrapping and droppedValue is not None:\n validPoints -= 1\n if newValue is not None:\n validPoints += 1\n\n # Remove the value that just dropped out of the window\n if not bootstrapping and droppedValue is not None:\n currentSum -= droppedValue\n currentSumOfSquares -= droppedValue**2\n\n # Add in the value that just popped in the window\n if newValue is not None:\n currentSum += newValue\n currentSumOfSquares += newValue**2\n\n if validPoints > 0 and \\\n float(validPoints)/points >= windowTolerance:\n\n try:\n deviation = math.sqrt(validPoints * currentSumOfSquares - currentSum**2)/validPoints\n except ValueError:\n deviation = None\n stddevSeries.append(deviation)\n else:\n stddevSeries.append(None)\n\n seriesList[seriesIndex] = stddevSeries\n\n return seriesList", "def _get_stddevs(index):\r\n if stddev_type is 'Total':\r\n Sigma = np.sqrt(sigma[index] ** 2. + tau[index] ** 2.)\r\n elif stddev_type is 'Intra':\r\n Sigma = sigma[index]\r\n elif stddev_type is 'Inter':\r\n Sigma = tau[index]\r\n else:\r\n print('Select a valid type of standard deviation')\r\n return Sigma", "def _get_stddevs(index):\r\n if stddev_type is 'Total':\r\n Sigma = np.sqrt(sigma[index] ** 2. + tau[index] ** 2.)\r\n elif stddev_type is 'Intra':\r\n Sigma = sigma[index]\r\n elif stddev_type is 'Inter':\r\n Sigma = tau[index]\r\n else:\r\n print('Select a valid type of standard deviation')\r\n return Sigma", "def get_stdev(self):\n var_x = numpy.var(self._x)\n var_y = numpy.var(self._y)\n return numpy.sqrt(var_x + var_y)" ]
[ "0.762345", "0.76011705", "0.7323709", "0.71934015", "0.715191", "0.7065229", "0.70174754", "0.69014233", "0.6891203", "0.67621124", "0.67020977", "0.6664076", "0.6664076", "0.6625717", "0.65968806", "0.65932107", "0.6561748", "0.65059817", "0.6488277", "0.6485756", "0.64840895", "0.6481201", "0.6469008", "0.643065", "0.6416402", "0.64127165", "0.6399206", "0.63859963", "0.63859963", "0.63498753" ]
0.855148
0
Takes in a list of column headers and the Data object and returns a matrix with each column normalized so its minimum value is mapped to zero and its maximum value is mapped to 1.
def normalize_columns_separately(headers, data): column_matrix=data.get_data(headers) column_max=column_matrix.max(1) column_min=column_matrix.min(1) range=column_max-column_min nomalized=(column_matrix-column_min)/range return nomalized
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize_columns_together(headers, data):\n\tcolumn_matrix=data.get_data(headers)\n\tmax=column_matrix.max()\n\tprint \"The maximum:\t \", max\n\tmin=column_matrix.min()\n\tprint \"The minimum:\t \", min\n\trange=max-min\n\tprint \"range: \", range\n\tcolumn_matrix=column_matrix-min\n\tnormalized=column_matrix/range\n\treturn normalized", "def rescaleData(data,column_names):\n for column_name in column_names:\n min_value = np.min(data[column_name])\n max_value = np.max(data[column_name])\n data[column_name] = (data[column_name] - min_value) / (max_value - min_value)", "def normalize(data):\n # normalize data and return\n # https://stackoverflow.com/questions/29661574/normalize-numpy-array-columns-in-python\n return (data - data.min(axis=0)) / data.ptp(axis=0)", "def normalize(dataset):\n minVals = dataset.min(axis=0)\n maxVals = dataset.max(axis=0)\n factors = maxVals-minVals\n num = dataset.shape[0]\n norm_data = (dataset - np.tile(minVals,(num,1)))/np.tile(factors,(num,1)) \n return norm_data", "def normalize(data):\n norm_matrix = np.int_(np.log10(data)**2)\n norm_matrix = map(lambda x: x if x < BOARD_SIZE else BOARD_SIZE, norm_matrix)\n norm_matrix = map(lambda x: x if x > 0 else 0, norm_matrix)\n return norm_matrix", "def norm(data, max_list, min_list):\n max_list, min_list = np.array(max_list), np.array(min_list)\n diff = max_list - min_list\n for i in np.arange(data.shape[1]):\n data[:, i] = (data[:, i]-min_list[i])/diff[i]\n\n data[data > 1] = 0.99\n data[data < 0] = 0.00\n return data", "def normalize_col_scale01(data,tol=1e-6,data_min=None,data_max=None,clip=False,clip_min=1e-3,clip_max=1e3):\n if clip:\n data[data<clip_min]=clip_min\n data[data>clip_max]=clip_max\n if data_max is None:\n data_max=np.max(data,axis=0)\n data_max.reshape((1,data_max.shape[0]))\n if data_min is None:\n data_min=np.min(data,axis=0)\n data_min.reshape((1,data_min.shape[0]))\n #tol=0#1e-8\n return (data-data_min)/(data_max-data_min+tol),data_min,data_max", "def transform(self, data):\n\n self.column_range = []\n for i in range(len(self.column_max_value)):\n scale = self.column_max_value[i] - self.column_min_value[i]\n if scale < 0:\n raise ValueError(\"scale value should large than 0\")\n elif np.abs(scale - 0) < 1e-6:\n scale = 1\n self.column_range.append(scale)\n\n f = functools.partial(MinMaxScale.__scale, max_value_list=self.column_max_value,\n min_value_list=self.column_min_value, scale_value_list=self.column_range,\n process_cols_list=self.scale_column_idx)\n\n transform_data = data.mapValues(f)\n\n return transform_data", "def min_max_normalization(input_data):\n\n # Insert debugging assertions\n assert type(input_data) is np.ndarray, \"The 'input_data' must be numpy array.\"\n\n # Get the minimum and maximun values of the input numpy array along the axis \n Max = np.max(input_data, axis = 0)\n Min = np.min(input_data, axis = 0)\n\n # Min-max normalization \n normalized_input_data = (input_data - Min) / (Max - Min + sys.float_info.min)\n\n # Return normalized input data\n return normalized_input_data", "def scale_data(X, meanzero=True, probability=False):\n\n\n \"\"\"CASES X1, X5, X12-X23: Scale large data values by indices. 
How these\n should be scaled is up for debate though the default is mean=0, std=1\"\"\"\n a = [0, 4, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]\n for i in a:\n if meanzero:\n # values with mean=0 and std=1:\n X[:, i] = X[:, i] - np.mean(X[:, i])\n X[:, i] = X[:, i] / np.std(X[:, i])\n\n elif probability:\n # values from 0 to 1:\n X[:, i] = X[:, i] - X[:, i].min()\n X[:, i] = X[:, i] / X[:, i].max()\n\n \"\"\"CASES X6-X11: Separate categorical and continuous data. Do this first\n to avoid changing the indices for the categories lower down.\"\"\"\n c = [5, 6, 7, 8, 9, 10]\n newmtxs = np.zeros(6, dtype = np.ndarray)\n i=0\n X = pd.DataFrame(X)\n for j in c:\n # 'manual' one-hot encoding:\n row1 = X[j]\n row1 = row1.apply(lambda x: 1 if x==-2. else 0)\n vec1 = row1.values\n row2 = X[j]\n row2 = row2.apply(lambda x: 1 if x==-1. else 0)\n vec2 = row2.values\n row3 = X[j]\n row3 = row3.apply(lambda x: 1 if x==0. else 0)\n vec3 = row3.values\n row4 = X[j]\n if meanzero:\n norm = np.mean([1, 2, 3, 4, 5, 6, 7, 8, 9]) # for normalization\n std = np.std([1, 2, 3, 4, 5, 6, 7, 8, 9])\n row4 = row4.apply(lambda x: (x-norm)/std if (x>=1 and x<=9) else 0)\n vec4 = row4.values\n elif probability:\n row4 = row4.apply(lambda x: (x-1)/9 if (x>=1 and x<=9) else 0)\n vec4 = row4.values\n\n A = np.column_stack((vec1, vec2))\n B = np.column_stack((vec3, vec4))\n # combine the new column matrices (N,2) to a matrix of size (N,4):\n newmtxs[i] = np.append(A,B, axis=1)\n i+=1\n\n # need to replace the arrays from X6-X11 with these matrices:\n Xs = np.split(X, [5,11], axis=1) # remove columns X6-X11\n E1 = Xs[0].values # left side dims (29601, 5)\n E2 = Xs[2].values # right side dims (29601, 12)\n\n \"\"\"These matrices are all the data columns except for X6-X11. We want to\n replace these columns with the new matrices in the newmtxs list:\"\"\"\n p1 = np.append(newmtxs[0], newmtxs[1], axis=1) # combine the matrices\n p2 = np.append(newmtxs[2], newmtxs[3], axis=1)\n pR = np.append(newmtxs[4], newmtxs[5], axis=1)\n pL = np.append(p1, p2, axis=1)\n p5 = np.append(pL, pR, axis=1) # combine Left and Right sides\n LS = np.append(E1, p5, axis=1) # combine with E1 and E2\n X = np.append(LS, E2, axis=1) # final scaled product\n\n \"\"\"CASES X2, X3, X4: One-hot encoding categories. These are purely\n categorical, so the one-hot encoding is easier.\"\"\"\n b = [1, 2, 3]\n b_elem = [1, 3, 2] # no. 
of (additional) features from one-hot\n extra = 0 # counts the extra indices needed after additions\n\n for j in range(3):\n i = b[j] + extra\n series = pd.Series(X[:, i])\n dummies = pd.get_dummies(series).values # one hot encoded\n # add array into place 'i' (sandwitch dummies between arrays)\n X = np.append(np.append(X[:, :i], \\\n dummies, axis=1), X[:, i + 1 :], axis=1)\n # adding columns changes the 'i' indices we need.\n extra += b_elem[j]\n\n return X", "def normalize_columns(mat):\n norm = np.sqrt((mat**2).sum(0))\n return mat / norm", "def normalize_matrix(matrix, min_val, max_val):\n return (max_val - min_val) * (matrix - np.min(matrix)) / (np.max(matrix) - np.min(matrix)) + min_val", "def normalize(data):\n row = np.size(data, 0) # number of data points\n col = np.size(data, 1) # dimensionality of data points\n for j in range(col):\n # find the average for each column\n col_sum = 0\n for i in range(row):\n col_sum = col_sum + data[i][j]\n col_sum = col_sum / row\n # subtract the average from each value in the column\n for i in range(row):\n data[i][j] = data[i][j] - col_sum\n return data", "def normalize(data):\n row = np.size(data, 0) # number of data points\n col = np.size(data, 1) # dimensionality of data points\n for j in range(col):\n # find the average for each column\n col_sum = 0\n for i in range(row):\n col_sum = col_sum + data[i][j]\n col_sum = col_sum / row\n # subtract the average from each value in the column\n for i in range(row):\n data[i][j] = data[i][j] - col_sum\n return data", "def normalize(data):\n data = numpy.asmatrix(data)\n std_devs = numpy.std(data, axis=1)\n std_devs[std_devs == 0] = 1 # prevent div by 0\n return (data - numpy.mean(data, axis=1)) / std_devs", "def normalise(x):\n x = np.copy(x)\n n_cols = x.shape[1]\n for col_index in range(n_cols):\n col = x[:, col_index]\n factor = np.max(col)\n x[:, col_index] = col / factor\n\n return x", "def normalize_data(data=None):\n # Data pre-processing\n n = data.shape[0]\n for i in range(n):\n xx = data[i,:,:]\n xx -= np.mean(xx) # Centering in 0\n xx /= np.linalg.norm(xx) # Normalizing to 1\n data[i] = xx # Affect value\n return data", "def matrix_min(data):\n if is_SparseDataFrame(data):\n data = [np.min(data[col]) for col in data.columns]\n elif is_sparse_dataframe(data):\n data = [sparse_series_min(data[col]) for col in data.columns]\n elif isinstance(data, pd.DataFrame):\n data = np.min(data)\n elif isinstance(data, sparse.lil_matrix):\n data = [np.min(d) for d in data.data] + [0]\n elif isinstance(data, sparse.dok_matrix):\n data = list(data.values()) + [0]\n elif isinstance(data, sparse.dia_matrix):\n data = [np.min(data.data), 0]\n return np.min(data)", "def normalize(data, name):\n cols = list(data.columns)\n vals = data.values\n for i in range(len(vals)):\n v = vals[i]\n l = np.sum(v[0:len(v)-1])\n if l != 0:\n t = v[0:len(v)-1]/l\n v = np.append(t, v[-1])\n vals[i] = v\n write_data(vals, cols, name)", "def normalise(raw_data, normalise_by_column=False):\n data = raw_data\n if normalise_by_column:\n #normaliza valores usando o maximo de cada coluna\n col_maxes = raw_data.max(axis=0)\n #divide cada valor pelo maximo correspondente de cada coluna\n data = raw_data / col_maxes[np.newaxis,:] \n else:\n #divide todos os valores pelo maximo do dataset (tudo na mesma escala)\n data = raw_data / raw_data.max()\n\n return data", "def fit(self, data):\n self.column_min_value, self.column_max_value = self._get_min_max_value(data)\n self.scale_column_idx = self._get_scale_column_idx(data)\n self.header = 
self._get_header(data)\n\n self.column_range = []\n for i in range(len(self.column_max_value)):\n scale = self.column_max_value[i] - self.column_min_value[i]\n if scale < 0:\n raise ValueError(\"scale value should large than 0\")\n elif np.abs(scale - 0) < 1e-6:\n scale = 1\n self.column_range.append(scale)\n\n f = functools.partial(MinMaxScale.__scale, max_value_list=self.column_max_value,\n min_value_list=self.column_min_value, scale_value_list=self.column_range,\n process_cols_list=self.scale_column_idx)\n fit_data = data.mapValues(f)\n\n return fit_data", "def normalize_data(X, range_d = None):\n n,d = X.shape\n\n if range_d is None:\n range_d = np.zeros([2,d])\n range_d[0,:] = np.min(X, axis = 0)\n range_d[1,:] = np.max(X, axis = 0)\n\n X = (X - range_d[0,:]) / (range_d[1,:] - range_d[0,:])\n\n return X", "def fromCols(cls, data):\n m = len(data[0])\n # check that list of data is valid\n if any([len(col) != m for col in data[1:]]):\n raise ValueError(\"inconsistent column lengths\")\n return Matrix.fromRows(data).transpose()", "def normalize_data(data_frame):\n min_max_scaler = preprocessing.MinMaxScaler()\n x_scaled = min_max_scaler.fit_transform(data_frame)\n return pd.DataFrame(x_scaled)", "def normalize_values(self, data):\n\n df = pd.DataFrame(data[1:], columns = data[0]).astype(str)\n\n df = df.replace(ami_md_constants.NAS)\n\n df = df.replace(ami_md_constants.REGEX_REPLACE_DICT, regex=True)\n df = df.replace(ami_md_constants.STRING_REPLACE_DICT)\n df['source.object.format_type'] = df['source.object.format'].map(ami_md_constants.FORMAT_TYPE)\n\n for key in ami_md_constants.MEASURE_UNIT_MAPS.keys():\n value_map = ami_md_constants.MEASURE_UNIT_MAPS[key]\n df = self.map_value(df,\n value_map['from_column'],\n value_map['to_column'],\n value_map['constant_value'],\n value_map['values_map_column'],\n value_map['values_map'])\n\n #force all the numerics back to numeric, and drop all empty columns\n df = df.apply(pd.to_numeric, errors='ignore').dropna(axis=1, how = \"all\")\n\n vals = df.values.tolist()\n cols = df.columns.tolist()\n vals.insert(0, cols)\n\n return vals", "def normalize_data(data, class_name):\n row_count = len(data.index)\n col_count = len(data.columns)\n normalized_data = []\n\n normalized_class_list = []\n class_list = data.iloc[(range(row_count)), 0].values\n for value in class_list:\n normalized_class_list.append(1 if value == class_name else 0)\n normalized_data.append(normalized_class_list)\n\n for index in range(1, col_count):\n feature_list = data.iloc[(range(row_count)), index].values\n normalized_data += normalize(feature_list)\n \n return normalized_data", "def normalizeColumns(W):\n for i in range(W.shape[1]):\n W[:, i] /= np.linalg.norm(W[:, i]) + 0.001\n\n return W", "def normalize(data, vmin=0, vmax=1):\n data = np.array(data, dtype=np.float64)\n return (vmin + (data - data.min()) * (vmax - vmin) / (data.max() - data.min())).tolist()", "def norm_data(data):\n return (data-np.min(data))/(np.max(data)-np.min(data))", "def normalize_multivariate_data(data, scaling_values=None):\n normed_data = np.zeros(data.shape, dtype=data.dtype)\n scale_cols = [\"mean\", \"std\"]\n if scaling_values is None:\n scaling_values = pd.DataFrame(np.zeros((data.shape[-1], len(scale_cols)), dtype=np.float32),\n columns=scale_cols)\n for i in range(data.shape[-1]):\n scaling_values.loc[i, [\"mean\", \"std\"]] = [data[:, :, :, i].mean(), data[:, :, :, i].std()]\n normed_data[:, :, :, i] = (data[:, :, :, i] - scaling_values.loc[i, \"mean\"]) / scaling_values.loc[i, \"std\"]\n 
return normed_data, scaling_values" ]
[ "0.7496443", "0.66081625", "0.65859145", "0.6499091", "0.6407396", "0.6192507", "0.6134639", "0.60927606", "0.6090253", "0.60883844", "0.6063456", "0.60361797", "0.59998703", "0.59998703", "0.59897834", "0.5978918", "0.5962571", "0.5955856", "0.5939429", "0.59259486", "0.5905432", "0.58085996", "0.5793961", "0.5744075", "0.5724691", "0.5702509", "0.5698353", "0.5689664", "0.5686403", "0.5678257" ]
0.708409
1
Takes in a list of column headers and the Data object and returns a matrix with each entry normalized so that the minimum value (of all the data in this set of columns) is mapped to zero and its maximum value is mapped to 1.
def normalize_columns_together(headers, data): column_matrix=data.get_data(headers) max=column_matrix.max() print "The maximum: ", max min=column_matrix.min() print "The minimum: ", min range=max-min print "range: ", range column_matrix=column_matrix-min normalized=column_matrix/range return normalized
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize_columns_separately(headers, data):\n\tcolumn_matrix=data.get_data(headers)\n\tcolumn_max=column_matrix.max(1)\n\tcolumn_min=column_matrix.min(1)\n\trange=column_max-column_min\n\tnomalized=(column_matrix-column_min)/range\n\treturn nomalized", "def normalize(data):\n # normalize data and return\n # https://stackoverflow.com/questions/29661574/normalize-numpy-array-columns-in-python\n return (data - data.min(axis=0)) / data.ptp(axis=0)", "def matrix_min(data):\n if is_SparseDataFrame(data):\n data = [np.min(data[col]) for col in data.columns]\n elif is_sparse_dataframe(data):\n data = [sparse_series_min(data[col]) for col in data.columns]\n elif isinstance(data, pd.DataFrame):\n data = np.min(data)\n elif isinstance(data, sparse.lil_matrix):\n data = [np.min(d) for d in data.data] + [0]\n elif isinstance(data, sparse.dok_matrix):\n data = list(data.values()) + [0]\n elif isinstance(data, sparse.dia_matrix):\n data = [np.min(data.data), 0]\n return np.min(data)", "def rescaleData(data,column_names):\n for column_name in column_names:\n min_value = np.min(data[column_name])\n max_value = np.max(data[column_name])\n data[column_name] = (data[column_name] - min_value) / (max_value - min_value)", "def normalize(data):\n norm_matrix = np.int_(np.log10(data)**2)\n norm_matrix = map(lambda x: x if x < BOARD_SIZE else BOARD_SIZE, norm_matrix)\n norm_matrix = map(lambda x: x if x > 0 else 0, norm_matrix)\n return norm_matrix", "def normalize(dataset):\n minVals = dataset.min(axis=0)\n maxVals = dataset.max(axis=0)\n factors = maxVals-minVals\n num = dataset.shape[0]\n norm_data = (dataset - np.tile(minVals,(num,1)))/np.tile(factors,(num,1)) \n return norm_data", "def scale_data(X, meanzero=True, probability=False):\n\n\n \"\"\"CASES X1, X5, X12-X23: Scale large data values by indices. How these\n should be scaled is up for debate though the default is mean=0, std=1\"\"\"\n a = [0, 4, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]\n for i in a:\n if meanzero:\n # values with mean=0 and std=1:\n X[:, i] = X[:, i] - np.mean(X[:, i])\n X[:, i] = X[:, i] / np.std(X[:, i])\n\n elif probability:\n # values from 0 to 1:\n X[:, i] = X[:, i] - X[:, i].min()\n X[:, i] = X[:, i] / X[:, i].max()\n\n \"\"\"CASES X6-X11: Separate categorical and continuous data. Do this first\n to avoid changing the indices for the categories lower down.\"\"\"\n c = [5, 6, 7, 8, 9, 10]\n newmtxs = np.zeros(6, dtype = np.ndarray)\n i=0\n X = pd.DataFrame(X)\n for j in c:\n # 'manual' one-hot encoding:\n row1 = X[j]\n row1 = row1.apply(lambda x: 1 if x==-2. else 0)\n vec1 = row1.values\n row2 = X[j]\n row2 = row2.apply(lambda x: 1 if x==-1. else 0)\n vec2 = row2.values\n row3 = X[j]\n row3 = row3.apply(lambda x: 1 if x==0. 
else 0)\n vec3 = row3.values\n row4 = X[j]\n if meanzero:\n norm = np.mean([1, 2, 3, 4, 5, 6, 7, 8, 9]) # for normalization\n std = np.std([1, 2, 3, 4, 5, 6, 7, 8, 9])\n row4 = row4.apply(lambda x: (x-norm)/std if (x>=1 and x<=9) else 0)\n vec4 = row4.values\n elif probability:\n row4 = row4.apply(lambda x: (x-1)/9 if (x>=1 and x<=9) else 0)\n vec4 = row4.values\n\n A = np.column_stack((vec1, vec2))\n B = np.column_stack((vec3, vec4))\n # combine the new column matrices (N,2) to a matrix of size (N,4):\n newmtxs[i] = np.append(A,B, axis=1)\n i+=1\n\n # need to replace the arrays from X6-X11 with these matrices:\n Xs = np.split(X, [5,11], axis=1) # remove columns X6-X11\n E1 = Xs[0].values # left side dims (29601, 5)\n E2 = Xs[2].values # right side dims (29601, 12)\n\n \"\"\"These matrices are all the data columns except for X6-X11. We want to\n replace these columns with the new matrices in the newmtxs list:\"\"\"\n p1 = np.append(newmtxs[0], newmtxs[1], axis=1) # combine the matrices\n p2 = np.append(newmtxs[2], newmtxs[3], axis=1)\n pR = np.append(newmtxs[4], newmtxs[5], axis=1)\n pL = np.append(p1, p2, axis=1)\n p5 = np.append(pL, pR, axis=1) # combine Left and Right sides\n LS = np.append(E1, p5, axis=1) # combine with E1 and E2\n X = np.append(LS, E2, axis=1) # final scaled product\n\n \"\"\"CASES X2, X3, X4: One-hot encoding categories. These are purely\n categorical, so the one-hot encoding is easier.\"\"\"\n b = [1, 2, 3]\n b_elem = [1, 3, 2] # no. of (additional) features from one-hot\n extra = 0 # counts the extra indices needed after additions\n\n for j in range(3):\n i = b[j] + extra\n series = pd.Series(X[:, i])\n dummies = pd.get_dummies(series).values # one hot encoded\n # add array into place 'i' (sandwitch dummies between arrays)\n X = np.append(np.append(X[:, :i], \\\n dummies, axis=1), X[:, i + 1 :], axis=1)\n # adding columns changes the 'i' indices we need.\n extra += b_elem[j]\n\n return X", "def normalize_columns(mat):\n norm = np.sqrt((mat**2).sum(0))\n return mat / norm", "def fit(self, data):\n self.column_min_value, self.column_max_value = self._get_min_max_value(data)\n self.scale_column_idx = self._get_scale_column_idx(data)\n self.header = self._get_header(data)\n\n self.column_range = []\n for i in range(len(self.column_max_value)):\n scale = self.column_max_value[i] - self.column_min_value[i]\n if scale < 0:\n raise ValueError(\"scale value should large than 0\")\n elif np.abs(scale - 0) < 1e-6:\n scale = 1\n self.column_range.append(scale)\n\n f = functools.partial(MinMaxScale.__scale, max_value_list=self.column_max_value,\n min_value_list=self.column_min_value, scale_value_list=self.column_range,\n process_cols_list=self.scale_column_idx)\n fit_data = data.mapValues(f)\n\n return fit_data", "def min_max_normalization(input_data):\n\n # Insert debugging assertions\n assert type(input_data) is np.ndarray, \"The 'input_data' must be numpy array.\"\n\n # Get the minimum and maximun values of the input numpy array along the axis \n Max = np.max(input_data, axis = 0)\n Min = np.min(input_data, axis = 0)\n\n # Min-max normalization \n normalized_input_data = (input_data - Min) / (Max - Min + sys.float_info.min)\n\n # Return normalized input data\n return normalized_input_data", "def fromCols(cls, data):\n m = len(data[0])\n # check that list of data is valid\n if any([len(col) != m for col in data[1:]]):\n raise ValueError(\"inconsistent column lengths\")\n return Matrix.fromRows(data).transpose()", "def norm(data, max_list, min_list):\n max_list, min_list = 
np.array(max_list), np.array(min_list)\n diff = max_list - min_list\n for i in np.arange(data.shape[1]):\n data[:, i] = (data[:, i]-min_list[i])/diff[i]\n\n data[data > 1] = 0.99\n data[data < 0] = 0.00\n return data", "def normalize_col_scale01(data,tol=1e-6,data_min=None,data_max=None,clip=False,clip_min=1e-3,clip_max=1e3):\n if clip:\n data[data<clip_min]=clip_min\n data[data>clip_max]=clip_max\n if data_max is None:\n data_max=np.max(data,axis=0)\n data_max.reshape((1,data_max.shape[0]))\n if data_min is None:\n data_min=np.min(data,axis=0)\n data_min.reshape((1,data_min.shape[0]))\n #tol=0#1e-8\n return (data-data_min)/(data_max-data_min+tol),data_min,data_max", "def transform(self, data):\n\n self.column_range = []\n for i in range(len(self.column_max_value)):\n scale = self.column_max_value[i] - self.column_min_value[i]\n if scale < 0:\n raise ValueError(\"scale value should large than 0\")\n elif np.abs(scale - 0) < 1e-6:\n scale = 1\n self.column_range.append(scale)\n\n f = functools.partial(MinMaxScale.__scale, max_value_list=self.column_max_value,\n min_value_list=self.column_min_value, scale_value_list=self.column_range,\n process_cols_list=self.scale_column_idx)\n\n transform_data = data.mapValues(f)\n\n return transform_data", "def normalize_data(data=None):\n # Data pre-processing\n n = data.shape[0]\n for i in range(n):\n xx = data[i,:,:]\n xx -= np.mean(xx) # Centering in 0\n xx /= np.linalg.norm(xx) # Normalizing to 1\n data[i] = xx # Affect value\n return data", "def normalize(data):\n row = np.size(data, 0) # number of data points\n col = np.size(data, 1) # dimensionality of data points\n for j in range(col):\n # find the average for each column\n col_sum = 0\n for i in range(row):\n col_sum = col_sum + data[i][j]\n col_sum = col_sum / row\n # subtract the average from each value in the column\n for i in range(row):\n data[i][j] = data[i][j] - col_sum\n return data", "def normalize(data):\n row = np.size(data, 0) # number of data points\n col = np.size(data, 1) # dimensionality of data points\n for j in range(col):\n # find the average for each column\n col_sum = 0\n for i in range(row):\n col_sum = col_sum + data[i][j]\n col_sum = col_sum / row\n # subtract the average from each value in the column\n for i in range(row):\n data[i][j] = data[i][j] - col_sum\n return data", "def normalize(data):\n data = numpy.asmatrix(data)\n std_devs = numpy.std(data, axis=1)\n std_devs[std_devs == 0] = 1 # prevent div by 0\n return (data - numpy.mean(data, axis=1)) / std_devs", "def normalize_matrix(matrix, min_val, max_val):\n return (max_val - min_val) * (matrix - np.min(matrix)) / (np.max(matrix) - np.min(matrix)) + min_val", "def normalise(raw_data, normalise_by_column=False):\n data = raw_data\n if normalise_by_column:\n #normaliza valores usando o maximo de cada coluna\n col_maxes = raw_data.max(axis=0)\n #divide cada valor pelo maximo correspondente de cada coluna\n data = raw_data / col_maxes[np.newaxis,:] \n else:\n #divide todos os valores pelo maximo do dataset (tudo na mesma escala)\n data = raw_data / raw_data.max()\n\n return data", "def normalise(x):\n x = np.copy(x)\n n_cols = x.shape[1]\n for col_index in range(n_cols):\n col = x[:, col_index]\n factor = np.max(col)\n x[:, col_index] = col / factor\n\n return x", "def normalize(data, name):\n cols = list(data.columns)\n vals = data.values\n for i in range(len(vals)):\n v = vals[i]\n l = np.sum(v[0:len(v)-1])\n if l != 0:\n t = v[0:len(v)-1]/l\n v = np.append(t, v[-1])\n vals[i] = v\n write_data(vals, cols, name)", 
"def get_normalised_confusion_matrix(self, data, columns):\n # Makes the confusion matrix according to the labels and predictions columns of the data.\n labels = columns[0]\n predictions = columns[1]\n confusion_matrix = pd.crosstab(data[labels], data[predictions])\n confusion_matrix = confusion_matrix.div(confusion_matrix.sum(axis=1), axis=0)\n\n # Reorders the index and columns according to the dialogue act distribution.\n distribution_order = pd.read_csv('analyses/dialogue_act_distribution.csv', index_col=[0], header=None)\n confusion_matrix = confusion_matrix.reindex(distribution_order.index)\n\n if set(distribution_order.index) == set(confusion_matrix.columns):\n confusion_matrix = confusion_matrix[distribution_order.index]\n else:\n sorted_index = sort_list(list(confusion_matrix.columns), list(distribution_order.index))\n confusion_matrix = confusion_matrix[sorted_index]\n return confusion_matrix", "def normalize_data(data, class_name):\n row_count = len(data.index)\n col_count = len(data.columns)\n normalized_data = []\n\n normalized_class_list = []\n class_list = data.iloc[(range(row_count)), 0].values\n for value in class_list:\n normalized_class_list.append(1 if value == class_name else 0)\n normalized_data.append(normalized_class_list)\n\n for index in range(1, col_count):\n feature_list = data.iloc[(range(row_count)), index].values\n normalized_data += normalize(feature_list)\n \n return normalized_data", "def transform(self, data: Dict) -> Dict:\n\n for c in data.columns:\n if c in self.featBin:\n data[c] = data[c].astype(int)\n if data[c].max() > 1:\n data.loc[data[c] > 1, c] = 1\n elif data[c].min() < 0:\n data.loc[data[c] < 0] = 1\n else:\n pass\n elif c in self.featNum:\n data[c] = np.abs(data[c])\n\n else:\n pass\n\n return data", "def normalize_data(data_frame):\n min_max_scaler = preprocessing.MinMaxScaler()\n x_scaled = min_max_scaler.fit_transform(data_frame)\n return pd.DataFrame(x_scaled)", "def normalize_values(self, data):\n\n df = pd.DataFrame(data[1:], columns = data[0]).astype(str)\n\n df = df.replace(ami_md_constants.NAS)\n\n df = df.replace(ami_md_constants.REGEX_REPLACE_DICT, regex=True)\n df = df.replace(ami_md_constants.STRING_REPLACE_DICT)\n df['source.object.format_type'] = df['source.object.format'].map(ami_md_constants.FORMAT_TYPE)\n\n for key in ami_md_constants.MEASURE_UNIT_MAPS.keys():\n value_map = ami_md_constants.MEASURE_UNIT_MAPS[key]\n df = self.map_value(df,\n value_map['from_column'],\n value_map['to_column'],\n value_map['constant_value'],\n value_map['values_map_column'],\n value_map['values_map'])\n\n #force all the numerics back to numeric, and drop all empty columns\n df = df.apply(pd.to_numeric, errors='ignore').dropna(axis=1, how = \"all\")\n\n vals = df.values.tolist()\n cols = df.columns.tolist()\n vals.insert(0, cols)\n\n return vals", "def normalizeColumns(W):\n for i in range(W.shape[1]):\n W[:, i] /= np.linalg.norm(W[:, i]) + 0.001\n\n return W", "def _normalize(self, data):\n norm_data = []\n\n for row in data:\n norm_row = []\n\n for column in row:\n # custom format strings for specific objects\n if isinstance(column, float):\n format_str = '{{:.{}f}}'.format(2)\n item = format_str.format(column)\n\n elif isinstance(column, datetime):\n item = column.strftime('%Y-%m-%d %H:%M')\n\n else:\n item = str(column)\n\n norm_row.append(item)\n\n norm_data.append(norm_row)\n\n return norm_data", "def test_normalize_matrix(self):\n input_matrix = [\n [0, 1.0],\n [1.0, 1.0]\n ]\n\n expected = [\n [0, 1],\n [0.5, 0.5]\n ]\n\n result = 
self.summarizer.normalize_matrix(input_matrix)\n\n self.assertEqual(expected, result)" ]
[ "0.6924816", "0.6368028", "0.634722", "0.6282522", "0.62384415", "0.6170771", "0.5904337", "0.5867747", "0.58657235", "0.585182", "0.5851378", "0.58379954", "0.5837087", "0.5817979", "0.57913613", "0.5751155", "0.5751155", "0.5748093", "0.57406265", "0.5719488", "0.5708877", "0.5637406", "0.5602439", "0.55933064", "0.55917776", "0.55595315", "0.5508027", "0.5494237", "0.5491411", "0.54537666" ]
0.73747426
0
Return the numeric matrices with sorted columns
def sort(headers, data): # extension column_matrix=data.get_data(headers) # get raw matrix data for numeric values print "\n before sorting \n " print column_matrix column_matrix=column_matrix.tolist() column_array=np.asarray(column_matrix) column_array.sort(axis=0) print "\n \n done sorting here is your matrix \n" return column_array
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_for_sorting():\n return RaggedArray([[1, 0], [2, 0], [0, 0]])", "def _sort_rows(matrix, num_rows):\n tmatrix = array_ops.transpose(matrix, [1, 0])\n sorted_tmatrix = nn_ops.top_k(tmatrix, num_rows)[0]\n return array_ops.transpose(sorted_tmatrix, [1, 0])", "def Msort(index, arr1, arr2, matrix):\n #names\n temp_arr1 = np.empty(len(index), dtype='object') # use object instead of 'str' to have arbitrary length\n #zscre\n temp_arr2 = np.zeros(len(index))\n #LD mat\n temp_mat1 = np.ndarray(shape = (len(index),len(index)))\n temp_mat2 = np.ndarray(shape = (len(index),len(index)))\n\n for i in range(len(index)):\n temp_arr1[i]= arr1[index[i]]\n temp_arr2[i] = arr2[index[i]]\n #get row\n temp_mat1[i] = matrix[index[i]]\n #we want to sort the columns too, so we transpose the matrix, get columns as the rows, then transpose again \n temp_mat1 = temp_mat1.transpose()\n #get column as the rows, then transpose the matrix \n for i in range(len(index)):\n temp_mat2[i] = temp_mat1[index[i]]\n temp_mat2 = temp_mat2.transpose()\n\n return temp_arr1, temp_arr2, temp_mat2", "def sort_and_print_matrix(m: list, list_of_mat: list):\n m.sort()\n for element in m:\n list_of_mat.append(element)", "def sortKey( self, mode, matrix ):\n # TODO: figure out how to handle \n return False,[],None", "def sorting_generator(G,desired_numberofcolumns):\n dimension = len(G)\n\n if dimension == desired_numberofcolumns:\n return G , None\n indexlist = np.argsort(np.linalg.norm( G ,axis=0 ,ord = None))\n sortedG = (G)[:,indexlist]\n G_untouched = sortedG[: , - (desired_numberofcolumns - dimension ): ]\n G_reduced = sortedG[: , : -(desired_numberofcolumns - dimension ) ]\n \n return G_reduced , G_untouched", "def finalise_matrix(matrix):\n if get_density(matrix) < DENSITY_THRESHOLD:\n matrix = matrix.tocsc()\n matrix.sort_indices()\n else:\n matrix = matrix.toarray()\n if is_integer(matrix):\n matrix = convert_to_smallest_int_type(matrix)\n return matrix", "def data_for_sorting(allow_in_pandas):\n # Use an empty tuple for first element, then remove,\n # to disable np.array's shape inference.\n return PandasArray(\n np.array([(), (2,), (3,), (1,)])[1:]\n )", "def sort_by_rows(arr):\n return arr[np.lexsort(arr.T[::-1])]", "def parse_matrix(lines):\r\n col_headers = None\r\n result = []\r\n row_headers = []\r\n for line in lines:\r\n if line[0] == '#':\r\n continue\r\n if line[0] == '\\t': # is header\r\n col_headers = map(strip, line.split('\\t')[1:])\r\n else:\r\n entries = line.split('\\t')\r\n result.append(map(float, entries[1:]))\r\n row_headers.append(entries[0])\r\n return col_headers, row_headers, asarray(result)", "def columnspace(M):\r\n v = orth(M)\r\n if (v.size == 0):\r\n return [np.zeros((M.shape[0],), dtype = int)]\r\n else:\r\n return v", "def matrix_to_column_list(m):\n return np.ravel(m, order='F').tolist()", "def data_missing_for_sorting():\n return RaggedArray([[1, 0], [], [0, 0]])", "def get_eigen_values_and_vectors(matrix, num_values):\n (w, v) = eigen_decomp(matrix)\n eigen_values = []\n eigen_vectors = []\n ### YOUR CODE HERE\n max_indexs=np.argpartition(w, -num_values)\n max_indexs=max_indexs[-num_values:]\n ids=np.argsort(w[max_indexs])\n sort_index=max_indexs[ids]\n eigen_values=w[sort_index]\n eigen_vectors=v[:,sort_index]\n ### END YOUR CODE\n return eigen_values, eigen_vectors", "def standard_sorting(cls, zmat):\n if zmat is None:\n return None\n nats = len(zmat)\n ncoords = 3*nats - 6\n if nats < 4:\n return None\n else:\n r_coords = [0, 1, 3]\n a_coords = [2, 4]\n t_coords = [5]\n if nats > 
4:\n extra = np.arange(6, ncoords+1)\n r_coords += extra[::4].tolist()\n a_coords += extra[1::4].tolist()\n t_coords += extra[2::4].tolist()\n return np.argsort(np.concatenate([r_coords, a_coords, t_coords]))", "def _sort_ns(self):\n n = []\n for layer in self.structure:\n n.append(layer.get_index())\n n = np.asarray(n)\n return n", "def _input_as_gctmpca_rate_matrix(self,matrix,char_order):\n matrix_rows = []\n for c in char_order:\n matrix_rows.append('\\t'.join([str(matrix[c][col_c]) \\\n for col_c in char_order]))\n return matrix_rows", "def column_order(self):\n return ((1, 2), (1, 0), (1, 1))", "def generate_order(arr, descending=True):\n sorted_indices = torch.argsort(arr, 0, descending=descending)\n return sorted_indices.reshape((len(arr), ))", "def getProjectionMatrix(sorted_eigvecs):\n matrix_w = np.vstack(sorted_eigvecs).transpose()\n return matrix_w", "def sorted_index(self) -> np.ndarray:\n return np.argsort(self.result_array.sum(axis=1))[::-1]", "def HN_algorithm(self,hand_matrix):\n numbers_frequency=hand_matrix.sum(axis=1)\n \n sorted_numbers=[]\n\n for i in range(1,5):\n sorted_numbers.extend(sorted(numbers_frequency.loc[numbers_frequency==(5-i)].index,reverse=True))\n\n return sorted_numbers", "def permutation_matrix(order):\n matrix = np.zeros([order,order])\n matrix[-1,0] = 1\n matrix[0:-1,1::] = np.identity(order-1)\n return matrix", "def sort_forward(mat, axis=0):\n if axis == 0:\n mat = np.transpose(mat)\n (nrow, ncol) = mat.shape\n list_index = np.arange(0.0, ncol, 1.0)\n mat_index = np.tile(list_index, (nrow, 1))\n mat_comb = np.asarray(np.dstack((mat_index, mat)))\n mat_comb_sort = np.asarray(\n [row[row[:, 1].argsort()] for row in mat_comb])\n mat_sort = mat_comb_sort[:, :, 1]\n mat_index = mat_comb_sort[:, :, 0]\n if axis == 0:\n mat_sort = np.transpose(mat_sort)\n mat_index = np.transpose(mat_index)\n return mat_sort, mat_index", "def ToMatrix(lines):\r\n #print lines\r\n arr = np.zeros([4, 4])\r\n for j in xrange(4):\r\n arr[j, :] = np.array([int(num) for num in lines[j].split(\" \")])\r\n #print np.array([int(num) for num in lines[j].split(\" \")])\r\n return arr", "def sort(self, Ncol, order):\n self.emit(SIGNAL(\"layoutAboutToBeChanged()\"))\n self.arraydata = sorted(self.arraydata, key=operator.itemgetter(Ncol)) \n if order == Qt.DescendingOrder:\n self.arraydata.reverse()\n self.emit(SIGNAL(\"layoutChanged()\"))", "def sort(self, Ncol, order):\n self.emit(SIGNAL(\"layoutAboutToBeChanged()\"))\n self.arraydata = sorted(self.arraydata, key=operator.itemgetter(Ncol)) \n if order != Qt.DescendingOrder:\n self.arraydata.reverse()\n self.emit(SIGNAL(\"layoutChanged()\"))", "def create_g_matrix(order, arrays):\n size = len(order)\n g = np.zeros((size, size), dtype=np.int)\n for i, row in enumerate(g):\n for j, col in enumerate(order):\n g[i][j] = arrays[i][order[j]]\n return g", "def wc_matrix(matrix):\n return [{\"A\": position[\"T\"], \"T\": position[\"A\"], \"C\": position[\"G\"], \"G\": position[\"C\"]} for position in matrix[::-1]]", "def order_column_indices(self):\n return self._order_column_indices()" ]
[ "0.6472477", "0.6157688", "0.60989267", "0.6080551", "0.5776652", "0.5737888", "0.56588423", "0.56309026", "0.556288", "0.5534623", "0.551707", "0.5514763", "0.55020404", "0.5490696", "0.5472799", "0.54618865", "0.5438563", "0.5436786", "0.53938407", "0.53862166", "0.53718483", "0.53567475", "0.53539205", "0.535349", "0.53449285", "0.5334047", "0.5328534", "0.5310418", "0.5309621", "0.53075844" ]
0.71695834
0
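For reference, a self-contained sketch of the column-wise sort performed by the retrieved document above; a plain NumPy array stands in here for the data object that the original function reads its matrix from.

```python
import numpy as np

matrix = np.array([[3.0, 1.0],
                   [1.0, 2.0],
                   [2.0, 0.0]])

# axis=0 sorts every column independently, as the document above does.
sorted_columns = np.sort(matrix, axis=0)
print(sorted_columns)
# [[1. 0.]
#  [2. 1.]
#  [3. 2.]]
```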
register_attr(attr, editor, clazz = None) Registers EDITOR as the editor for attribute ATTR of class CLAZZ, or for any class if CLAZZ is None. EDITOR can be either a Tk widget subclass of editobj.editor.Editor, or None to hide the attribute. MRO is used in order to allow subclasses to use the editor registered for their mother.
def register_attr(attr, editor, clazz = None): for_attr = _attr_editors.get(attr) if for_attr: for_attr[clazz] = editor else: _attr_editors[attr] = { clazz : editor }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register_children_attr(attr, insert = \"insert\", del_ = \"__delitem__\", clazz = None):\n \n if clazz: _children_attrs[clazz] = (attr, insert, del_)\n else: _children_attrs[None].append((attr, insert, del_))", "def register_on_edit(func, clazz):\n \n _on_edit[clazz] = func", "def addEditor(self, editor: QWidget, type: str):\n if type == NodeType.Node:\n self.ui.layoutNode.addWidget(editor)\n else:\n self.ui.layoutSpec.addWidget(editor)\n editor.setVisible(False)\n self._editor_dict[type] = editor", "def attr(self, attr: _PyteAugmentedValidator):\n # Use the __override_opcode param and __override_list_restriction\n return _AttrLoader(self, attr)", "def regattr(self, attr):\n\n return super().regattr(attr=attr)", "def add_attribute(self, attr):\n self.attrs.add(attr)", "def add_attribute(self, attr):\n self.attrs.add_attribute(attr)", "def add_request_attribute(self, attr, name=None, decorator=None,\n reify=False):\n if not name:\n if hasattr(attr, '__name__'):\n name = attr.__name__\n elif isinstance(attr, property):\n name = attr.fget.__name__\n if not name:\n raise ValueError(\n 'attribute of type {} requires a name'.format(attr.__class__))\n if callable(attr):\n if decorator:\n attr = decorator(attr)\n if reify:\n attr = tangled.decorators.cached_property(attr)\n elif decorator or reify:\n raise ValueError(\"can't decorate a non-callable attribute\")\n self.register('dynamic_request_attr', attr, name)", "def node_editor(*args, show: bool = True, parent: str = \"\", before: str = \"\", callback: Callable = None, \n delink_callback: Callable = None, id:str=''):\n try:\n widget = internal_dpg.add_node_editor(*args, show=show, parent=parent, before=before, \n callback=callback, delink_callback=delink_callback, id=id)\n internal_dpg.push_container_stack(widget)\n yield widget\n finally:\n internal_dpg.pop_container_stack()", "def register_method(method, clazz, *args_editor):\n \n methods = _methods.get(clazz)\n if methods: methods.append((method, args_editor))\n else: _methods[clazz] = [(method, args_editor)]", "def set_attr_impl(context, builder, sig, args, attr):\n typ, valty = sig.args\n target, val = args\n\n if attr in typ.struct:\n # It's a struct member\n inst = context.make_helper(builder, typ, value=target)\n data_ptr = inst.data\n data = context.make_data_helper(builder, typ.get_data_type(),\n ref=data_ptr)\n\n # Get old value\n attr_type = typ.struct[attr]\n oldvalue = getattr(data, _mangle_attr(attr))\n\n # Store n\n setattr(data, _mangle_attr(attr), val)\n context.nrt.incref(builder, attr_type, val)\n\n # Delete old value\n context.nrt.decref(builder, attr_type, oldvalue)\n\n elif attr in typ.jit_props:\n # It's a jitted property\n setter = typ.jit_props[attr]['set']\n disp_type = types.Dispatcher(setter)\n sig = disp_type.get_call_type(context.typing_context,\n (typ, valty), {})\n call = context.get_function(disp_type, sig)\n call(builder, (target, val))\n _add_linking_libs(context, call)\n else:\n raise NotImplementedError(\n 'attribute {0!r} not implemented'.format(attr))", "def add_attribute(obj, attr, val):\n if not hasattr(obj, \"__dict__\"):\n raise TypeError(\"can't add new attribute\")\n setattr(obj, attr, val)", "def _handle_attr(self, attr, dev):\n attr_val = None\n list_flag = False\n\n if attr.name == \"os\":\n attr_val = self.OS_MAPPER[attr.val]\n elif attr.name == \"network\":\n attr_val = self._create_network(attr.val)\n list_flag = True\n elif attr.name == \"bluetooth\":\n attr_val = Bluetooth(version=attr.val.version)\n elif attr.name == \"cpu\":\n 
attr_val = CPU(cpu_family=attr.val.cpu_family,\n max_freq=float(attr.val.max_freq\n * self.FREQ_MULT[attr.val.unit]),\n fpu=attr.val.fpu)\n elif attr.name == \"memory\":\n attr_val = self._create_memory(attr.val)\n elif attr.name == \"type\":\n self._per_type = self.PER_MAPPER[attr.val]\n elif attr.name == \"pins\":\n list_flag = True\n attr_val = self._create_pins(attr.val)\n else:\n attr_val = attr.val\n\n # Set attribute\n if list_flag:\n getattr(dev, attr.name).extend(attr_val)\n elif attr_val:\n setattr(dev, attr.name, attr_val)", "def register(self, tag_class_or_alias=None, mode='standard'):\n # type: (Union[Type[Tag], str], bool) -> Callable\n # if mode == 'jekyll':\n # from .jekyll.tags import tag_manager as tmgr\n # return tmgr.register(tag_class_or_alias)\n if mode == 'python':\n from ..python.tags import tag_manager as tmgr\n return tmgr.register(tag_class_or_alias)\n\n def decorator(tag_class):\n \"\"\"The decorator for the tag class\"\"\"\n name = tag_class.__name__\n if name.startswith('Tag'):\n name = name[3:]\n # keep all-uppercase names, they are special tags\n # like LITERAL, COMMENT, OUTPUT\n if not name.isupper():\n name = name.lower()\n name = [name]\n\n if tag_class_or_alias and tag_class is not tag_class_or_alias:\n names = tag_class_or_alias\n if isinstance(names, str):\n names = (alias.strip() for alias in names.split(','))\n name = names\n\n for nam in name:\n self.__class__.tags[nam] = tag_class\n return tag_class\n\n if callable(tag_class_or_alias):\n return decorator(tag_class_or_alias)\n\n return decorator", "def register(self, cls, force=False):\n if not issubclass(cls, self.type):\n raise InvalidRegistryItemType(\n \"Invalid item type `{0}` for registry \"\n \"`{1}`\".format(cls, self.__class__)\n )\n\n uid = BasePluginWidgetRegistry.namify(cls.theme_uid, cls.plugin_uid)\n\n # If item has not been forced yet, add/replace its' value in the\n # registry.\n if force:\n\n if uid not in self._forced:\n self._registry[uid] = cls\n self._forced.append(uid)\n return True\n else:\n return False\n\n else:\n\n if uid in self._registry:\n return False\n else:\n self._registry[uid] = cls\n return True", "def attr(self, item: _PyteAugmentedValidator):\n self._attrs.append(item)\n return self", "def register_metadata(self, lbl: str, fld: str, val: str, inherited: bool = False) -> None:\n lookup = \".\".join([lbl, fld])\n self._metadata_registry[lookup] = val\n self._inheritance_registry[lookup] = inherited", "def register(widget):\n w = widget.class_traits()\n _registry.register(w['_model_module'].default_value,\n w['_model_module_version'].default_value,\n w['_model_name'].default_value,\n w['_view_module'].default_value,\n w['_view_module_version'].default_value,\n w['_view_name'].default_value,\n widget)\n return widget", "def add_attribute(self, attr: ResourceAttributeDescriptor) -> None:\n self._attributes[assert_not_none(attr.name)] = attr.bind(self)", "def decorate(self, node, cls):\n # Collect classvars to convert them to attrs.\n if self.args[cls][\"auto_attribs\"]:\n ordering = classgen.Ordering.FIRST_ANNOTATE\n else:\n ordering = classgen.Ordering.LAST_ASSIGN\n ordered_locals = classgen.get_class_locals(\n cls.name, allow_methods=False, ordering=ordering, vm=self.vm)\n own_attrs = []\n for name, local in ordered_locals.items():\n typ, orig = local.get_type(node, name), local.orig\n if is_attrib(orig):\n attrib = orig.data[0]\n if typ and attrib.has_type:\n # We cannot have both a type annotation and a type argument.\n 
self.vm.errorlog.invalid_annotation(self.vm.frames, typ)\n attr = Attribute(\n name=name,\n typ=self.vm.convert.unsolvable,\n init=attrib.init,\n kw_only=attrib.kw_only,\n default=attrib.default)\n elif not typ:\n # Replace the attrib in the class dict with its type.\n attr = Attribute(\n name=name,\n typ=attrib.typ,\n init=attrib.init,\n kw_only=attrib.kw_only,\n default=attrib.default)\n cls.members[name] = classgen.instantiate(node, name, attr.typ)\n else:\n # cls.members[name] has already been set via a typecomment\n attr = Attribute(\n name=name,\n typ=typ,\n init=attrib.init,\n kw_only=attrib.kw_only,\n default=attrib.default)\n self.vm.check_annotation_type_mismatch(\n node, attr.name, attr.typ, attr.default, local.stack,\n allow_none=True)\n own_attrs.append(attr)\n elif self.args[cls][\"auto_attribs\"]:\n if not match_classvar(typ):\n self.vm.check_annotation_type_mismatch(\n node, name, typ, orig, local.stack, allow_none=True)\n attr = Attribute(\n name=name, typ=typ, init=True, kw_only=False, default=orig)\n if not orig:\n cls.members[name] = classgen.instantiate(node, name, typ)\n own_attrs.append(attr)\n\n base_attrs = self.get_base_class_attrs(cls, own_attrs, _ATTRS_METADATA_KEY)\n attrs = base_attrs + own_attrs\n # Stash attributes in class metadata for subclasses.\n cls.metadata[_ATTRS_METADATA_KEY] = attrs\n\n # Add an __init__ method\n if self.args[cls][\"init\"]:\n init_method = self.make_init(node, cls, attrs)\n cls.members[\"__init__\"] = init_method", "def add_attribute(self, attr):\n self.add(attr)", "def test_register_existing_attr(self):\n pass", "def add_valid_attribute(self, attr, deletable=False):\n if self.__class__ is Row:\n raise TypeError(msg.inherited_rows)\n super(Row, self).__setattr__(\n \"__sawhitelist__\", set(self.__sawhitelist__ | set((attr,))))\n if deletable:\n super(Row, self).__setattr__(\n \"__delwhitelist__\", set(self.__delwhitelist__ | set((attr,))))", "def set_commentor(self, lang, *args, **kwargs):\n commentor_cls = kwargs.get('commentor_cls', Commentor)\n self.__registry[lang] = commentor_cls(*args)", "def register(self, cls):\n if not issubclass(cls, FormCallback):\n raise InvalidRegistryItemType(\n \"Invalid item type `{0}` for registry \"\n \"`{1}`\".format(cls, self.__class__)\n )\n\n # uid = self.uidfy(cls)\n # If item has not been forced yet, add/replace its' value in the\n # registry.\n\n if cls in self._registry[cls.stage]:\n return False\n else:\n self._registry[cls.stage].append(cls)\n return True", "def set(self, attr, val):\r\n self.__dict__[attr] = val", "def editor(self, editor):\n\n self._editor = editor", "def put_elem_attr(self, elem_blk_id, attr):\n ierr = exolib.py_expeat(self.exoid, elem_blk_id, attr.T)\n if ierr:\n raise ExodusIIWriterError(\"Error putting element attribute\")", "def __init__(self, attr=None):\r\n self.attr = attr", "def addAttr(*args, attributeType: Union[AnyStr, bool]=\"\", binaryTag: Union[AnyStr, bool]=\"\",\n cachedInternally: bool=True, category: Union[AnyStr, List[AnyStr], bool]=\"\",\n dataType: Union[AnyStr, List[AnyStr], bool]=\"\", defaultValue: Union[float,\n bool]=0.0, disconnectBehaviour: Union[int, bool]=0, enumName: Union[AnyStr,\n bool]=\"\", exists: bool=True, fromPlugin: bool=True, hasMaxValue: bool=True,\n hasMinValue: bool=True, hasSoftMaxValue: bool=True, hasSoftMinValue: bool=True,\n hidden: bool=True, indexMatters: bool=True, internalSet: bool=True, keyable:\n bool=True, longName: Union[AnyStr, bool]=\"\", maxValue: Union[float, bool]=0.0,\n minValue: Union[float, bool]=0.0, 
multi: bool=True, niceName: Union[AnyStr,\n bool]=\"\", numberOfChildren: Union[int, bool]=0, parent: Union[AnyStr, bool]=\"\",\n proxy: Union[AnyStr, bool]=\"\", readable: bool=True, shortName: Union[AnyStr,\n bool]=\"\", softMaxValue: Union[float, bool]=0.0, softMinValue: Union[float,\n bool]=0.0, storable: bool=True, usedAsColor: bool=True, usedAsFilename: bool=True,\n usedAsProxy: bool=True, writable: bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[None, Any]:\n pass" ]
[ "0.569334", "0.52287275", "0.4944205", "0.4870793", "0.477365", "0.46567985", "0.46526116", "0.46501258", "0.4630243", "0.4627526", "0.45808572", "0.45672143", "0.4548827", "0.45476916", "0.45291042", "0.4518718", "0.44888854", "0.4439509", "0.4431176", "0.4427259", "0.44245055", "0.44194838", "0.44136882", "0.44056478", "0.44002178", "0.43894187", "0.43802541", "0.43669865", "0.43600515", "0.43295985" ]
0.837206
0
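A minimal usage sketch of the register_attr call documented above. The Person class is purely illustrative, and the editobj.custom import path is an assumption; EntryEditor follows the editobj.editor naming used in the register_method docstring below.

```python
import editobj.editor as editor            # editor widgets (EntryEditor, FloatEditor, ...)
from editobj.custom import register_attr   # import path assumed

class Person:                              # illustrative class, not part of EditObj
    def __init__(self, name, age):
        self.name = name
        self.age = age

# Edit Person.name with a one-line text entry widget.
register_attr("name", editor.EntryEditor, Person)

# Hide the "age" attribute for every class (clazz omitted, so it defaults to None).
register_attr("age", None)
```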
register_children_attr(attr, insert = "insert", del_ = "__delitem__", clazz = None) Registers ATTR as an attribute that can act as the "content" or the "children" of an object of class CLAZZ (or any class if None). If ATTR is None, the object is used as its own list of children (automatically done for list / dict subclasses). INSERT and DEL_ are the names of the methods called for inserting and deleting items. INSERT can accept 2 arguments (as list.insert) or only one (as list.append), if you don't care the children's order. Default values for INSERT and DEL_ are OK for lists; for dicts, use INSERT = "__setitem__". EditObj will display these items in the tree view. Only one such attribute can be set for a given class (several are accepted for None). MRO is used in order to allow subclasses to use the children attribute registered for their mother. By default, "children" is considered for any class, and instances of classes that inherits from list or dict are their own children.
def register_children_attr(attr, insert = "insert", del_ = "__delitem__", clazz = None): if clazz: _children_attrs[clazz] = (attr, insert, del_) else: _children_attrs[None].append((attr, insert, del_))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_children(self, children: dict) -> None:\n for child in children:\n self.children[child.move] = child", "def register_attr(attr, editor, clazz = None):\n \n for_attr = _attr_editors.get(attr)\n if for_attr: for_attr[clazz] = editor\n else: _attr_editors[attr] = { clazz : editor }", "def add_children(self, *args):\n for child in args:\n if isinstance(child, AbstractNode):\n child.parent = self\n self._children.append(child)\n else:\n raise TypeError(\"child is not a device tree node object\")", "def register_available_children(children_codes, clazz):\n \n if isinstance(children_codes, list):\n try: _available_children[clazz].extend(children_codes)\n except: _available_children[clazz] = children_codes\n else:\n _available_children[clazz] = children_codes", "def add_child(self, child):\r\n self.children.append(child)", "def add_children(self, *args):\r\n self.children.extend(args)\r\n return self", "def test_add_node_with_children(self):\n root = netapp_api.NaElement('root')\n self.mock_object(netapp_api.NaElement,\n 'create_node_with_children',\n return_value=zapi_fakes.FAKE_INVOKE_DATA)\n mock_invoke = self.mock_object(root, 'add_child_elem')\n\n root.add_node_with_children('options')\n\n mock_invoke.assert_called_with(zapi_fakes.FAKE_INVOKE_DATA)", "def add_attr(node, attr, attr_data, verbose=False):\n\n parent = attr_data.get('parent')\n keyable = attr_data.get('keyable')\n non_keyable = attr_data.get('non_keyable')\n value = attr_data.get('value')\n attr_type = attr_data.get('type')\n\n # get parent and make sure it is a string\n if parent and type(parent) is list:\n parent = parent[0]\n\n # skip if the attr already exists\n if mc.objExists(node+'.'+attr):\n if verbose:\n mc.warning('# Attr {0}.{1} already exists! Skipping..'.format(node, attr))\n return\n\n # add message attrs\n elif attr_type == 'message':\n mc.addAttr(node, ln=attr, at='message')\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n # add compound attrs\n elif attr_type == 'compound':\n number_children = attr_data.get('number_children')\n\n try:\n if parent:\n mc.addAttr(node, ln=attr, at='compound', p=parent, k=keyable, number_children=number_children)\n else:\n mc.addAttr(node, ln=attr, at='compound', k=keyable, number_children=number_children)\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n # add string attrs\n elif attr_type == 'string' :\n try:\n if parent:\n mc.addAttr(node, ln=attr, dt='string',p=parent)\n else:\n mc.addAttr(node, ln=attr, dt='string')\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n # add enum attrs\n elif attr_type == 'enum':\n try:\n enum = attr_data.get('enum')\n default_value = attr_data.get('default_value')\n\n if parent:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, en=enum, p=parent)\n else:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, en=enum)\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n\n elif attr_type == 'bool':\n try:\n default_value = attr_data.get('default_value') or 0\n if parent:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, dv=default_value, p=parent)\n else:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, dv=default_value)\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n 
mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n elif attr_type in ['float2', 'float3', 'double2', 'double3', 'short3', 'long2', 'long3']:\n try:\n if parent:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, p=parent)\n else:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable)\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n else:\n try:\n min_value = attr_data.get('min')\n max_value = attr_data.get('max')\n default_value = attr_data.get('default_value') or 0\n\n if parent:\n if min_value and max_value:\n mc.addAttr(node, ln=attr, min=min_value, max=max_value, at=attr_type, k=keyable, dv=default_value, p=parent)\n elif min_value:\n mc.addAttr(node, ln=attr, min=min_value, at=attr_type, k=keyable, dv=default_value, p=parent)\n elif max_value:\n mc.addAttr(node, ln=attr, max=max_value, at=attr_type, k=keyable, dv=default_value, p=parent)\n else:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, dv=default_value, p=parent)\n else:\n if min_value is not None and max_value is not None:\n mc.addAttr(node, ln=attr, min=min_value, max=max_value, at=attr_type, k=keyable, dv=default_value)\n elif min_value:\n mc.addAttr(node, ln=attr, min=min_value, at=attr_type, k=keyable, dv=default_value)\n elif max_value:\n mc.addAttr(node, ln=attr, max=max_value, at=attr_type, k=keyable, dv=default_value)\n else:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, dv=default_value)\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))", "def add_children(self, *args):\r\n self._children.extend(args)\r\n return self", "def __init__(self,tag,attributes=None,children=None): \n self.tag = mapping[tag]\n if attributes is None:\n self.attributes = []\n else:\n self.attributes = attributes\n if children is None:\n self.children = []\n else:\n self.children = children", "def add_child(self, cls, id=None, collection=\"channels\", prefix=\"ch_\", attr_name=\"\", **kwargs):\n child = cls(self, id, **kwargs)\n collection_data = getattr(self, collection, {})\n if isinstance(collection_data, CommonBase.BaseChannelCreator):\n collection_data = {}\n # Create channel interface if prefix or name is present\n if (prefix or attr_name) and id is not None:\n if not collection_data:\n # Add a grouplist to the parent.\n setattr(self, collection, collection_data)\n collection_data[id] = child\n child._collection = collection\n if attr_name:\n setattr(self, attr_name, child)\n child._name = attr_name\n else:\n setattr(self, f\"{prefix}{id}\", child)\n child._name = f\"{prefix}{id}\"\n elif attr_name and id is None:\n # If attribute name is passed with no channel id\n # set the child to the attribute name.\n setattr(self, attr_name, child)\n child._name = attr_name\n else:\n if collection_data:\n raise ValueError(f\"An attribute '{collection}' already exists.\")\n setattr(self, collection, child)\n child._name = collection\n return child", "def _handle_attr(self, attr, dev):\n attr_val = None\n list_flag = False\n\n if attr.name == \"os\":\n attr_val = self.OS_MAPPER[attr.val]\n elif attr.name == \"network\":\n attr_val = self._create_network(attr.val)\n list_flag = True\n elif attr.name == \"bluetooth\":\n attr_val = Bluetooth(version=attr.val.version)\n elif attr.name == \"cpu\":\n attr_val = CPU(cpu_family=attr.val.cpu_family,\n max_freq=float(attr.val.max_freq\n * self.FREQ_MULT[attr.val.unit]),\n 
fpu=attr.val.fpu)\n elif attr.name == \"memory\":\n attr_val = self._create_memory(attr.val)\n elif attr.name == \"type\":\n self._per_type = self.PER_MAPPER[attr.val]\n elif attr.name == \"pins\":\n list_flag = True\n attr_val = self._create_pins(attr.val)\n else:\n attr_val = attr.val\n\n # Set attribute\n if list_flag:\n getattr(dev, attr.name).extend(attr_val)\n elif attr_val:\n setattr(dev, attr.name, attr_val)", "def get_children(obj):\n ret = obj.to_dict()\n if obj.children.all():\n ret.__setitem__('children',[get_children(j) for j in obj.children.all()])\n return ret", "def node_attribute(*args, show: bool = True, output: bool = False,\n static: bool = False, parent: str = \"\", before: str = \"\", shape: int = 54010, id:str=''\n , indent=-1):\n try:\n widget = internal_dpg.add_node_attribute(*args, show=show, parent=parent, before=before, \n output=output, static=static, shape=shape, id=id,\n indent=indent)\n internal_dpg.push_container_stack(widget)\n yield widget\n finally:\n internal_dpg.pop_container_stack()", "def children(self, children: List[str]):\n self._children = children", "def add(self, obj, x, y, angle, mirror):\n # we make a tuple to put in the list\n self.children.append( (obj, x, y, angle, mirror) )", "def child(*args, show: bool = True, parent: str = \"\", before: str = \"\", width: int = 0, pos=[],\n height: int = 0, border: bool = True, autosize_x: bool = False, autosize_y: bool = False,\n no_scrollbar: bool = False, horizontal_scrollbar: bool = False, menubar: bool = False, id:str='', \n indent=-1):\n try: \n widget = internal_dpg.add_child(*args, show=show, parent=parent, before=before, width=width,\n height=height, border=border, autosize_x=autosize_x, autosize_y=autosize_y,\n no_scrollbar=no_scrollbar, horizontal_scrollbar=horizontal_scrollbar,\n menubar=menubar, id=id, indent=indent, pos=pos)\n internal_dpg.push_container_stack(widget)\n yield widget\n finally:\n internal_dpg.pop_container_stack()", "def add_child(self, child):\r\n self.children.append(child)", "def __init__(self, attribute):\n self.attribute = attribute\n self.children = {}", "def __init__(self, label, *children):\n self.__label = label;\n self.__children = \\\n [ c if type(c) is Tree else Tree(c) \n for c in children]", "def fm_append_member(cls, parent, child):\n parent.fm_append(child, cls.CHILD)\n child.fm_append(parent, cls.PARENT)", "def add(self, attr):\n self.validate_type(attr)\n self.categories.add(attr.value)", "def addnode(self, uid, **attrs):\n\n raise NotImplementedError", "def addChildObject(self, child):\n \n currChild = self.getChild(child.getName())\n if currChild:\n index = self.getIndex(currChild)\n if index != -1:\n self._children[index] = child\n child.setParent(self)\n # Unset the existing child's parent\n currChild.setParent(None)\n del currChild\n \n self.__setChildDict(child)\n else:\n child.setParent(self) \n self._children.append(child)\n self.__setChildDict(child)", "def __init__(self, label, *children):\n self.__label = label;\n self.__children = \\\n [ c if type(c) is Tree else Tree(c) for c in children]", "def register_object(self, obj, isdelete = False, listonly = False, postupdate=False, **kwargs):\n #print \"REGISTER\", repr(obj), repr(getattr(obj, '_instance_key', None)), str(isdelete), str(listonly)\n \n # things can get really confusing if theres duplicate instances floating around,\n # so make sure everything is OK\n self.uow._validate_obj(obj)\n \n mapper = object_mapper(obj)\n self.mappers.add(mapper)\n task = self.get_task_by_mapper(mapper)\n\n if 
postupdate:\n mod = task.append_postupdate(obj)\n if mod: self._mark_modified()\n return\n \n # for a cyclical task, things need to be sorted out already,\n # so this object should have already been added to the appropriate sub-task\n # can put an assertion here to make sure....\n if task.circular:\n return\n \n mod = task.append(obj, listonly, isdelete=isdelete, **kwargs)\n if mod: self._mark_modified()", "def add_child(self, obj):\n obj.parent = self\n if obj not in self.children:\n self.children.add(obj)\n # TODO(andi): This assumes that the node is already child of a root\n # Database node which makes it impossible to create a sub-tree that\n # should be added to the real root later. For example:\n # db = Database()\n # node = Node()\n # db.add_child(node)\n # sub = Node()\n # sub.add_child(Node()) # <-- fails\n # node.add_child(db)\n self.db._oid_idx[obj.oid] = obj\n return obj", "def addChild(node):", "def register_on_children_visible(func, clazz):\n \n _on_children_visible[clazz] = func", "def parse_tag(self, root, fmt, insert_children=True):\n arguments = {}\n extra_args = {}\n children = []\n\n for k, val in root.attrib.iteritems():\n k = k.lower()\n # 'version' is currently the only supported XML attribute.\n if k == 'version' and root.tag == 'odML':\n continue\n\n # We currently do not support XML attributes.\n self.error(\"Attribute not supported, ignoring '%s=%s' \" % (k, val), root)\n\n for node in root:\n node.tag = node.tag.lower()\n self.is_valid_argument(node.tag, fmt, root, node)\n if node.tag in fmt.arguments_keys:\n # this is a heuristic, but works for now\n if node.tag in self.tags and node.tag in fmt.map_keys:\n sub_obj = self.parse_element(node)\n if sub_obj is not None:\n extra_args[fmt.map(node.tag)] = sub_obj\n children.append(sub_obj)\n else:\n tag = fmt.map(node.tag)\n if tag in arguments:\n self.warn(\"Element <%s> is given multiple times in \"\n \"<%s> tag\" % (node.tag, root.tag), node)\n\n # Special handling of values;\n curr_text = node.text.strip() if node.text else None\n if tag == \"values\" and curr_text:\n content = from_csv(node.text)\n arguments[tag] = content\n # Special handling of cardinality\n elif tag.endswith(\"_cardinality\") and curr_text:\n arguments[tag] = parse_cardinality(node.text)\n else:\n arguments[tag] = curr_text\n else:\n self.error(\"Invalid element <%s> in odML document section <%s> \"\n % (node.tag, root.tag), node)\n\n check_args = dict(list(arguments.items()) + list(extra_args.items()))\n self.check_mandatory_arguments(check_args, fmt, root.tag, root)\n\n # Instantiate the current odML object with the parsed attributes.\n obj = fmt.create()\n try:\n obj = fmt.create(**arguments)\n except Exception as exc:\n self.error(str(exc), root)\n\n if insert_children:\n for child in children:\n obj.append(child)\n\n return obj" ]
[ "0.5429349", "0.54085225", "0.53786886", "0.53273", "0.51638204", "0.5120558", "0.5015715", "0.50146097", "0.50063944", "0.49629942", "0.48599657", "0.48443073", "0.48174873", "0.47865376", "0.47392642", "0.47035292", "0.47018874", "0.46887925", "0.46817335", "0.4680879", "0.46541718", "0.46409082", "0.46351188", "0.46336925", "0.46154046", "0.46122825", "0.45911974", "0.4586146", "0.45736283", "0.45596337" ]
0.8719397
0
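A short usage sketch of register_children_attr as described above; Folder and Index are hypothetical container classes, and the editobj.custom import path is an assumption.

```python
from editobj.custom import register_children_attr   # import path assumed

class Folder:                     # hypothetical: keeps its children in a list
    def __init__(self):
        self.items = []

class Index:                      # hypothetical: keeps its children in a dict
    def __init__(self):
        self.entries = {}

# List-backed children: the default insert / __delitem__ methods apply.
register_children_attr("items", clazz=Folder)

# Dict-backed children: use __setitem__ for insertion, as the docstring advises.
register_children_attr("entries", insert="__setitem__", clazz=Index)
```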
register_method(method, clazz, args_editor) Registers METHOD as a method that must be displayed in EditObj for instance of CLAZZ. METHOD can be either a method name (a string), or a function (in this case, it is not a method, strictly speaking). ARGS_EDITOR are the editors used for entering the argument, e.g. use editobj.editor.FloatEditor for a float argument, or editobj.editor.EntryEditor for a Python eval'ed line of code. MRO is used in order to allow subclasses to use the methods registered for their mother. If ARGS_EDITOR is (None,) the method is hidden. Use this on a subclass to hide a method provided by a mother class.
def register_method(method, clazz, *args_editor): methods = _methods.get(clazz) if methods: methods.append((method, args_editor)) else: _methods[clazz] = [(method, args_editor)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_method(cls: type) -> Callable:\n\n def decorator(func):\n func.enable = lambda: _method_enable(\n cls, [_plugin_funcname(func)], func\n )\n func.disable = lambda: _method_disable(\n cls, [_plugin_funcname(func)], func\n )\n return func\n\n return decorator", "def add_method(self, method: Callable):\n self._add(method.__name__, method)", "def add_method(self, cls: type):\n return _add_method(cls)", "def _register_method(self, obj, method_name, method):\n\n logger.debug('register method %s to %s', method_name, obj.name)\n self._methods.append((obj, method_name))\n setattr(obj, method_name, MethodType(method, obj))\n if hasattr(obj, '_tab'):\n obj._tab.add(method_name)", "def add_method (self, cls, name) :\n wrapped = self._wrapped (cls, name)\n setattr (cls, name, pyk.new_instancemethod (wrapped, None, cls))", "def add_member_function(cls, methodName, newMethod):\n cls.add_registration_code('def(\"%s\",%s)'%(methodName, newMethod), True)", "def add_method(self, method, name=None, request_arg=True, store_arg=True):\n # Was this a decorated servicemethod?\n if hasattr(method, '__servicemethod__'):\n options = method.__servicemethod__\n else:\n options = {'name': name or method.__name__, 'store': self.store,\n 'request_arg': request_arg, 'store_arg': store_arg}\n\n method.__servicemethod__ = options\n self.methods[ options['name'] ] = method", "def add_hook(self, method, args=None, kwargs=None):\n self.hook.append((method, args, kwargs))", "def wrap_method(cls, methodName, newMethod):\n cls[methodName].exclude()\n add_member_function(cls, methodName, newMethod)", "def register(self, alias, method, *args, **kargs):\n pfunc = functools.partial(method, *args, **kargs)\n pfunc.__name__ = alias\n pfunc.__doc__ = method.__doc__\n \n try:\n # Some methods don't have any dictionary, in these cases simply \n # don't copy it.\n pfunc.__dict__.update(method.__dict__.copy())\n except AttributeError:\n pass\n \n setattr(self, alias, pfunc)", "def set_method(self, method, **kwargs):\n if isinstance(method, str) and method in self.available_methods.keys():\n self.method_name = method\n self.method = self.available_methods[method]['IDIMethod'](self, **kwargs)\n elif callable(method) and hasattr(method, 'calculate_displacements'):\n self.method_name = 'external_method'\n try:\n self.method = method(self, **kwargs)\n except:\n raise ValueError(\"The input `method` is not a valid `IDIMethod`.\")\n else:\n raise ValueError(\"method must either be a valid name from `available_methods` or an `IDIMethod`.\")\n \n # Update `get_displacements` docstring\n tools.update_docstring(self.get_displacements, self.method.calculate_displacements)\n # Update `show_points` docstring\n if hasattr(self.method, 'show_points'):\n try:\n tools.update_docstring(self.show_points, self.method.show_points)\n except:\n pass", "def register(\n self, cls: typing.Any, method: typing.Optional[typing.Callable[..., _T]] = None\n ) -> typing.Any:\n return self.dispatcher.register(cls, func=method)", "def replaces_method(func: Callable[..., Tuple[str]], classname: str, method_name: str):\n Replacements._method_rep[(classname, method_name)] = func\n return func", "def register(self, cls, method=None):\n if isinstance(cls, (classmethod, staticmethod)):\n first_annotation = {}\n for k, v in cls.__func__.__annotations__.items():\n first_annotation[k] = v\n break\n cls.__annotations__ = first_annotation\n return self.dispatcher.register(cls, func=method)", "def register(self, cls, method=None):\n if isinstance(cls, (classmethod, 
staticmethod)):\n first_annotation = {}\n for k, v in cls.__func__.__annotations__.items():\n first_annotation[k] = v\n break\n cls.__annotations__ = first_annotation\n return self.dispatcher.register(cls, func=method)", "def entry_for_one_method(nom, method):\r\n # TODO(lhosken) : This is darned similar to entry_for_one_func. Merge 'em?\r\n # (Punted so far since funcdoc indentation made my head hurt)\r\n assert inspect.ismethod(method)\r\n args, varargs, varkw, defaults = inspect.getargspec(method)\r\n # args[:1] instead of args to discard \"self\" arg\r\n argspec = inspect.formatargspec(args[1:], varargs, varkw, defaults)\r\n return entry(nom,\r\n argspec=argspec,\r\n funcdoc=(method.__doc__ or \"\").replace(\"\\n\", \" \"))", "def _add_method(*clazzes):\n\n def wrapper(method):\n done = []\n for clazz in clazzes:\n if clazz in done:\n continue # Support multiple names of a clazz\n done.append(clazz)\n assert clazz.__name__ != \"DefaultTable\", \"Oops, table class not found.\"\n assert not hasattr(\n clazz, method.__name__\n ), \"Oops, class '%s' has method '%s'.\" % (clazz.__name__, method.__name__)\n setattr(clazz, method.__name__, method)\n return None\n\n return wrapper", "def classmethod(self, encoding):\n # Add encodings for hidden self and cmd arguments.\n encoding = ensure_bytes(encoding)\n typecodes = parse_type_encoding(encoding)\n typecodes.insert(1, b'@:')\n encoding = b''.join(typecodes)\n\n def decorator(f):\n def objc_class_method(objc_cls, objc_cmd, *args):\n py_cls = ObjCClass(objc_cls)\n py_cls.objc_cmd = objc_cmd\n args = convert_method_arguments(encoding, args)\n result = f(py_cls, *args)\n if isinstance(result, ObjCClass):\n result = result.ptr.value\n elif isinstance(result, ObjCInstance):\n result = result.ptr.value\n return result\n name = f.__name__.replace('_', ':')\n self.add_class_method(objc_class_method, name, encoding)\n return objc_class_method\n return decorator", "def add(self, method, name=None):\n # If no custom name was given, use the method's __name__ attribute\n if not name:\n if not hasattr(method, '__name__'):\n raise AttributeError(\n '%s has no __name__ attribute. 
'\n 'Use add(method, name) to specify a method name'\n % type(method))\n name = method.__name__\n self.__dict__[name] = method\n return method", "def add_method(self,f,*def_args,**def_kw):\n\t\tmethod = LadonMethodInfo(self,f,*def_args,**def_kw)\n\t\t# store the method info\n\t\tself.methods[get_function_name(f)] = method\n\t\treturn method", "def addCallback(self, onetime, method, *args, **kwargs):\n\n if not method in self.callbacks:\n self.callbacks[method] = (_MethodWrapper(method, *args, **kwargs),\n onetime)", "def register_on_edit(func, clazz):\n \n _on_edit[clazz] = func", "def add_method(self, func):\r\n # define a wrapper to add functions which don't have 'self'\r\n def wrapper(self, *args, **kwargs):\r\n return func(*args, **kwargs)\r\n\r\n # if function takes no parameters\r\n if func.__code__.co_argcount == 0:\r\n\r\n # add with 'self'\r\n setattr(model, func.__name__, wrapper)\r\n\r\n # if function does not take 'self'\r\n elif \"self\" not in func.__code__.co_varnames[0]:\r\n\r\n # add with 'self'\r\n setattr(model, func.__name__, wrapper)\r\n\r\n # if function takes 'self'\r\n elif \"self\" in func.__code__.co_varnames[0]:\r\n\r\n # add unmodified\r\n setattr(model, func.__name__, func)", "def register_method(cls, name, func):\n setattr(cls, name, staticmethod(func))", "def override(self, method, function: Union[bool, Callable] = False):\n method = method if isinstance(method, str) else method.__name__\n if not hasattr(self, method):\n raise AttributeError(\n f\"Can't override method '{method}' because it's not defined\")\n if function is False:\n # assume it's used as a decorator\n # @train.override('step')\n # def custom_step(trainer, inputs, targets):\n def decorator(f):\n setattr(self, method, types.MethodType(f, self))\n\n return decorator\n else:\n setattr(self, method, types.MethodType(function, self))", "def add_callback(self, method, callback):\n if method not in self.callbacks:\n raise ValueError(\"Unrecognized method name: \" + str(method))\n if callback in self.callbacks[method]:\n raise ValueError(\n str(callback) + \" has already been attached to this instance.\"\n )\n self.callbacks[method].append(callback)", "def inject_methods(cls, channel, method_descriptions):\n to_cache = cls.__cached_methods__ or set()\n for description in method_descriptions.get('commands', ()):\n if description.get('type') not in ('method', 'multidispatch'):\n continue\n if description.get('type') == 'multidispatch':\n method = MultiMethod(description, cls.__namespace__)\n else:\n method = ProxyMethod(description, cls.__namespace__)\n # if description.get('name') in to_cache:\n # method = lru_cache(maxsize=1024)(method)\n setattr(cls, method.__name__, method)\n if 'cls' in method_descriptions:\n cls.interfaces = method_descriptions['cls'].get('interfaces', [])\n if cls.__namespace__ != cls.__name__:\n cls.interfaces.append(cls.__namespace__)\n for interface in cls.interfaces:\n PROXY_RELATIONS.setdefault(interface, set()).add(cls.__namespace__)\n # log.info(\n # \"Class %s implements %s\",\n # cls.__namespace__,\n # interface,\n # )", "def add(self, method: str, pattern: str, handler: Callable) -> None:", "def replace_method(self, method_name, method):\n\n if not callable(getattr(self, method_name, None)):\n raise ValueError(f\"No method with name '{method_name}'\")\n\n setattr(self, method_name, types.MethodType(method, self))", "def identify_method(self, func):" ]
[ "0.5905217", "0.58517236", "0.5786096", "0.5741496", "0.57013345", "0.561656", "0.5559318", "0.55209756", "0.5503904", "0.54889995", "0.53730494", "0.5337263", "0.5303383", "0.5225363", "0.5225363", "0.52207655", "0.5208166", "0.51892644", "0.5186976", "0.5176117", "0.51709825", "0.51359314", "0.51318455", "0.5125068", "0.511427", "0.50765944", "0.5054606", "0.50511724", "0.5045061", "0.50321853" ]
0.78775865
0
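A minimal sketch of registering editable methods as described above; Robot is a hypothetical class and the editobj.custom import path is an assumption, while EntryEditor is the editor class named in the docstring itself.

```python
import editobj.editor as editor
from editobj.custom import register_method   # import path assumed

class Robot:                                 # hypothetical class for illustration
    def rename(self, new_name):
        self.name = new_name
    def reset(self):
        self.name = ""

# "rename" takes one argument, entered through an EntryEditor.
register_method("rename", Robot, editor.EntryEditor)

# Passing None as the single argument editor yields (None,), which hides "reset".
register_method("reset", Robot, None)
```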
register_available_children(children_codes, clazz) Register the CHILDREN_CODES that are proposed for addition in an instance of CLAZZ. If CHILDREN_CODES is a list of strings (Python code), EditObj will display a dialog box. If CHILDREN_CODES is a single string, no dialog box will be displayed, and this code will automatically be used. If CHILDREN_CODES is "", nothing is done when clicking on the "Add..." button. The codes are just eval'ed to create the children; they can use the "parent" variable, which is set to the list/dict we are adding into.
def register_available_children(children_codes, clazz): if isinstance(children_codes, list): try: _available_children[clazz].extend(children_codes) except: _available_children[clazz] = children_codes else: _available_children[clazz] = children_codes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_children(self, *args):\r\n self.children.extend(args)\r\n return self", "def add_children(self, *args):\r\n self._children.extend(args)\r\n return self", "def addChildren(self, values):\r\n for i, value in enumerate(values):\r\n newScope = copy(self.scope)\r\n newScope.index = i\r\n setattr(newScope, self.entryName, value)\r\n self.componentsLoader.loadAll(self.tokens, scope=newScope, onto=self.widget)", "def add_child_info(conll_tokens, child_funcs, child_strings, lex):\n\tfor child_id in child_funcs:\n\t\tif child_id > len(conll_tokens)-1:\n\t\t\tcontinue\n\t\tfor func in child_funcs[child_id]:\n\t\t\tif func not in conll_tokens[child_id].child_funcs:\n\t\t\t\tconll_tokens[child_id].child_funcs.append(func)\n\t\t\t\tif lex.filters[\"neg_func\"].match(func):\n\t\t\t\t\tconll_tokens[child_id].negated = True\n\t\tfor tok_text in child_strings[child_id]:\n\t\t\tif tok_text not in conll_tokens[child_id].child_strings:\n\t\t\t\tconll_tokens[child_id].child_strings.append(tok_text)", "def register_children_attr(attr, insert = \"insert\", del_ = \"__delitem__\", clazz = None):\n \n if clazz: _children_attrs[clazz] = (attr, insert, del_)\n else: _children_attrs[None].append((attr, insert, del_))", "def test_add_node_with_children(self):\n root = netapp_api.NaElement('root')\n self.mock_object(netapp_api.NaElement,\n 'create_node_with_children',\n return_value=zapi_fakes.FAKE_INVOKE_DATA)\n mock_invoke = self.mock_object(root, 'add_child_elem')\n\n root.add_node_with_children('options')\n\n mock_invoke.assert_called_with(zapi_fakes.FAKE_INVOKE_DATA)", "def add_children(self, children: dict) -> None:\n for child in children:\n self.children[child.move] = child", "def init_children(self):\n children = []\n legal_moves = list(chess.Board(self.state).legal_moves)\n for move in legal_moves:\n temp_board = chess.Board(self.state)\n temp_board.push_san(str(move))\n children.append(Node(temp_board.fen(), self))\n self.children = children", "def add_children(self, *args):\n for child in args:\n if isinstance(child, AbstractNode):\n child.parent = self\n self._children.append(child)\n else:\n raise TypeError(\"child is not a device tree node object\")", "def get_code_per_child(self, obj, child):\n return []", "def addChild(node):", "def register_on_children_visible(func, clazz):\n \n _on_children_visible[clazz] = func", "def create_children(self):\n actionCount = len(self.availableActions)\n self.children = [None] * actionCount\n\n # Split creation into multiple threads if this is the master node.\n if self.level == 0 and USE_THREADS:\n threads = [None] * actionCount\n for idx in range(actionCount):\n threads[idx] = threading.Thread(target=create_child, args=(self, idx))\n threads[idx].start()\n for t in threads:\n t.join()\n else:\n for idx in range(actionCount):\n create_child(self, idx)\n # Stop making child branches if the most recent child branch already found lethal.\n if self.children[idx].get_max_win_strength() == WIN_VALUE:\n self.children = self.children[:idx+1]\n break", "def __init__(self, label, *children):\n self.__label = label;\n self.__children = \\\n [ c if type(c) is Tree else Tree(c) \n for c in children]", "def _addChildren(self, pid, chunks):\n if chunks[pid].type in [0, -1]:\n self._addEntity(pid, chunks)\n else:\n self._addPredicate(pid, chunks)", "def add_child(self, child):\r\n self.children.append(child)", "def add_children(self, lines):\n\n if isinstance(lines, str):\n lines = [lines]\n\n for line in lines:\n self.add_child(line)", "def addChildren(self, child_list, 
add_to_node):\n #import pdb; pdb.set_trace()\n c = self.c\n for child in child_list:\n new_node = add_to_node.insertAsLastChild()\n c.setHeadString(new_node,child.headline)\n c.setBodyString(new_node,child.body)\n self.addChildren(child.children, new_node)", "def children(self, children: List[str]):\n self._children = children", "def register(self):\n # Set the evaluation state of this instance to REGISTER, as it has been\n # recognized by the root object.\n self.root.registry.append(self)\n self.eval_state = COMMAND_EVAL_REGISTER\n\n # Loop through children and register them too, recursively.\n for ch in self.children:\n # Only register the child if it has not been registered yet;\n # therefore its evaluation state has been set to NONE.\n if ch.eval_state == COMMAND_EVAL_NONE:\n ch.register()", "def __init__(self, label, *children):\n self.__label = label;\n self.__children = \\\n [ c if type(c) is Tree else Tree(c) for c in children]", "def add_child(self, value, current_class, possible_class_values):\n child = FPNode(value, 1, current_class, possible_class_values, self)\n self.children.append(child)\n return child", "def add_child(self, cd, wt: float):\n self.child.append([cd, wt])", "def getNewCodeList(self):\n tmp = []\n for child in self.children:\n tmp.extend(child.getNewCodeList())\n return tmp", "def set_children(self, c, line_number=0):\n if isinstance(self.children, set):\n self.children = self.children | {c}\n self._children_lines = self._children_lines | {line_number}\n else:\n self.children = {c} if (c and c != 'NA') else 'NA'\n self._children_lines = {line_number}", "def create_new_child(self,instance):\n\t\tnew_child = self.tree.makeTree(self.tree.root, self.tree)\n\t\tnew_child.utility.increment_counts(instance)\n\t\tself.tree.children.append(new_child)", "def child(*args, show: bool = True, parent: str = \"\", before: str = \"\", width: int = 0, pos=[],\n height: int = 0, border: bool = True, autosize_x: bool = False, autosize_y: bool = False,\n no_scrollbar: bool = False, horizontal_scrollbar: bool = False, menubar: bool = False, id:str='', \n indent=-1):\n try: \n widget = internal_dpg.add_child(*args, show=show, parent=parent, before=before, width=width,\n height=height, border=border, autosize_x=autosize_x, autosize_y=autosize_y,\n no_scrollbar=no_scrollbar, horizontal_scrollbar=horizontal_scrollbar,\n menubar=menubar, id=id, indent=indent, pos=pos)\n internal_dpg.push_container_stack(widget)\n yield widget\n finally:\n internal_dpg.pop_container_stack()", "def _load_children(self,\n children: Sequence, loader: Callable, *,\n address: metadata.Address, path: Tuple[int, ...],\n resources: Mapping[str, wrappers.MessageType]) -> Mapping:\n # Iterate over the list of children provided and call the\n # applicable loader function on each.\n answer = {}\n for child, i in zip(children, range(0, sys.maxsize)):\n wrapped = loader(child, address=address, path=path + (i,),\n resources=resources)\n answer[wrapped.name] = wrapped\n return answer", "def expansion(self, actions):\n for action in actions: \n self.children[action[0]] = TreeNode()", "def create_code_helper(root_node, code, huff_list):\r\n if root_node is None: # base case, if tree is None\r\n return huff_list\r\n if root_node.left is None and root_node.right is None: # leaf node, no children\r\n huff_list[ord(root_node.char)] = code # inserts char's code\r\n create_code_helper(root_node.left, code + \"0\", huff_list)\r\n create_code_helper(root_node.right, code + \"1\", huff_list)\r\n return huff_list" ]
[ "0.5452896", "0.53605896", "0.5334651", "0.5284387", "0.52782434", "0.5211498", "0.5191764", "0.5082743", "0.50570357", "0.5024224", "0.49518868", "0.49307147", "0.49156395", "0.4906171", "0.48993438", "0.48965266", "0.4896071", "0.48945415", "0.48457983", "0.4797419", "0.4797095", "0.47651955", "0.47543034", "0.47519827", "0.4728365", "0.47247303", "0.47231966", "0.46985835", "0.46963423", "0.4686187" ]
0.79036367
0
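A usage sketch of register_available_children following the docstring above; File, Folder, Archive and ReadOnlyFolder are hypothetical classes, and the editobj.custom import path is an assumption.

```python
from editobj.custom import register_available_children   # import path assumed

class File: pass            # hypothetical node classes, for illustration only
class Folder: pass
class Archive: pass
class ReadOnlyFolder: pass

# Several candidate codes: EditObj shows a dialog; "parent" is the container being added to.
register_available_children(["File(parent)", "Folder(parent)"], Folder)

# A single code string: used directly, without a dialog box.
register_available_children("File(parent)", Archive)

# An empty string: the "Add..." button does nothing for ReadOnlyFolder instances.
register_available_children("", ReadOnlyFolder)
```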
register_values(attr, code_expressions) Registers CODE_EXPRESSIONS as a proposed value for ATTR.
def register_values(attr, code_expressions): code_expressions = map(unicodify, code_expressions) try: _values[attr].extend(code_expressions) except KeyError: _values[attr] = list(code_expressions)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_reg_expressions(self, expressions_update: Dict[str, Any]) -> None:\n expressions = self.base_expressions.copy()\n expressions.update(expressions_update)\n self.reg_expressions = expressions", "def RegisterValues():\n return get_float64_array(lib.Generators_Get_RegisterValues)", "def calculate_y_values(f, x_values):\r\n y_values = x_values.copy()\r\n for counter, value in enumerate(x_values):\r\n expr = f.replace(\"x\", \"(\" + str(value) + \")\")\r\n try:\r\n y_values[counter] = sympy.N(expr, 5)\r\n except:\r\n y_values[counter] = None\r\n return y_values", "def user_expressions(self, expressions):\n compiled_expressions = {}\n for key, expr in expressions.items():\n try:\n compiled_expressions[key] = COMPILER.parse_eval(expr)\n except CoconutException:\n compiled_expressions[key] = expr\n return super(CoconutShell, self).user_expressions(compiled_expressions)", "def as_expression(self):\n def store(attr,value):\n if value and attr in ['firebird_version','platform','init_script',\n 'test_script','expected_stdout','expected_stderr',\n 'resources','substitutions','test_type']:\n return True\n elif attr == 'database' and value in [None,DB_EXISTING,DB_RESTORE]:\n return True\n elif attr == 'database_name' and value and self.database in [None,DB_EXISTING]:\n return True\n elif attr == 'backup_file' and self.database == DB_RESTORE:\n return True\n elif attr == 'user_name' and value != 'SYSDBA':\n return True\n elif attr == 'user_password' and (value != 'masterkey' or self.user_name != 'SYSDBA'):\n return True\n elif attr == 'database_character_set' and value and value != 'NONE':\n return True\n elif attr == 'connection_character_set' and value and value != 'NONE':\n return True\n elif attr == 'page_size' and value:\n return True\n elif attr == 'sql_dialect' and value != 3:\n return True\n else:\n return False\n\n data = [(key,self.__dict__[key]) for key in self.FIELDS]\n items = []\n for (key,value) in data:\n if not store(key,value):\n continue\n if isinstance(value,types.UnicodeType):\n value = value.encode('utf-8')\n if isinstance(value,types.StringTypes):\n value = trim_value(value)\n if key in ['database_name','expected_stderr','expected_stdout',\n 'init_script','test_script']:\n items.append(\" '%s': %s\" % (key,quote(value)))\n elif isinstance(value,types.StringType):\n items.append(\" '%s': %s\" % (key,quote(value)))\n elif key == 'substitutions':\n l = []\n for (pattern,replacement) in value:\n if isinstance(pattern,types.UnicodeType):\n pattern = pattern.encode('utf-8')\n if isinstance(replacement,types.UnicodeType):\n replacement = replacement.encode('utf-8')\n l.append('(%s,%s)' % (quote(pattern),quote(replacement)))\n items.append(\" '%s': %s\" % (key,'[%s]' % ','.join(l)))\n elif key == 'resources':\n l = []\n for res in value:\n l.append(quote(res))\n items.append(\" '%s': %s\" % (key,'[%s]' % ','.join(l)))\n else:\n items.append(\" '%s': %s\" % (key,str(value)))\n r = '{\\n%s\\n}' % ',\\n'.join(items)\n return r", "def regexp(regexp_list):\n def add_attribute(func):\n if not hasattr(func, \"regexp\"):\n func.regexp = []\n func.regexp.append(regexp_list)\n return func\n return add_attribute", "def write_registers(self, registeraddress, values):\n if not isinstance(values, list):\n raise TypeError('The \"values parameter\" must be a list. 
Given: {0!r}'.format(values))\n _checkInt(len(values), minvalue=1, description='length of input list')\n # Note: The content of the list is checked at content conversion.\n\n self._genericCommand(16, registeraddress, values, numberOfRegisters=len(values), payloadformat='registers')", "def write_register(self, registeraddress, value, numberOfDecimals=0, functioncode=16, signed=False):\n _checkFunctioncode(functioncode, [6, 16])\n _checkInt(numberOfDecimals, minvalue=0, maxvalue=10, description='number of decimals')\n _checkBool(signed, description='signed')\n _checkNumerical(value, description='input value')\n\n self._genericCommand(functioncode, registeraddress, value, numberOfDecimals, signed=signed)", "def add_regvar(*args):\n return _ida_frame.add_regvar(*args)", "def register_ast(self, funcs):\n for name, func in funcs.items():\n self.ast_functions[name] = func", "def gen_expr(self, expr, rvalue=False):\n assert isinstance(expr, expressions.CExpression), str(expr)\n\n with self.builder.use_location(expr.location):\n if isinstance(expr, expressions.UnaryOperator):\n value = self.gen_unop(expr)\n elif isinstance(expr, expressions.BinaryOperator):\n value = self.gen_binop(expr)\n elif isinstance(expr, expressions.TernaryOperator):\n value = self.gen_ternop(expr)\n elif isinstance(expr, expressions.VariableAccess):\n value = self.gen_variable_access(expr)\n elif isinstance(expr, expressions.FunctionCall):\n value = self.gen_call(expr)\n elif isinstance(expr, expressions.StringLiteral):\n value = self.gen_string_literal(expr)\n elif isinstance(expr, expressions.CharLiteral):\n value = self.gen_char_literal(expr)\n elif isinstance(expr, expressions.NumericLiteral):\n value = self.gen_numeric_literal(expr)\n elif isinstance(expr, expressions.CompoundLiteral):\n value = self.gen_compound_literal(expr)\n elif isinstance(expr, expressions.InitializerList):\n self.error(\"Illegal initializer list\", expr.location)\n elif isinstance(expr, expressions.Cast):\n value = self.gen_cast(expr)\n elif isinstance(expr, expressions.Sizeof):\n value = self.gen_sizeof(expr)\n elif isinstance(expr, expressions.FieldSelect):\n value = self.gen_field_select(expr)\n elif isinstance(expr, expressions.ArrayIndex):\n value = self.gen_array_index(expr)\n elif isinstance(expr, expressions.BuiltIn):\n value = self.gen_builtin(expr)\n else: # pragma: no cover\n raise NotImplementedError(str(expr))\n\n # Check for given attributes:\n assert isinstance(expr.typ, types.CType)\n assert isinstance(expr.lvalue, bool) # C semantics lvalue\n\n # If we need an rvalue, load it!\n if rvalue and expr.lvalue:\n if not expr.typ.is_function:\n value = self._load_value(value, expr.typ)\n\n elif not rvalue:\n assert expr.lvalue\n return value", "def map_values_to_functions(values):\n\n sin_values = np.sin(values)\n cos_values = np.cos(values)\n\n # Defino una nueva funcion anonima que uso para mapear values a la funcion\n # compleja que no puedo expresar de forma directa\n complex_function = lambda x: np.tanh(np.sin(x) + np.cos(x))\n complex_function_values = complex_function(values)\n\n return sin_values, cos_values, complex_function_values", "def write(self, tags, values, step):\n if not isinstance(tags, list):\n tags = list(tags)\n if not isinstance(values, list):\n values = list(values)\n\n for i, (tag, value) in enumerate(zip(tags,values)):\n self.writer.add_scalar(tag, value, step)", "def gen_values(self):", "def register(self, funcs):\n for name, func in funcs.items():\n self.functions[name] = func", "def set_attribute(node_uuid, 
name, values):\n if not isinstance(values, list):\n values = [values]\n with session_for_write() as session:\n\n for value in values:\n attr = model.Attribute(node_uuid=node_uuid,\n uuid=uuidutils.generate_uuid(),\n name=name, value=value)\n session.add(attr)", "def register_code(id, code):\n #print \"Adding %s to the registry\" % id\n #print code\n if _theRegistry.has_id(id):\n raise ValueError, 'key %s is already registerd' % id\n _theRegistry.add_code( id, code)", "def register_write_multiple(self, register_indices, values):\n # TODO: rename 'register_indices' to 'registers'\n register_indices = register_indices[:]\n if len(register_indices) != len(values):\n raise ValueError('Must be an equal number of registers and values')\n\n num_regs = len(register_indices)\n for idx, indice in enumerate(register_indices):\n if isinstance(indice, six.string_types):\n register_indices[idx] = self._get_register_index_from_name(indice)\n buf = (ctypes.c_uint32 * num_regs)(*register_indices)\n data = (ctypes.c_uint32 * num_regs)(*values)\n\n # TODO: For some reason, these statuses are wonky, not sure why, might\n # be bad documentation, but they cannot be trusted at all.\n statuses = (ctypes.c_uint8 * num_regs)(0)\n\n res = self._dll.JLINKARM_WriteRegs(buf, data, statuses, num_regs)\n if res != 0:\n raise errors.JLinkException(res)\n\n return None", "def uCSIsSupplementalMathematicalOperators(code):\n ret = libxml2mod.xmlUCSIsSupplementalMathematicalOperators(code)\n return ret", "def applyFuncOnValues(self, func):\r\n self._value = func(self._value)", "def process_exp_values(exp_data_list):\n exp_data_values = []\n for exp_data in exp_data_list:\n exp_data_values.append(process_exp_value(exp_data))\n return exp_data_values", "def values(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Expression]:", "def values(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Expression]:", "def set_element_codes(self,elements,number_of_element_types = 1): \n \n #create dimensions\n try: self.dataset.createDimension('number_of_elements_in_domain',len(elements))\n except Exception, e: print \"WARNING: %s\" % e\n \n #create dimensions\n try: self.dataset.createDimension('number_of_element_types',number_of_element_types)\n except Exception, e: print \"WARNING: %s\" % e\n \n \n try: element_codes = self.dataset.createVariable(varname = 'element_codes',datatype = 'i', dimensions=('number_of_elements_in_domain',)) \n except Exception, e:\n \telement_codes = self.dataset.variables['element_codes']\n \tprint \"WARNING: %s\" % e\n \n codes = []\n for el in elements:\n \tcodes.append(el[1])\n \t\n element_codes[:] = array(codes)", "def add(self, attr):\n self.validate_type(attr)\n self.values.add(attr.value)", "def compile_expression_list(self):\n\n\t\tself.compile_expression()\n\n\t\twhile(self.tokenizer.get_token() == ','):\n\t\t\tself.outfile.write(self.tokenizer.symbol())\n\t\t\tself.compile_expression()", "def evaluateValue(compiled_expression):", "def _init_builtins(self):\n for k, rexp in self.expressions.items():\n func = getattr(self, \"%s_processor\"%k)()\n yield (rexp, [func] + self._extra_rules.get(k, []))", "def extensible_attributes_list_values():\n return \"extensibleattributedef?\" \\\n \"_return_fields=\" \\\n \"list_values,\" \\\n \"comment,\" \\\n \"name,\" \\\n \"type\"", "def uCSIsMathematicalOperators(code):\n ret = libxml2mod.xmlUCSIsMathematicalOperators(code)\n return ret" ]
[ "0.48745957", "0.4734857", "0.46250662", "0.46225697", "0.4597566", "0.45449904", "0.45448384", "0.4482638", "0.44240987", "0.44201872", "0.4374772", "0.4365421", "0.43483615", "0.43303338", "0.43257523", "0.43160722", "0.42835793", "0.42812833", "0.427737", "0.42618823", "0.42568296", "0.42445025", "0.42445025", "0.42413637", "0.4240705", "0.42149514", "0.42095956", "0.4208227", "0.42031953", "0.4186778" ]
0.85605466
0
register_on_edit(func, clazz) Register FUNC as an "on_edit" event for CLAZZ. When an instance of CLAZZ is edited, FUNC is called with the instance and the editor Tkinter window as arguments.
def register_on_edit(func, clazz): _on_edit[clazz] = func
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addEdit( self, cCtrlName, nPositionX, nPositionY, nWidth, nHeight,\n cText=None,\n textListenerProc=None,\n cReadOnly=None,\n cMultiline=None,\n cAutoVScroll=None):\n self.addControl( \"com.sun.star.awt.UnoControlEditModel\",\n cCtrlName, nPositionX, nPositionY, nWidth, nHeight, bDropdown=None, cMultiline=cMultiline, cReadOnly=cReadOnly, cAutoVScroll=cAutoVScroll)\n\n if cText != None:\n self.setEditText( cCtrlName, cText )\n if textListenerProc != None:\n self.addTextListenerProc( cCtrlName, textListenerProc )", "def on_edit(self, dataobj):", "def put_edit(self, f, *args, **kwds):\n self.put_nowait(functools.partial(f, *args, **kwds))", "def add_callback(callback, control_instance):\n pass", "def on_edit(self, event, text):\n return None", "def link_edit_callback(self):\n pass", "def register_edit_view(self, blueprint):\n view = apply_decorators(self.edit_view, self.edit_decorators)\n blueprint.add_url_rule(\n self.edit_rule, self.edit_endpoint, view, methods=['GET', 'POST'])", "def editor():\n pass", "def edit():", "def DoEdit(self,event):\r\n raise UncodedError", "def on_edit_changed(self, edit):\n\t\tself.emit('value-changed', edit.get_text())", "def register_callback(self, func):\n self.callback = func", "def editor(*args, control: bool=True, defineTemplate: AnyStr=\"\", docTag: Union[AnyStr, bool]=\"\",\n exists: bool=True, filter: Union[AnyStr, bool]=\"\", forceMainConnection: Union[AnyStr,\n bool]=\"\", highlightConnection: Union[AnyStr, bool]=\"\", lockMainConnection: bool=True,\n mainListConnection: Union[AnyStr, bool]=\"\", panel: Union[AnyStr, bool]=\"\", parent:\n Union[AnyStr, bool]=\"\", selectionConnection: Union[AnyStr, bool]=\"\", stateString:\n bool=True, unParent: bool=True, unlockMainConnection: bool=True,\n updateMainConnection: bool=True, useTemplate: AnyStr=\"\", q=True, query=True, e=True,\n edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def XPAddWidgetCallback(self, inWidget, inNewCallback):\n pass", "def edit(self, **kwargs):\n ...", "def _init_edit(self):\n def edit(core, args):\n month = ' '.join(getattr(args, 'month', []))\n core.edit(month)\n\n usage = 'stl edit [month]'\n desc = (\n 'lets you vim the right file'\n )\n\n subp = self.subparsers.add_parser(\n 'edit', usage=usage, description=desc, help=desc)\n\n subp.add_argument(\n 'month', nargs=argparse.REMAINDER,\n help='the month you want to edit, e.g. 
oct 2016')\n\n subp.set_defaults(func=edit)", "def hotkeyEditorPanel(*args, annotation: Union[AnyStr, bool]=\"\", backgroundColor:\n Union[List[float, float, float], bool]=None, defineTemplate: AnyStr=\"\",\n docTag: Union[AnyStr, bool]=\"\", dragCallback: Script=None, dropCallback:\n Script=None, enable: bool=True, enableBackground: bool=True,\n enableKeyboardFocus: bool=True, exists: bool=True, fullPathName:\n bool=True, height: Union[int, bool]=0, highlightColor: Union[List[float,\n float, float], bool]=None, isObscured: bool=True, manage: bool=True,\n noBackground: bool=True, numberOfPopupMenus: bool=True, parent:\n Union[AnyStr, bool]=\"\", popupMenuArray: bool=True, preventOverride:\n bool=True, statusBarMessage: AnyStr=\"\", useTemplate: AnyStr=\"\", visible:\n bool=True, visibleChangeCommand: Union[Script, bool]=None, width:\n Union[int, bool]=0, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def edit_object(obj):\n return __EditMode(obj)", "def newEditorView(self, fn, caller, filetype=\"\", indexes=None):\n editor, assembly = self.cloneEditor(caller, filetype, fn)\n \n self._addView(assembly, fn, caller.getNoName(), indexes=indexes)\n self._modificationStatusChanged(editor.isModified(), editor)\n self._checkActions(editor)\n \n return editor", "def parameter_tweaks( cls, ):\n cls.file_text_editor.add_command( cls.parameters.ex_editor )\n print( f\"parameter tweaks {cls.text_editors}\" ) #", "def SetEditable(self, edit):\r\n \r\n self._edit = edit\r\n return self", "def addActionListenerProc( self, cCtrlName, actionListenerProc ):\n oControl = self.getControl( cCtrlName )\n oActionListener = ActionListenerProcAdapter( actionListenerProc )\n oControl.addActionListener( oActionListener )", "def proxy(self, *args):\n text_area = self.get_current()\n cmd = (text_area._orig,) + args\n try:\n result = text_area.tk.call(cmd)\n except:\n return\n if (args[0] in (\"insert\", \"replace\", \"delete\") or\n args[0:3] == (\"mark\", \"set\", \"insert\") or\n args[0:2] == (\"xview\", \"moveto\") or\n args[0:2] == (\"xview\", \"scroll\") or\n args[0:2] == (\"yview\", \"moveto\") or\n args[0:2] == (\"yview\", \"scroll\")\n ):\n text_area.event_generate(\"<<Change>>\", when=\"tail\")\n # return what the actual widget returned\n return result", "def add_class_hook(cls, event, function):\n if event not in cls.class_hooks:\n cls.class_hooks[event] = []\n cls.class_hooks[event].append(function)", "def setFunction(self, func: ghidra.program.model.listing.Function, entry: ghidra.program.model.address.Address, dbg: ghidra.app.decompiler.DecompileDebug) -> None:\n ...", "def _setup_editor_function(self):\n assert self.is_function()\n self.set_result_visible(True)\n func = self.function.function\n args = getargspec(func)[0]\n label = function_label(self.function)\n self._ui.info.setText(label)\n self._output_widget.label = self.function.output_labels[0]\n self._clear_input_canvas()\n for a in args:\n self._add_argument_widget(a)\n\n self.spacer = QtWidgets.QSpacerItem(5, 5, QtWidgets.QSizePolicy.Minimum,\n QtWidgets.QSizePolicy.Expanding)\n self._ui.input_canvas.layout().addItem(self.spacer)", "def __init__(self, text, value, callback):\n self.input_widget = urwid.IntEdit(('edittxt', text), value)\n self.widget = urwid.Pile([urwid.AttrMap(self.input_widget, 'editbx', 'editfc')])\n BaseTimedWidgetWrap.__init__(self, self.widget)\n\n self.__f = callback", "def addKnobChanged(call, args=(), kwargs={}, nodeClass='*', node=None):\n pass", "def set_InfoCallback(self,func):\n 
self.__obj.set_InfoCallback(func)", "def register_attr(attr, editor, clazz = None):\n \n for_attr = _attr_editors.get(attr)\n if for_attr: for_attr[clazz] = editor\n else: _attr_editors[attr] = { clazz : editor }" ]
[ "0.5836717", "0.57678133", "0.5657995", "0.5506038", "0.5439217", "0.5395581", "0.53424084", "0.53140134", "0.51880944", "0.51332206", "0.5114195", "0.51119095", "0.509349", "0.50865626", "0.5086496", "0.50285983", "0.50238705", "0.5020771", "0.49626553", "0.4934645", "0.49255812", "0.49175835", "0.49130362", "0.49126658", "0.49090376", "0.49059522", "0.48917523", "0.486811", "0.4845519", "0.4825893" ]
0.8712736
0
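A minimal usage sketch for the `register_on_edit` row above. The `_on_edit` registry dict is taken from the snippet itself; the `Lens` class, the `lens_edited` callback and the final lookup call are assumptions added only so the example is self-contained and runnable.

```python
_on_edit = {}

def register_on_edit(func, clazz):
    _on_edit[clazz] = func

class Lens:
    pass

def lens_edited(instance, editor_window):
    # called by the editor with the edited instance and its Tkinter window
    print("editing", instance, "in", editor_window)

register_on_edit(lens_edited, Lens)

# When an editor window opens for a Lens instance, the registry would be consulted:
_on_edit[Lens](Lens(), None)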
register_on_children_visible(func, clazz) Register FUNC as an "on_children_visible" event for CLAZZ. When the children of an instance of CLAZZ are shown or hidden, FUNC is called with the instance and the new visibility status (0 or 1) as arguments.
def register_on_children_visible(func, clazz): _on_children_visible[clazz] = func
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visible(self, show):", "def register_available_children(children_codes, clazz):\n \n if isinstance(children_codes, list):\n try: _available_children[clazz].extend(children_codes)\n except: _available_children[clazz] = children_codes\n else:\n _available_children[clazz] = children_codes", "def is_visible(self):", "def visit_children(self, func):\n for child in self._children:\n func(child)", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def is_visible(self, is_visible):\n\n self.container['is_visible'] = is_visible", "def ToggleVisible(self, event):\n pass", "def showInvisibles(self: Self, event: Event = None) -> None:\n c = self\n showInvisiblesHelper(c, True)", "def set_visible(self, visible):\n # Make sure the 'visible' attribute is synced up as a result\n # of the method call. This may fire a notification, in which\n # case the change handler will call this method again. This\n # guard prevents that unneeded recursion.\n if guard.guarded(self, 'set_visible'):\n return\n else:\n with guard(self, 'set_visible'):\n self.visible = visible\n \n # Only set the visibility to True (which will show the window) \n # if the component is fully initialized.\n if not visible or self.initialized:\n self.abstract_obj.set_visible(visible)", "def setVisible( self, state ):\n self._visible = state\n \n super(XNode, self).setVisible(self.isVisible())\n \n self.dispatch.visibilityChanged.emit(state)\n self.setDirty()", "def test_constructor_visible_widgets(plugin_dialog_constructor):\n assert not plugin_dialog_constructor.direct_entry_edit.isVisible()\n assert not plugin_dialog_constructor.direct_entry_btn.isVisible()", "def visible(self, visible):\n\n self._visible = visible", "def isVisible(self):\n\t\treturn True", "def set_visible(self, visible):\n self._visible = visible\n for artist in self.artists:\n artist.set_visible(visible)", "def __checkCenterVisibility(self, itemNode, itemXc, itemYc):\r\n for sibling in itemNode.findall('following-sibling::*[@is-in-tab-area=\"true\"]'):\r\n name = sibling.getAttribute('image')\r\n siblingX, siblingY, siblingW, siblingH = [int(c) for c in sibling.getAttribute('coords').split(\",\")]\r\n if itemXc>=siblingX and itemXc <=(siblingX + siblingW) and itemYc>=siblingY and itemYc <= (siblingY + siblingH):\r\n return (self.HIDDEN,(itemXc,itemYc), itemNode)\r\n\r\n return (self.VISIBLE,(itemXc,itemYc), itemNode)", "def fl_object_is_visible(ptr_flobject):\n _fl_object_is_visible = library.cfuncproto(\n library.load_so_libforms(), \"fl_object_is_visible\",\\\n cty.c_int, [cty.POINTER(xfdata.FL_OBJECT)],\\\n \"\"\"int fl_object_is_visible(FL_OBJECT * obj)\"\"\")\n library.check_if_flinitialized()\n library.keep_elem_refs(ptr_flobject)\n retval = _fl_object_is_visible(ptr_flobject)\n return retval", "def hasChildren():", "def visible(self) -> bool:\n try:\n return bool(self.driver.wait_until_all_visible(*self.ROOT_LOCATOR))\n except WebDriverException:\n return False", "def getChildren():", "def register_children_attr(attr, insert = \"insert\", del_ = \"__delitem__\", clazz = None):\n \n if clazz: _children_attrs[clazz] = (attr, insert, del_)\n else: _children_attrs[None].append((attr, insert, del_))" ]
[ "0.5089612", "0.50880736", "0.5036541", "0.5036325", "0.4887222", "0.4887222", "0.4887222", "0.4887222", "0.4887222", "0.4887222", "0.4887222", "0.4887222", "0.4887222", "0.4887222", "0.4887222", "0.48733646", "0.48165864", "0.47937766", "0.47393054", "0.47151053", "0.46700406", "0.4662116", "0.4655274", "0.46503824", "0.4637998", "0.4587284", "0.456064", "0.45376027", "0.4529063", "0.4520932" ]
0.8832136
0
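Likewise, a hedged sketch of how the `register_on_children_visible` hook above might be wired up; `Folder` and the callback are illustrative names only, and the trailing call stands in for what the editor would do when children are expanded or collapsed.

```python
_on_children_visible = {}

def register_on_children_visible(func, clazz):
    _on_children_visible[clazz] = func

class Folder:
    pass

def folder_children_toggled(instance, visible):
    state = "shown" if visible else "hidden"
    print("children of", instance, "are now", state)

register_on_children_visible(folder_children_toggled, Folder)
_on_children_visible[Folder](Folder(), 1)  # editor would pass 1 on expand, 0 on collapse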
This method is used to check whether the food name is in our database or not. ``name`` is the name of the food from the user. Returns true if the food is in the database, false otherwise.
def findFood(self,name):\n    name = name.lower()\n    return dictfood.has_key(name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __contains__(self, user_name):\n tuples = self._execute(\n \"SELECT name FROM users WHERE name == ?\",\n (user_name,)\n )\n return len(tuples) == 1", "def player_exists_in_db(name: str):\n with open('db.json') as fo:\n data = loads(fo.read())\n return name in data", "def userCheck(name):\r\n \r\n from logger.gamelogger import logger\r\n \r\n sql = \"\"\"SELECT count(*) FROM players where name = '{0}' COLLATE NOCASE;\"\"\".format(name)\r\n \r\n try:\r\n conn = sqlite3.connect(os.path.join(\"data\", \"players.db\"))\r\n cursor = conn.cursor()\r\n cursor.execute(sql)\r\n \r\n results = cursor.fetchall()\r\n\r\n except sqlite3.Error, e:\r\n logger.log.critical(\"Error using utils.gameutils.userCheck(): {0}\".format(e.args[0]))\r\n return False\r\n \r\n for row in results:\r\n if row[0] is 1:\r\n return True\r\n elif row[0] > 1:\r\n logger.log.warn(\"Duplicate username exists in player database: {0}\".format(name))\r\n \r\n return False", "def UserName_availabity():\r\n try:\r\n \r\n UserName=request.args.get(\"UserName\")\r\n user_details=fetch_details(UserName)\r\n user_name=user_details[0]['UserName']\r\n if str(UserName)==str(user_name):\r\n msg=\"UserName is already taken kindly choose another one\"\r\n except IndexError:\r\n msg=\"UserName is available.\"\r\n return msg", "def checkFood(self, food):\n pass", "def check():\n username = request.args.get(\"username\")\n if len(username) < 1:\n print(\"false len\")\n return jsonify(\"false\")\n name = db.execute(f\"SELECT * FROM users WHERE username = '{username}'\")\n if name:\n print(\"false\")\n return \"false\"\n else:\n print(\"true\")\n return \"true\"", "async def is_garage_name_exit(self, garage_name:str):\r\n async with self._db.acquire() as conn:\r\n result= [dict(row.items()) async for row in await conn.execute(\r\n Garage.select().where((Garage.c.garage_name == garage_name)))\r\n ]\r\n return len(result) >0 and True or False", "def test_name_search(self):\n # A name in the database\n search_string = \"Umut\"\n # Search For Umut\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string})\n search_result = json.loads(resp.content)\n # Check the name field of the result\n self.assertEqual(search_string,search_result[0]['name'],\"It doesn't return the user with the name {}\".format(search_string))", "def check_name_db ():\n db_checks = [DB_FIRST_MALE, DB_FIRST_FEMALE,\n DB_LAST_SIMPLE, DB_LAST_NAMESON,\n DB_LAST_GAELIC1, DB_LAST_GAELIC2,\n DB_LAST_COMBO1, DB_LAST_COMBO2,\n DB_LAST_UPPER1, DB_LAST_UPPER2]\n\n db_exists = db.database_exists\n for db_name in db_checks:\n if not db_exists(db_name):\n raise DatabaseException, db_name", "def party_exist(party_name: str) -> bool:\n\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select name from party where name = '{}'\".format(party_name)\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n if len(data) == 0:\n return False\n return True", "def search_food(cls, name):\n obj = cls.objects(name=name).first()\n return obj", "def _check_name(self):\n\t\tpass", "def searchByName(database):\n firstname=str(input(\"What is his first name :\"))\n usr,find=getByName(database,firstname)\n if find:\n print(usr)", "def displayFolowers(database):\n firstname=str(input(\"who do you want to display followers :\"))\n usr,find=getByName(database,firstname)\n if find:\n print(f\"{usr.firstname} {usr.lastname} is folowed by:\")\n for folower in usr.folowed:\n print(folower)", "def check():\n\n username 
= request.args.get(\"username\")\n\n names = db.execute(\"SELECT username FROM users WHERE username=:username\", username=username)\n print(names)\n print(type(names))\n if not names and username:\n return jsonify(True)\n else:\n return jsonify(False)", "def name_exists(self, login):\n\t\treturn login in self.users_by_name", "def is_cool(name):\n if (name == \"Joe\") or (name == \"John\") or (name == \"Stephen\"):\n return True\n else:\n return False", "def test_name(name):\n # To work with the name, we remove the address and then\n # split it by its blanks\n name = name.split(\",\")[0]\n name = name.split()\n # First, we check whether the fictional person is a doctor or not\n doctor = 0\n if \"Dr.\" in name:\n doctor = 1\n\n # We save the results in a list\n result = [doctor]\n # Next we look at whether the person has a double first name\n if \"-\" in name[-2]:\n result.append(1)\n else:\n result.append(0)\n\n # Next we check if the person hat a double last name.\n if \"-\" in name[-1]:\n result.append(1)\n else:\n result.append(0)\n\n # Next we check whether the person is male or female.\n first_name = name[-2]\n if result[1] == 1:\n first_name = (first_name.split(\"-\"))[-2]\n if (first_name in names.woman and \"Herr\" not in name) or \"Frau\" in name:\n result.append(\"female\")\n elif (first_name in names.man and \"Frau\" not in name) or \"Herr\" in name:\n result.append(\"male\")\n return result", "def is_an_oak(name):\n if 'quercus' in name.lower():\n return True\n else:\n return False", "def test_search_checkname(self):\n self.assertEquals(self.t['Scrubs'].search('my first')[0]['episodename'], 'My First Day')\n self.assertEquals(self.t['My Name Is Earl'].search('Faked His Own Death')[0]['episodename'], 'Faked His Own Death')", "def isAddName(name):\t\n if lib.essentials.isAlphanumeric(name) != 0:\n\tprint \" '%s' is not valid name. \\n Vadapter-name should be an alphanumeric.\" % (name)\n #output.completeOutputError(lib.errorhandler.InvalidArgumentCount(descape = \" '%s' is not valid name. \\n Vadapter-name should be an alphanumeric.\" % (name))) \n return -1\n \n if lib.essentials.isStartNumeric(name) != 0:\n\tprint \"'%s' is not valid name. \\n Vadapter name should not start with an digit\"% (name)\n\t#output.completeOutputError(lib.errorhandler.InvalidArgumentCount(descape = \"'%s' is not valid name. \\n Vadapter name should not start with an digit\"% (name)))\n return -1\n\n if lib.essentials.isContainSpecial(name) != 0:\n\tprint \"'%s' is not valid name. \\n Vadapter name should not contain special characher\" % (name)\n\t#output.completeOutputError(InvalidArgumentCount(descape = \"'%s' is not valid name. \\n Vadapter name should not contain special characher\" % (name)))\n return -1\n\n# if lib.db.db.ifExistsInDatabase(name) == 0:\n#\tprint NameError(\"'%s' is not valid name. 
\\n Already Exists\" % (name))\n#\treturn -1\n \n return 0", "def supplier_exist(supplier_name: str) -> bool:\n\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select name from supplier where name = '{}'\".format(supplier_name)\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n if len(data) == 0:\n return False\n return True", "def contains(name):", "def getByName(database,firstname):\n correspondant=[]\n for key,usr in database.items():\n if firstname == usr.firstname:\n correspondant.append(usr)\n if len(correspondant)==0:\n print(f\"there is no user named {firstname}\")\n return 0, False\n if len(correspondant)>1:\n print(f\"there are many users named {firstname}\")\n lastname=input(\"Whar is his last name\")\n for usr in correspondant:\n if usr.lastname==lastname:\n return usr,True\n else:\n return correspondant[0],True", "def filter ( self, name, context ):\n return (name == self.name_last)", "def check(what,string):\n if what == \"characters\":\n query = list(engine.execute(f\"SELECT name FROM characters WHERE name = '{string}'\"))\n if len(query) > 0:\n return True\n else:\n return False\n \n elif what == \"script\":\n query = list(engine.execute(f\"SELECT script_l FROM script WHERE script_l = '{string}'\"))\n if len(query) > 0:\n return True\n else:\n return False\n \n elif what == \"episodes\":\n query = list(engine.execute(f\"SELECT episode FROM episodes WHERE episode = '{string}'\"))\n if len(query) > 0:\n return True\n else:\n return False\n #extra meme..", "def test_name_returner(self):\n test = self.data.name_returner()\n self.assertIn(('Trevor', 'Harvey'), test)\n self.assertIn(('Nik', 'Silver'), test)", "def FoodCheckIn(sc, event):\n channel = sc.api_call('channels.info', channel=event['channel'])\n food = event['text'][9:]\n if food:\n if 'pizza' in food:\n sc.api_call('reactions.add', as_user='true', channel=event['channel'],\n timestamp=event['ts'], name='pizza')\n user = sc.api_call('users.info', user=event['user'])\n db = pymysql.connect(host='localhost', user='pizzabot', db='pizzachat')\n cursor = db.cursor()\n query = 'INSERT INTO foodlist (who, what) VALUES (%s, %s)'\n cursor.execute(query, (user['user']['name'], food.encode('utf-8')))\n db.commit()\n db.close()", "def getAllWhereNameIs(table, name):\n\ttry:\n\t\tcon = sqlite3.connect('PampDb.db')\n\t\tcur = con.cursor()\n\t\tcur.execute(\"SELECT * FROM \" + table + \" WHERE name like'\" + name + \"%'\")\n\t\tob = cur.fetchall()\n\t\tif not ob:\n\t\t\treturn \"\"\n\t\telse:\n\t\t\tobje = ob[0]\n\t\t\treturn obje\n\t\tcon.commit()\n\t\tcon.close()\n\texcept:\n\t\tprint('Could not run function getAllWhereNameIs from DbController')", "def get_names_users(self):\n user_1 = self.view.entry_player_1.get()\n user_2 = self.view.entry_player_2.get()\n if len(user_1) == 0 or len(user_2) == 0:\n\n tk.messagebox.showwarning(\"Warning\", \"Please enter players name\")\n self.logger.warning(\"Please enter players name\")\n return False\n self.update_players_name(user_1, user_2)\n return True" ]
[ "0.65653473", "0.63060015", "0.6292949", "0.62000495", "0.6126626", "0.6053125", "0.6008738", "0.5959989", "0.59151655", "0.5908109", "0.58882326", "0.58628136", "0.58392847", "0.5811589", "0.58058965", "0.5798567", "0.57955873", "0.57936364", "0.5790114", "0.5773204", "0.5770911", "0.57299495", "0.57217395", "0.5712629", "0.5710864", "0.56901556", "0.56858337", "0.56688577", "0.56485623", "0.56396246" ]
0.73230463
0
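A small self-contained sketch of the lookup the `findFood` row above performs. The example `dictfood` contents and the `FoodChecker` wrapper class are invented for illustration, and `name in dictfood` is used as the Python 3 spelling of the Python 2 `dictfood.has_key(name)` call in the original.

```python
dictfood = {"apple": 52, "bread": 265}  # hypothetical food database keyed by lower-case name

class FoodChecker:
    def findFood(self, name):
        name = name.lower()
        return name in dictfood  # Python 3 equivalent of dictfood.has_key(name)

checker = FoodChecker()
print(checker.findFood("Apple"))  # True
print(checker.findFood("Pizza"))  # False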
IFieldWidget factory for LocationWidget.
def LocationFieldWidget(field, request): return FieldWidget(field, LocationWidget(request))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self.fields = [ \n \n #plugins.FieldWidget(\"widget\", descr=\"Start from widget\",\n # default=\"/\"),\n #plugins.FieldMarker(\"markersearch\", descr=\"Search for marker\"),\n #plugins.FieldMarker(\"markerreplace\", descr=\"Replace with marker\"),\n #plugins.FieldBool(\"character\", descr=\"Plot character of bands\")\n ]", "def __init__(self):\n self.fields = [ \n \n #plugins.FieldWidget(\"widget\", descr=\"Start from widget\",\n # default=\"/\"),\n #plugins.FieldMarker(\"markersearch\", descr=\"Search for marker\"),\n #plugins.FieldMarker(\"markerreplace\", descr=\"Replace with marker\"),\n #plugins.FieldBool(\"character\", descr=\"Plot character of bands\")\n ]", "def __init__(self):\n self.fields = [ \n \n# plugins.FieldWidget(\"widget\", descr=\"Start from widget\",\n# default=\"/\"),\n# plugins.FieldMarker(\"markersearch\", descr=\"Search for marker\"),\n# plugins.FieldMarker(\"markerreplace\", descr=\"Replace with marker\"),\n plugins.FieldBool(\"character\", descr=\"Plot character of bands\")\n ]", "def location(self, location_id):\r\n return Location(self, location_id)", "def __init__(self, location):\n self.location = location", "def create_location(self, location):\n \"Does nothing\"", "def __init__(__self__, *,\n location: pulumi.Input[str]):\n pulumi.set(__self__, \"location\", location)", "def newLocation(self, **attrlinks):\n return Location(self, **attrlinks)", "def add_simple_widget(self, name, widget, label=None, value_handler=None, add_indicator=None, location=(None,0)):\n if name in self.params:\n raise KeyError(\"widget {} already exists\".format(name))\n row,col,rowspan,colspan=self._normalize_location(location,default=(None,0,1,None))\n if label is not None:\n wlabel=QtWidgets.QLabel(self)\n wlabel.setObjectName(_fromUtf8(\"{}__label\".format(name)))\n self.formLayout.addWidget(wlabel,row,col,rowspan,1)\n wlabel.setText(_translate(self.name,label,None))\n else:\n wlabel=None\n value_handler=value_handler or values_module.get_default_value_handler(widget)\n if add_indicator is None:\n add_indicator=self.add_indicator\n if add_indicator:\n windicator=QtWidgets.QLabel(self)\n windicator.setObjectName(_fromUtf8(\"{}__indicator\".format(name)))\n self.formLayout.addWidget(windicator,row,col+2,rowspan,1)\n indicator_handler=values_module.WidgetLabelIndicatorHandler(windicator,widget=value_handler)\n else:\n indicator_handler=None\n if wlabel is None:\n self.formLayout.addWidget(widget,row,col,rowspan,colspan or (2 if add_indicator else 3))\n else:\n self.formLayout.addWidget(widget,row,col+1,rowspan,colspan or (1 if add_indicator else 2))\n self._add_widget(name,self.ParamRow(widget,wlabel,value_handler,indicator_handler))\n return value_handler", "def from_location(self, location: str) -> Location:\n return Location({\n 'location': location,\n '': 'Location'\n })", "def get_location(self):\n return self.location", "def PlaceWidget(self, p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...", "def PlaceWidget(self, p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...", "def __init__(self, location_id, x=0, y=0):\r\n self.location_id = location_id\r\n self.x = x\r\n self.y = y", "def PlaceWidget(self, p_float, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def set_location(self, location):\n self.location = location", "def get_location(self):\n return self.cleaned_data['location']", "def _makeLocationElement(self, locationObject, name=None):\n\n locElement = 
ET.Element(\"location\")\n if name is not None:\n locElement.attrib['name'] = name\n for dimensionName, dimensionValue in locationObject.items():\n dimElement = ET.Element('dimension')\n dimElement.attrib['name'] = dimensionName\n if type(dimensionValue)==tuple:\n dimElement.attrib['xvalue'] = \"%f\"%dimensionValue[0]\n dimElement.attrib['yvalue'] = \"%f\"%dimensionValue[1]\n else:\n dimElement.attrib['xvalue'] = \"%f\"%dimensionValue\n locElement.append(dimElement)\n return locElement", "def __str__(self):\n return \"Location(%s, %s)\" % (self.latitude, self.longitude)", "def XPGetWidgetForLocation(inContainer,\n inXOffset, inYOffset,\n inRecursive,\n inVisibleOnly):\n pass", "def add_custom_widget(self, name, widget, value_handler=None, indicator_handler=None, location=(None,0,1,None)):\n if name in self.params:\n raise KeyError(\"widget {} already exists\".format(name))\n location=self._normalize_location(location,default=(None,0,1,3))\n self.formLayout.addWidget(widget,*location)\n value_handler=value_handler or values_module.get_default_value_handler(widget)\n indicator_handler=indicator_handler or values_module.get_default_indicator_handler(widget)\n self._add_widget(name,self.ParamRow(widget,None,value_handler,indicator_handler))\n return value_handler", "def __init__(self, loc):\n self.loc = loc", "def _get_field(self) -> typing.Union[QWidget, Widget]:\n if self.per_dimension:\n self.per_dimension = False\n prop = self.from_algorithm_property(self)\n self.per_dimension = True\n res = ListInput(prop, 3)\n elif not inspect.isclass(self.value_type):\n res = self._get_field_magicgui(self)\n elif hasattr(self.value_type, \"get_object\"):\n res = self.value_type.get_object()\n else:\n res = self._get_field_from_value_type(self)\n tool_tip_text = self.help_text or \"\"\n tool_tip_text += f\" default value: {_pretty_print(self.default_value)}\"\n if isinstance(res, QWidget):\n res.setToolTip(tool_tip_text)\n if isinstance(res, Widget):\n res.tooltip = tool_tip_text # pylint: disable=attribute-defined-outside-init # false positive\n return res", "def test_create_location(self):\n location = self.location\n\n self.assertTrue(isinstance(location, Location))\n self.assertEqual(location.name, \"Test Location\")", "def create_location_based(cls, name, question, default_response, contacts, user): \n poll = Poll.objects.create(\n name=name,\n question=question,\n default_response=default_response, \n user=user,\n type=Poll.TYPE_LOCATION)\n poll.contacts = contacts\n return poll", "def XPathFieldWidget(field, request):\n return FieldWidget(field, XPathWidget(request))", "def make_from_instance(instance):\n data = model_to_dict(instance)\n data['id'] = instance.id\n if instance.location:\n data['latitude'] = round(instance.location.y, 7)\n data['longitude'] = round(instance.location.x, 7)\n else:\n data['latitude'] = None\n data['longitude'] = None\n return GeneralInformationForm(\n initial=data\n )", "def __init__(__self__, *,\n name: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[Union[str, 'ExtendedLocationTypes']]] = None):\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if type is not None:\n pulumi.set(__self__, \"type\", type)", "def __init__(__self__, *,\n name: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[Union[str, 'ExtendedLocationTypes']]] = None):\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if type is not None:\n pulumi.set(__self__, \"type\", type)", "def __init__(self, location, latitude, longitude, *args, 
**kwargs):\n super().__init__(*args, **kwargs)\n self.location = location\n self.latitude = latitude\n self.longitude = longitude" ]
[ "0.5998817", "0.5998817", "0.58848", "0.5651634", "0.56191385", "0.56007206", "0.5575786", "0.53905076", "0.5337973", "0.5329842", "0.5285621", "0.52263105", "0.52263105", "0.5196118", "0.51863796", "0.5159046", "0.5151313", "0.5149169", "0.5144681", "0.5127543", "0.5118616", "0.5114372", "0.5112062", "0.509714", "0.50733006", "0.5067035", "0.50479084", "0.5033048", "0.5033048", "0.50246906" ]
0.8268078
0
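For the `LocationFieldWidget` row above, such factories are conventionally registered as multi-adapters in z3c.form. The sketch below shows that decorator pattern; `ILocationField` and `LocationWidget` are placeholder stand-ins for whatever the source package actually defines, so only the registration shape should be read as authoritative.

```python
from zope.interface import Interface, implementer
from zope.component import adapter
from z3c.form.interfaces import IFieldWidget, IFormLayer
from z3c.form.widget import FieldWidget
from z3c.form.browser.text import TextWidget

class ILocationField(Interface):
    """Placeholder for the package's real location-field interface."""

class LocationWidget(TextWidget):
    """Placeholder for the package's real LocationWidget."""

@adapter(ILocationField, IFormLayer)
@implementer(IFieldWidget)
def LocationFieldWidget(field, request):
    # FieldWidget binds the schema field to the widget instance and returns it
    return FieldWidget(field, LocationWidget(request))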
Save files such as the dataset, mask and settings as pickle files so they can be loaded in the ``Aggregator``.
def save_attributes_for_aggregator(self, paths):\n\n    # These functions save the objects we will later access using the aggregator. They are saved via the `pickle`\n    # module in Python, which serializes the data on to the hard-disk.\n\n    with open(f"{paths.pickle_path}/dataset.pickle", "wb") as f:\n        pickle.dump(self.dataset, f)\n\n    with open(f"{paths.pickle_path}/settings.pickle", "wb+") as f:\n        pickle.dump(self.settings, f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pickle_data(self):\n if 'data_sets.pckl' in self.expected_pickles:\n to_file(\n self.data_sets,\n os.path.join(self.logdir, 'data_sets.pckl')\n )\n if 'all_params.pckl' in self.expected_pickles:\n to_file(\n self.all_params,\n os.path.join(self.logdir, 'all_params.pckl')\n )\n if 'labels.pckl' in self.expected_pickles:\n to_file(\n self.labels,\n os.path.join(self.logdir, 'labels.pckl')\n )\n if 'minimiser_info.pckl' in self.expected_pickles:\n to_file(\n self.minimiser_info,\n os.path.join(self.logdir, 'minimiser_info.pckl')\n )", "def pickle_dump_files():\n with open('data/' + dataset_name + '_' + model_name + '_' + 'predictions', 'wb') as f:\n pickle.dump(predictions, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'state_sentences', 'wb') as f:\n pickle.dump(final_state_sentences, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'decoded_sentences', 'wb') as f:\n pickle.dump(final_decoded_sentences, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'ids', 'wb') as f:\n pickle.dump(idx, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'exemplars', 'wb') as f:\n pickle.dump(exemplars, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'counter_exemplars', 'wb') as f:\n pickle.dump(counter_exemplars, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'top_exemplar_words', 'wb') as f:\n pickle.dump(top_exemplar_words, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'top_counter_exemplar_words', 'wb') as f:\n pickle.dump(top_counter_exemplar_words, f)", "def save_to(self, save_path=\"./\", run_flag='', save_method=\"pickle\"):\n # TODO: Finish the save_method parameters\n time_stamp = self.time_stamp\n time_stamp = self.time_stamp + \"_\" + run_flag\n save_path = os.path.join(save_path, time_stamp)\n\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n if self.feature_importance_pool:\n file_path = os.path.join(save_path, \"feature_importances.pkl\")\n save_file(file_path, self.feature_importance_pool)\n\n if self.feature_importance_hist:\n file_path = os.path.join(save_path, \"feature_importances_hist.png\")\n save_file(file_path, self.feature_importance_hist[0])\n\n if self.area_under_curve_pool:\n file_path = os.path.join(save_path, \"auc_fpr_tpr.pkl\")\n save_file(file_path, self.area_under_curve_pool)\n\n if self.receiver_operating_characteristic_curve:\n file_path = os.path.join(save_path, \"roc_curve.png\")\n save_file(file_path, self.receiver_operating_characteristic_curve[0])\n\n if self.training_report_pool:\n file_path = os.path.join(save_path, \"training_report.pkl\")\n save_file(file_path, self.training_report_pool)\n\n if self.learning_line:\n file_path = os.path.join(save_path, \"learning_curve.png\")\n save_file(file_path, self.learning_line[0])\n\n file_path = os.path.join(save_path, time_stamp + \"_object.pkl\")\n with open(file_path, 'wb') as opfh:\n pickle.dump(self, opfh)", "def __save_datasets(self):\n self.train.to_csv('{}/{}/{}'.format(path_to_train_set, img_format, 'train.csv'))\n self.valid.to_csv('{}/{}/{}'.format(path_to_valid_set, img_format, 'valid.csv'))\n self.test.to_csv('{}/{}/{}'.format(path_to_test_set, img_format, 'test.csv'))", "def saveSettings(self):\n self.genFiles.applyData()\n self.genGraph.applyData()", "def save_datasets(self):\n if self.processed_extension == '.csv':\n # Save to csv\n logger.info(f'Saving sets to csv:')\n \n # TRAIN\n logger.info(f'train: {self.train_path}')\n \n # Concatenate X and y\n train_data = 
self.train_data[0]\n train_data['TARGET'] = self.train_data[1]\n \n # Save as csv\n train_data.to_csv(self.train_path, index = False)\n \n \n # VAL\n logger.info(f'val: {self.val_path}')\n \n # Concatenate X and y\n val_data = self.val_data[0]\n val_data['TARGET'] = self.val_data[1]\n \n # Save as csv\n val_data.to_csv(self.val_path, index = False)\n \n # TEST\n logger.info(f'test: {self.test_path}')\n \n # Concatenate X and y\n test_data = self.test_data[0]\n test_data['TARGET'] = self.test_data[1]\n \n # Save as csv\n self.test_data.to_csv(self.test_path, index = False)\n \n elif self.processed_extension == '.npz':\n # Convert y to numpy array\n if isinstance(self.train_data[1], pd.Series):\n self.train_data[1] = self.train_data[1].to_numpy()\n if isinstance(self.val_data[1], pd.Series):\n self.val_data[1] = self.val_data[1].to_numpy()\n if isinstance(self.test_data[1], pd.Series):\n self.test_data[1] = self.test_data[1].to_numpy()\n \n # Save to npz (scipy sparse)\n logger.info(f'Saving sets to npz:')\n\n logger.info(f'train: {self.train_path}')\n train_data = [self.train_data[0], np.reshape(self.train_data[1], (-1,1))]\n sparse.save_npz(self.train_path, sparse.hstack(train_data))\n \n logger.info(f'val: {self.val_path}')\n val_data = [self.val_data[0], np.reshape(self.val_data[1], (-1,1))]\n sparse.save_npz(self.val_path, sparse.hstack(val_data))\n\n logger.info(f'test: {self.test_path}')\n test_data = [self.test_data[0], np.reshape(self.test_data[1], (-1,1))]\n sparse.save_npz(self.test_path, sparse.hstack(test_data))\n\n else:\n raise AttributeError(f'Wrong extension: {self.processed_extension}')\n \n self.input_size = self.train_data[0].shape[1]\n logger.info(f'Saved datasets.')", "def save(self, path, name):\n if not self._frozen:\n raise Exception(\"Dataset must be frozen\")\n # create directory\n pathlib.Path(os.path.join(path,name)).mkdir(parents=True, exist_ok=True)\n self._raw_data.to_hdf(os.path.join(path,name,\"dataset.h5\"), key=\"raw_data\")\n self._proc_data.to_hdf(os.path.join(path,name,\"dataset.h5\"), key=\"proc_data\")\n np.save(os.path.join(path,name,\"_X_train.npy\"), self._X_train)\n np.save(os.path.join(path,name,\"_X_test.npy\"), self._X_test)\n np.save(os.path.join(path,name,\"_y_train.npy\"), self._y_train)\n np.save(os.path.join(path,name,\"_y_test.npy\"), self._y_test)\n \n np.save(os.path.join(path,name,\"_X_mean.npy\"), self._X_mean)\n np.save(os.path.join(path,name,\"_X_std.npy\"), self._X_std)\n np.save(os.path.join(path,name,\"_y_mean.npy\"), self._y_mean)\n np.save(os.path.join(path,name,\"_y_std.npy\"), self._y_std)\n \n with open(os.path.join(path,name,\"_seed.pkl\"), \"wb\") as fp: #Pickling\n pickle.dump(self._seed, fp)\n with open(os.path.join(path,name,\"_train_part.pkl\"), \"wb\") as fp: #Pickling\n pickle.dump(self._train_part, fp)\n with open(os.path.join(path,name,\"_test_part.pkl\"), \"wb\") as fp: #Pickling\n pickle.dump(self._test_part, fp)\n with open(os.path.join(path,name,\"_columns.pkl\"), \"wb\") as fp: #Pickling\n pickle.dump(self._columns, fp)", "def save_all(cls, dirpath=\".\"):\n for n, v in cls.__data.items():\n pickle.dump(v, open(cls.dirpath + n + '.p', 'wb'))\n print \"Data saved to: %s\" % dirpath", "def save(self):\n pickle_save(self.results, 'results', self.main_dir)", "def save_inst(self):\n self.sanity_check()\n self.data_loaded_check()\n\n fname_pub_auth_all = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_auth_all, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_auth_top = 
''.join([self.config.dir_data, '/',\n self.config.fname_pub_auth_top, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_inst_all = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_inst_all, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_inst_top = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_inst_top, '_',\n self.config.experiment_id, '.pk'])\n\n pickle.dump(self.pub_auth_all, open(fname_pub_auth_all, 'wb'))\n pickle.dump(self.pub_auth_top, open(fname_pub_auth_top, 'wb'))\n pickle.dump(self.pub_inst_all, open(fname_pub_inst_all, 'wb'))\n pickle.dump(self.pub_inst_top, open(fname_pub_inst_top, 'wb'))\n\n fname_pub_history = ''.join([self.config.dir_data, '/history_',\n self.config.experiment_id, '.pk'])\n pickle.dump(self.history, open(fname_pub_history, 'wb'))\n\n fname_pub_staff = ''.join([self.config.dir_data, '/staff_',\n self.config.experiment_id, '.pk'])\n pickle.dump(self.staff, open(fname_pub_staff, 'wb'))", "def save_data_pickle(self, save_full=False):\n self.train.to_pickle('../input/train_mod.pkl')\n self.test.to_pickle('../input/test_mod.pkl')\n if save_full:\n self.train_full.to_pickle('../input/train_full_mod.pkl')", "def save_outputs(self):\n write_pickled(join(self.output_folder, \"results.pkl\"), self.get_results())", "def save_features_to_file(self):\n if not os.path.exists(self.features_save_path):\n os.makedirs(self.features_save_path)\n for s in self.sets:\n self.save_features_to_file_by_set(s)", "def save(self):\n if self.loaded:\n list_embeddingNames = [self.embeddings.vsm_name, self.synset_embeddings.vsm_name, self.imagined_embeddings.vsm_name]\n full_file_name = self.resource_manager.get_multimodal_dataset(self.corpus, list_embeddingNames)\n logging.info('Saving dataset to [%s]', full_file_name)\n with lzma.open(full_file_name, 'wb') as f:\n pickle.dump(self, f)\n else:\n logging.error('Dataset not loaded, call \"build\" method first!')", "def dumpme(self) :\n fileName = \"./data/oP4_ModelBuilder.dump\"\n with open(fileName,\"wb\") as dumpedFile:\n oPickler = pickle.Pickler(dumpedFile)\n oPickler.dump(self)", "def _dump_states(self, train=True):\n\t\tprefix = self.config.experiment_dir_name+\"/datastreams/\"\n\t\ttry:\n\t\t\tos.mkdir(prefix)\n\t\texcept:\n\t\t\tpass\n\n\t\tprefix += \"{}.pickle\"\n\n\t\tif train:\n\t\t\tself.train_accuracy.dump(prefix.format(\"train_accuracy\"))\n\t\t\tself.train_epochs.dump(prefix.format(\"train_epochs\"))\n\t\t\tself.train_loss.dump(prefix.format(\"train_loss\"))\n\t\t\tself.train_confusion_matrix.dump(prefix.format(\"train_confusion_matrix\"))\n\t\t\tself.learning_rate.dump(prefix.format(\"learning_rate\"))\n\t\telse:\n\t\t\tself.val_accuracy.dump(prefix.format(\"val_accuracy\"))\n\t\t\tself.val_epochs.dump(prefix.format(\"val_epochs\"))\n\t\t\tself.val_loss.dump(prefix.format(\"val_loss\"))\n\t\t\tself.val_confusion_matrix.dump(prefix.format(\"val_confusion_matrix\"))\n\t\t\"\"\"\n\t\tSave dataset specific metadata into experiment dir\n\t\t\"\"\"\n\t\t# TODO: Redo this with more information from dataset meta.json file\n\t\tmeta_path = self.config.experiment_dir_name+\"/meta.json\"\n\t\t_meta = {}\n\t\t_meta[\"classes\"] = self.classes\n\t\t_meta[\"plot_platform\"] = self.config.plot_platform\n\t\t# _meta[\"dataset_dir\"] = self.dataset.dataset_folder\n\t\tif not os.path.exists(meta_path):\n\t\t\tfp = open(meta_path, \"w\")\n\t\t\tfp.write(json.dumps(_meta))\n\t\t\tfp.close()", "def _save(self, dataset, path, files, copy_files=False):\n raise NotImplementedError('Loader {} does not support 
saving datasets.'.format(self.type()))", "def _save(self):\n\t\t\n\t\tdirectory = self.Output_path\n\n\t\t# replace with \n\t\t# file_name = hermes.mk_themis_file_name(themis_obj = self)\n\t\tfile_name = f'Themis_{self.CELL_ID[\"experiment\"]}_u{self.CELL_ID[\"unit\"]}_c{self.CELL_ID[\"cell\"]}_r{self.CELL_ID[\"run\"]}.pkl'\n\n\t\tsave_path = directory / file_name\n\n\t\t# Atomic saving (helpful?)\n\t\ttemp_path = save_path.with_suffix(save_path.suffix + '.tmp')\n\t\t\n\t\tself.SavePath = save_path\n\n\t\t\n\t\twith open(temp_path, 'wb') as f:\n\t\t\tpickle.dump(self, f)\n\n\t\ttemp_path.rename(save_path)\n\n\t\tprint(f'Saved {self.RUN_KEY} as {save_path}')", "def save(self, path=None):\n data = self._collect_data()\n\n name = np.random.choice(['a', 'b', 'c', 'd', 'e', 'f']+list(map(str, range(0, 10))), size=8)\n if path is None:\n path = './logs/'+\"\".join(name)+'_'\n with open(path, \"wb\") as f:\n cloudpickle.dump(data, f)\n print(\"Saved at {}\".format(path))", "def pickle(self,data,filename):\n pickle.dump(data, open(filename, 'wb'))", "def save_configurations(self):\n # Get the file path\n self.data_path = self.data_path_entry.get()\n # Open the file\n with open(self.data_path, 'rb') as file:\n self.log('Opened ' + str(self.data_path))\n # Un-serialize\n info = pickle.load(file)\n # Write the new properties\n self.main_window.overwrite_properties(info)\n\n self.exit()", "def save(self):\n if self.loaded:\n full_file_name = self.resource_manager.get_dataset(self.corpus, self.embeddings.vsm_name)\n logging.info('Saving dataset to [%s]', full_file_name)\n with lzma.open(full_file_name, 'wb') as f:\n pickle.dump(self, f)\n else:\n logging.error('Dataset not loaded, call \"build\" method first!')", "def save(self, filename):\n with open(filename, 'w') as f:\n pickle.dump((self.components, self.mean), f)", "def save_data_pickle(PATH, data, dataset, filename):\n with open(PATH + '/' + dataset + \"_\" + filename + \".pkl\",\"wb\") as f:\n pickle.dump(data,f)\n print(filename, \"created\")", "def save(self):\n\n # make a clone to preserve the original in case it's still needed\n clone = {}\n\n for machine in self.activity.keys():\n data = self.activity[machine].copy()\n data[\"filtered activity\"] = np.array(data[\"filtered activity\"], dtype=np.float)\n data[\"raw activity\"] = np.array(data[\"raw activity\"], dtype=np.float)\n data[\"time\"] = np.array(data[\"time\"], dtype=np.float)\n clone[machine] = data\n\n out = open(self.filename, \"wb\")\n pickle.dump(clone, out)\n out.close()", "def save(self):\n pickle.dump(self, open(self.path, \"wb\"))", "def store(self):\n\t\tprint(\"Storing to\", self.storagedir)\n\n\t\tfor filename in self.FILENAMES:\n\t\t\twith open(os.path.join(self.storagedir, filename), \"wb\") as handle:\n\t\t\t\t_pickle.dump(getattr(self, filename), handle)", "def save(self, filename=\"matpipe.p\"):\n temp_backend = self.learner.backend\n self.learner._backend = self.learner.backend.fitted_pipeline_\n for obj in [self, self.learner, self.reducer, self.cleaner,\n self.autofeaturizer]:\n obj._logger = None\n with open(filename, 'wb') as f:\n pickle.dump(self, f)\n self.learner._backend = temp_backend", "def load_files(self):\n print('Saving numpy mask arrays in {0}'.format(self.ProcDir))\n\n if not os.path.isdir(self.ProcDir): os.mkdir(self.ProcDir)\n if not os.path.isdir(self.OutDir): os.mkdir(self.OutDir)\n\n self.Files = {}\n for ig in self.Set:\n phase = roipy.tools.load_half(ig,2)\n # convert wavelength to displacements\n # NOTE: make attributes of commonly used 
values in rsc: float(ig.Rsc['WAVELENGTH'])\n disp = phase * (ig.Wavelength / (4*np.pi))\n igram = ma.array(disp, mask=ma.nomask)\n name = self.save_ma(ig, igram) #Mask_ array is just zeros at this point..\n self.Files[ig.ID] = name\n\n print('load_files() complete: {0} interferograms'.format(self.Set.Nig))", "def save(self, path: utils.URLPath):\n save_somclassifier_config(self.config, path / \"config.json\")\n self.model.save(str(path / \"model.h5\"))\n io_functions.save_joblib(self.binarizer, path / \"binarizer.joblib\")\n\n io_functions.save_json(self.data_ids[\"validation\"], path / \"ids_validate.json\")\n io_functions.save_json(self.data_ids[\"train\"], path / \"ids_train.json\")" ]
[ "0.71841556", "0.7051201", "0.6999294", "0.69657177", "0.6965532", "0.67290497", "0.6701712", "0.67006266", "0.66806704", "0.6629543", "0.662331", "0.66160923", "0.6615659", "0.66139543", "0.659722", "0.65589774", "0.65515673", "0.65493506", "0.6524547", "0.64821887", "0.6479978", "0.6473524", "0.64637905", "0.64386934", "0.6420897", "0.6402101", "0.6381964", "0.6371142", "0.63617325", "0.6352685" ]
0.7810631
0
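The loading side that the `save_attributes_for_aggregator` row above implies, as a hedged sketch; the `pickle_path` value is a made-up example standing in for `paths.pickle_path`, and the real aggregator may wrap these loads differently.

```python
import pickle

pickle_path = "output/search_0/pickles"  # hypothetical directory used at save time

with open(f"{pickle_path}/dataset.pickle", "rb") as f:
    dataset = pickle.load(f)

with open(f"{pickle_path}/settings.pickle", "rb") as f:
    settings = pickle.load(f)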
Registers new managers with the component manager. Managers are configured and set up before components.
def add_managers(self, managers: Union[List[Any], Tuple[Any]]):\n    for m in self._flatten(managers):\n        self.apply_configuration_defaults(m)\n        self._managers.add(m)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_manager(self) -> None:\n\n #Clean out the process list.\n self.process_list.clear()\n for _ in range(self.num_processes):\n p = Process(target=self.multiprocessing_job,\n args=(self.process_job,))\n self.process_list.append(p)\n self.restart_required = False", "def add_manager(self, agent):\n with self.simulation_mutex:\n self.get(\"manager_agents\")[agent.name] = agent", "def setup(self, manager):\n self._manager = manager\n self._configured = True", "def _configure_manager(self):\n self._manager = CloudDatabaseManager(self,\n resource_class=CloudDatabaseInstance, response_key=\"instance\",\n uri_base=\"instances\")\n self._flavor_manager = BaseManager(self,\n resource_class=CloudDatabaseFlavor, response_key=\"flavor\",\n uri_base=\"flavors\")\n self._backup_manager = CloudDatabaseBackupManager(self,\n resource_class=CloudDatabaseBackup, response_key=\"backup\",\n uri_base=\"backups\")", "def SetManager(self, mgr):\r\n\r\n self.manager = mgr", "def register_manager(self, update, context):\r\n new_manager_chat_id = update['message']['chat']['id']\r\n new_manager_name = update['message']['chat']['first_name']\r\n\r\n with open('managers.json') as obj:\r\n managers = json.load(obj)\r\n\r\n managers[new_manager_name] = new_manager_chat_id\r\n\r\n with open('managers.json', 'w') as obj:\r\n json.dump(managers, obj)\r\n\r\n context.bot.send_message(chat_id=update.message.chat_id, text=f'{new_manager_name} - {new_manager_chat_id}')", "def setManager(self, manager=None):\n self._manager = manager", "def register(self, events=[]):\n self.events = events\n if not self in manager.handler:\n manager.handler.append(self)", "def setup_components(self, builder: \"Builder\"):\n self._setup_components(builder, self._managers + self._components)", "def install_providers():\n host = env.host_string\n providers = get_providers(host)\n for provider in providers.values():\n if getattr(provider, 'manager', None) is not None:\n provider.manager.install()\n\n provider.install()", "def startManager(self):\n\t\tlogging.info(\"----->>>The DeviceDataManager will be started\")\n\t\tself.sysPerfManager.startManager()\n\t\tself.sensorAdapterManager.startManager()\n\t\tif self.enableRedis:\n\t\t\tself.redisClient.connectClient()\n\t\t\n\t\tif self.enableMqtt:\n\t\t\tself.mqttClient.connectClient()", "def set_up():\n ResourcesManager().set_up()\n LocatorUtil().load_locators()", "def _init_pluginmanager(self):\n self.pluginmanager = PluginManager(logger=self.logger)\n self.logger.debug(\"Registering execution wide plugins:\")\n self.pluginmanager.load_default_run_plugins()\n self.pluginmanager.load_custom_run_plugins(self.args.plugin_path)\n self.logger.debug(\"Execution wide plugins loaded and registered.\")", "def _setManager(self, mgr: \"StrategyManager\") -> None:", "def getPackageManager(self) -> None:\n\t\tfor pkgmgr in config.SUPPORTED_PACKAGE_MGRS:\n\t\t\tif subprocess.run([\"which\", pkgmgr]).returncode == 0:\n\t\t\t\tself.package_manager = pkgmgr\n\t\t\t\treturn\n\t\tlogger.error(\"Supported package manager not found, aborting.\")\n\t\traise ValueError(\"Package manager unsupported\")", "def RegisterParsers(cls, parser_classes):\n for parser_class in parser_classes:\n cls.RegisterParser(parser_class)", "def configure_node_managers(config):\n host_names = get_compute_node_host_names(config)\n sysctl_settings = unflatten_dict_keys(config, 'sysctl_(.*)')\n sys_settings = unflatten_dict_keys(config, '(/sys/.*)')\n pjobs = [delayed(configure_node_manager)(host_name, sysctl_settings, sys_settings, 
config.get('transparent_hugepage_enabled')) for host_name in host_names]\n Parallel(n_jobs=len(pjobs))(pjobs)", "def _configure_managers() -> Tuple[tff.simulation.FileCheckpointManager,\n List[tff.simulation.MetricsManager]]:\n root_output_dir = FLAGS.root_output_dir\n experiment_name = FLAGS.experiment_name\n utils_impl.create_directory_if_not_exists(root_output_dir)\n\n checkpoint_dir = os.path.join(root_output_dir, 'checkpoints', experiment_name)\n utils_impl.create_directory_if_not_exists(checkpoint_dir)\n checkpoint_manager = tff.simulation.FileCheckpointManager(\n checkpoint_dir, step=FLAGS.rounds_per_checkpoint)\n\n results_dir = os.path.join(root_output_dir, 'results', experiment_name)\n utils_impl.create_directory_if_not_exists(results_dir)\n csv_file = os.path.join(results_dir, 'experiment.metrics.csv')\n csv_manager = tff.simulation.CSVMetricsManager(csv_file)\n\n summary_dir = os.path.join(root_output_dir, 'logdir', experiment_name)\n tensorboard_manager = tff.simulation.TensorBoardManager(summary_dir)\n\n logging.info('Writing...')\n logging.info(' checkpoints to: %s', checkpoint_dir)\n logging.info(' CSV metrics to: %s', csv_file)\n logging.info(' TensorBoard summaries to: %s', summary_dir)\n\n return checkpoint_manager, [csv_manager, tensorboard_manager]", "def register(self):\n self._register_dockyard()\n self._register_docker()", "def register_execution_manager(self, execution_manager):\n self.execution_managers.append(execution_manager)", "def init_managers(endpoints_file: Optional[Text]) -> None:", "def test_register_dynamic_plugin_manager1(self):\n pass", "def create_allcomponents(self):\n\n # we store all components in a list/hash which we iterate for startup/shutdown/dumps debugging, and which can be used to lookup components\n self.components = MDictList()\n\n # setup log manager helper early so that log manager can receive messages (and queue them until startup)\n self.createappendcomp('logmanager', mlogger.MewloLogManager)\n\n # now update site state (log manager should catch this)\n self.set_statelabel(mconst.DEF_SITESTATE_INITIALIZE_START)\n\n # create (non-db-persistent) site settings -- these are set by configuration at runtime\n self.settings = self.createappendcomp('settings', MewloSettings)\n\n # database manager\n self.createappendcomp('dbmanager', mdbmanager_sqlalchemy.MewloDatabaseManagerSqlA)\n\n # component registry\n self.createappendcomp('registrymanager', mregistry.MewloRegistryManager)\n\n # signal dispatcher\n self.createappendcomp('signalmanager', msignal.MewloSignalManager)\n\n # rbac permission manager\n self.createappendcomp('rbacmanager', mrbac.MewloRbacManager)\n\n # create persistent(db) pack settings\n self.createappendcomp('packsettings', mdbsettings_pack.MewloSettingsDb_Pack)\n\n # collection of mewlo addon packs\n self.createappendcomp('packmanager', mpackmanager.MewloPackManager)\n\n # site addon manager\n #self.createappendcomp('siteaddonmanager', msiteaddon.MewloSiteAddonManager)\n\n # route manager\n self.createappendcomp('routemanager', mroute.MewloRouteManager)\n\n # navnode manager\n self.createappendcomp('navnodemanager', mnav.NavNodeManager)\n\n # template manager\n self.createappendcomp('templatemanager', mtemplate.MewloTemplateManager)\n\n # asset and alias manager\n self.createappendcomp('assetmanager', massetmanager.MewloAssetManager)\n\n # template helper (this is available inside template/views and provides helper functions like navigation menus, etc.)\n self.createappendcomp('templatehelper', 
mtemplatehelper.MewloTemplateHelper)\n\n # session manager\n self.createappendcomp('sessionmanager', msessionmanager.MewloSessionManager)\n\n # verification manager\n self.createappendcomp('verificationmanager', mverificationmanager.MewloVerificationManager)\n\n # user manager\n self.createappendcomp('usermanager', musermanager.MewloUserManager)\n\n # mail manager\n self.createappendcomp('mailmanager', mmailmanager.MewloMailManager)", "def manager(self, ckpt_manager):\n self.ckpt_manager = ckpt_manager", "def _configure_manager(self):\n self._manager = CloudLoadBalancerManager(self,\n resource_class=CloudLoadBalancer,\n response_key=\"loadBalancer\", uri_base=\"loadbalancers\")", "def manager():\n pass", "def manufacturers(self, manufacturers):\n\n self._manufacturers = manufacturers", "def addAllFactories(self) -> None:\n ...", "def set_manager(self, manager=None):\n if not manager:\n manager = ScheduleManager()\n else:\n for task in self._tasks:\n if task.name in manager:\n error = \"Duplicate task name <{}>.\".format(task.name)\n raise TaskNameDuplicateError(error)\n\n for task in self._tasks:\n if task.manager:\n task.manager.unregister(name=task.name)\n manager.register(task)\n\n return manager", "def register(self):\n if self.registered:\n return\n\n config = current_app.config.get('TERMINAL_CONFIGS', {})\n apps = config.get('apps', [])\n\n for app in apps:\n cls, mod = app.rsplit('.', maxsplit=1)\n imported = import_module(cls)\n instance = getattr(imported, mod)()\n\n if getattr(instance, 'name', None) is None:\n continue\n\n if getattr(instance, 'hidden', False):\n self.hidden[getattr(instance, 'name')] = instance\n else:\n self.apps[getattr(instance, 'name')] = instance\n\n self.__set_apps_aliases(getattr(instance, 'name'), getattr(instance, 'aliases'))\n\n self.registered = True" ]
[ "0.61248213", "0.60335684", "0.6032508", "0.5993886", "0.59699", "0.58547497", "0.5809432", "0.56251633", "0.56170464", "0.56035906", "0.55961794", "0.5585247", "0.5480994", "0.54456997", "0.53996974", "0.53993297", "0.53955936", "0.5385212", "0.5380328", "0.5357524", "0.5357201", "0.533299", "0.5320499", "0.52582926", "0.52460986", "0.5233379", "0.5221362", "0.52081925", "0.51999635", "0.5195066" ]
0.74649
0
Get all components that are an instance of ``component_type``.
def get_components_by_type(
        self, component_type: Union[type, Tuple[type, ...]]
    ) -> List[Any]:
        return [c for c in self._components if isinstance(c, component_type)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_components_by_type(\n self, component_type: Union[type, Tuple[type, ...]]\n ) -> List[Any]:\n return self._manager.get_components_by_type(component_type)", "def get_components(self, filter_type=None):\n\n if filter_type is None:\n out = self.components\n elif isinstance(filter_type, str):\n out = {}\n cls = co.str_to_comp(filter_type)\n for comp in self.get_components():\n if isinstance(self.components[comp], cls):\n out[comp] = self.components[comp]\n else:\n out = {}\n for comp in self.get_components():\n if isinstance(self.components[comp], filter_type):\n out[comp] = self.components[comp]\n\n return out", "def components(self):\r\n children = self.container.findall(\"ComponentInstance\")\r\n return [XMLComponent(c) for c in children]", "def components(self):\r\n return [JSONComponent(c) for c\r\n in self.container.get(\"ComponentInstances\", [])]", "def __components__():\n # Get the component registry of the active application.\n registry = context.app.component_registry\n # A shortcut: return cached components.\n if registry.components is not None:\n return registry.components\n # A list of `Component` subclasses defined in modules exported by addons.\n components = [Component]\n idx = 0\n while idx < len(components):\n for subclass in components[idx].__subclasses__():\n # Skip realizations.\n if issubclass(subclass, Realization):\n continue\n # Check if the component belongs to the current application.\n if subclass.__enabled__():\n components.append(subclass)\n idx += 1\n # Cache and return the components.\n registry.components = components\n return components", "def components(self):\r\n return list(self._components)", "def components(self):\r\n return self.q(css=Component.BODY_SELECTOR).map(\r\n lambda el: Component(self.browser, el.get_attribute('data-locator'))).results", "def components(self, predicate=None):\n \n if predicate is None:\n return self._get(\"components\").json()\n else:\n return self._get(\"components/search\", params={\"predicate\":predicate}).json()", "def queryComponent(type=None, filter=None, all=0):", "def get_items_of_type(self, item_type):\n return (item for item in self.items if item.get_type() == item_type)", "def list_components(self, request, context):\n response = ListComponentsResponse()\n for component in self._delegator.list_components():\n response.components.append(component)\n return response", "def list(self,\n component_type=None,\n cursor=None,\n included_fields=None,\n page_size=None,\n sort_ascending=None,\n sort_by=None,\n summary=None,\n sync=None,\n ):\n return self._invoke('list',\n {\n 'component_type': component_type,\n 'cursor': cursor,\n 'included_fields': included_fields,\n 'page_size': page_size,\n 'sort_ascending': sort_ascending,\n 'sort_by': sort_by,\n 'summary': summary,\n 'sync': sync,\n })", "def components(self) -> List[IngredientObjectComponents]:\n return self._components", "def get_instance_of_type(self, instance_type):\n query = read_query('typing/instance_of_type') % instance_type\n response = self._submit_query(query)\n return [elem['name']['value'] for elem in response] if response else []", "def get_components(state: ChemicalSystem) -> ParseCompRet:\n def _get_single_comps(comp_list, comptype):\n ret_comps = [comp for comp in comp_list\n if isinstance(comp, comptype)]\n if ret_comps:\n return ret_comps[0]\n else:\n return None\n\n solvent_comp: Optional[SolventComponent] = _get_single_comps(\n list(state.values()), SolventComponent\n )\n\n protein_comp: Optional[ProteinComponent] = _get_single_comps(\n 
list(state.values()), ProteinComponent\n )\n\n small_mols = []\n for comp in state.components.values():\n if isinstance(comp, SmallMoleculeComponent):\n small_mols.append(comp)\n\n return solvent_comp, protein_comp, small_mols", "def components(self):\n return self._components", "def components(self):\n return self._components", "def components(self):\n return self._components", "def components(self):\n return self.__components", "def components(self):\n return self.__components", "def test_get_component_descriptors_by_type_using_get(self):\n pass", "def get_all_components(self, platform: RuntimeProcessorType) -> List[Component]:\n components: List[Component] = []\n\n catalogs = self._component_cache.get(platform.name, {})\n for catalog_name, catalog_properties in catalogs.items():\n components.extend(list(catalog_properties.get(\"components\", {}).values()))\n\n if not components and platform != RuntimeProcessorType.LOCAL:\n self.log.error(f\"No components could be found in any catalog for platform type '{platform.name}'.\")\n\n return components", "def list_components(self) -> Dict[str, Any]:\n return self._manager.list_components()", "def get_components_from_registry(registry):\n\n unique_component_classes = set(registry.all().values())\n\n components = []\n for component_class in unique_component_classes:\n components.append(component_class())\n\n return components", "def get_objects_by_type(self, object_type):\n\n # Get dictionary of objects by type.\n try:\n object_dict = self.model_map['object'][object_type]\n except KeyError:\n # This object type isn't in the model map.\n return None\n\n # Extract the object dictionaries and put them in list for\n # return.\n out = [value[1] for value in object_dict.values()]\n\n # The 'out' list can be empty if the object type is mapped,\n # but all the objects have been removed.\n if len(out) == 0:\n return None\n else:\n return out", "def iter_components(self):\n return self.components.values()", "def get_component(self):\n component = []\n component = [self.component_type, self.component_value, self.spot]\n\n if component[2] != None:\n print component\n return component", "def components(self):\n # The '_components' attribute is defined according to the\n # subclass of Dyadic the instance belongs to.\n return self._components", "def get_store_component(type):\n store_components = StoreComponent.query.filter(\n StoreComponent.component_type == type) # no need to order\n store_components_data = [\n component.to_dict() for component in store_components.all()]\n return jsonify(store_components=store_components_data)", "def component_type(self):\n return self._component_type" ]
[ "0.8547184", "0.6889929", "0.61937904", "0.6006221", "0.59004545", "0.5866749", "0.5784385", "0.57724464", "0.57717794", "0.5745258", "0.57174027", "0.5656232", "0.5640331", "0.5635853", "0.5634207", "0.5617102", "0.5617102", "0.5615041", "0.5579383", "0.5579383", "0.5561703", "0.55425507", "0.55365974", "0.54980105", "0.5460907", "0.5460393", "0.5426799", "0.54138416", "0.540968", "0.5396431" ]
0.8519839
1
Get the component with name ``name``. Names are guaranteed to be unique.
def get_component(self, name: str) -> Any:
        for c in self._components:
            if c.name == name:
                return c
        raise ValueError(f"No component found with name {name}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_component(self, name):\n for cmpt in self.components:\n if cmpt['name'] == name:\n return cmpt", "def get_component(self, name: str) -> Any:\n return self._manager.get_component(name)", "def get(name):\r\n return componentManager.components[name]", "def comp(self, componentname):\n retv = self.components.lookup(componentname)\n if (retv == None):\n raise Exception(\"Component not found: '{0}'.\".format(componentname))\n return retv", "def get_component(self, sCompName):\n return self._dComponents.get(sCompName, None)", "def get_by_name(self, name):\n return self.by_name.get(name.upper())", "def name(self, name):\n return self[self.name_cache[name]]", "def find_by_name(self, name):\n return self.get(name)", "def get_by_name(cls, name):\n return cls.query.filter(cls.name == name).first()", "def by_name(cls, name):\n return cls.all().filter('name =', name).get()", "def get_element_by_name(self, name):\n for element in self._elements:\n if element.get_name() == name:\n return element", "def get_by_name(self, name: str) -> Gst.Element:\n return self._pipeline.get_by_name(name)", "def get(self, name):\n return self.cm.get(name)", "def lookup_by_name(cls, name):\n return cls.__by_name[name]", "def get_by_name(name: str):\n return Category.query.filter_by(name=name).first()", "def get_card(self, name):\n for card in self.cards:\n if card.name == name:\n return card\n\n return None", "def get_room(self, name):\n for i in self.rooms:\n if self.rooms[i].name == name:\n return self.rooms[i]\n raise RuntimeError, \"Room '%s' not known\" % name", "def get_by_name(self, name):\n category = Category.query.filter_by(name=name).first()\n\n return category", "def get_card_by_name(self,name):\n try:\n card_id = self._category2id['name'][name].values()\n except KeyError:\n print \"No card by given name! 
[{}]\".format(name)\n return None\n\n if len(card_id) > 1:\n print \"Multiple cards match name, returning first...\"\n\n return self._id2database[card_id[0]]", "def get_element_by_name(self, name):\n for e in self.E:\n if e.name == name:\n return e", "def by_name(cls, name):\n u = cls.all().filter('name =', name).get()\n return u", "def component(self, component_name, version=None):\n response = self._base_request(\n 'get',\n ['components', component_name.lower()],\n schema=COMPONENT_SCHEMA,\n )\n versions = response['versions']\n\n if version:\n requested_version = tools.manifest.ComponentVersion(str(version))\n best_version = [v for v in versions\n if tools.manifest.ComponentVersion(v['version']) == requested_version][0]\n else:\n best_version = max(versions, key=lambda v: semver.Version(v['version']))\n\n return tools.manifest.Manifest(\n name=('%s/%s' % (response['namespace'], response['name'])),\n version=tools.manifest.ComponentVersion(best_version['version']),\n download_url=best_version['url'],\n dependencies=self._version_dependencies(best_version),\n maintainers=None,\n )", "def component_path(self, name: str) -> Path:\n rv = self.root / f\"components/{name}/component.yaml\"\n return rv", "def getContactByName(self, name):\n for contact in self.contacts:\n if name == contact.name:\n return contact\n\n return None", "def by_name(cls, name):\n return dbsession.query(cls).filter_by(_name=str(name)).first()", "def by_name(cls, name):\n return dbsession.query(cls).filter_by(_name=str(name)).first()", "def by_name(cls, name):\n return dbsession.query(cls).filter_by(_name=str(name)).first()", "def collection_find_by_name(self, name):\n raise Exception(\"NEVER BEEN TESTED!\")\n try:\n return CastleCollection(name, self, look_for_name=True)\n except:\n raise", "def get_by_name(self, course_name):\n course = Course.query.filter_by(name=course_name).first()\n\n return course", "def _get_kit_by_component(self, comp_name, comp_version=None):\n kit_list = self._kit_db_api.getKitList()\n kits = [\n kit\n for kit in kit_list\n for component in kit.getComponentList()\n if component.getName() == comp_name and\n (comp_version is None or\n component.getVersion() == comp_version)\n ]\n if not kits:\n raise KitNotFound(\n 'Kit containing component [%s] not found' % (comp_name))\n\n if len(kits) > 1:\n raise ComponentNotFound(\n 'Kit name must be specified, multiple kits contain '\n 'component: {}'.format(comp_name)\n )\n\n return kits[0]" ]
[ "0.8581954", "0.8132362", "0.8047409", "0.75000876", "0.6909813", "0.67308456", "0.65872496", "0.6536908", "0.6505384", "0.6497451", "0.64877367", "0.64358634", "0.64024574", "0.63259006", "0.62867415", "0.62573177", "0.6255165", "0.62408423", "0.62342554", "0.6208173", "0.62029594", "0.62001723", "0.61956346", "0.6188797", "0.61416125", "0.61416125", "0.61416125", "0.6128013", "0.61102504", "0.61004" ]
0.8775755
0
Get a mapping of component names to components held by the manager.

Returns
-------
Dict[str, Any]
    A mapping of component names to components.
def list_components(self) -> Dict[str, Any]:
        return {c.name: c for c in self._components}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_components(self) -> Dict[str, Any]:\n return self._manager.list_components()", "def _getComponentsInfo(self):\n result = {}\n et = ElementTree()\n components = self.agentCompleteConfig.listComponents_() + \\\n self.agentCompleteConfig.listWebapps_()\n for comp in components:\n compConfig = getattr(self.agentCompleteConfig, comp)\n daemonXml = os.path.join(compConfig.componentDir, \"Daemon.xml\")\n if not os.path.exists(daemonXml):\n logging.warn(\"%s: can't read file '%s' of component '%s', ignored.\" %\n (self.__class__.__name__, daemonXml, comp))\n continue\n tree = et.parse(daemonXml)\n pid = None\n for child in tree.getchildren():\n if child.tag == \"ProcessID\":\n pid = child.get(\"Value\")\n if pid:\n result[comp] = pid # componentName, componentPID\n return result", "def get_components(self) -> Dict[str, pathlib.Path]:\n return {\n function_name: pathlib.Path(module_path) for function_name,\n module_path in self._config_parser[_COMPONENTS_SECTION].items()\n }", "def components_map(self):\r\n raise NotImplementedError", "def components(self):\r\n return [JSONComponent(c) for c\r\n in self.container.get(\"ComponentInstances\", [])]", "def components(self):\n return self.__components", "def components(self):\n return self.__components", "def components(self):\n return self._components", "def components(self):\n return self._components", "def components(self):\n return self._components", "def __components__():\n # Get the component registry of the active application.\n registry = context.app.component_registry\n # A shortcut: return cached components.\n if registry.components is not None:\n return registry.components\n # A list of `Component` subclasses defined in modules exported by addons.\n components = [Component]\n idx = 0\n while idx < len(components):\n for subclass in components[idx].__subclasses__():\n # Skip realizations.\n if issubclass(subclass, Realization):\n continue\n # Check if the component belongs to the current application.\n if subclass.__enabled__():\n components.append(subclass)\n idx += 1\n # Cache and return the components.\n registry.components = components\n return components", "def getComponentMap(pNodes, pInteractions):\n rpInteractions = reverseInteractions(pInteractions)\n componentMap = dict()\n for i in pNodes.keys():\n if pNodes[i] != \"complex\":\n continue\n componentMap[i] = []\n if i not in rpInteractions:\n continue\n for j in rpInteractions[i]:\n if rpInteractions[i][j] == \"component>\":\n componentMap[i].append(j)\n return(componentMap)", "def _GetComponents(\n self,\n ) -> Dict[str, Dict[str, Union[PrimitiveSchema, EnumSchema, MessageSchema]]]:\n self._CreateSchemas()\n if self.schema_objs is None: # Check required by mypy.\n raise AssertionError(\"OpenAPI type schemas not initialized.\")\n\n # The `Components Object` `components` field of the root `OpenAPI Object`.\n return {\n \"schemas\":\n cast(Dict[str, Union[PrimitiveSchema, EnumSchema, MessageSchema]],\n self.schema_objs),\n }", "def components(self) -> Dict[str, Any]:\n expected_modules, optional_parameters = self._get_signature_keys(self)\n components = {\n k: getattr(self, k) for k in self.config.keys() if not k.startswith(\"_\") and k not in optional_parameters\n }\n\n if set(components.keys()) != expected_modules:\n raise ValueError(\n f\"{self} has been incorrectly initialized or {self.__class__} is incorrectly implemented. 
Expected\"\n f\" {expected_modules} to be defined, but {components} are defined.\"\n )\n\n return components", "def components(self):\r\n return list(self._components)", "def iter_components(self):\n return self.components.values()", "def get_component_name_list(self):\n return self._component_name_list", "def components(self):\r\n children = self.container.findall(\"ComponentInstance\")\r\n return [XMLComponent(c) for c in children]", "def component_configurations(self):\n return self._component_configurations", "def components(self):\n # The '_components' attribute is defined according to the\n # subclass of Dyadic the instance belongs to.\n return self._components", "def get_graded_components(self):\r\n return self.components.keys()", "def get(name):\r\n return componentManager.components[name]", "def get_component_instance_lists(\n graph_client: GremlinClient, topology_id: str, topology_ref: str\n) -> Dict[str, List[Vertex]]:\n\n sgt: GraphTraversalSource = graph_client.topology_subgraph(\n topology_id, topology_ref\n )\n\n component_names: List[str] = sgt.V().values(\"component\").dedup().toList()\n\n output: Dict[str, List[Vertex]] = {}\n\n for component_name in component_names:\n\n output[component_name] = sgt.V().has(\"component\", component_name).toList()\n\n return output", "def get_components(self, which):\n mappings = self.representation_mappings.get(\n getattr(self, which).__class__, [])\n\n old_to_new = dict()\n for name in getattr(self, which).components:\n for m in mappings:\n if isinstance(m, RegexRepresentationMapping):\n pattr = re.match(m.repr_name, name)\n old_to_new[name] = m.new_name.format(*pattr.groups())\n\n elif m.repr_name == name:\n old_to_new[name] = m.new_name\n\n mapping = dict()\n for name in getattr(self, which).components:\n mapping[old_to_new.get(name, name)] = name\n\n return mapping", "def get_all_component_parameters(self) -> Dict[str, Any]:\n return self._node[\"app_data\"][\"component_parameters\"]", "def get_comp_vals(self, propname):\n if not isinstance(propname, str):\n return propname\n if propname.endswith('*'):\n try:\n return self[propname]\n except KeyError:\n pass\n try:\n vals = {}\n for comp in self.components.values():\n vals[comp.name] = comp[propname]\n return vals\n except KeyError:\n msg = f'{propname} not found on at least one component'\n raise Exception(msg)", "def get_components(self, key, analyte=None):\n out = {}\n for k, v in self.components.items():\n if key in k:\n if analyte is None:\n out[k] = v\n elif self.switches[analyte][k]:\n out[k] = v\n return out", "def get_components_from_file(self, filepath):\n mod = self.get_model_from_file(filepath)\n comp = mod.components\n compdict = {}\n for c in comp:\n c.__class__ = LEMSBrianComponent\n compdict[c.id] = c\n return compdict", "def nodes_in_components(\n components: DefaultDict[int, int]\n) -> DefaultDict[int, List]:\n content = defaultdict(list)\n for node, comp in components.items():\n content[comp].append(node)\n return content", "def get_components_from_registry(registry):\n\n unique_component_classes = set(registry.all().values())\n\n components = []\n for component_class in unique_component_classes:\n components.append(component_class())\n\n return components" ]
[ "0.75031936", "0.7189879", "0.67731893", "0.6669654", "0.6528711", "0.6451407", "0.6451407", "0.6361447", "0.6353952", "0.6353952", "0.6349827", "0.63444376", "0.62123674", "0.6171588", "0.6085922", "0.60758775", "0.6042098", "0.60234123", "0.60161865", "0.59968275", "0.5994169", "0.5863766", "0.5823518", "0.5806851", "0.5789012", "0.57674015", "0.574188", "0.57392883", "0.570074", "0.5695921" ]
0.7947298
0
Separately configure and set up the managers and components held by the component manager, in that order. The setup process involves applying default configurations and then calling the manager or component's setup method. This can result in new components as a side effect of setup because components themselves have access to this interface through the builder in their setup method.
def setup_components(self, builder: "Builder"):
        self._setup_components(builder, self._managers + self._components)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_component(self):\n self.conf, self.context = self._init_component()\n self.initialize()", "def setup(self, manager):\n self._manager = manager\n self._configured = True", "def _configure(self):\n pass", "def _configure(self):\n Component._configure(self)\n self.dataDim = self.inventory.dataDim\n self.reader = self.inventory.reader\n self.coordsys = self.inventory.coordsys\n return", "def create_allcomponents(self):\n\n # we store all components in a list/hash which we iterate for startup/shutdown/dumps debugging, and which can be used to lookup components\n self.components = MDictList()\n\n # setup log manager helper early so that log manager can receive messages (and queue them until startup)\n self.createappendcomp('logmanager', mlogger.MewloLogManager)\n\n # now update site state (log manager should catch this)\n self.set_statelabel(mconst.DEF_SITESTATE_INITIALIZE_START)\n\n # create (non-db-persistent) site settings -- these are set by configuration at runtime\n self.settings = self.createappendcomp('settings', MewloSettings)\n\n # database manager\n self.createappendcomp('dbmanager', mdbmanager_sqlalchemy.MewloDatabaseManagerSqlA)\n\n # component registry\n self.createappendcomp('registrymanager', mregistry.MewloRegistryManager)\n\n # signal dispatcher\n self.createappendcomp('signalmanager', msignal.MewloSignalManager)\n\n # rbac permission manager\n self.createappendcomp('rbacmanager', mrbac.MewloRbacManager)\n\n # create persistent(db) pack settings\n self.createappendcomp('packsettings', mdbsettings_pack.MewloSettingsDb_Pack)\n\n # collection of mewlo addon packs\n self.createappendcomp('packmanager', mpackmanager.MewloPackManager)\n\n # site addon manager\n #self.createappendcomp('siteaddonmanager', msiteaddon.MewloSiteAddonManager)\n\n # route manager\n self.createappendcomp('routemanager', mroute.MewloRouteManager)\n\n # navnode manager\n self.createappendcomp('navnodemanager', mnav.NavNodeManager)\n\n # template manager\n self.createappendcomp('templatemanager', mtemplate.MewloTemplateManager)\n\n # asset and alias manager\n self.createappendcomp('assetmanager', massetmanager.MewloAssetManager)\n\n # template helper (this is available inside template/views and provides helper functions like navigation menus, etc.)\n self.createappendcomp('templatehelper', mtemplatehelper.MewloTemplateHelper)\n\n # session manager\n self.createappendcomp('sessionmanager', msessionmanager.MewloSessionManager)\n\n # verification manager\n self.createappendcomp('verificationmanager', mverificationmanager.MewloVerificationManager)\n\n # user manager\n self.createappendcomp('usermanager', musermanager.MewloUserManager)\n\n # mail manager\n self.createappendcomp('mailmanager', mmailmanager.MewloMailManager)", "def components(build_reset, monkeypatch):\n controllers, visuals, htmls = create_components()\n\n app = App(__name__, rows=len(visuals), sidebar=True)\n for controller in controllers:\n # pylint: disable=protected-access\n assert COMPONENT_REGISTRY[controller._uuid] == controller\n app.add_sidebar(controller)\n\n for vis in visuals:\n # pylint: disable=protected-access\n assert COMPONENT_REGISTRY[vis._uuid] == vis\n app.add(vis)\n\n for htm in htmls:\n # pylint: disable=protected-access\n assert COMPONENT_REGISTRY[htm._uuid] == htm\n app.add_sidebar(htm)\n\n assert len(COMPONENT_REGISTRY) == len(controllers) + 2 * len(visuals) + len(htmls)\n\n # pylint: disable=protected-access\n app._build()\n\n # run second time to make sure nothing weird happens with subsequent builds\n 
app._build()\n\n with server_check(app) as server:\n yield server", "def setup(self, configuration, lifecycle_manager):\n self.configuration = configuration\n self.lifecycle = lifecycle_manager\n\n self.lifecycle.add_constraint(\n self.get_components_by_type,\n restrict_during=[\"initialization\", \"population_creation\"],\n )\n self.lifecycle.add_constraint(\n self.get_component, restrict_during=[\"population_creation\"]\n )\n self.lifecycle.add_constraint(\n self.list_components, restrict_during=[\"initialization\"]\n )", "def _initComponent(self):\n\n self.optimizer = self._initOptimizer()\n self.scheduler = self._initScheduler()", "def setup(self):\n raise NotImplementedError(\"Need to be implemented in subclasses\")", "def setup(self):\n raise NotImplementedError", "def setup(self):\n raise NotImplementedError", "def setup(self):\n raise NotImplementedError", "def setup(self):\n raise NotImplementedError", "def setup(self):\n\t\tScriptedLoadableModuleWidget.setup(self)\n\n\t\tself._loadUI()\n\n\t\t# Set scene in MRML widgets. Make sure that in Qt designer the top-level qMRMLWidget's\n\t\t# \"mrmlSceneChanged(vtkMRMLScene*)\" signal in is connected to each MRML widget's.\n\t\t# \"setMRMLScene(vtkMRMLScene*)\" slot.\n\t\t\n\n\t\t# Create logic class. Logic implements all computations that should be possible to run\n\t\t# in batch mode, without a graphical user interface.\n\t\tself.logic = postopProgrammingLogic()\n\n\t\t# Connections\n\t\tself._setupConnections()", "def configure(self):\r\n pass", "def configure(self):\n\n pass", "def configure(self):\n pass", "def configure(self):\n pass", "def setup_early(self):\n\n # create all helper/manager components first\n self.create_allcomponents()\n\n # set up config stuff\n self.setup_confighelper()\n\n # settings\n self.add_earlydefault_settings()\n self.add_settings_early()\n self.add_latesettings_aliases()\n\n # other stuff\n self.add_loggers()\n self.add_routes()\n self.add_navnodes()\n\n # site addons\n self.add_addons()\n\n # we add fallback loggers at END, after user-site added loggers\n self.add_fallback_loggers()\n\n # now update site state (log manager should catch this)\n self.set_statelabel(mconst.DEF_SITESTATE_INITIALIZE_END)", "def setup_class(cls):\n cls.expected_custom_component_configuration = dict(foo=\"bar\")\n\n cls.agent_config = AgentConfig(\n agent_name=\"agent_name\",\n author=\"author\",\n version=\"0.1.0\",\n default_routing={str(cls.old_protocol_id): str(cls.old_connection_id)},\n default_connection=str(cls.old_connection_id),\n )\n\n cls.agent_config.protocols = {cls.old_protocol_id}\n cls.agent_config.contracts = {cls.old_contract_id}\n cls.agent_config.connections = {cls.old_connection_id}\n cls.agent_config.skills = {cls.old_skill_id}\n cls.agent_config.component_configurations[\n ComponentId(ComponentType.PROTOCOL, cls.old_protocol_id)\n ] = cls.expected_custom_component_configuration\n cls.agent_config.component_configurations[\n ComponentId(ComponentType.CONTRACT, cls.old_contract_id)\n ] = cls.expected_custom_component_configuration\n cls.agent_config.component_configurations[\n ComponentId(ComponentType.CONNECTION, cls.old_connection_id)\n ] = cls.expected_custom_component_configuration\n cls.agent_config.component_configurations[\n ComponentId(ComponentType.SKILL, cls.old_skill_id)\n ] = cls.expected_custom_component_configuration\n\n replace_component_ids(cls.agent_config, cls.replacements)", "def _configure(self):\n Application._configure(self)\n\n return", "def antenny_init_components(self):\n if 
self.antenny_config is None:\n print(\"Please load a config before initializing components\")\n if not self.antenny_config.check():\n print(\"Config {} is not valid, failed to initialize\".format(self.antenny_config.get_name()))\n print(\"If you believe this is an error, or you have modified the base components of the antenny board, \"\n \"please check Config class as well as the default configs for more details.\")\n\n self.imu_init()\n self.pwm_controller_init()\n self.elevation_servo_init()\n self.azimuth_servo_init()\n self.screen_init()\n self.gps_init()\n self.telemetry_init()\n self.platform_init()", "def initialize(self):\n super(QtBaseWidgetComponent, self).initialize()", "def set_up():\n ResourcesManager().set_up()\n LocatorUtil().load_locators()", "def configure(self):\n\n self.platform.configure()", "def configure(self) -> None:", "def load_components(self, on_initialize=False):\n for name, component_data in self.components.items():\n start_on_initialize = component_data.get('on_initialize', False)\n if start_on_initialize == on_initialize:\n component_data.pop('on_initialize', None)\n\n # inject parent module in component\n if not component_data.get('options', None):\n component_data['options'] = {}\n\n component_data['options']['module'] = self\n component = RonObject.instanceObject(component_data)\n setattr(self, name, component)", "def _setup(self):\n raise NotImplementedError()", "def configure(self):", "def configure(self):" ]
[ "0.7187612", "0.6177336", "0.6071428", "0.60488313", "0.60485494", "0.60239357", "0.59740335", "0.5972695", "0.59304833", "0.5877998", "0.5877998", "0.5877998", "0.5877998", "0.5877996", "0.5876117", "0.5864202", "0.582577", "0.582577", "0.57516897", "0.57322896", "0.5726736", "0.57188195", "0.57034343", "0.5698514", "0.56854826", "0.56766367", "0.5656185", "0.5647224", "0.56468034", "0.56468034" ]
0.7832467
0
Get all components that are an instance of ``component_type``.
def get_components_by_type(
        self, component_type: Union[type, Tuple[type, ...]]
    ) -> List[Any]:
        return self._manager.get_components_by_type(component_type)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_components_by_type(\n self, component_type: Union[type, Tuple[type, ...]]\n ) -> List[Any]:\n return [c for c in self._components if isinstance(c, component_type)]", "def get_components(self, filter_type=None):\n\n if filter_type is None:\n out = self.components\n elif isinstance(filter_type, str):\n out = {}\n cls = co.str_to_comp(filter_type)\n for comp in self.get_components():\n if isinstance(self.components[comp], cls):\n out[comp] = self.components[comp]\n else:\n out = {}\n for comp in self.get_components():\n if isinstance(self.components[comp], filter_type):\n out[comp] = self.components[comp]\n\n return out", "def components(self):\r\n children = self.container.findall(\"ComponentInstance\")\r\n return [XMLComponent(c) for c in children]", "def components(self):\r\n return [JSONComponent(c) for c\r\n in self.container.get(\"ComponentInstances\", [])]", "def __components__():\n # Get the component registry of the active application.\n registry = context.app.component_registry\n # A shortcut: return cached components.\n if registry.components is not None:\n return registry.components\n # A list of `Component` subclasses defined in modules exported by addons.\n components = [Component]\n idx = 0\n while idx < len(components):\n for subclass in components[idx].__subclasses__():\n # Skip realizations.\n if issubclass(subclass, Realization):\n continue\n # Check if the component belongs to the current application.\n if subclass.__enabled__():\n components.append(subclass)\n idx += 1\n # Cache and return the components.\n registry.components = components\n return components", "def components(self):\r\n return list(self._components)", "def components(self):\r\n return self.q(css=Component.BODY_SELECTOR).map(\r\n lambda el: Component(self.browser, el.get_attribute('data-locator'))).results", "def queryComponent(type=None, filter=None, all=0):", "def components(self, predicate=None):\n \n if predicate is None:\n return self._get(\"components\").json()\n else:\n return self._get(\"components/search\", params={\"predicate\":predicate}).json()", "def get_items_of_type(self, item_type):\n return (item for item in self.items if item.get_type() == item_type)", "def list_components(self, request, context):\n response = ListComponentsResponse()\n for component in self._delegator.list_components():\n response.components.append(component)\n return response", "def list(self,\n component_type=None,\n cursor=None,\n included_fields=None,\n page_size=None,\n sort_ascending=None,\n sort_by=None,\n summary=None,\n sync=None,\n ):\n return self._invoke('list',\n {\n 'component_type': component_type,\n 'cursor': cursor,\n 'included_fields': included_fields,\n 'page_size': page_size,\n 'sort_ascending': sort_ascending,\n 'sort_by': sort_by,\n 'summary': summary,\n 'sync': sync,\n })", "def get_instance_of_type(self, instance_type):\n query = read_query('typing/instance_of_type') % instance_type\n response = self._submit_query(query)\n return [elem['name']['value'] for elem in response] if response else []", "def components(self) -> List[IngredientObjectComponents]:\n return self._components", "def get_components(state: ChemicalSystem) -> ParseCompRet:\n def _get_single_comps(comp_list, comptype):\n ret_comps = [comp for comp in comp_list\n if isinstance(comp, comptype)]\n if ret_comps:\n return ret_comps[0]\n else:\n return None\n\n solvent_comp: Optional[SolventComponent] = _get_single_comps(\n list(state.values()), SolventComponent\n )\n\n protein_comp: Optional[ProteinComponent] = 
_get_single_comps(\n list(state.values()), ProteinComponent\n )\n\n small_mols = []\n for comp in state.components.values():\n if isinstance(comp, SmallMoleculeComponent):\n small_mols.append(comp)\n\n return solvent_comp, protein_comp, small_mols", "def components(self):\n return self._components", "def components(self):\n return self._components", "def components(self):\n return self._components", "def components(self):\n return self.__components", "def components(self):\n return self.__components", "def test_get_component_descriptors_by_type_using_get(self):\n pass", "def get_all_components(self, platform: RuntimeProcessorType) -> List[Component]:\n components: List[Component] = []\n\n catalogs = self._component_cache.get(platform.name, {})\n for catalog_name, catalog_properties in catalogs.items():\n components.extend(list(catalog_properties.get(\"components\", {}).values()))\n\n if not components and platform != RuntimeProcessorType.LOCAL:\n self.log.error(f\"No components could be found in any catalog for platform type '{platform.name}'.\")\n\n return components", "def list_components(self) -> Dict[str, Any]:\n return self._manager.list_components()", "def get_components_from_registry(registry):\n\n unique_component_classes = set(registry.all().values())\n\n components = []\n for component_class in unique_component_classes:\n components.append(component_class())\n\n return components", "def get_objects_by_type(self, object_type):\n\n # Get dictionary of objects by type.\n try:\n object_dict = self.model_map['object'][object_type]\n except KeyError:\n # This object type isn't in the model map.\n return None\n\n # Extract the object dictionaries and put them in list for\n # return.\n out = [value[1] for value in object_dict.values()]\n\n # The 'out' list can be empty if the object type is mapped,\n # but all the objects have been removed.\n if len(out) == 0:\n return None\n else:\n return out", "def iter_components(self):\n return self.components.values()", "def get_component(self):\n component = []\n component = [self.component_type, self.component_value, self.spot]\n\n if component[2] != None:\n print component\n return component", "def get_store_component(type):\n store_components = StoreComponent.query.filter(\n StoreComponent.component_type == type) # no need to order\n store_components_data = [\n component.to_dict() for component in store_components.all()]\n return jsonify(store_components=store_components_data)", "def components(self):\n # The '_components' attribute is defined according to the\n # subclass of Dyadic the instance belongs to.\n return self._components", "def component_type(self):\n return self._component_type" ]
[ "0.8520174", "0.6889397", "0.618709", "0.59998345", "0.5893793", "0.58591384", "0.57775843", "0.5769797", "0.5767334", "0.5746458", "0.57118094", "0.56562984", "0.56372815", "0.5633266", "0.5631094", "0.56097656", "0.56097656", "0.5608077", "0.5572066", "0.5572066", "0.5561409", "0.5537017", "0.5530008", "0.54918766", "0.5461508", "0.54536843", "0.5420654", "0.5411062", "0.5407856", "0.53964794" ]
0.8547882
0
Get a mapping of component names to components held by the manager.

Returns
-------
Dict[str, Any]
    A dictionary mapping component names to components.
def list_components(self) -> Dict[str, Any]:
        return self._manager.list_components()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_components(self) -> Dict[str, Any]:\n return {c.name: c for c in self._components}", "def _getComponentsInfo(self):\n result = {}\n et = ElementTree()\n components = self.agentCompleteConfig.listComponents_() + \\\n self.agentCompleteConfig.listWebapps_()\n for comp in components:\n compConfig = getattr(self.agentCompleteConfig, comp)\n daemonXml = os.path.join(compConfig.componentDir, \"Daemon.xml\")\n if not os.path.exists(daemonXml):\n logging.warn(\"%s: can't read file '%s' of component '%s', ignored.\" %\n (self.__class__.__name__, daemonXml, comp))\n continue\n tree = et.parse(daemonXml)\n pid = None\n for child in tree.getchildren():\n if child.tag == \"ProcessID\":\n pid = child.get(\"Value\")\n if pid:\n result[comp] = pid # componentName, componentPID\n return result", "def get_components(self) -> Dict[str, pathlib.Path]:\n return {\n function_name: pathlib.Path(module_path) for function_name,\n module_path in self._config_parser[_COMPONENTS_SECTION].items()\n }", "def components_map(self):\r\n raise NotImplementedError", "def components(self):\r\n return [JSONComponent(c) for c\r\n in self.container.get(\"ComponentInstances\", [])]", "def components(self):\n return self.__components", "def components(self):\n return self.__components", "def __components__():\n # Get the component registry of the active application.\n registry = context.app.component_registry\n # A shortcut: return cached components.\n if registry.components is not None:\n return registry.components\n # A list of `Component` subclasses defined in modules exported by addons.\n components = [Component]\n idx = 0\n while idx < len(components):\n for subclass in components[idx].__subclasses__():\n # Skip realizations.\n if issubclass(subclass, Realization):\n continue\n # Check if the component belongs to the current application.\n if subclass.__enabled__():\n components.append(subclass)\n idx += 1\n # Cache and return the components.\n registry.components = components\n return components", "def getComponentMap(pNodes, pInteractions):\n rpInteractions = reverseInteractions(pInteractions)\n componentMap = dict()\n for i in pNodes.keys():\n if pNodes[i] != \"complex\":\n continue\n componentMap[i] = []\n if i not in rpInteractions:\n continue\n for j in rpInteractions[i]:\n if rpInteractions[i][j] == \"component>\":\n componentMap[i].append(j)\n return(componentMap)", "def _GetComponents(\n self,\n ) -> Dict[str, Dict[str, Union[PrimitiveSchema, EnumSchema, MessageSchema]]]:\n self._CreateSchemas()\n if self.schema_objs is None: # Check required by mypy.\n raise AssertionError(\"OpenAPI type schemas not initialized.\")\n\n # The `Components Object` `components` field of the root `OpenAPI Object`.\n return {\n \"schemas\":\n cast(Dict[str, Union[PrimitiveSchema, EnumSchema, MessageSchema]],\n self.schema_objs),\n }", "def components(self):\n return self._components", "def components(self):\n return self._components", "def components(self):\n return self._components", "def components(self) -> Dict[str, Any]:\n expected_modules, optional_parameters = self._get_signature_keys(self)\n components = {\n k: getattr(self, k) for k in self.config.keys() if not k.startswith(\"_\") and k not in optional_parameters\n }\n\n if set(components.keys()) != expected_modules:\n raise ValueError(\n f\"{self} has been incorrectly initialized or {self.__class__} is incorrectly implemented. 
Expected\"\n f\" {expected_modules} to be defined, but {components} are defined.\"\n )\n\n return components", "def iter_components(self):\n return self.components.values()", "def component_configurations(self):\n return self._component_configurations", "def components(self):\r\n return list(self._components)", "def components(self):\r\n children = self.container.findall(\"ComponentInstance\")\r\n return [XMLComponent(c) for c in children]", "def get_graded_components(self):\r\n return self.components.keys()", "def components(self):\n # The '_components' attribute is defined according to the\n # subclass of Dyadic the instance belongs to.\n return self._components", "def get_component_name_list(self):\n return self._component_name_list", "def get(name):\r\n return componentManager.components[name]", "def get_all_component_parameters(self) -> Dict[str, Any]:\n return self._node[\"app_data\"][\"component_parameters\"]", "def get_comp_vals(self, propname):\n if not isinstance(propname, str):\n return propname\n if propname.endswith('*'):\n try:\n return self[propname]\n except KeyError:\n pass\n try:\n vals = {}\n for comp in self.components.values():\n vals[comp.name] = comp[propname]\n return vals\n except KeyError:\n msg = f'{propname} not found on at least one component'\n raise Exception(msg)", "def get_component_instance_lists(\n graph_client: GremlinClient, topology_id: str, topology_ref: str\n) -> Dict[str, List[Vertex]]:\n\n sgt: GraphTraversalSource = graph_client.topology_subgraph(\n topology_id, topology_ref\n )\n\n component_names: List[str] = sgt.V().values(\"component\").dedup().toList()\n\n output: Dict[str, List[Vertex]] = {}\n\n for component_name in component_names:\n\n output[component_name] = sgt.V().has(\"component\", component_name).toList()\n\n return output", "def get_components(self, key, analyte=None):\n out = {}\n for k, v in self.components.items():\n if key in k:\n if analyte is None:\n out[k] = v\n elif self.switches[analyte][k]:\n out[k] = v\n return out", "def get_components(self, which):\n mappings = self.representation_mappings.get(\n getattr(self, which).__class__, [])\n\n old_to_new = dict()\n for name in getattr(self, which).components:\n for m in mappings:\n if isinstance(m, RegexRepresentationMapping):\n pattr = re.match(m.repr_name, name)\n old_to_new[name] = m.new_name.format(*pattr.groups())\n\n elif m.repr_name == name:\n old_to_new[name] = m.new_name\n\n mapping = dict()\n for name in getattr(self, which).components:\n mapping[old_to_new.get(name, name)] = name\n\n return mapping", "def get_components_from_file(self, filepath):\n mod = self.get_model_from_file(filepath)\n comp = mod.components\n compdict = {}\n for c in comp:\n c.__class__ = LEMSBrianComponent\n compdict[c.id] = c\n return compdict", "def nodes_in_components(\n components: DefaultDict[int, int]\n) -> DefaultDict[int, List]:\n content = defaultdict(list)\n for node, comp in components.items():\n content[comp].append(node)\n return content", "def name(self):\n return \"component_manager\"" ]
[ "0.79906577", "0.7245051", "0.6733757", "0.66009647", "0.6538484", "0.63692576", "0.63692576", "0.6338896", "0.6323031", "0.63061506", "0.6275394", "0.626935", "0.626935", "0.6257535", "0.60413235", "0.60004747", "0.5986872", "0.5979982", "0.5975458", "0.59395707", "0.59297246", "0.5853907", "0.5821086", "0.5818231", "0.5814481", "0.5750759", "0.57457674", "0.5745703", "0.5714806", "0.5708505" ]
0.747328
1
Creates a Gaussian kernel with side length l and a sigma of sig.
def gkern(l=5, sig=1.):
    ax = np.linspace(-(l - 1) / 2., (l - 1) / 2., l)
    xx, yy = np.meshgrid(ax, ax)
    kernel = np.exp(-0.5 * (np.square(xx) + np.square(yy)) / np.square(sig))
    return kernel
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gkern(l=5, sig=1.):\n\n ax = np.arange(-l // 2 + 1., l // 2 + 1.)\n xx, yy = np.meshgrid(ax, ax)\n\n kernel = np.exp(-(xx**2 + yy**2) / (2. * sig**2))\n\n return kernel / np.sum(kernel)", "def gkern(l, sig=1.):\n\n ax = np.linspace(-(l - 1) / 2., (l - 1) / 2., l)\n xx, yy = np.meshgrid(ax, ax)\n\n kernel = np.exp(-0.5 * (np.square(xx) + np.square(yy)) / np.square(sig))\n\n return kernel / np.sum(kernel)", "def gaussian2d(l, sigma=1.0):\n\n ax = np.arange(-l//2 + 1.0, l//2 + 1.0)\n xx, yy = np.meshgrid(ax, ax)\n\n kernel = (1.0 / math.sqrt(2.0 * math.pi * sigma**2)) * np.exp(-(xx**2 + yy**2)/(2.0*sigma**2))\n\n return np.asarray(kernel, dtype=np.float32)", "def isotropic_Gaussian(ksize=15, l=6):\n\n V = np.array([[1, 0], [0, -1]])\n D = np.array([[l, 0], [0, l]])\n Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))\n k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)\n\n return k", "def gaussian_kernel(size, sigma): \n \n kernel = np.zeros((size, size))\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n k = (size - 1) / 2\n sigma_sq = sigma ** 2\n pi_sigma = 1/(2 * np.pi * sigma_sq)\n for i in range(size):\n for j in range(size):\n kernel[i, j] = pi_sigma * np.exp(-0.5 * ((i-k)**2 + (j-k)**2) / (sigma_sq))\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return kernel", "def gaussian_kernel(training_ex, landmark, sigma=0.1):\n return np.exp(-(np.linalg.norm(training_ex - landmark) ** 2 / (2 * (sigma ** 2))))", "def gaussian2d(filter_size=5, sig=1.0):\n ax = np.arange(-filter_size // 2 + 1., filter_size // 2 + 1.)\n xx, yy = np.meshgrid(ax, ax)\n kernel = np.exp(-0.5 * (np.square(xx) + np.square(yy)) / np.square(sig))\n return kernel / np.sum(kernel)", "def gaussian_kernel(size, sigma):\n\n kernel = np.zeros((size, size))\n\n ### YOUR CODE HERE\n k = (size-1)/2\n factor = 1/(2*np.pi*sigma**2)\n for i in range(size):\n for j in range(size):\n exponent = -((i-k)**2 +(j-k)**2)/(2*sigma**2)\n kernel[i,j] = factor*np.exp(exponent)\n ### END YOUR CODE\n\n return kernel", "def gauss_ker(k, sig):\n\tx = np.linspace(-(k//2), (k//2), k)\n\tgx, gy = np.meshgrid(x, x)\n\tkernel = np.exp(-1*(gx**2 + gy**2)/(2*(sig**2)))\n\treturn kernel", "def _gaussian_kernel(self, xdata, l1, sigma_f, sigma_noise=2e-2):\n num_total_points = tf.shape(xdata)[1]\n\n # Expand and take the difference\n # [B, 1, num_total_points, x_size]\n xdata1 = tf.expand_dims(xdata, axis=1)\n # [B, num_total_points, 1, x_size]\n xdata2 = tf.expand_dims(xdata, axis=2)\n # [B, num_total_points, num_total_points, x_size]\n diff = xdata1 - xdata2\n\n # [B, y_size, num_total_points, num_total_points, x_size]\n norm = tf.square(diff[:, None, :, :, :] / l1[:, :, None, None, :])\n\n norm = tf.reduce_sum(\n norm, -1) # [B, data_size, num_total_points, num_total_points]\n\n # [B, y_size, num_total_points, num_total_points]\n kernel = tf.square(sigma_f)[:, :, None, None] * tf.exp(-0.5 * norm)\n\n # Add some noise to the diagonal to make the cholesky work.\n kernel += (sigma_noise**2) * tf.eye(num_total_points)\n\n return kernel", "def makeGaussianKernel(sigma: float) -> np.ndarray:\n\n # Your code here.\n kernel_size = 8*sigma+1\n kernel = np.zeros([kernel_size,kernel_size], dtype=float)\n center = kernel_size//2\n \n \n s = 2*(sigma**2)\n sum_val = 0\n for i in range(0,kernel_size):\n for j in range(0,kernel_size):\n x = i-center\n y = j-center\n kernel[i,j] = np.exp(-(x**2+y**2) / s)\n sum_val += kernel[i,j]\n #/(np.pi * 
s)\n sum_val = 1/sum_val\n print(\"here is the kernel\", kernel*sum_val)\n return kernel*sum_val", "def gaussianKernel(size, sigma=1):\n\n colourers.info(f'Creating gaussian kernel of size {size} with sigma of {sigma}')\n size = int(size) // 2\n x, y = np.mgrid[-size:size+1, -size:size+1]\n normal = 1 / (2.0 * np.pi * sigma**2)\n g = np.exp(-((x**2 + y**2) / (2.0 * sigma ** 2))) * normal\n return g", "def unnorm_gaussian2d(l, sigma=1.0):\n\n ax = np.arange(-l//2 + 1.0, l//2 + 1.0)\n xx, yy = np.meshgrid(ax, ax)\n\n kernel = np.exp(-(xx**2 + yy**2)/(2.0*sigma**2))\n\n return np.asarray(kernel, dtype=np.float32)", "def __guassian_kernel(x, sigma=200):\n return (1 / (sqrt(2.*pi) * sigma)) * exp(-x ** 2 / (2.*sigma**2))", "def get_gaussian(nsig=1.5, kernlen=13):\n\n interval = (2*nsig+1.)/(kernlen)\n x = np.linspace(-nsig-interval/2., nsig+interval/2., kernlen+1)\n kern1d = np.diff(st.norm.cdf(x))\n kernel_raw = np.sqrt(np.outer(kern1d, kern1d))\n kernel = kernel_raw/kernel_raw.sum()\n return theano.shared(kernel.astype(\"float32\"), borrow=True)", "def gaussian3d(l, sigma=1.0):\n\n ax = np.arange(-l//2 + 1.0, l//2 + 1.0)\n xx, yy, zz = np.meshgrid(ax, ax, ax)\n\n kernel = np.exp(-(xx**2 + yy**2 + zz**2)/(2.0*sigma**2))\n\n return np.asarray(kernel, dtype=np.float32)", "def gkern2(kernlen=21, nsig=3):\n # create nxn zeros\n inp = np.zeros((kernlen, kernlen))\n # set element at the middle to one, a dirac delta\n inp[kernlen//2, kernlen//2] = 1\n # gaussian-smooth the dirac, resulting in a gaussian filter mask\n kernel = scipy.ndimage.filters.gaussian_filter(inp, nsig)\n\n return kernel", "def gauss_kernels(size, sigma=1.0):\n if size < 3:\n size = 3\n\n m = size / 2\n x, y = np.mgrid[-m:m + 1, -m:m + 1]\n kernel = np.exp(-(x * x + y * y) / (2 * sigma * sigma))\n kernel_sum = kernel.sum()\n\n if not sum == 0:\n kernel = kernel / kernel_sum\n\n return kernel", "def gaussian_kernel(shape: Tuple[int, int]=(3, 3), sigma: float=0.5):\n m, n = [int((ss - 1.) / 2.) for ss in shape]\n y, x = np.ogrid[-m:m + 1, -n:n + 1]\n kernel = np.exp(-(x * x + y * y) / (2. * sigma * sigma))\n kernel[kernel < np.finfo(kernel.dtype).eps * kernel.max()] = 0\n sumh = kernel.sum()\n if sumh != 0:\n kernel /= sumh\n return kernel", "def gauss_kernel(radius, n_sigmas=8):\n sizex = int(n_sigmas * radius)\n sizey = int(n_sigmas * radius)\n radius = float(radius)\n xc = 0.5 * sizex\n yc = 0.5 * sizey\n y, x = np.mgrid[0:sizey - 1, 0:sizex - 1]\n x = x - xc\n y = y - yc\n x = x / radius\n y = y / radius\n g = np.exp(-0.5 * (x ** 2 + y ** 2))\n return g / (2 * np.pi * radius ** 2) # g.sum()", "def gaussian_kernel(dim, sigma):\n kernel = np.zeros(dim)\n\n if dim%2 == 0:\n begin = dim//2-1\n else:\n begin = dim//2\n\n for i in range(dim):\n kernel[i] = gaussian(i-begin, sigma)\n\n return kernel", "def gauss_kern(size, sigma=1.0):\n h1 = size[0]\n h2 = size[1]\n x, y = np.mgrid[0:h2, 0:h1]\n x = x-h2/2\n y = y-h1/2\n g = np.exp( -( x**2 + y**2 ) / (2*sigma**2) );\n return g / g.sum()", "def generate_gaussian_kernel(shape=(3,3),sigma=0.8):\n m,n = [(ss-1.)/2. 
for ss in shape]\n y,x = np.ogrid[-m:m+1,-n:n+1]\n h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )\n h[ h < np.finfo(h.dtype).eps*h.max() ] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):\n\n v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))\n V = np.array([[v[0], v[1]], [v[1], -v[0]]])\n D = np.array([[l1, 0], [0, l2]])\n Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))\n k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)\n\n return k", "def gkern(kernlen=21, nsig=3):\n interval = (2 * nsig + 1.) / (kernlen)\n x = np.linspace(-nsig - interval / 2., nsig + interval / 2., kernlen + 1)\n kern1d = np.diff(st.norm.cdf(x))\n kernel_raw = np.sqrt(np.outer(kern1d, kern1d))\n kernel = kernel_raw / kernel_raw.sum()\n return kernel;", "def gaussian_k(x0, y0, sigma, height, width):\n y = np.arange(0, width, 1, float)\n x = np.arange(0, height, 1, float)[:, np.newaxis]\n return np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))", "def gaussian_kernel(size, sigma):\n\n m, n = [(s - 1.) / 2. for s in size]\n y, x = np.ogrid[-m:m+1, -n:n+1]\n h = np.exp(-(x*x + y*y) / (2. * sigma * sigma))\n h[h < np.finfo(h.dtype).eps*h.max()] = 0\n sumh = h.sum()\n if sumh != 0: h /= sumh\n return h", "def _gaussian_kernel(self, xdata, l1, sigma_f, sigma_noise=2e-2):\n num_total_points = tf.shape(xdata)[1]\n\n # Expand and take the difference\n xdata1 = tf.expand_dims(xdata, axis=1) # [B, 1, num_total_points, x_size]\n xdata2 = tf.expand_dims(xdata, axis=2) # [B, num_total_points, 1, x_size]\n diff = xdata1 - xdata2 # [B, num_total_points, num_total_points, x_size]\n\n # [B, y_size, num_total_points, num_total_points, x_size]\n if self._kernel == 'PER':\n norm = 2*tf.square(tf.math.sin(3.14*diff[:, None, :, :, :])) / l1[:, :, None, None, :]\n norm = tf.reduce_sum(norm, -1) # [B, data_size, num_total_points, num_total_points]\n # [B, y_size, num_total_points, num_total_points]\n kernel = tf.square(sigma_f)[:, :, None, None] * tf.exp(-norm)\n\n else: # if kernel is normal gaussian\n norm = tf.square(diff[:, None, :, :, :] / l1[:, :, None, None, :])\n norm = tf.reduce_sum(norm, -1) # [B, data_size, num_total_points, num_total_points]\n # [B, y_size, num_total_points, num_total_points]\n kernel = tf.square(sigma_f)[:, :, None, None] * tf.exp(-0.5*norm)\n\n # Add some noise to the diagonal to make the cholesky work.\n kernel += (sigma_noise**2) * tf.eye(num_total_points)\n\n return kernel", "def gaussian_kernel(sigma, truncate=4.0):\n\n sigma = float(sigma)\n radius = int(truncate * sigma + 0.5)\n\n x, y = np.mgrid[-radius:radius + 1, -radius:radius + 1]\n sigma = sigma**2\n\n k = 2 * np.exp(-0.5 * (x**2 + y**2) / sigma)\n k = k / np.sum(k)\n\n return k", "def GaussianKernel(sigma: float = 1., width: int = 0):\n assert not ((width is None or width == 0) and\n (sigma is None or sigma == 0)), \\\n \"GaussianKernel :: both sigma ({}) & width ({}) are not valid\".format(\n sigma, width)\n\n if width is None or width == 0:\n width = int(2.0 * 3.0 * sigma + 1.0)\n if width % 2 == 0:\n width += 1\n\n if sigma is None or sigma == 0:\n sigma = (width - 1)/6.\n half = width//2\n x, y = np.meshgrid(np.linspace(-half, half, width),\n np.linspace(-half, half, width), indexing='xy')\n w = np.exp(- (x**2 + y**2) / (2.*(sigma**2)))\n w /= np.sum(w)\n return torch.from_numpy(w.astype(np.float32)).view(1, 1, width, width)" ]
[ "0.7510109", "0.750365", "0.74863845", "0.7277165", "0.7234427", "0.7157976", "0.7145621", "0.7110056", "0.7061145", "0.70183206", "0.69730043", "0.69548696", "0.6933574", "0.6907124", "0.6884259", "0.6877862", "0.6862947", "0.68579936", "0.6832131", "0.6814453", "0.67858434", "0.67484385", "0.6741314", "0.6706976", "0.6694306", "0.668957", "0.66891843", "0.6683393", "0.6682465", "0.6681274" ]
0.76391137
0
Computes the histogram of the input image
def compute_histogram(self, image):
        hist = [0] * 256
        x, y = image.shape[:2]
        #print(image.shape)
        for i in range(x):
            for j in range(y):
                hist[image[i, j]] += 1

        return hist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hist(img):\n bottom_half = img[img.shape[0]//2:,:] # 0:img.shape[0]//2 is the top half\n histogram = bottom_half.sum(axis=0) \n \n return histogram", "def compute_histogram(self, image):\n hist = [0] * 256\n [h, w] = image.shape\n print(h,w)\n i = 0\n while i < 256:\n for row in range(h):\n for col in range(w):\n if image[row, col] == i:\n hist[i] += 1\n #print(hist[i])\n i += 1\n\n return hist", "def histogram(img):\n BINS = 8\n RANGE = np.tile(np.array([0, 255]), (3, 1))\n\n # histogram of the first image\n r = np.ravel(img[:, :, 0])\n g = np.ravel(img[:, :, 1])\n b = np.ravel(img[:, :, 2])\n hist, endpoints = np.histogramdd([r, g, b], bins = BINS, range = RANGE)\n\n # normalize the images\n return hist/np.sum(hist)", "def compute_histogram(self, image):\n\n # in-built function to calculate histogram\n print(\"size of image: \", np.shape(image))\n print(\"number of pixels: \", np.shape(image)[0] * np.shape(image)[1])\n # hist1 = np.ravel(cv2.calcHist([image], [0], None, [256], [0, 256]))\n # hist = np.ravel(cv2.calcHist([image], [0], None, [256], [0, 256]))\n\n # created function to calculate histogram\n hist = np.zeros(256)\n [rows, columns] = np.shape(image)\n for k in range(256):\n count = 0\n for i in range(rows):\n for j in range(columns):\n if image[i, j] == k:\n count = count + 1\n hist[k] = count\n\n # print(\"Check if histogram is same: \", np.array_equal(hist, hist1))\n\n return hist", "def OF1_CalculateRawHistogram(image):\n h = np.zeros(256, np.float_)\n for i in np.nditer(image):\n h[i - 1] = h[i - 1] + 1\n\n return h", "def calc_histogram(self, img_data):\n\n histogram = [0] * self.color_depth\n\n for w in range(img_data.shape[0]):\n for h in range(img_data.shape[1]):\n pixel = img_data[w][h]\n histogram[pixel] += 1\n\n return histogram", "def compute_histogram(image, n_bins, color_space=\"RGB\"):\n\n n_channels = 1 if color_space == \"GRAY\" else image.shape[2]\n\n hist_channels = list(range(n_channels))\n hist_bins = [n_bins,]*n_channels\n hist_range = [0, 256]*n_channels\n\n hist = cv.calcHist([image], hist_channels, None, hist_bins,\n hist_range)\n hist = cv.normalize(hist, hist, alpha=0, beta=1,\n norm_type=cv.NORM_MINMAX).flatten() # change histogram range from [0,256] to [0,1]\n return hist", "def histogram(self, image):\n\n response = self._send_request(\"histogram\", files=dict(image=image))\n return response[self._layer]['histogram']", "def equalise_hist(image, bin_count=256):\n # TODO: your histogram equalization code\n #define arrays\n image = img_as_ubyte(image)\n row,col = image.shape\n new_image = np.zeros((row,col),dtype='uint8') \n\n # compute the value of each grayscale,and save in image_hist \n image_hist = np.bincount(image.flatten(), minlength=(bin_count))\n\n # normalise n[]\n norm_arr = (np.cumsum(image_hist)/(image.size))*(bin_count-1)\n norm_arr = norm_arr.astype('uint8')\n \n #Compute a normalized cumulative histogram\n for x in range(row):\n for y in range(col):\n new_image[x,y] = norm_arr[image[x,y]]\n \n return new_image", "def histograma(p):\n img = read_img(p)\n show_histograma(img.reshape((-1)))", "def _histogram(image,\n min,\n max,\n bins):\n\n return numpy.histogram(image, bins, (min, max))[0]", "def get_histogram(folder_name, image_name, save_location):\n print(\"Getting histogram for:\" + str(folder_name) + '/' + str(image_name))\n image = cv2.imread(folder_name + '/' + image_name, cv2.IMREAD_ANYDEPTH)\n plt.hist(image.ravel(), 256, [0, 65535])\n plt.xlabel('Pixel Intensity')\n plt.ylabel('Number of pixels')\n plt.title('Histogram 
of normalised reference image. Overnight2')\n plt.savefig(save_location + 'histogram.png')\n plt.savefig(save_location + 'histogram.eps', format='eps')\n # plt.show()", "def OF1_CalculateNormalizedHistogram(image):\n\n raw = OF1_CalculateRawHistogram(image)\n norm = np.zeros(256, np.float_)\n\n for i in range(256):\n norm[i] = raw[i] / image.size\n\n return norm", "def color_histogram_hsv(img, nbin=10, xmin=0, xmax=255, normalized=True):\n ndim = img.ndim\n bins = np.linspace(xmin, xmax, nbin+1)\n hsv = matplotlib.color.rgb_to_hsv(img/xmax) * xmax\n imhist, bin_edges = np.histogram(hsv[:, :, 0], bins=bins, density=normalized)\n imhist = imhist * np.diff(bin_edges)\n return imhist", "def img_histogram(img):\n\n plt.figure()\n\n if len(img.shape) > 2:\n\n plt.subplot(3,1,1)\n plt.hist(img[:,:,0].ravel(),bins=range(257),color='b')\n plt.title('Image Histogram')\n plt.legend('Blue')\n plt.xlabel('Pixel Values')\n plt.ylabel('Frequency')\n\n plt.subplot(3,1,2)\n plt.hist(img[:,:,1].ravel(),bins=range(257),color='g')\n plt.legend('Green')\n plt.xlabel('Pixel Values')\n plt.ylabel('Frequency')\n\n plt.subplot(3,1,3)\n plt.hist(img[:,:,2].ravel(),bins=range(257),color='r')\n plt.legend('Red')\n plt.xlabel('Pixel Values')\n plt.ylabel('Frequency')\n\n plt.ion()\n plt.show()\n\n else:\n\n plt.hist(img[:,:].ravel(),bins=range(257))\n plt.title('Image Histogram - Grayscale')\n plt.xlabel('Pixel Values')\n plt.ylabel('Frequency')\n\n plt.ion()\n plt.show()", "def compute_histogram(im, block_factor=3, color_space='HSV'):\n\n # Shape = rows and columns\n remainder_rows = im.shape[0] % block_factor\n remainder_cols = im.shape[1] % block_factor\n\n im_block = cv2.copyMakeBorder(im, block_factor - remainder_rows, 0, block_factor - remainder_cols, 0,\n cv2.BORDER_CONSTANT)\n\n windowsize_r = int(im_block.shape[0] / block_factor)\n windowsize_c = int(im_block.shape[1] / block_factor)\n\n # print(im_block.shape)\n # print(str(windowsize_r)+' '+str(windowsize_c))\n # cv2.imshow(\"fullImg\", im_block)\n\n hist = []\n for r in range(0, im_block.shape[0], windowsize_r):\n for c in range(0, im_block.shape[1], windowsize_c):\n hist_blocks = []\n window = im_block[r:r + windowsize_r, c:c + windowsize_c]\n if color_space == 'GRAY':\n window_gray = cv2.cvtColor(window, cv2.COLOR_BGR2GRAY)\n hist_block = cv2.calcHist([window_gray], [0], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n elif color_space == 'RGB':\n hist_block = cv2.calcHist([window], [0], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n hist_block = cv2.calcHist([window], [1], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n hist_block = cv2.calcHist([window], [2], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n elif color_space == 'HSV':\n window = cv2.cvtColor(window, cv2.COLOR_BGR2HSV)\n hist_block = cv2.calcHist([window], [0], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n hist_block = cv2.calcHist([window], [1], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n hist_block = cv2.calcHist([window], [2], 
None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n \n hist.append(hist_blocks)\n\n return hist", "def getHistogram( self, img):\n bins = 256\n range_scale = [0,254]\n nivel_transparencia = 0.5\n plt.hist(img.ravel(),bins,range_scale, label=\"histogram\", alpha=nivel_transparencia);\n plt.legend(loc='upper right')\n plt.show()", "def fullhistogram(img):\n maxt = img.max()\n if maxt == 0:\n return np.array([img.size])\n return nhistogram(img, np.arange(maxt+2))[0]", "def calculate_histogram(img, channel):\n\n # histogram arrays for each channel\n hist_gs_or_red = np.zeros((256, 1), dtype=np.int32)\n hist_green = np.zeros((256, 1), dtype=np.int32)\n hist_blue = np.zeros((256, 1), dtype=np.int32)\n\n # Calculate the histogram for red channel for RGB images\n # or the the first channel for gray-scale of shape (M, N, 1) images.\n if channel == [0]:\n # one-dimensional array\n if img.ndim == 1:\n raise Exception('Cannot calculate the hist of one-dimensional array.')\n\n # if there is one channel, or in case of gray-scale images, it's OK!\n elif img.ndim == 2:\n for pixel in np.ceil(img.flatten()).astype(np.int):\n hist_gs_or_red[pixel] = hist_gs_or_red[pixel] + 1\n\n # an RGB image\n elif img.ndim == 3:\n for pixel in np.ceil(img[:, :, 0:1].flatten()).astype(np.int):\n hist_gs_or_red[pixel] = hist_gs_or_red[pixel] + 1\n\n # more than 3 dimensions\n else:\n raise Exception('Cannot calculate the hist of more than 3-dimensional array.')\n\n return hist_gs_or_red\n\n # Calculate the histogram of green channel for RGB images\n elif channel == [1]:\n # Not 3-D array that represent the image with 3 color channels.\n if img.ndim <= 2:\n raise Exception('Cannot calculate the hist of green channel for non-rgb images/ 3-D array')\n\n # If it's a 3-D array of 3 color channels\n elif img.ndim == 3:\n for pixel in np.ceil(img[:, :, 1:2].flatten()).astype(np.int):\n hist_green[pixel] = hist_green[pixel] + 1\n\n # more than 3 dimensions\n else:\n raise Exception('Cannot calculate the hist of more than 3-dimensional array.')\n return hist_green\n\n # Calculate the histogram of green channel for RGB images\n elif channel == [2]:\n if img.ndim <= 2:\n raise Exception('Cannot calculate the hist of blue channel for non-rgb images/ 3-D array')\n elif img.ndim == 3:\n for pixel in np.ceil(img[:, :, 2:].flatten()).astype(np.int):\n hist_blue[pixel] = hist_blue[pixel] + 1\n return hist_blue\n\n # Invalid value of channel parameter\n else:\n raise Exception('ValueError: only [0], [1], [2] are possible as value for the channel parameter.')", "def calculateHistogram(self):\n \n # Define color map\n colors = [ (255,0,0),(0,255,0),(0,0,255) ]\n # Define empty image to plot histogram in\n plot_to_fill = np.zeros((280,400,3))\n # Define bins of the histogram\n bins = np.arange(256).reshape(256,1)\n \n # Boucle sur les canaux\n for channel, color in enumerate(colors):\n # Calcul de l'histogramme\n hist_item = cv2.calcHist(self.frame,[channel],None,[256],[0,256])\n # Normalisation\n cv2.normalize(hist_item,hist_item,0,255,cv2.NORM_MINMAX)\n # Conversion\n hist = np.int32(np.around(hist_item))\n pts = np.int32(np.column_stack((bins, hist)))\n cv2.polylines(plot_to_fill, [pts], False, color)\n # Mettre dans le bon sens\n histplot = np.flipud(plot_to_fill)\n histplot = np.uint8(histplot)\n \n # Conversion en objet QPixelMap\n self.histplot_qpix = self.convertToQPixelmap(histplot)", "def histograms(self, *args, **kwargs):\n return 
_image.image_histograms(self, *args, **kwargs)", "def histogram_equalize(im_orig):\n\n color_flag = False\n image = im_orig\n\n\n if len(im_orig.shape) == 3: #RGB image\n color_flag = True\n y_im = rgb2yiq(im_orig)\n image = y_im[:, :, 0]\n\n image *= NORMALIZE\n hist_orig, bins = np.histogram(image, range(BINS))\n hist_cum = np.cumsum(hist_orig) #cumulative distribution function\n\n cum = ((hist_cum - hist_cum.min()) / ( hist_cum.max() - hist_cum.min())) * NORMALIZE\n\n im_eq = cum[image.astype(np.uint8)]\n\n hist_eq, bins = np.histogram(im_eq, range(BINS)) #before getting back to float64 does the histogram)\n\n im_eq /= NORMALIZE\n im_eq = im_eq.astype(np.float64)\n\n\n if color_flag:\n y_im[:, :, 0] = im_eq\n im_eq = yiq2rgb(y_im)\n\n im_eq = im_eq.clip(0,1)\n return [im_eq, hist_orig, hist_eq]", "def histeq( im, nbr_bins = 256):\n\t# get image histogram \n\timhist, bins = histogram( im.flatten(), nbr_bins, normed = True) \n\tcdf = imhist.cumsum() \n\t# cumulative distribution function cdf = 255 * cdf / cdf[-1] \n\t# normalize \n\t# use linear interpolation of cdf to find new pixel values \n\tim2 = interp( im.flatten(), bins[:-1], cdf) \n\treturn im2.reshape( im.shape), cdf", "def histograma(frame):\n #Imagen mejorada por Equalizacion de Histogramas.\n histograma = cv2.equalizeHist(frame)\n return histograma", "def color_histogram_hsv(im, nbin=10, xmin=0, xmax=255, normalized=True):\n ndim = im.ndim\n bins = np.linspace(xmin, xmax, nbin + 1)\n hsv = matplotlib.colors.rgb_to_hsv(im / xmax) * xmax\n imhist, bin_edges = np.histogram(hsv[:, :, 0],\n bins=bins,\n density=normalized)\n imhist = imhist * np.diff(bin_edges)\n\n return imhist", "def show_histogram(im):\n\n if im.ndim == 2:\n # Input image is single channel\n plt.hist(im.flatten(), 256, range=(0, 250), fc='k')\n plt.show()\n\n elif im.ndim == 3:\n # Input image is three channels\n fig = plt.figure()\n fig.add_subplot(311)\n plt.hist(im[..., 0].flatten(), 256, range=(0, 250), fc='b')\n fig.add_subplot(312)\n plt.hist(im[..., 1].flatten(), 256, range=(0, 250), fc='g')\n fig.add_subplot(313)\n plt.hist(im[..., 2].flatten(), 256, range=(0, 250), fc='r')\n plt.show()", "def describe(self, image, mask=None):\n histogram = cv2.calcHist([image], [0, 1, 2], mask, self.bins, [0, 256, 0, 256, 0, 256])\n cv2.normalize(histogram, histogram)\n\n return histogram.flatten()", "def __get_color_histogram(self, image, seed, hist_res):\n \n L=[]\n N=len(seed)\n for i in range(N):\n \n L.append(image[seed[i][1],seed[i][0]])\n image_part=np.array(L)\n \n \n hist, bins= np.histogramdd(image_part,bins=hist_res,range=((0,255),(0,255),(0,255)) )\n #hist= ndimage.gaussian_filter(hist,sigma=7) # Gaussian smoothing\n\n return hist /np.linalg.norm(hist)", "def histeq(im,nbr_bins=256):\r\n # Calculate histogram of images\r\n imhist,bins = histogram(im.flatten(),nbr_bins,normed=True)\r\n cdf = imhist.cumsum() # cumulative distribution function\r\n cdf = 255 * cdf / cdf[-1] # 归一化\r\n # Using the linear interpolation of cumulative distribution function, the new pixel value is calculated.\r\n im2 = interp(im.flatten(),bins[:-1],cdf)\r\n return im2.reshape(im.shape), cdf", "def BICHistogram(self):\n if not self._bicHistogram is 0:\n return self._bicHistogram\n hsvimg = self.HsvImage()\n #Note that in OpenCV hsv uses the ranges [0,179], [0,255] and [0,255] respectively\n histogram = numpy.zeros(56, dtype=numpy.float32)\n [width, height, depth] = hsvimg.shape\n swidth = width-1\n sheight = height-1\n for y in xrange(height):\n for x in xrange(width):\n #index = 
self.HsvBin(hsvimg[x][y])\n #if index != self.HsvBin(hsvimg[min(x+1, swidth)][min(y+1, sheight)]) or index != self.HsvBin(hsvimg[min(x+1, swidth)][max(y-1, 0)]) or index != self.HsvBin(hsvimg[max(x-1, 0)][min(y+1, sheight)]) or index != self.HsvBin(hsvimg[max(x-1, 0)][max(y-1, 0)]):\n index=self.HsvBin(x, y)\n if index != self.HsvBin(min(x+1, swidth),min(y+1, sheight)) or index != self.HsvBin(min(x+1, swidth),max(y-1, 0)) or index != self.HsvBin(max(x-1, 0),min(y+1, sheight)) or index != self.HsvBin(max(x-1, 0),max(y-1, 0)):\n histogram[28+index] += 1\n else:\n histogram[index] += 1\n histogram /= width*height\n sHistogram = numpy.zeros(56, dtype=numpy.float32)\n sHistogram[0] = 0.25 * histogram[20] + 0.5 * histogram[0] + 0.25 * histogram[1]\n sHistogram[20] = 0.5 * histogram[20] + 0.25 * histogram[0] + 0.25 * histogram[19]\n \n for i in xrange(1, 19):\n sHistogram[i] = 0.25 * histogram[i-1] + 0.5 * histogram[i] + 0.25 * histogram[i+1]\n \n sHistogram[28] = 0.25 * histogram[48] + 0.5 * histogram[28] + 0.25 * histogram[29]\n sHistogram[48] = 0.5 * histogram[48] + 0.25 * histogram[28] + 0.25 * histogram[47]\n \n for i in xrange(29, 47):\n sHistogram[i] = 0.25 * histogram[i-1] + 0.5 * histogram[i] + 0.25 * histogram[i+1]\n self._bicHistogram = sHistogram\n return sHistogram" ]
[ "0.8064423", "0.8042533", "0.8037536", "0.7930632", "0.76470864", "0.7604351", "0.75007343", "0.73157203", "0.7244618", "0.71898323", "0.7063043", "0.6988698", "0.69718844", "0.69492954", "0.6931104", "0.6850683", "0.6834464", "0.6798194", "0.6795921", "0.6792963", "0.67720056", "0.67577064", "0.67555946", "0.67118055", "0.6676134", "0.6666913", "0.66627383", "0.6646342", "0.66344947", "0.65969664" ]
0.82736444
0
Computes the binary image of the input image based on histogram analysis and thresholding; takes as input
def binarize(self, image, threshold):
    bin_img = image.copy()
    for i in range(image.shape[0]):
        for j in range(image.shape[1]):
            if image[i, j] >= threshold:
                bin_img[i, j] = 0
            else:
                bin_img[i, j] = 255
    return bin_img
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def binarize(self, image, threshold):\n\n bin_img = image.copy()\n [h, w] = bin_img.shape\n opt_threshold = threshold\n print(opt_threshold)\n for row in range(h):\n for col in range(w):\n if bin_img[row, col] > opt_threshold: #greater than threshld white(general)\n bin_img[row, col] = 255 #0 instead of 1\n else: #less than threshold black(general)\n bin_img[row, col] = 0 #0 instead of 1\n\n\n #reverse the cases\n\n return bin_img", "def binarize(self, image):\n\n [rows, columns] = np.shape(image)\n bin_img = np.zeros((rows, columns), dtype=int)\n print(\"############## Using to binarize an image ##############\")\n hist = self.compute_histogram(image)\n threshold = self.find_optimal_threshold(hist)\n for i in range(rows):\n for j in range(columns):\n if image[i, j] < threshold:\n bin_img[i, j] = 0\n else:\n bin_img[i, j] = 255\n # print(\"binary image: \\n\", bin_img)\n\n return bin_img", "def __call__(self, img, *args, **kwargs):\n _, img_binarized = cv.threshold(img, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)\n\n return img_binarized", "def bin_thres_img(img, ksize=3):\n # Apply each of the thresholding functions\n gradx = abs_sobel_thresh(img, orient='x', sobel_kernel=ksize, thresh=(20, 100))\n grady = abs_sobel_thresh(img, orient='y', sobel_kernel=ksize, thresh=(20, 100))\n\n mag_binary = mag_thresh(img, sobel_kernel=ksize, mag_thresh=(30, 100))\n dir_binary = dir_threshold(img, sobel_kernel=ksize, thresh=(0.7, 1.3))\n\n hls_binary = hls_select(img, thresh=(170, 255))\n\n combined = np.zeros_like(dir_binary)\n combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1)) | hls_binary == 1] = 1\n return combined", "def binarize_image(img, verbose=False):\n h, w = img.shape[:2]\n\n # creat an empty image with the same size as the passed frame to the function\n binary_output = np.zeros(shape=(h, w), dtype=np.uint8)\n\n # using HSV, Find yellow lanes in the image (min [0, 70, 70] and max [50, 255, 255] were selected to detect yellow at all conditions in the image)\n HSV_yellow_lanes = hsv_select(img, thresh=(\n [0, 100, 100], [50, 255, 255]), channel='all', verbose=False)\n\n #HSV_yellow_lanes = thresh_frame_in_HSV(img, yellow_HSV_th_min, yellow_HSV_th_max, verbose=False)\n\n # add the yellow mask to the binary image\n binary_output = np.logical_or(binary_output, HSV_yellow_lanes)\n\n # using Histogram Equalization, Find white lanes in the image\n histo_white_lanes = histo_image(img, verbose=False)\n\n # add the white mask to the binary image\n binary_output = np.logical_or(binary_output, histo_white_lanes)\n\n # apply sobel mask to the image\n sobel_mask = abs_sobel_thresh(\n img, orient='xy', sobel_kernel=9, thresh=(50, 200), verbose=False)\n\n # apply a light morphology to \"fill the gaps\" in the binary image\n kernel = np.ones((6, 6), np.uint8)\n closing = cv2.morphologyEx(sobel_mask.astype(\n np.uint8), cv2.MORPH_CLOSE, kernel)\n\n kernel = np.ones((2, 2), np.uint8)\n opening = cv2.morphologyEx(closing.astype(\n np.uint8), cv2.MORPH_OPEN, kernel)\n\n # add the sobel mask to the binary image\n binary_output = np.logical_or(opening, binary_output)\n\n # using HLS, Find lanes in the image\n hls_s_binary = hls_select(img, thresh=(200, 255), channel='S')\n\n # add the HLS mask to the binary image\n binary_output = np.logical_or(binary_output, hls_s_binary)\n\n # apply a light morphology to \"fill the gaps\" in the binary image\n kernel = np.ones((5, 5), np.uint8)\n binary_output = cv2.morphologyEx(binary_output.astype(\n np.uint8), cv2.MORPH_CLOSE, kernel)\n\n \n if 
verbose:\n f, ax = plt.subplots(2, 3)\n f.set_facecolor('white')\n\n ax[0, 0].imshow(cv2.cvtColor(img, code=cv2.COLOR_BGR2RGB), cmap='gray')\n ax[0, 0].set_title('Original')\n ax[0, 0].set_axis_off()\n \n ax[0, 1].imshow(HSV_yellow_lanes, cmap='gray')\n ax[0, 1].set_title('Yellow mask')\n ax[0, 1].set_axis_off()\n\n ax[0, 2].imshow(histo_white_lanes, cmap='gray')\n ax[0, 2].set_title('white mask')\n ax[0, 2].set_axis_off()\n\n ax[1, 2].imshow(sobel_mask, cmap='gray')\n ax[1, 2].set_title('Sobel mask')\n ax[1, 2].set_axis_off()\n\n ax[1, 0].imshow(binary_output, cmap='gray')\n ax[1, 0].set_title('OUTPUT')\n ax[1, 0].set_axis_off()\n\n ax[1, 1].imshow(closing, cmap='gray')\n ax[1, 1].set_title('closing')\n ax[1, 1].set_axis_off()\n\n # ax[1, 2].imshow(opening, cmap='gray')\n # ax[1, 2].set_title('opening')\n # ax[1, 2].set_axis_off()\n plt.show()\n return binary_output, closing, opening", "def get_binary_image_array_from_equalized_grayscale(frame):\n\t\n\teq_global = cv2.equalizeHist(frame.gray)\n\t#_, th = cv2.threshold(eq_global, thresh=240, maxval=255, type=cv2.THRESH_BINARY)\n\t_, th = cv2.threshold(eq_global, thresh=240, maxval=255, type=cv2.THRESH_TOZERO)\n\treturn th", "def image_binary(image_convert):\n image_bit=cv2.bitwise_not(image_convert)\n _, image_bina = cv2.threshold(image_bit, 125, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n image_bina=image_bina/255.0\n return image_bina", "def image_thresholding(image: np.ndarray):\n #  Resize image to a shape of (48, 48)\n image = image_as_square(image)\n\n # Find threshold using Otsu filter\n threshold: float = filters.threshold_otsu(image)\n binary = image > threshold\n\n binary_image = np.where(image, binary, 0) * 255\n\n #  Resize the iamge back to a shape of (2304, )\n return image_as_array(image)", "def create_binary(image):\n #Channel 1 of the output image highlights the area consisting of the nuclei\n channel1=image[:,:,0]\n \n # Channel 2 of the output image consists of the boundaries between adjoining nuclei\n channel2=image[:,:,1]\n _,channel1=cv2.threshold(channel1, 127,255,cv2.THRESH_BINARY) \n _,channel2=cv2.threshold(channel2, 127,255,cv2.THRESH_BINARY) \n \n #Subtracting channel 2 from channel 1 to get the desired output\n img1=channel1-channel2\n \n return img1", "def q_2(input_file, output_file):\n img = cv2.imread(input_file, cv2.IMREAD_COLOR)\n \n # Convert image to gray channel\n np_img = np.array(img)\n b = np_img[:,:,0]\n g = np_img[:,:,1]\n r = np_img[:,:,2]\n img_gray = 0.21 * b + 0.72 * g + 0.07 * r\n img_gray = np.array(img_gray, dtype='uint8')\n # Histogram equalization\n w,h=img_gray.shape\n H=count(img_gray)\n y=np.array([])\n # sap xep lai mang theo thu tu tu 0-255\n x=H.reshape(1,256)\n y=np.append(y,x[0,0])\n # T[i]=[i-1]+h[i]\n for i in range(255):\n k=x[0,i+1]+y[i]\n y=np.append(y,k)\n # chia theo cong thuc\n y=np.round(y/(w*h)*255)\n for i in range(w):\n for j in range(h):\n k=img_gray[i,j]\n img_gray[i,j]=y[k]\n cv2.imwrite(output_file, img_gray)", "def binarize(img):\n image = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n image = cv.GaussianBlur(image, (3, 3), 0)\n ret, image = cv.threshold(image, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)\n return image", "def thresholdImage(frame, binarizationThreshold=30):\n return cv2.threshold(frame, binarizationThreshold, 255, cv2.THRESH_BINARY)[1]", "def procces_image(self, image):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n ret, processed_image = cv2.threshold(image, 75, 255, cv2.THRESH_BINARY_INV)\n return processed_image", "def 
binarize_image(input_image, threshold_value=177):\n gray_image = grayscale_image(input_image)\n bin_image = cv2.GaussianBlur(gray_image, (5, 5), 0)\n _, bin_image = cv2.threshold(bin_image,\n threshold_value,\n 255,\n cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n return bin_image", "def bin_thresh(img, threshold):\r\n _, new_img = cv2.threshold(img, threshold, 255, cv2.THRESH_BINARY) # 255 is the max value\r\n\r\n # make some smoothing to get rid of unnecessary ~splashes of color\r\n # parameters are the width and height of gaussian kernel, and standard deviation in X and Y direction,\r\n # sigmaX & sigmaY respectively. If only sigmaX is specified, sigmaY is taken as same as sigmaX.\r\n # If both are given as zeros, they are calculated from kernel size.\r\n new_img = cv2.GaussianBlur(new_img, (9, 9), 0)\r\n return new_img", "def get_binary_image(grayscale_image):\n _, thresholded_image = cv2.threshold(grayscale_image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n return thresholded_image", "def binarize(img, s_thres=(170, 255), l_thres=(50, 255), sobel_thres=(30, 80)):\n hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)\n\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n hls[:, :, 1] = clahe.apply(hls[:, :, 1])\n\n l_image = hls[:, :, 1]\n l_blur = cv2.GaussianBlur(l_image, (0, 0), 9)\n l_image = cv2.addWeighted(l_image, 1, l_blur, -1, 0)\n l_image = cv2.normalize(l_image, np.zeros_like(l_image), 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)\n l_binary = np.zeros_like(l_image)\n l_binary[(l_image >= l_thres[0]) & (l_image <= l_thres[1])] = 1\n\n # Sobel x\n # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # gray = hls[:, :, 1]\n # sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0) # Take the derivative in x\n # abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal\n # scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))\n # sxbinary = np.zeros_like(scaled_sobel)\n # sxbinary[(scaled_sobel >= sobel_thres[0]) & (scaled_sobel <= sobel_thres[1])] = 1\n # sxbinary = s_binary\n\n s_channel = hls[:, :, 2]\n s_channel = cv2.normalize(s_channel, np.zeros_like(s_channel), 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)\n s_binary = np.zeros_like(s_channel)\n s_binary[(s_channel >= s_thres[0]) & (s_channel <= s_thres[1])] = 1\n\n # Combine the two binary thresholds\n combined_binary = np.zeros_like(s_binary)\n combined_binary[(s_binary == 1) | (l_binary == 1)] = 1\n\n # we filter out the lines with too many active pixels\n combined_binary_rows = combined_binary.sum(1)\n combined_binary[combined_binary_rows > (combined_binary.shape[1] / 2)] = 0\n\n return combined_binary", "def scale(img):\n result = [[1 if x > BINARY_THRESHOLD else 0 for x in row] for row in img]\n return result", "def threshold(self, config ):\n java_object = pbg.gateway.jvm.boofcv.factory.filter.binary.FactoryThresholdBinary.\\\n threshold(config.java_obj,self.boof_image_type)\n return InputToBinary(java_object)", "def binarize(X, *, threshold=..., copy=...):\n ...", "def apply_thresholding_img(img, t1, t2):\n hist_threshold = np.where(img >= t1, img, 255)\n hist_threshold = np.where(hist_threshold < t2, 0, hist_threshold)\n return hist_threshold", "def process(self):\n self.output_image = cv.adaptiveThreshold(\n self.input_image,\n # self.MIN_THRESHOLD,\n self.MAX_PIXEL_VALUE,\n cv.ADAPTIVE_THRESH_GAUSSIAN_C,\n cv.THRESH_BINARY_INV,\n self.BLOCK_SIZE,\n self.CONSTANT,\n )\n return self.output_image", "def binarize(img, s_thresh=(150, 255), sx_thresh=(20, 100)):\n img = np.copy(img)\n hsv = 
cv2.cvtColor(img, cv2.COLOR_BGR2HLS).astype(np.float)\n l_channel = hsv[:, :, 1]\n s_channel = hsv[:, :, 2]\n\n sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0)\n abs_sobelx = np.absolute(sobelx)\n scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))\n\n # Threshold x gradient\n sxbinary = np.zeros_like(scaled_sobel)\n sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1\n\n # Threshold color channel\n s_binary = np.zeros_like(s_channel)\n s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1\n\n img_bin = np.zeros_like(sxbinary)\n img_bin[(sxbinary == 1) | (s_binary == 1)] = 1\n\n return img_bin", "def compute_histogram(self, image):\n\n # in-built function to calculate histogram\n print(\"size of image: \", np.shape(image))\n print(\"number of pixels: \", np.shape(image)[0] * np.shape(image)[1])\n # hist1 = np.ravel(cv2.calcHist([image], [0], None, [256], [0, 256]))\n # hist = np.ravel(cv2.calcHist([image], [0], None, [256], [0, 256]))\n\n # created function to calculate histogram\n hist = np.zeros(256)\n [rows, columns] = np.shape(image)\n for k in range(256):\n count = 0\n for i in range(rows):\n for j in range(columns):\n if image[i, j] == k:\n count = count + 1\n hist[k] = count\n\n # print(\"Check if histogram is same: \", np.array_equal(hist, hist1))\n\n return hist", "def calculate_binaries(dict_data):\n list_all_preprocessed_binaries = []\n for index_patient, patient in enumerate(dict_data):\n # pick and convert image\n image = dict_data[patient][1]\n image = image.astype(\"uint8\")\n # blur image\n image_blurred = cv2.medianBlur(image, 29)\n # segment image using k-means segmentation\n image_segmented = run_kmean_on_single_image(image_blurred, k=10,\n precision=10000, max_iterations=1000)\n # find lower threshold for binarizing images\n \"\"\" the idea i had here was that all the electrodes always occupy the same area on each picture.\n this function basically returns the pixel value, at which we need to threshold in our binary\n function, so that all pixels that have a higher intensity will collectively make up at least \n \"fraction_of_image_threshold\" percent of the picture - electrodes seem to take up about 5-10% of each\n image\"\"\"\n lower_threshold = intelligent_get_threshold(image_segmented,\n fraction_of_image_threshold=0.08)\n # binarize image\n image_binary = binarize_image(image_segmented, \n lower_threshold=lower_threshold, upper_threshold=255)\n list_all_preprocessed_binaries.append(image_binary)\n return list_all_preprocessed_binaries", "def histo_image(image, verbose=False):\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n histo_global = cv2.equalizeHist(gray)\n\n _, histo = cv2.threshold(histo_global, thresh=250,\n maxval=255, type=cv2.THRESH_BINARY)\n\n if verbose:\n plt.imshow(histo, cmap='gray')\n plt.show()\n\n return histo", "def imthresh(img):\n img_vecs = img.flatten()\n\n # pre-calculate the histogram and cumulative histogram.\n vbins = np.arange(0, 257, 1)\n img_hist, hist_edges = np.histogram(img_vecs, vbins)\n vbins = (hist_edges[:-1] + hist_edges[1:])/2\n \n hist_times_gray = np.cumsum(img_hist * np.arange(0, 256, 1))\n cum_hist = np.cumsum(img_hist)\n\n # A first approximation of the background mean mean_1 is the mean of the corner pixels.\n # The third corner's index seems to be wrong!\n m, n = img.shape\n sum_bg = np.sum(img_vecs[[0, n - 1, n * (m - 1), m * n - 1]])\n num_pix_bg = 4\n mean1 = sum_bg/4\n mean2 = (np.sum(img_vecs) - sum_bg)/(m *n - num_pix_bg)\n threshold_val = 
np.uint8(np.ceil((mean1 + mean2)/2))\n\n\n if (threshold_val != 0) and (cum_hist[threshold_val - 1] == 0):\n threshold_val_old = threshold_val\n\n threshold_val_old = 0 # weird\n while threshold_val != threshold_val_old:\n threshold_val_old = threshold_val\n mean1 = hist_times_gray[threshold_val - 1]/cum_hist[threshold_val - 1]\n mean2 = (hist_times_gray[-1] - hist_times_gray[threshold_val - 1])/(cum_hist[-1] - cum_hist[threshold_val - 1])\n\n threshold_val = np.uint8(np.ceil((mean1 + mean2)/2))\n\n\n img_out = img >= threshold_val\n return img_out, threshold_val", "def BICHistogram(self):\n if not self._bicHistogram is 0:\n return self._bicHistogram\n hsvimg = self.HsvImage()\n #Note that in OpenCV hsv uses the ranges [0,179], [0,255] and [0,255] respectively\n histogram = numpy.zeros(56, dtype=numpy.float32)\n [width, height, depth] = hsvimg.shape\n swidth = width-1\n sheight = height-1\n for y in xrange(height):\n for x in xrange(width):\n #index = self.HsvBin(hsvimg[x][y])\n #if index != self.HsvBin(hsvimg[min(x+1, swidth)][min(y+1, sheight)]) or index != self.HsvBin(hsvimg[min(x+1, swidth)][max(y-1, 0)]) or index != self.HsvBin(hsvimg[max(x-1, 0)][min(y+1, sheight)]) or index != self.HsvBin(hsvimg[max(x-1, 0)][max(y-1, 0)]):\n index=self.HsvBin(x, y)\n if index != self.HsvBin(min(x+1, swidth),min(y+1, sheight)) or index != self.HsvBin(min(x+1, swidth),max(y-1, 0)) or index != self.HsvBin(max(x-1, 0),min(y+1, sheight)) or index != self.HsvBin(max(x-1, 0),max(y-1, 0)):\n histogram[28+index] += 1\n else:\n histogram[index] += 1\n histogram /= width*height\n sHistogram = numpy.zeros(56, dtype=numpy.float32)\n sHistogram[0] = 0.25 * histogram[20] + 0.5 * histogram[0] + 0.25 * histogram[1]\n sHistogram[20] = 0.5 * histogram[20] + 0.25 * histogram[0] + 0.25 * histogram[19]\n \n for i in xrange(1, 19):\n sHistogram[i] = 0.25 * histogram[i-1] + 0.5 * histogram[i] + 0.25 * histogram[i+1]\n \n sHistogram[28] = 0.25 * histogram[48] + 0.5 * histogram[28] + 0.25 * histogram[29]\n sHistogram[48] = 0.5 * histogram[48] + 0.25 * histogram[28] + 0.25 * histogram[47]\n \n for i in xrange(29, 47):\n sHistogram[i] = 0.25 * histogram[i-1] + 0.5 * histogram[i] + 0.25 * histogram[i+1]\n self._bicHistogram = sHistogram\n return sHistogram", "def compute_histogram(self, image):\n hist = [0] * 256\n [h, w] = image.shape\n print(h,w)\n i = 0\n while i < 256:\n for row in range(h):\n for col in range(w):\n if image[row, col] == i:\n hist[i] += 1\n #print(hist[i])\n i += 1\n\n return hist", "def segmentation_bin(image_gray, th):\n try:\n if len(image_gray.shape) > 2:\n print('[ERROR]: Dimension > 2. Is an image gray?')\n return None \n \n ret, image_bin = cv2.threshold(image_gray, th, 255, cv2.THRESH_BINARY_INV)\n \n return image_bin\n except:\n print('[ERROR]: could not segmentation image')\n return None" ]
[ "0.7681446", "0.727308", "0.7155279", "0.70369035", "0.6960089", "0.6878597", "0.6873943", "0.6869752", "0.6862886", "0.68430334", "0.68059856", "0.67681766", "0.67679167", "0.6710474", "0.66879267", "0.6674513", "0.66720676", "0.666158", "0.6618548", "0.6603418", "0.6565325", "0.6558376", "0.65255654", "0.64880496", "0.6474988", "0.64670426", "0.6454608", "0.6410681", "0.63768536", "0.637225" ]
0.73140323
1
Append new animation. If \p _widget exists in animations, then its target will be changed
def _addLinearAnimation(self, _widget, _target): self._linear_animations[_widget] = _target
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _addPulseAnimation(self, _widget, _target):\n self._pulse_animations[_widget] = _target", "def add_animation(self, animation, key):\n\t\tif animation.from_value == animation.to_value:\n\t\t\treturn\n\t\tanimation.attribute = key\n\t\tanimation.layer = self\n\t\tself.animations[key] = animation", "def on_animation_complete(self, file_path: str):\n self.animation_widget = AnimationWidget(file_path, self.outer_widget)\n self.full_snap_button.setDisabled(False)\n\n # self.main_layout_wide.replaceWidget(self.image_widget, self.animation_widget)\n prev_image_ix = self.main_layout_wide.indexOf(self.image_widget)\n # self.main_layout_wide.removeWidget(self.image_widget)\n self.image_widget.hide() # FIXME Yuck\n self.main_layout_wide.addWidget(self.animation_widget, Qt.Horizontal)\n self.animation_widget.movie.finished.connect(self.replace_original_img_widget)\n # self.animation_widget.movie.setScaledSize(QSize(900, 900))\n self.animation_widget.movie.start()\n self.animation_widget.show()", "def add_move_unit_animation(self, animation: 'MoveUnitAnimation') -> None:\n if animation is None:\n return\n for child in self.children:\n if isinstance(child, MoveUnitAnimation):\n child.end()\n self.add_child(animation)", "def push_animation(self, animations: List[UIAnimation] = [], names: List[str] = []):\n for name in names[::-1]:\n if name in self.saved_animations:\n n_animation = self.saved_animations[name]\n for anim in n_animation[::-1]:\n self.queued_animations.insert(0, anim)\n \n for animation in animations[::-1]:\n animation.component = self\n self.queued_animations.insert(0, animation)", "def toggleAnimationMenu(self):\n\n if self.uiAnimation is None:\n print \"Add animation frame\"", "def registerAnimation(self, *args):\n return _osgAnimation.AnimationManagerBase_registerAnimation(self, *args)", "def add_widget(self, widget):\n widget.clear()\n widget.parent = self\n self.widgets.append(widget)\n widget.dirty = 1", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def update(self, *args):\n return _osgAnimation.Animation_update(self, *args)", "def start_animation(self) -> None:\n increment_values = {0: 1, self.original_height: -1}\n self.increment = increment_values.get(self.current_height, 0) # Compressed if", "def set_animated(self,val):\n for line in self.lines:\n line.set_animated(val)", "def save_animation(self, animation: UIAnimation, name: str):\n if name in self.saved_animations:\n self.saved_animations[name].append(animation)\n else:\n self.saved_animations[name] = [animation]", "def _add_widget(self, widget):\n self._grid.addWidget(\n widget, self._row_index, self._column_id, 1, self._column_span\n )\n self._row_index += 1", "def start_animation(self, duration):\n self.effect = QGraphicsOpacityEffect()\n self.setGraphicsEffect(self.effect)\n\n self.animation1 = QPropertyAnimation(self.effect, b\"opacity\")\n self.animation1.setDuration(duration)\n self.animation1.setStartValue(1)\n self.animation1.setEndValue(0)\n\n self.animation2 = QPropertyAnimation(self.effect, b\"opacity\")\n self.animation2.setDuration(duration)\n self.animation2.setStartValue(0)\n self.animation2.setEndValue(1)\n\n self.ga = QSequentialAnimationGroup()\n self.ga.addAnimation(self.animation1)\n self.ga.addAnimation(self.animation2)\n 
self.ga.setLoopCount(-1)\n self.ga.start()", "def loadAnim2Layout(self, itemList):\n animList = []\n\n for eachItem in itemList:\n currentPath = str(eachItem.toolTip(0))\n if os.path.isdir(currentPath):\n directoryList = os.listdir(currentPath)\n\n for eachFile in directoryList:\n if os.path.isfile('%s/%s' % (currentPath, eachFile)):\n if eachFile.endswith('.anim'):\n animList.append('%s/%s' % (currentPath, eachFile))\n\n row = -1\n column = 0\n coordinateList = []\n for index in range(len(animList)):\n if index % 4:\n column += 1\n coordinateList.append([row, column])\n else:\n row += 1\n column = 0\n coordinateList.append([row, column])\n\n # tool buttons\n for index in range(len(animList)):\n animLabel = os.path.splitext(os.path.basename(animList[index]))[0]\n\n # tool button\n toolButton = hoverToolBtn(gifPath=animList[index].replace('.anim', '.gif'),\n templateGIFPath=self.templateGIF,\n movie=self.movie,\n recordBtn=self.recordBtn,\n parent=self.animWidget)\n toolButton.setFixedSize(90, 90)\n toolButton.setObjectName('toolButton_%s' % animLabel)\n toolButton.setText(animLabel)\n toolButton.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)\n\n # Icons\n animIconPath = animList[index].replace('.anim', '.gif')\n icon = QtGui.QIcon()\n movie = QtGui.QMovie(animIconPath)\n movie.jumpToFrame(0)\n movie.stop()\n icon.addPixmap(QtGui.QPixmap(movie.currentPixmap()), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n toolButton.setIcon(icon)\n toolButton.setIconSize(QtCore.QSize(80, 70))\n\n self.animWidgetLayout.addWidget(toolButton, coordinateList[index][0], coordinateList[index][1], 1, 1)\n\n # import anims\n toolButton.clicked.connect(partial(self.setCurrentAnim, animList[index]))", "def do_animations(self):\n self.animate_bloop(700, 160, 50)", "def at_anim(seq, anim, d):\n at(\"ANIM\", seq, [anim, d])", "def add_frame(\n self : \"animation\",\n frame : \"matplotlib.figure.Figure\",\n facecolor : \"str\" = 'white'\n ):\n self._make_animation_from_raw_list([frame], facecolor=facecolor)", "def queue_animation(self, animations: List[UIAnimation] = [], names: List[str] = [], force: bool = False):\n if not force and self.is_animating():\n return\n for name in names:\n if name in self.saved_animations:\n n_animation = self.saved_animations[name]\n for anim in n_animation:\n anim.component = self\n self.queued_animations.append(anim)\n for animation in animations:\n animation.component = self\n self.queued_animations.append(animation)" ]
[ "0.7235232", "0.640118", "0.60980463", "0.5986769", "0.5909382", "0.57442415", "0.5643073", "0.5543253", "0.5395823", "0.5395823", "0.5395823", "0.5395823", "0.5395823", "0.5395823", "0.5395823", "0.5395823", "0.5395823", "0.5395823", "0.5395823", "0.5262023", "0.52438366", "0.5224949", "0.5185468", "0.51725644", "0.51234335", "0.5119241", "0.51045686", "0.50749224", "0.50457686", "0.5037176" ]
0.7617919
0
Append new animation. If \p _widget exists in animations, then its target will be changed
def _addPulseAnimation(self, _widget, _target): self._pulse_animations[_widget] = _target
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _addLinearAnimation(self, _widget, _target):\n self._linear_animations[_widget] = _target", "def add_animation(self, animation, key):\n\t\tif animation.from_value == animation.to_value:\n\t\t\treturn\n\t\tanimation.attribute = key\n\t\tanimation.layer = self\n\t\tself.animations[key] = animation", "def on_animation_complete(self, file_path: str):\n self.animation_widget = AnimationWidget(file_path, self.outer_widget)\n self.full_snap_button.setDisabled(False)\n\n # self.main_layout_wide.replaceWidget(self.image_widget, self.animation_widget)\n prev_image_ix = self.main_layout_wide.indexOf(self.image_widget)\n # self.main_layout_wide.removeWidget(self.image_widget)\n self.image_widget.hide() # FIXME Yuck\n self.main_layout_wide.addWidget(self.animation_widget, Qt.Horizontal)\n self.animation_widget.movie.finished.connect(self.replace_original_img_widget)\n # self.animation_widget.movie.setScaledSize(QSize(900, 900))\n self.animation_widget.movie.start()\n self.animation_widget.show()", "def add_move_unit_animation(self, animation: 'MoveUnitAnimation') -> None:\n if animation is None:\n return\n for child in self.children:\n if isinstance(child, MoveUnitAnimation):\n child.end()\n self.add_child(animation)", "def push_animation(self, animations: List[UIAnimation] = [], names: List[str] = []):\n for name in names[::-1]:\n if name in self.saved_animations:\n n_animation = self.saved_animations[name]\n for anim in n_animation[::-1]:\n self.queued_animations.insert(0, anim)\n \n for animation in animations[::-1]:\n animation.component = self\n self.queued_animations.insert(0, animation)", "def toggleAnimationMenu(self):\n\n if self.uiAnimation is None:\n print \"Add animation frame\"", "def registerAnimation(self, *args):\n return _osgAnimation.AnimationManagerBase_registerAnimation(self, *args)", "def add_widget(self, widget):\n widget.clear()\n widget.parent = self\n self.widgets.append(widget)\n widget.dirty = 1", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def update(self, *args):\n return _osgAnimation.Animation_update(self, *args)", "def start_animation(self) -> None:\n increment_values = {0: 1, self.original_height: -1}\n self.increment = increment_values.get(self.current_height, 0) # Compressed if", "def set_animated(self,val):\n for line in self.lines:\n line.set_animated(val)", "def save_animation(self, animation: UIAnimation, name: str):\n if name in self.saved_animations:\n self.saved_animations[name].append(animation)\n else:\n self.saved_animations[name] = [animation]", "def _add_widget(self, widget):\n self._grid.addWidget(\n widget, self._row_index, self._column_id, 1, self._column_span\n )\n self._row_index += 1", "def start_animation(self, duration):\n self.effect = QGraphicsOpacityEffect()\n self.setGraphicsEffect(self.effect)\n\n self.animation1 = QPropertyAnimation(self.effect, b\"opacity\")\n self.animation1.setDuration(duration)\n self.animation1.setStartValue(1)\n self.animation1.setEndValue(0)\n\n self.animation2 = QPropertyAnimation(self.effect, b\"opacity\")\n self.animation2.setDuration(duration)\n self.animation2.setStartValue(0)\n self.animation2.setEndValue(1)\n\n self.ga = QSequentialAnimationGroup()\n self.ga.addAnimation(self.animation1)\n self.ga.addAnimation(self.animation2)\n 
self.ga.setLoopCount(-1)\n self.ga.start()", "def loadAnim2Layout(self, itemList):\n animList = []\n\n for eachItem in itemList:\n currentPath = str(eachItem.toolTip(0))\n if os.path.isdir(currentPath):\n directoryList = os.listdir(currentPath)\n\n for eachFile in directoryList:\n if os.path.isfile('%s/%s' % (currentPath, eachFile)):\n if eachFile.endswith('.anim'):\n animList.append('%s/%s' % (currentPath, eachFile))\n\n row = -1\n column = 0\n coordinateList = []\n for index in range(len(animList)):\n if index % 4:\n column += 1\n coordinateList.append([row, column])\n else:\n row += 1\n column = 0\n coordinateList.append([row, column])\n\n # tool buttons\n for index in range(len(animList)):\n animLabel = os.path.splitext(os.path.basename(animList[index]))[0]\n\n # tool button\n toolButton = hoverToolBtn(gifPath=animList[index].replace('.anim', '.gif'),\n templateGIFPath=self.templateGIF,\n movie=self.movie,\n recordBtn=self.recordBtn,\n parent=self.animWidget)\n toolButton.setFixedSize(90, 90)\n toolButton.setObjectName('toolButton_%s' % animLabel)\n toolButton.setText(animLabel)\n toolButton.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)\n\n # Icons\n animIconPath = animList[index].replace('.anim', '.gif')\n icon = QtGui.QIcon()\n movie = QtGui.QMovie(animIconPath)\n movie.jumpToFrame(0)\n movie.stop()\n icon.addPixmap(QtGui.QPixmap(movie.currentPixmap()), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n toolButton.setIcon(icon)\n toolButton.setIconSize(QtCore.QSize(80, 70))\n\n self.animWidgetLayout.addWidget(toolButton, coordinateList[index][0], coordinateList[index][1], 1, 1)\n\n # import anims\n toolButton.clicked.connect(partial(self.setCurrentAnim, animList[index]))", "def do_animations(self):\n self.animate_bloop(700, 160, 50)", "def at_anim(seq, anim, d):\n at(\"ANIM\", seq, [anim, d])", "def add_frame(\n self : \"animation\",\n frame : \"matplotlib.figure.Figure\",\n facecolor : \"str\" = 'white'\n ):\n self._make_animation_from_raw_list([frame], facecolor=facecolor)", "def queue_animation(self, animations: List[UIAnimation] = [], names: List[str] = [], force: bool = False):\n if not force and self.is_animating():\n return\n for name in names:\n if name in self.saved_animations:\n n_animation = self.saved_animations[name]\n for anim in n_animation:\n anim.component = self\n self.queued_animations.append(anim)\n for animation in animations:\n animation.component = self\n self.queued_animations.append(animation)" ]
[ "0.7618043", "0.640304", "0.60984266", "0.5988012", "0.591077", "0.5744295", "0.5644169", "0.5541437", "0.53966963", "0.53966963", "0.53966963", "0.53966963", "0.53966963", "0.53966963", "0.53966963", "0.53966963", "0.53966963", "0.53966963", "0.53966963", "0.526167", "0.5243988", "0.5226069", "0.51871574", "0.51711214", "0.51232994", "0.51198745", "0.5105097", "0.5077206", "0.5045271", "0.50383836" ]
0.7234984
1
Updates input icons relative to mouse state
def _updateOnMouseState(self, state):
    x = state.X.abs
    y = state.Y.abs
    mscale = self.mouse_icon.getScale()
    if (x + mscale[0] + self.mouse_offset) > render_engine.Window.width:
        x = x - mscale[0] - 10
    else:
        x += self.mouse_offset
    if (y + mscale[1] + self.mouse_offset) > render_engine.Window.height:
        y = y - mscale[1] - 10
    else:
        y += self.mouse_offset
    self.mouse_icon.setPosition((x, y))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self):\n self.mousePos = pygame.mouse.get_pos()\n self.update_button_hover_status()", "def update_button_hover_status(self):\n for button in self.playing_buttons:\n button.update(self.mousePos)", "def update_reset_button(self):\r\n if self.board.hovered_tiles and self.is_left_mouse_down:\r\n self.reset_button.draw_uhoh()\r\n else:\r\n self.reset_button.draw_smiley()", "def _on_pick(self, event):\n pix_id = event.ind[-1]\n xx, yy, aa = u.Quantity(self.geom.pix_x[pix_id]).value, \\\n u.Quantity(self.geom.pix_y[pix_id]).value, \\\n u.Quantity(np.array(self.geom.pix_area)[pix_id])\n if self.geom.pix_type.startswith(\"hex\"):\n self._active_pixel.xy = (xx, yy)\n else:\n rr = sqrt(aa)\n self._active_pixel.xy = (xx - rr / 2., yy - rr / 2.)\n self._active_pixel.set_visible(True)\n self._active_pixel_label.set_x(xx)\n self._active_pixel_label.set_y(yy)\n self._active_pixel_label.set_text(f\"{pix_id:003d}\")\n self._active_pixel_label.set_visible(True)\n self._update()\n self.on_pixel_clicked(pix_id) # call user-function", "def OnMouseIn( self, event ):\n self.whichChoice = 1\n event.context.triggerRedraw(1)", "def shell_icon_changed(self, icon):\n raise NotImplementedError", "def handle_mouse_press(self, event):", "def update_imgs(self):\n\n for b in self.gamebuttons:\n b.update_img()\n self.start_but.update_img()", "def icon(self, new_icon):\r\n self.set({\"icon\": new_icon})", "def OnUpdateUIImage(self, event):\n index = GK_SHAPE_TYPE.index(\"image\")\n if self.m_style_ctrl.GetSelection() == GK_SHAPE_TYPE.index(\"image\"):\n event.Enable(True)\n else:\n event.Enable(False)", "def mouse_in(event):\r\n\r\n if str(event.type) == 'Enter':\r\n about_content.config(cursor=\"hand2\")\r\n else:\r\n about_content.config(cursor=\"arrow\")", "def update_input_states(self, input_values):", "def update(self, delta_time):\r\n #for pixels in self.pixel:\r\n for line in self.cursor:\r\n line.draw()\r\n \r\n self.check_keys()", "def handle_input_event(self):\n\n self.markerPos = self.get_mouse_coordinate()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n raise QuitRequestedError\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n raise QuitRequestedError\n if event.type == pygame.MOUSEBUTTONDOWN:\n if Event.is_valid_placement_stage(self.event):\n self.choice = self.get_mouse_coordinate()\n self.event = Event.next(self.event)\n self.timestep_watch.reset()\n\n liberties = self.env.liberty_after_next_steps(self.env.turn, self.env.getOpponent())\n self.env.printField(liberties)\n print()\n # self.env.printFlipNum(self.env.turn)\n # print(self.env.update_num_disks_can_filp(self.choice[0], self.choice[1], self.env.turn))\n\n # print(\"Click \", pos, \"coordinates: \", row, col)", "def _updateButtons(self, event):\n selectedIndex = self.colorlist.GetSelection()\n number = self.colorlist.GetCount()\n try:\n if not 0 <= selectedIndex < number:\n self.buttondown.Enable(False)\n self.buttonup.Enable(False)\n self.buttonremove.Enable(False)\n self.buttonedit.Enable(False)\n elif selectedIndex == 0:\n self.buttondown.Enable(True)\n self.buttonup.Enable(False)\n self.buttonremove.Enable(True)\n self.buttonedit.Enable(True)\n elif selectedIndex == number - 1:\n self.buttondown.Enable(False)\n self.buttonup.Enable(True)\n self.buttonremove.Enable(True)\n self.buttonedit.Enable(True)\n else:\n self.buttondown.Enable(True)\n self.buttonup.Enable(True)\n self.buttonremove.Enable(True)\n self.buttonedit.Enable(True)\n except wx.PyDeadObjectError:\n pass", "def update(self, 
mouse_pos):\n if self.blocked:\n return\n if hasattr(self, 'collide_rect'):\n rect = self.collide_rect\n else:\n rect = self.rect\n hover = rect.collidepoint(mouse_pos)\n if hover:\n self.image = self.hover_image\n else:\n self.image = self.idle_image\n self.hover = hover", "def mousePressEventEnabled(self, ev):\n\n self._btns.append(ev.button())\n if QtCore.Qt.MidButton in self._btns or QtCore.Qt.ControlModifier & ev.modifiers():\n self._midButtonPrevious = ev.pos().x()\n elif QtCore.Qt.RightButton in self._btns:\n self._state[1] = max(self._scaleFrom(ev.pos().x() / float(self.width()), 1), self._state[0])\n elif QtCore.Qt.LeftButton in self._btns:\n self._state[0] = min(self._scaleFrom(ev.pos().x() / float(self.width()), 0), self._state[1])\n\n self._param.update()", "def _update_image(self):\n button = self.buttons.checkedButton()\n if button is None:\n return\n\n button.click()", "def update(self, input, tick):\n if input.left: print(\"left\")\n if input.right: print(\"right\")\n if input.up: print(\"up\")\n if input.down: print(\"down\")\n if input.button1: print(\"button1\")\n if input.button2: print(\"button2\")", "def mouse_in(self, event):\r\n self['background'] = '#E5F3FF'", "def icon_activated(self, reason):\n if reason == QSystemTrayIcon.DoubleClick:\n pass\n elif reason == QSystemTrayIcon.Trigger:\n if self.keyboardStatus:\n self.keyboardStatus = False\n else:\n self.keyboardStatus = True\n if self.keyboardStatus:\n self.change_keyboard(self.selectedKeyboard)\n else:\n self.change_keyboard(0)\n elif reason == QSystemTrayIcon.MiddleClick:\n pass\n else:\n pass", "def ev_MOUSEMOTION(self, event):", "def onMouseDispatcher(self, event):\n\n if self.ui.checkEditNone.isChecked():\n self.onMouseNormal(event)\n elif self.ui.checkEditBuildPoints.isChecked():\n self.onMouseEdit(event)\n elif self.ui.checkEditHorizonMask.isChecked():\n self.onMouseEdit(event)\n elif self.ui.checkPolarAlignment.isChecked():\n self.onMouseStar(event)", "def enableInputImages(self, **inputImages):\n self.logger.debug('Updating enabled input images types with %s', inputImages)\n self.inputImages.update(inputImages)\n self.logger.debug('Enabled input images types: %s', self.inputImages)", "def ev_mousebuttonup(self, event: MouseButtonUp) -> None:", "def _icons(self):", "def _selectionChangedSlot(self, _):\r\n\r\n self._updateButtonStates()", "def update(self):\n if self.state['enabled']:\n if not self.state['blue'] and not self.state['return']:\n self.update_normal()\n elif self.state['blue']:\n self.update_blue()\n elif self.state['return']:\n self.update_return()\n self.last_position = (self.rect.centerx, self.rect.centery)", "def update_icon(self, _widget, _callback_data):\n\t\t\n\t\tprint \"in update_icon for \", self.name\n\t\tself.icon = self.__window.get_icon()\n\t\tself.icon.save(self.imgpath, \"png\")\n\t\tif not self.pile is None:\n\t\t\tself.pile.update_child_icon(self)\n\t\treturn", "def on_mouse_press(self, x, y, button):\n\n pass" ]
[ "0.63612306", "0.6020158", "0.5917613", "0.5873303", "0.5816151", "0.5782121", "0.57058483", "0.5668941", "0.56663585", "0.56530684", "0.5630077", "0.55906516", "0.5553169", "0.5495981", "0.54906726", "0.5464941", "0.5448922", "0.54335225", "0.5425156", "0.54172546", "0.53634053", "0.5361676", "0.5344855", "0.5316094", "0.53042126", "0.5302701", "0.5293129", "0.52847475", "0.5283876", "0.5265915" ]
0.6588073
0
This function should perform the job of projecting the input pointcloud onto the frame of an image captured by a camera with camera matrix as given, of dimensions as given, in pixels. points is a 3 x N array where the ith entry is an (x, y, z) point in 3D space, in the reference frame of the depth camera. This corresponds to the tf frame camera_depth_optical_frame. However, the image is taken by an RGB camera, with reference frame camera_color_optical_frame. (trans, rot) together give the translation vector and rotation matrix that transform points in the depth camera frame to points in the RGB camera frame. For each point in points, compute the pixel coordinates (u, v) onto which that point would be projected. This function should return a 2 x N integer array of pixel coordinates. The ith entry should be the index (u, v) of the pixel onto which the ith point in the pointcloud should get projected. Use the point projection model introduced in the lab documentation to perform this projection. Note that this function should be able to operate on large pointclouds very efficiently. Make good use of numpy functions to vectorize and to act on the entire pointcloud at once.
def project_points(points, cam_matrix, trans, rot):
    # STEP 1: Transform pointcloud into new reference frame.
    points = np.dot(rot, points) + trans[:, None]
    # STEP 2: Project new pointcloud onto image frame using K matrix.
    # Gives a 3 x N array of image plane coordinates in homogenous coordinates.
    homo_pixel_coords = np.dot(cam_matrix, points)
    # STEP 3: Convert homogenous coordinates to regular 2D coordinates.
    # To do this, divide the first two coordinates of homo_pixel_coords
    # by the third coordinate.
    pixel_coords = homo_pixel_coords[:2] / homo_pixel_coords[2]
    # STEP 4: Convert to integers. Take the floor of pixel_coords, then cast it
    # to an integer type, like numpy.int32.
    pixel_coords = np.int32(np.floor(pixel_coords))
    return pixel_coords
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def project(points, camera_params, theta):\n \"\"\"\n Function takes input of 3d_points, transformations and Convert 3-D points to 2-D by projecting onto images. \n Input:\n points: 3D points in world frame\n camera_params: parameters of camera corrosponding to the point\n theta: Needed For PAL camera to specify the sub camera index for the points\n Output:\n points_proj: 2D reprojected points for 3D points \n\n \"\"\"\n # Convert the 3D points to Camera Frame by rotaion followes by translation\n points_proj1 = rotate(points[:,0:3], camera_params[:, :3])\n points_proj1 += camera_params[:, 3:6]\n # FOR PAL: Converting into the Sub-camera Frame by respective rotation\n thetas = theta * np.pi / 3 \n points_proj = np.copy(points_proj1)\n points_proj[:,0] = points_proj1[:,0]*np.cos(thetas) - points_proj1[:,2]*np.sin(thetas)\n points_proj[:,2] = points_proj1[:,0]*np.sin(thetas) + points_proj1[:,2]*np.cos(thetas)\n # Avoiding Zero error\n for i in range(len(points_proj)):\n if(points_proj[i,2]==0):\n points_proj[i,0] = 0\n points_proj[i,1] = 0\n points_proj[i,2] = 1\n # 2D projection\n points_proj = points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj**2, axis=1)\n r = 1 + k1 * n + k2 * n**2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def project(self, point_cloud, round_px=True):\n if not isinstance(point_cloud, PointCloud) and not (isinstance(point_cloud, Point) and point_cloud.dim == 3):\n raise ValueError('Must provide PointCloud or 3D Point object for projection')\n if point_cloud.frame != self._frame:\n raise ValueError('Cannot project points in frame %s into camera with frame %s' %(point_cloud.frame, self._frame))\n\n points_proj = self.S.dot(point_cloud.data) + self.t\n if len(points_proj.shape) == 1:\n points_proj = points_proj[:, np.newaxis]\n point_depths = np.tile(points_proj[2,:], [3, 1])\n points_proj = np.divide(points_proj, point_depths)\n if round_px:\n points_proj = np.round(points_proj)\n\n if isinstance(point_cloud, Point):\n return Point(data=points_proj[:2,:].astype(np.int16), frame=self._frame)\n return ImageCoords(data=points_proj[:2,:].astype(np.int16), frame=self._frame)", "def project_to_image(self, point_cloud, round_px=True):\n if not isinstance(point_cloud, PointCloud) and not (isinstance(point_cloud, Point) and point_cloud.dim == 3):\n raise ValueError('Must provide PointCloud or 3D Point object for projection')\n if point_cloud.frame != self._frame:\n raise ValueError('Cannot project points in frame %s into camera with frame %s' %(point_cloud.frame, self._frame))\n\n points_proj = self.S.dot(point_cloud.data) + self.t\n if len(points_proj.shape) == 1:\n points_proj = points_proj[:, np.newaxis]\n point_depths = points_proj[2,:]\n point_z = np.tile(point_depths, [3, 1])\n points_proj = np.divide(points_proj, point_z)\n if round_px:\n points_proj = np.round(points_proj)\n points_proj = points_proj[:2,:].astype(np.int16)\n\n valid_ind = np.where((points_proj[0,:] >= 0) & \\\n (points_proj[1,:] >= 0) & \\\n (points_proj[0,:] < self.width) & \\\n (points_proj[1,:] < self.height))[0]\n\n depth_data = np.zeros([self.height, self.width])\n depth_data[points_proj[1,valid_ind], points_proj[0,valid_ind]] = point_depths[valid_ind]\n return DepthImage(depth_data, frame=self.frame)", "def get_projections(self, points_in_camera_frame: ARRAY_LIKE,\n image: int = 0, temperature: Real = 0) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n\n # ensure the input is 
an array\n points_in_camera_frame = np.asarray(points_in_camera_frame)\n\n # apply misalignment to the points\n if self.estimate_multiple_misalignments:\n if np.any(self.misalignment[image]): # optimization to avoid matrix multiplication\n points_in_camera_frame = rotvec_to_rotmat(self.misalignment[image]).squeeze() @ \\\n points_in_camera_frame\n\n else:\n if np.any(self.misalignment): # optimization to avoid matrix multiplication\n points_in_camera_frame = rotvec_to_rotmat(self.misalignment).squeeze() @ points_in_camera_frame\n\n # get the unitless image plane location\n pinhole_locations = points_in_camera_frame[:2] / points_in_camera_frame[2]\n\n # get the distorted image plane location\n image_locations = self.apply_distortion(pinhole_locations)\n\n # add the temperature based scaling\n image_locations *= self.get_temperature_scale(temperature)\n\n # get the pixel locations of the points, need to mess with transposes due to numpy broadcasting rules\n picture_locations = ((self.intrinsic_matrix[:, :2] @ image_locations).T + self.intrinsic_matrix[:, 2]).T\n\n return pinhole_locations, image_locations, picture_locations", "def project(points, camera_params):\n points_proj = rotate(points, camera_params[:, :3])\n points_proj += camera_params[:, 3:6]\n points_proj = -points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj ** 2, axis=1)\n r = 1 + k1 * n + k2 * n ** 2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def project(points, camera_params):\n # print(camera_params.shape)\n points_proj = rotate(points, camera_params[:, :3])\n points_proj += camera_params[:, 3:6]\n points_proj = -points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = 2360*np.ones(camera_params.shape[0])\n # np.ones()\n # n = np.sum(points_proj**2, axis=1)\n r = 1\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def project(points, camera_params):\n points_proj = rotate(points, camera_params[:, :3])\n points_proj += camera_params[:, 3:6]\n points_proj = points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj ** 2, axis=1)\n r = 1 + k1 * n + k2 * n ** 2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def fun(params,n_cameras,n_points,camera_indices,point_indices,points_3d , points_2d):\n camera_params = params[:n_cameras * 6].reshape((n_cameras, 6))\n # points_3d = points_3d.T\n # points_3d = params[n_cameras * 7:].reshape((n_points, 3))\n # print(point_indices)\n points_proj = project(points_3d[point_indices], camera_params[camera_indices])\n return (points_proj - points_2d).ravel()", "def project_points(self, points_3d, camera):\n batch_size = points_3d.shape[0]\n device = points_3d.device\n cam_t = torch.stack([camera[:, 1], camera[:, 2], 2 * self.focal_length / (self.img_res * camera[:, 0] + 1e-09)], dim=-1)\n camera_center = camera.new_zeros([batch_size, 2])\n rot_t = torch.eye(3, device=device, dtype=points_3d.dtype).unsqueeze(0).expand(batch_size, -1, -1)\n joints_2d = perspective_projection(points_3d, rotation=rot_t, translation=cam_t, focal_length=self.focal_length, camera_center=camera_center)\n return joints_2d", "def convert_depth_frame_to_pointcloud(depth_image, camera_intrinsics ):\r\n\t\r\n\t[height, width] = depth_image.shape\r\n\r\n\tnx = np.linspace(0, width-1, width)\r\n\tny = np.linspace(0, height-1, height)\r\n\tu, v = np.meshgrid(nx, ny)\r\n\tx = 
(u.flatten() - camera_intrinsics.ppx)/camera_intrinsics.fx\r\n\ty = (v.flatten() - camera_intrinsics.ppy)/camera_intrinsics.fy\r\n\r\n\tz = depth_image.flatten() / 1000;\r\n\tx = np.multiply(x,z)\r\n\ty = np.multiply(y,z)\r\n\r\n\tx = x[np.nonzero(z)]\r\n\ty = y[np.nonzero(z)]\r\n\tz = z[np.nonzero(z)]\r\n\r\n\treturn x, y, z", "def convert_depth_frame_to_pointcloud(depth_image, camera_intrinsics ):\n\t\n\t[height, width] = depth_image.shape\n\n\tnx = np.linspace(0, width-1, width)\n\tny = np.linspace(0, height-1, height)\n\tu, v = np.meshgrid(nx, ny)\n\tx = (u.flatten() - camera_intrinsics.ppx)/camera_intrinsics.fx\n\ty = (v.flatten() - camera_intrinsics.ppy)/camera_intrinsics.fy\n\n\tz = depth_image.flatten() / 1000;\n\tx = np.multiply(x,z)\n\ty = np.multiply(y,z)\n\n\tx = x[np.nonzero(z)]\n\ty = y[np.nonzero(z)]\n\tz = z[np.nonzero(z)]\n\n\treturn x, y, z", "def project_points(X, K, R, T, distortion_flag=False, distortion_params=None):\n # Project points from 3d world coordinates to 2d image coordinates\n\n #get projection matrix\n pmatrix = projection_matrix(R, T, K)\n\n #add 4th component to points\n ones = np.ones([1,len(X[0])])\n xones=np.row_stack((X,ones))\n\n #calculate pixel coordinates\n X_camera = pmatrix.dot(xones)\n\n return X_camera", "def project_and_draw(img, X_3d, K, R, T, distortion_flag, distortion_parameters):\n # call your \"project_points\" function to project 3D points to camera coordinates\n # draw the projected points on the image and save your output image here\n # cv.imwrite(output_name, img_array)\n X_camera = project_points(X_3D,K,R,T,distortion_flag,distortion_parameters)\n\n newimg=copy.copy(img)\n color = (0, 230, 0)\n if not distortion_flag:\n color = (0,0,230)\n\n Xp = []\n Xp.append([])\n Xp.append([])\n\n for cur in range(0,np.shape(X_camera)[1]):\n x = X_camera[0,cur]\n y = X_camera[1,cur]\n z = X_camera[2,cur]\n xp = int(x/z)\n yp = int(y/z)\n Xp[0].append(xp)\n Xp[1].append(yp)\n Xp2 = np.row_stack((Xp,np.ones(len(Xp[0]))))\n if(distortion_flag):\n Xp2 = distort(Xp2,K,distortion_parameters)\n\n for cur in range(0, np.shape(X_camera)[1]):\n x = Xp2[0, cur]\n y = Xp2[1, cur]\n newimg = cv.circle(newimg, (int(x), int(y)), 2, color, 0)\n\n #cv.imshow(\"Test\",newimg)\n #cv.waitKey(0)\n\n return newimg", "def _project_pointcloud(self, cloud):\n\n assert isinstance(cloud, PointCloud2)\n\n pc1 = PointCloud()\n pc1.header = cloud.header\n # hack the time! 
dont move the robot :-0\n pc1.header.stamp = rospy.Time.now()\n \n \n pc1.points = [Point32(*p) for p in pc2.read_points(cloud)]\n\n self._tf_listener.waitForTransform(pc1.header.frame_id,\n self._image_info.tf_frame, \n rospy.Time(0), \n rospy.Duration(4))\n\n image_frame_cloud = self._tf_listener.transformPointCloud (\n self._image_info.tf_frame, \n pc1)\n min_x, max_x, min_y, max_y = 640, 0, 480, 0 # TODO: remove hard coded image size!\n for pt in image_frame_cloud.points:\n u, v = self._image_info.project3dToPixel((pt.x, pt.y, pt.z))\n if v < min_y:\n min_y = int(v)\n if v > max_y:\n max_y = int(v)\n if u < min_x:\n min_x = int(u)\n if u > max_x:\n max_x = int(u)\n location = (((min_x, min_y), (max_x, max_y)))\n rospy.loginfo(\"Transformed cloud into image plane\")\n return location", "def as_point_cloud(self):\n far = 1000.0 # max depth in meters.\n intrinsic_mat = self.camera_setup.get_intrinsic_matrix()\n width, height = self.camera_setup.width, self.camera_setup.height\n # 2d pixel coordinates\n pixel_length = width * height\n u_coord = repmat(np.r_[0:width:1], height, 1).reshape(pixel_length)\n v_coord = repmat(np.c_[0:height:1], 1, width).reshape(pixel_length)\n normalized_depth = np.reshape(self.frame, pixel_length)\n\n # p2d = [u,v,1]\n p2d = np.array([u_coord, v_coord, np.ones_like(u_coord)])\n\n # P = [X,Y,Z]\n p3d = np.dot(inv(intrinsic_mat), p2d)\n p3d *= normalized_depth * far\n\n # [[X1,Y1,Z1],[X2,Y2,Z2], ... [Xn,Yn,Zn]]\n locations = np.asarray(np.transpose(p3d))\n # Transform the points in 3D world coordinates.\n to_world_transform = self.camera_setup.get_unreal_transform()\n point_cloud = to_world_transform.transform_points(locations)\n return point_cloud", "def project_onto_image(self, points_in_camera_frame: ARRAY_LIKE, image: int = 0,\n temperature: Real = 0) -> np.ndarray:\n\n _, __, picture_locations = self.get_projections(points_in_camera_frame, image, temperature=temperature)\n\n return picture_locations", "def depth_image_to_point_cloud(depth, intrinsics_matrix, dtype=tf.float32):\n with K.name_scope('depth_image_to_point_cloud'):\n intrinsics_matrix = tf.to_float(intrinsics_matrix)\n fy = intrinsics_matrix[1, 1]\n fx = intrinsics_matrix[0, 0]\n # center of image y coordinate\n center_y = intrinsics_matrix[2, 1]\n # center of image x coordinate\n center_x = intrinsics_matrix[2, 0]\n depth = tf.to_float(tf.squeeze(depth))\n # y, x\n y_shape, x_shape = K.int_shape(depth)\n\n y, x = tf.meshgrid(K.arange(y_shape),\n K.arange(x_shape),\n indexing='ij')\n\n x = tf.to_float(K.flatten(x))\n y = tf.to_float(K.flatten(y))\n depth = K.flatten(depth)\n\n assert K.int_shape(y) == K.int_shape(x)\n assert K.int_shape(y) == K.int_shape(depth)\n\n X = (x - center_x) * depth / fx\n Y = (y - center_y) * depth / fy\n\n assert K.int_shape(y) == K.int_shape(x)\n assert K.int_shape(y) == K.int_shape(depth)\n\n XYZ = K.stack([X, Y, depth], axis=-1)\n\n assert K.int_shape(XYZ) == (y_shape * x_shape, 3)\n\n XYZ = K.reshape(XYZ, [y_shape, x_shape, 3])\n return XYZ", "def convert_pointcloud_to_depth(pointcloud, camera_intrinsics):\r\n\r\n\tassert (pointcloud.shape[0] == 3)\r\n\tx_ = pointcloud[0,:]\r\n\ty_ = pointcloud[1,:]\r\n\tz_ = pointcloud[2,:]\r\n\r\n\tm = x_[np.nonzero(z_)]/z_[np.nonzero(z_)]\r\n\tn = y_[np.nonzero(z_)]/z_[np.nonzero(z_)]\r\n\r\n\tx = m*camera_intrinsics.fx + camera_intrinsics.ppx\r\n\ty = n*camera_intrinsics.fy + camera_intrinsics.ppy\r\n\r\n\treturn x, y", "def project_points(X, K, R, T, distortion_flag=False, distortion_params=None):\n # Project points from 3d 
world coordinates to 2d image coordinates\n X_camera = np.matmul(R, X) + T\n X_camera = X_camera / X_camera[2, :] # Normalize\n\n if distortion_flag:\n radiusSq = (X_camera[0, :] * X_camera[0, :]) + (X_camera[1, :] * X_camera[1, :])\n X_camera = X_camera * (1 + (distortion_params[0] * radiusSq) + (distortion_params[1] * (radiusSq * radiusSq)) + (distortion_params[4] * (radiusSq * radiusSq * radiusSq)))\n # X_camera = (X_camera * (1 + (distortion_params[0] * radiusSq) + (distortion_params[1] * (radiusSq * radiusSq)) + (distortion_params[4] * (radiusSq * radiusSq * radiusSq)))\n # + (2 * distortion_params[2] * X_camera[0,:] * X_camera[1,:]) + distortion_params[3] * (radiusSq + (2 * X_camera * X_camera)))\n\n X_camera[2, :] = 1.0\n X_camera = np.matmul(K, X_camera)\n X_camera = X_camera[:2, :]\n\n return X_camera", "def projective_inverse_warp_torch2(\n img, depth, pose, src_intrinsics, tgt_intrinsics, tgt_height, tgt_width, ret_flows=False):\n batch, height, width, channels = img.shape\n # Construct pixel grid coordinates (x, y, 1) for each pixel.\n # Duplicated for N (e.g. 4) of INPUT images (batch)\n pixel_coords = meshgrid_abs_torch(batch, tgt_height, tgt_width, img.device, False)\n\n # Note: \"target\" here means actually \"ref image\", forget about the ground truth targets!\n # You project pixels from \"target\" to the multiple inputs, not the other way round\n # Convert pixel coordinates to the target camera frame, 3D camera coords (X, Y, Z), seems OK so far...\n # Note: these are points in 3D camera coords (C) of the target camera, not world coords (W) !!!\n cam_coords = pixel2cam_torch(depth, pixel_coords, tgt_intrinsics)\n\n # Construct a 4x4 intrinsic matrix, why? wouldn't 3x4 suffice?\n filler = torch.tensor([[[0., 0., 0., 1.]]], device=img.device)\n filler = filler.repeat(batch, 1, 1)\n src_intrinsics4 = torch.cat([src_intrinsics, torch.zeros([batch, 3, 1], device=img.device)], axis=2)\n src_intrinsics4 = torch.cat([src_intrinsics4, filler], axis=1)\n\n # Get a 4x4 transformation matrix from 'target' camera frame to 'source'\n # pixel frame, looks OK\n proj_tgt_cam_to_src_pixel = torch.matmul(src_intrinsics4, pose)\n src_pixel_coords = cam2pixel_torch(cam_coords, proj_tgt_cam_to_src_pixel)\n\n # print(f'src_pixel_coords shape {src_pixel_coords.shape}')\n # print(f'src_pixel_coords {L(src_pixel_coords[:, :, :3,:])}')\n\n # Now we get trouble !\n if False:\n print(('src_pixel_coords', src_pixel_coords.shape, src_pixel_coords.dtype))\n for i in range(2):\n t = src_pixel_coords[0, :, :, i]\n print((i, t.min().item(), t.max().item()))\n sys.exit(0)\n\n # src_pixel_coords = (src_pixel_coords + torch.tensor([0.5, 0.5], device=img.device)) / torch.tensor([width, height],\n # device=img.device)\n\n src_pixel_coords = src_pixel_coords / torch.tensor([width-1, height-1], device=img.device)\n\n output_img = resampler_wrapper_torch(img, src_pixel_coords)\n if ret_flows:\n return output_img, src_pixel_coords - cam_coords\n else:\n return output_img", "def project_to_image_plane(self, point_in_world, timestamp):\n\n camera_info = CameraInfo()\n\n fx = self.config['camera_info']['focal_length_x']\n fy = self.config['camera_info']['focal_length_y']\n\n camera_info.width = self.config['camera_info']['image_width']\n camera_info.height = self.config['camera_info']['image_height']\n\n #print(\"fx {}, fy {}\".format(fx, fy))\n\n camera_info.K = np.array([[fx, 0, camera_info.width / 2],\n [0, fy, camera_info.height / 2],\n [0, 0, 1.]], dtype=np.float32)\n camera_info.P = np.array([[fx, 0, 
camera_info.width / 2, 0],\n [0, fy, camera_info.height / 2, 0],\n [0, 0, 1., 0]])\n camera_info.R = np.array([[1., 0, 0],\n [0, 1., 0],\n [0, 0, 1.]], dtype=np.float32)\n\n camera = PinholeCameraModel()\n camera.fromCameraInfo(camera_info)\n\n #print(\"point_in_world = {}\".format(str(point_in_world)))\n #print(\"camera projection matrix \", camera.P)\n\n # get transform between pose of camera and world frame\n trans = None\n point_in_camera_space = None\n point_in_image = None\n bbox_points_camera_image = []\n\n euler_transforms = (\n math.radians(90), # roll along X to force Y axis 'up'\n math.radians(-90 + -.75), # pitch along Y to force X axis towards 'right', with slight adjustment for camera's 'yaw'\n math.radians(-9) # another roll to orient the camera slightly 'upwards', (camera's 'pitch')\n )\n euler_axes = 'sxyx'\n\n try:\n self.listener.waitForTransform(\"/base_link\",\n \"/world\", timestamp, rospy.Duration(0.1))\n (trans, rot) = self.listener.lookupTransform(\"/base_link\",\n \"/world\", timestamp)\n\n camera_orientation_adj = tf.transformations.quaternion_from_euler(*euler_transforms, axes=euler_axes)\n\n trans_matrix = self.listener.fromTranslationRotation(trans, rot)\n camera_orientation_adj = self.listener.fromTranslationRotation((0, 0, 0), camera_orientation_adj)\n\n #print(\"trans {}, rot {}\".format(trans, rot))\n #print(\"transform matrix {}\".format(trans_matrix))\n\n point = np.array([point_in_world.x, point_in_world.y, point_in_world.z, 1.0])\n\n # this point should match what you'd see from being inside the vehicle looking straight ahead.\n point_in_camera_space = trans_matrix.dot(point)\n\n #print(\"point in camera frame {}\".format(point_in_camera_space))\n\n final_trans_matrix = camera_orientation_adj.dot(trans_matrix)\n\n # this point is from the view point of the camera (oriented along the camera's rotation quaternion)\n point_in_camera_space = final_trans_matrix.dot(point)\n\n #print(\"point in camera frame adj {}\".format(point_in_camera_space))\n\n bbox_points = [(point_in_camera_space[0] - 0.5, point_in_camera_space[1] - 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] + 0.5, point_in_camera_space[1] + 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] - 0.5, point_in_camera_space[1] - 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] + 0.5, point_in_camera_space[1] + 1.1, point_in_camera_space[2], 1.0)]\n\n # these points represent the bounding box within the camera's image\n for p in bbox_points:\n bbox_points_camera_image.append(camera.project3dToPixel(p))\n\n # print(\"point in image {}\".format(bbox_points_camera_image))\n\n except (tf.Exception, tf.LookupException, tf.ConnectivityException):\n rospy.logerr(\"Failed to find camera to map transform\")\n\n return bbox_points_camera_image", "def perspective_projection(points, rotation, translation,\n focal_length, camera_center, distortion=None):\n batch_size = points.shape[0]\n \n # Extrinsic\n if rotation is not None:\n points = torch.einsum('bij,bkj->bki', rotation, points)\n\n if translation is not None:\n points = points + translation.unsqueeze(1)\n\n if distortion is not None:\n kc = distortion\n points = points[:,:,:2] / points[:,:,2:]\n \n r2 = points[:,:,0]**2 + points[:,:,1]**2\n dx = (2 * kc[:,[2]] * points[:,:,0] * points[:,:,1] \n + kc[:,[3]] * (r2 + 2*points[:,:,0]**2))\n\n dy = (2 * kc[:,[3]] * points[:,:,0] * points[:,:,1] \n + kc[:,[2]] * (r2 + 2*points[:,:,1]**2))\n \n x = (1 + kc[:,[0]]*r2 + kc[:,[1]]*r2.pow(2) + kc[:,[4]]*r2.pow(3)) * 
points[:,:,0] + dx\n y = (1 + kc[:,[0]]*r2 + kc[:,[1]]*r2.pow(2) + kc[:,[4]]*r2.pow(3)) * points[:,:,1] + dy\n \n points = torch.stack([x, y, torch.ones_like(x)], dim=-1)\n \n \n # Intrinsic\n K = torch.zeros([batch_size, 3, 3], device=points.device)\n K[:,0,0] = focal_length\n K[:,1,1] = focal_length\n K[:,2,2] = 1.\n K[:,:-1, -1] = camera_center\n\n # Apply camera intrinsicsrf\n points = points / points[:,:,-1].unsqueeze(-1)\n projected_points = torch.einsum('bij,bkj->bki', K, points)\n projected_points = projected_points[:, :, :-1]\n\n return projected_points", "def projective_inverse_warp_torch3(\n img, depth, pose, src_intrinsics, tgt_intrinsics, tgt_height, tgt_width, ret_flows=False):\n batch, height, width, channels = img.shape\n # Construct pixel grid coordinates (x, y, 1) for each pixel.\n # Duplicated for N (e.g. 4) of INPUT images (batch)\n #delta_xy = src_center_xy - torch.tensor([float(tgt_width - 1) / 2, float(tgt_height - 1) / 2], device=src_center_xy.device)\n #delta_xyz = torch.cat([delta_xy, torch.zeros([batch, 1], device=delta_xy.device)], dim=1).unsqueeze(-1).unsqueeze(-1)\n # delta xyz [batch, 3, 1, 1]\n pixel_coords = meshgrid_abs_torch(batch, tgt_height, tgt_width, img.device, False)\n #pixel_coords = pixel_coords + delta_xyz\n\n # Note: \"target\" here means actually \"ref image\", forget about the ground truth targets!\n # You project pixels from \"target\" to the multiple inputs, not the other way round\n # Convert pixel coordinates to the target camera frame, 3D camera coords (X, Y, Z), seems OK so far...\n # Note: these are points in 3D camera coords (C) of the target camera, not world coords (W) !!!\n cam_coords = pixel2cam_torch(depth, pixel_coords, tgt_intrinsics)\n\n # Construct a 4x4 intrinsic matrix, why? wouldn't 3x4 suffice?\n filler = torch.tensor([[[0., 0., 0., 1.]]], device=img.device)\n filler = filler.repeat(batch, 1, 1)\n src_intrinsics4 = torch.cat([src_intrinsics, torch.zeros([batch, 3, 1], device=img.device)], axis=2)\n src_intrinsics4 = torch.cat([src_intrinsics4, filler], axis=1)\n\n # Get a 4x4 transformation matrix from 'target' camera frame to 'source'\n # pixel frame, looks OK\n proj_tgt_cam_to_src_pixel = torch.matmul(src_intrinsics4, pose)\n src_pixel_coords = cam2pixel_torch(cam_coords, proj_tgt_cam_to_src_pixel)\n\n # print(f'src_pixel_coords shape {src_pixel_coords.shape}')\n # print(f'src_pixel_coords {L(src_pixel_coords[:, :, :3,:])}')\n\n # Now we get trouble !\n if False:\n print(('src_pixel_coords', src_pixel_coords.shape, src_pixel_coords.dtype))\n for i in range(2):\n t = src_pixel_coords[0, :, :, i]\n print((i, t.min().item(), t.max().item()))\n sys.exit(0)\n\n # src_pixel_coords = (src_pixel_coords + torch.tensor([0.5, 0.5], device=img.device)) / torch.tensor([width, height],\n # device=img.device)\n\n src_pixel_coords = src_pixel_coords / torch.tensor([width-1, height-1], device=img.device)\n\n output_img = resampler_wrapper_torch(img, src_pixel_coords)\n if ret_flows:\n return output_img, src_pixel_coords - cam_coords\n else:\n return output_img", "def project_points_img(points, proj_mat, width, height):\n pixels = proj_mat.dot(points)\n pixels = np.divide(pixels[:2, :], pixels[2, :]).transpose().astype(np.int)\n\n # Remove pixels that are outside the image\n pixels[:, 0] = np.clip(pixels[:, 0], 0, width)\n pixels[:, 1] = np.clip(pixels[:, 1], 0, height)\n # mask_x = (pixels[:, 0] < width) & (pixels[:, 0] > 0)\n # mask_y = (pixels[:, 1] < height) & (pixels[:, 1] > 0)\n\n # # Return the pixels and points that are inside the 
image\n # pixels = pixels[mask_x & mask_y]\n return pixels", "def cam2pixel(cam_coords, proj_c2p_rot, proj_c2p_tr, padding_mode):\n b, _, h, w = cam_coords.size()\n cam_coords_flat = cam_coords.view(b, 3, -1) # [B, 3, H*W]\n if proj_c2p_rot is not None:\n pcoords = proj_c2p_rot.bmm(cam_coords_flat)\n else:\n pcoords = cam_coords_flat\n\n if proj_c2p_tr is not None:\n pcoords = pcoords + proj_c2p_tr # [B, 3, H*W]\n X = pcoords[:, 0]\n Y = pcoords[:, 1]\n Z = pcoords[:, 2].clamp(min=1e-8)\n\n X_norm = 2 * (X / Z) / (w - 1) - 1 # Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1) [B, H*W]\n Y_norm = 2 * (Y / Z) / (h - 1) - 1 # Idem [B, H*W]\n if padding_mode == 'zeros':\n X_mask = ((X_norm > 1) + (X_norm < -1)).detach()\n X_norm[X_mask] = 2 # make sure that no point in warped image is a combinaison of im and gray\n Y_mask = ((Y_norm > 1) + (Y_norm < -1)).detach()\n Y_norm[Y_mask] = 2\n\n pixel_coords = torch.stack([X_norm, Y_norm], dim=2) # [B, H*W, 2]\n return pixel_coords.view(b, h, w, 2)", "def fourPointTransform(image, points):\r\n\r\n topLeft, topRight, bottomLeft, bottomRight = sortFourPoints(points)\r\n\r\n # Determine the maximum width\r\n topWidth = np.sqrt(((topRight[0] - topLeft[0]) ** 2) + ((topRight[1] - topLeft[1]) ** 2))\r\n bottomWidth = np.sqrt(((bottomRight[0] - bottomLeft[0]) ** 2) + ((bottomRight[1] - bottomLeft[1]) ** 2))\r\n width = max(int(topWidth), int(bottomWidth))\r\n\r\n # Determine the maximum height\r\n leftHeight = np.sqrt(((topLeft[0] - bottomLeft[0]) ** 2) + ((topLeft[1] - bottomLeft[1]) ** 2))\r\n rightHeight = np.sqrt(((topRight[0] - bottomRight[0]) ** 2) + ((topRight[1] - bottomRight[1]) ** 2))\r\n height = max(int(leftHeight), int(rightHeight))\r\n\r\n source = np.array([topLeft, topRight, bottomRight, bottomLeft], dtype=\"float32\")\r\n\r\n destination = np.array([[0, 0],\r\n [width - 1, 0],\r\n [width - 1, height - 1],\r\n [0, height - 1]], dtype=\"float32\")\r\n\r\n transformMatrix = cv2.getPerspectiveTransform(source, destination)\r\n\r\n return cv2.warpPerspective(image, transformMatrix, (width, height))", "def perspective_projection(points, rotation, translation, focal_length, camera_center):\n batch_size = points.shape[0]\n K = torch.zeros([batch_size, 3, 3], device=points.device)\n K[:, 0, 0] = focal_length\n K[:, 1, 1] = focal_length\n K[:, 2, 2] = 1.0\n K[:, :-1, -1] = camera_center\n points = torch.einsum('bij,bkj->bki', rotation, points)\n points = points + translation.unsqueeze(1)\n projected_points = points / points[:, :, -1].unsqueeze(-1)\n projected_points = torch.einsum('bij,bkj->bki', K, projected_points)\n projected_points = projected_points[:, :, :-1]\n return projected_points", "def perspective_projection(points, rotation, translation,\n focal_length, camera_center):\n batch_size = points.shape[0]\n K = torch.zeros([batch_size, 3, 3], device=points.device)\n K[:, 0, 0] = focal_length\n K[:, 1, 1] = focal_length\n K[:, 2, 2] = 1.\n K[:, :-1, -1] = camera_center\n\n # Transform points\n points = torch.einsum('bij,bkj->bki', rotation, points)\n points = points + translation.unsqueeze(1)\n\n # Apply perspective distortion\n projected_points = points / points[:, :, -1].unsqueeze(-1)\n\n # Apply camera intrinsics\n projected_points = torch.einsum('bij,bkj->bki', K, projected_points)\n\n return projected_points[:, :, :-1]", "def depth_image_to_point_cloud2(depth, intrinsics_matrix, dtype=np.float32):\n\n depth = np.squeeze(depth)\n XYZ = np.zeros(depth.shape + (3,))\n\n fx = intrinsics_matrix[0, 0]\n fy = intrinsics_matrix[1, 
1]\n # center of image x coordinate\n center_x = intrinsics_matrix[2, 0]\n # center of image y coordinate\n center_y = intrinsics_matrix[2, 1]\n y_range = depth.shape[0]\n x_range = depth.shape[1]\n XYZ = depth_image_to_point_cloud_numba2(depth, y_range, x_range, center_y, center_x, fy, fx, XYZ)\n\n return XYZ.astype(dtype)", "def get_2d_points(image, rotation_vector, translation_vector, camera_matrix, val):\n point_3d = []\n dist_coeffs = np.zeros((4,1))\n rear_size = val[0]\n rear_depth = val[1]\n point_3d.append((-rear_size, -rear_size, rear_depth))\n point_3d.append((-rear_size, rear_size, rear_depth))\n point_3d.append((rear_size, rear_size, rear_depth))\n point_3d.append((rear_size, -rear_size, rear_depth))\n point_3d.append((-rear_size, -rear_size, rear_depth))\n \n front_size = val[2]\n front_depth = val[3]\n point_3d.append((-front_size, -front_size, front_depth))\n point_3d.append((-front_size, front_size, front_depth))\n point_3d.append((front_size, front_size, front_depth))\n point_3d.append((front_size, -front_size, front_depth))\n point_3d.append((-front_size, -front_size, front_depth))\n point_3d = np.array(point_3d, dtype=np.float).reshape(-1, 3)\n \n # Map to 2D image points\n (point_2d, _) = cv2.projectPoints(point_3d,rotation_vector,translation_vector,camera_matrix,dist_coeffs)\n point_2d = np.int32(point_2d.reshape(-1, 2))\n return point_2d" ]
[ "0.75861025", "0.73979294", "0.73724365", "0.70940655", "0.7059192", "0.7011868", "0.69861317", "0.6886492", "0.685968", "0.6813133", "0.6795302", "0.6780843", "0.6771063", "0.676563", "0.6705942", "0.66217476", "0.65333843", "0.6509686", "0.64969933", "0.6453741", "0.6448303", "0.6444768", "0.6443234", "0.64050543", "0.6394137", "0.6328982", "0.6328004", "0.62937623", "0.6284238", "0.62643975" ]
0.7957179
0
Attempts to purchase all goods listed in the dict order, depositing them in the Player's cargo holds. Does not care about max cargo. dryRun simply checks if the purchase is possible. remaining controls whether the order should be 100% purchased (True), or only purchase goods the player lacks. Returns False if some goods are unavailable, 0 if insufficient credits, True if the purchase would work
def buyCargo(self, order, dryRun=False, remaining=True): ply = self.window.playerShip shop = self.planet.goods toBuy = order.copy() for mat in toBuy: if remaining and mat in ply.cargo: toBuy[mat] -= ply.cargo[mat].quantity if toBuy[mat] > 0: if mat not in shop: return False if shop[mat]*toBuy[mat] > ply.credits: return 0 if not dryRun: for mat,amt in toBuy.items(): ply.addCargo(mat, amt) ply.credits -= shop[mat] * amt return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def complete_purchase(self, customer_credit=0):\r\n \r\n #take the products first, then tell customer how many tickets to take\r\n #requires IChat interface to be passed to tell customers how many tickets to take\r\n \r\n #switch to list view in the collection window\r\n print(\"YES\")\r\n self._slow_click(target=self._images.get_trade(\"list_view_collection_window\"))\r\n print(\"NO\")\r\n \r\n running_total = self.search_for_products()\r\n running_total -= customer_credit\r\n \r\n print(\"running total is \" + str(running_total))\r\n if running_total == 0 or not running_total:\r\n self.cancel_trade()\r\n return False\r\n \r\n total_tickets_notice = 'Please take %i tickets.' % running_total\r\n self.Ichat.type_msg(total_tickets_notice)\r\n \r\n #wait for the customer to get the tickets, then click confirm\r\n if not self.preconfirm_scan_purchase(running_total): \r\n self.cancel_trade()\r\n \r\n self.go_to_confirmation()\r\n print(\"starting confirmation scan\")\r\n #run a final confirmation scan to check the products and tickets taken\r\n products_bought = self.confirmation_scan(tickets_to_give=running_total, credit=customer_credit)\r\n \r\n self.Ichat.close_current_chat()\r\n \r\n if products_bought:\r\n self._slow_click(target=self._images.get_trade(\"confirm_button\", \"confirm\"))\r\n wait(Pattern(self._images.get_ok_button()), 600)\r\n self._slow_click(target=self._images.get_ok_button())\r\n products_bought[\"total_tickets\"] = running_total\r\n \r\n return products_bought\r\n \r\n else:\r\n self.cancel_trade()\r\n return False", "def generate_orders(self, good):\n surplus = self.inventory.surplus(good)\n if surplus >= 1: # sell inventory\n # the original only old one item here\n sell_amount = surplus\n order = self.create_sell_order(good, surplus)\n if order:\n # print('{} sells {} {}'.format(self.pop_job.title, sell_amount, good.name))\n self.market.sell(order)\n else: # buy more\n shortage = self.inventory.shortage(good)\n free_space = self.inventory.empty_space\n\n if shortage > 0:\n if shortage <= free_space:\n # enough space for ideal order\n limit = shortage\n else:\n # not enough space for ideal order\n limit = math.floor(free_space / shortage)\n\n if limit > 0:\n order = self.create_buy_order(good, limit)\n if order:\n # print('{} buys {} {}'.format(self.pop_job.title, limit, good.name))\n self.market.buy(order)\n # else:\n # print(\"{} has no shortage of {} (has shortage: {})\".format(self.pop_job.title, good.title, shortage))", "def play(self):\n hand = self.state.hand\n supply = self.state.supply\n money = count_money(hand) - self.state.used_money\n if supply['Province'] > 0 and money >= Province.Cost:\n self.game_client.buy('Province')\n elif supply['Duchy'] > 0 and money >= Duchy.Cost:\n self.game_client.buy('Duchy')\n elif supply['Estate'] > 0 and money >= Estate.Cost:\n self.game_client.buy('Estate')\n\n self.game_client.done()", "def do_buy(self, args):\n if not self._check_args(args):\n return\n else:\n self.wallet.get_coins_from_faucet(args)", "def allow_purchase_order(self):\n return self._allow_purchase_order", "def are_ingredients_sufficient(order_ingredients):\n for item in order_ingredients:\n if order_ingredients[item] > resources[item]:\n print(f\"Sorry there is not enough {item}.\")\n return False\n return True", "def confirm_purchase(self, data, batch):\n logger.info('AddStockInventory purchase confirm initiated')\n try:\n with Transaction().start(DBNAME, 1) as transaction:\n transaction.context = config.get_config().context\n batch = batch\n data = 
data\n purchase = self.Purchase.search([('batch_number', '=', batch)])[-1]\n if purchase.state == 'processing':\n return False\n lines = purchase.lines\n party = self.Party.search(['name', '=', 'Purchase'])[-1]\n for i in data:\n product = self.Product.search([('code', '=', i['code']),\n ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]\n supplier = self.Party.search(['name', '=', i['supplier']])[-1]\n for j in lines:\n if j.product == product:\n pro = j.product\n template = pro.template\n template.list_price = Decimal(i['rate'])\n template.save()\n pro.save()\n j.quantity = float(i['quantity'])\n j.supplier = supplier\n j.save()\n purchase.party = party\n payment, = self.PaymentTerm.search(['name', '=', 'Full Payment'])\n purchase.payment_term = payment\n purchase.invoice_address = party.addresses[0]\n user = self.User(id=1)\n purchase.company = user.main_company\n purchase.save()\n # transaction.cursor.commit()\n purchase.quote((purchase,))\n purchase.confirm((purchase,))\n purchase.process((purchase,))\n transaction.cursor.commit()\n save = self.save_inventory(data, batch)\n if save:\n return True\n else:\n raise Exception('could not save or confirm')\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def apply_peddler_effect(self, player: Player) -> None:\n \"\"\"\n Buy 1 cube (any resource but gold) from the stock with 1 denier.\n \"\"\"\n # Remark: Hard-coded! We don't use the tags <cost><n_deniers>-1 and <gain><CHOICES>... in <game_elements><buildings><player_buildings><player_building><secondary_effect>.\n money_resource_cost, qty_cost = Money.money, -1 # type: MoneyResource, int\n if player.current_money_resources[money_resource_cost] + \\\n qty_cost < 0: # Has the player enough money or resource?\n print(indent(4) + player.txt_name_money_resources_workers_PPs_deck(True, True, False, False, False) +\n ' and can\\'t apply the effect because he/she doesn\\'t have enough money or resource as ' +\n str(qty_cost) + ' ' + money_resource_cost.name + '(s) required.')\n else:\n resource_gain_choices, qty_gain = [resource for resource in Resource.resources.values()\n if not resource.is_wild()], \\\n +1 # type: List[Resource], int\n resource_gain = player.choose_buy_resource(money_resource_cost, qty_cost, resource_gain_choices,\n qty_gain) # type: Resource\n if resource_gain is None:\n print(indent(4) + player.txt_name_money_resources_workers_PPs_deck(True, True, False, False, False) +\n ' and had chosen to don\\'t apply the effect.')\n else:\n print(indent(4) + player.name() + ' wants to consume ' + str(qty_cost) + ' ' +\n money_resource_cost.name + '(s) to obtain ' + str(qty_gain) + ' ' + resource_gain.name + '(s).')\n player.current_money_resources[money_resource_cost] += qty_cost\n player.current_money_resources[resource_gain] += qty_gain\n print(indent(4) + player.txt_name_money_resources_workers_PPs_deck(True, True, False, False, False) +\n ' once the effect applied.')", "def test_discard_buy(self):\n self.plr.test_input = [\"finish selecting\", \"discard gold\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 2)\n self.assertEqual(self.plr.actions.get(), 1)\n self.assertEqual(self.plr.buys.get(), 2)\n self.assertNotIn(\"Gold\", self.plr.piles[Piles.HAND])", "def get_plan(\n self,\n requirement_dct,\n deposited_dct=None,\n print_output=True,\n outcome=False,\n gold_demand=True,\n exp_demand=True,\n language=None,\n exclude=None,\n non_cn_compat=False,\n ):\n status_dct = {\n 0: 
\"Optimization terminated successfully. \",\n 1: \"Iteration limit reached. \",\n 2: \"Problem appears to be infeasible. \",\n 3: \"Problem appears to be unbounded. \",\n 4: \"Numerical difficulties encountered.\",\n }\n stt = time.time()\n requirement_dct, requirement_lang = self.convert_requirements(requirement_dct)\n if language is None:\n language = requirement_lang\n deposited_dct, _ = self.convert_requirements(None)\n\n demand_lst = [0 for x in range(len(self.item_array))]\n for k, v in requirement_dct.items():\n demand_lst[self.item_id_rv[k]] = v\n for k, v in deposited_dct.items():\n demand_lst[self.item_dct_rv[k]] -= v\n\n if exclude is None:\n exclude = set()\n else:\n exclude = set(exclude)\n\n is_stage_alive = []\n for stage in self.stage_array:\n if stage in exclude:\n is_stage_alive.append(False)\n continue\n if non_cn_compat:\n try:\n if int(stage.lstrip(\"S\")[0]) > NON_CN_WORLD_NUM:\n is_stage_alive.append(False)\n continue\n except ValueError:\n pass\n is_stage_alive.append(True)\n\n if exclude or non_cn_compat:\n BackTrace = [\n copy.copy(self.stage_array),\n copy.copy(self.cost_lst),\n copy.copy(self.probs_matrix),\n copy.copy(self.cost_exp_offset),\n copy.copy(self.cost_gold_offset),\n ]\n self.stage_array = self.stage_array[is_stage_alive]\n self.cost_lst = self.cost_lst[is_stage_alive]\n self.probs_matrix = self.probs_matrix[is_stage_alive]\n self.cost_exp_offset = self.cost_exp_offset[is_stage_alive]\n self.cost_gold_offset = self.cost_gold_offset[is_stage_alive]\n\n solution, dual_solution, excp_factor = self._get_plan_no_prioties(\n demand_lst, outcome, gold_demand, exp_demand\n )\n x, status = solution.x / excp_factor, solution.status\n y = dual_solution.x\n n_looting, n_convertion = x[: len(self.cost_lst)], x[len(self.cost_lst) :]\n\n cost = np.dot(x[: len(self.cost_lst)], self.cost_lst)\n gcost = np.dot(x[len(self.cost_lst) :], self.convertion_cost_lst) / 0.004\n gold = -np.dot(n_looting, self.cost_gold_offset) / 0.004\n exp = -np.dot(n_looting, self.cost_exp_offset) * 7400 / 30.0\n\n if status != 0:\n raise ValueError(status_dct[status])\n\n stages = []\n for i, t in enumerate(n_looting):\n if t >= 0.1:\n target_items = np.where(self.probs_matrix[i] >= 0.02)[0]\n items = {}\n for idx in target_items:\n if len(self.item_id_array[idx]) != 5:\n continue\n try:\n name_str = self.itemdata[language][int(self.item_id_array[idx])]\n except KeyError:\n # Fallback to CN if language is unavailable\n name_str = self.itemdata[\"zh_CN\"][int(self.item_id_array[idx])]\n items[name_str] = float2str(self.probs_matrix[i, idx] * t)\n stage = {\n \"stage\": self.stage_array[i],\n \"count\": float2str(t),\n \"items\": items,\n }\n stages.append(stage)\n\n crafts = []\n for i, t in enumerate(n_convertion):\n if t >= 0.1:\n idx = np.argmax(self.convertion_matrix[i])\n item_id = self.item_id_array[idx]\n try:\n target_id = self.itemdata[language][int(item_id)]\n except KeyError:\n target_id = self.itemdata[\"zh_CN\"][int(item_id)]\n materials = {}\n for k, v in self.convertions_dct[item_id].items():\n try:\n key_name = self.itemdata[language][int(k)]\n except KeyError:\n key_name = self.itemdata[\"zh_CN\"][int(k)]\n materials[key_name] = str(v * int(t + 0.9))\n synthesis = {\n \"target\": target_id,\n \"count\": str(int(t + 0.9)),\n \"materials\": materials,\n }\n crafts.append(synthesis)\n elif t >= 0.05:\n idx = np.argmax(self.convertion_matrix[i])\n item_id = self.item_id_array[idx]\n try:\n target_name = self.itemdata[language][int(item_id)]\n except KeyError:\n target_name = 
self.itemdata[\"zh_CN\"][int(item_id)]\n materials = {}\n for k, v in self.convertions_dct[item_id].items():\n try:\n key_name = self.itemdata[language][int(k)]\n except KeyError:\n key_name = self.itemdata[\"zh_CN\"][int(k)]\n materials[key_name] = \"%.1f\" % (v * t)\n synthesis = {\n \"target\": target_name,\n \"count\": \"%.1f\" % t,\n \"materials\": materials,\n }\n crafts.append(synthesis)\n\n values = [\n {\"level\": \"1\", \"items\": []},\n {\"level\": \"2\", \"items\": []},\n {\"level\": \"3\", \"items\": []},\n {\"level\": \"4\", \"items\": []},\n {\"level\": \"5\", \"items\": []},\n ]\n for i, item_id in enumerate(self.item_id_array):\n if len(item_id) == 5 and y[i] > 0.1:\n try:\n item_name = self.itemdata[language][int(item_id)]\n except KeyError:\n item_name = self.itemdata[\"zh_CN\"][int(item_id)]\n item_value = {\"name\": item_name, \"value\": \"%.2f\" % y[i]}\n values[int(self.item_id_array[i][-1]) - 1][\"items\"].append(item_value)\n for group in values:\n group[\"items\"] = sorted(\n group[\"items\"], key=lambda k: float(k[\"value\"]), reverse=True\n )\n\n res = {\n \"lang\": language,\n \"cost\": int(cost),\n \"gcost\": int(gcost),\n \"gold\": int(gold),\n \"exp\": int(exp),\n \"stages\": stages,\n \"craft\": crafts,\n \"values\": list(reversed(values)),\n }\n\n if print_output:\n print(\n status_dct[status]\n + (\" Computed in %.4f seconds,\" % (time.time() - stt))\n )\n\n if print_output:\n print(\n \"Estimated total cost: %d, gold: %d, exp: %d.\"\n % (res[\"cost\"], res[\"gold\"], res[\"exp\"])\n )\n print(\"Loot at following stages:\")\n for stage in stages:\n display_lst = [k + \"(%s) \" % stage[\"items\"][k] for k in stage[\"items\"]]\n print(\n \"Stage \"\n + stage[\"stage\"]\n + \"(%s times) ===> \" % stage[\"count\"]\n + \", \".join(display_lst)\n )\n\n print(\"\\nSynthesize following items:\")\n for synthesis in crafts:\n display_lst = [\n k + \"(%s) \" % synthesis[\"materials\"][k]\n for k in synthesis[\"materials\"]\n ]\n print(\n synthesis[\"target\"]\n + \"(%s) <=== \" % synthesis[\"count\"]\n + \", \".join(display_lst)\n )\n\n print(\"\\nItems Values:\")\n for i, group in reversed(list(enumerate(values))):\n display_lst = [\n \"%s:%s\" % (item[\"name\"], item[\"value\"]) for item in group[\"items\"]\n ]\n print(\"Level %d items: \" % (i + 1))\n print(\", \".join(display_lst))\n\n if exclude:\n self.stage_array = BackTrace[0]\n self.cost_lst = BackTrace[1]\n self.probs_matrix = BackTrace[2]\n self.cost_exp_offset = BackTrace[3]\n self.cost_gold_offset = BackTrace[4]\n\n return res", "def test_lpdaac_good(self):\n self.assertIsNone(api.inventory.check(self.lpdaac_order_good))", "def purchase_order(self, ship_id, good, quantity):\n payload = {'shipId': ship_id, 'good': good, 'quantity': quantity}\n r = requests.post(self.base_url + f'/users/{self.username}/purchase-orders', headers=self.auth_header, params=payload)\n return r.text", "def is_resource_sufficient(order_ingredients):\r\n for item in order_ingredients:\r\n if order_ingredients[item] > resources[item]:\r\n print(f\"Sorry there is not enough {item}.\")\r\n return False\r\n return True", "def trial(trial_no, num_actions):\n\tlogging.info(\"Trial %s\", trial_no)\n\tpq = PriorityQueue()\n\tinsert_delete(pq, int(num_actions))\n\titems = []\n\twhile not pq.empty():\n\t\titems.append(pq.get())\n\tif len(items) < 2:\n\t\tlogging.info(\"Order is trivially preserved; fewer than 2 items.\")\n\tpreserved = True\n\tfor i1, i2 in it.izip(items, items[1:]):\n\t\tif i1 >= i2:\n\t\t\tpreserved = 
False\n\t\t\tbreak\n\tlogging.info(\"Order%s preserved.\", {True: \"\", False: \" not\"}[preserved])\n\tlog_items(items)", "def prepare_order(request):\n distributer_id = request.GET.get('distributer')\n billing_address_id = request.GET.get('bill_addr')\n pickup_method = 2 # this is left in place if we ever decide to have door-to-door deliveries - otherwise it should be deleted\n cart = Cart.objects.get_or_create(user=request.user, processed_to_order=False)[0]\n user_bill_addr = UserBillingAddress.objects.get_or_create(pk=billing_address_id, user=request.user)[0]\n distributer = Distributer.objects.get(pk=distributer_id)\n\n # Create order\n order = Order()\n order.user = request.user\n order.distributer = distributer\n order.subtotal = cart.subtotal\n order.tax_total = cart.tax_total\n order.total = cart.total\n order.discount_for_returned_package = 0 #TODO implement returned packaging\n order.to_pay = 0 #TODO implement returned packaging\n order.delivery_method = pickup_method\n order.save()\n\n # create all order items\n for item in cart.cartitem_set.all():\n order_item = OrderItem()\n order_item.order = order\n order_item.item_name = str(item.item)\n order_item.item_price = item.item.price\n order_item.item_quantity = item.quantity\n order_item.item_decimal_quantity = 0 #TODO implement decimal quantity\n order_item.item_unit_of_measure = \"kom\" #TODO implement decimal quantity\n order_item.item_tax_bracket = item.item.tax_bracket\n order_item.item_subtotal = item.line_subtotal\n order_item.item_tax_total = item.line_tax_total\n order_item.item_total = item.line_tax_total\n if item.item.package_type == None:\n order_item.item_package = None\n order_item.item_package_price = 0\n else:\n order_item.item_package = item.item.package_type.type\n order_item.item_package_price = item.item.package_type.price\n order_item.item_package_subtotal = item.line_package_subtotal\n order_item.item_package_tax_total = item.line_package_tax_total\n order_item.item_package_total = item.line_package_total\n order_item.save()\n\n billing_address = OrderBillingAddress()\n billing_address.order = order\n billing_address.name = user_bill_addr.name\n billing_address.surname = user_bill_addr.surname\n billing_address.street_name = user_bill_addr.street_name\n billing_address.street_nr = user_bill_addr.street_nr\n billing_address.zip_code = user_bill_addr.zip_code\n billing_address.city = user_bill_addr.city\n billing_address.country = user_bill_addr.country\n billing_address.vat_nr = user_bill_addr.vat_nr\n billing_address.vat_taxpayer = user_bill_addr.vat_taxpayer\n billing_address.save()\n\n return redirect(reverse('orders_overview', kwargs={'pk': str(order.pk)}))", "def coffee_machine():\n MENU = {\n \"espresso\": {\n \"ingredients\": {\n \"water\": 50,\n \"coffee\": 18,\n },\n \"cost\": 1.5,\n },\n \"latte\": {\n \"ingredients\": {\n \"water\": 200,\n \"milk\": 150,\n \"coffee\": 24,\n },\n \"cost\": 2.5,\n },\n \"cappuccino\": {\n \"ingredients\": {\n \"water\": 250,\n \"milk\": 100,\n \"coffee\": 24,\n },\n \"cost\": 3.0,\n }\n }\n my_ingredients= {\"water\":300,\"milk\":200,\"coffee\":100}\n flag=True\n can_make=True\n while flag:\n sum_money=0\n coffee=input(\"What would you want?(espresso/latte/cappuccino):\\n\").lower()\n if coffee==\"off\":\n flag=False\n break\n try:\n for item in my_ingredients:\n if coffee != \"espresso\":\n if my_ingredients[\"water\"] >= MENU[coffee][\"ingredients\"][\"water\"]:\n if my_ingredients[\"milk\"] >= MENU[coffee][\"ingredients\"][\"milk\"]:\n if 
my_ingredients[\"coffee\"] >= MENU[coffee][\"ingredients\"][\"coffee\"]:\n my_ingredients[\"water\"]-=MENU[coffee][\"ingredients\"][\"water\"]\n my_ingredients[\"milk\"]-=MENU[coffee][\"ingredients\"][\"milk\"]\n my_ingredients[\"coffee\"]-=MENU[coffee][\"ingredients\"][\"coffee\"]\n can_make = True\n break\n can_make, flag = False, False\n print(\"Sorry, there are not enough ingredients\")\n break\n else:\n if my_ingredients[\"water\"] >= MENU[coffee][\"ingredients\"][\"water\"]:\n if my_ingredients[\"coffee\"] >= MENU[coffee][\"ingredients\"][\"coffee\"]:\n my_ingredients[\"water\"] -= MENU[coffee][\"ingredients\"][\"water\"]\n my_ingredients[\"coffee\"] -= MENU[coffee][\"ingredients\"][\"coffee\"]\n can_make = True\n break\n can_make,flag = False,False\n print(\"Sorry, there are not enough ingredients\")\n break\n if can_make:\n print(\"Please insert coins\")\n num_quarters=int(input(\"How many quarters?: \"))\n num_dimes = int(input(\"How many dimes?: \"))\n num_nickles = int(input(\"How many nickles?: \"))\n num_pennies = int(input(\"How many pennies?: \"))\n sum_money=num_quarters*0.25+num_dimes*0.1+num_nickles*0.05+num_pennies*0.1**2\n if sum_money<MENU[coffee][\"cost\"]: print(\"Sorry,that's not enough money. Money refunded.\")\n elif sum_money==MENU[coffee][\"cost\"]:\n print(\"Here is your {}, Enjoy!\".format(coffee))\n else:\n print(\"Here is ${} in change\".format(round(sum_money-MENU[coffee][\"cost\"],2)))\n print(\"Here is your {}, Enjoy!\".format(coffee))\n except:\n print(\"Fix your input please\")", "def test_multiple_creates_do_not_increase_products(self):\n for i in xrange(0, 10):\n modified_po = copy.deepcopy(base_purchase_order)\n self.assertEqual(Supply.objects.get(pk=1).quantity, 10)\n \n resp = self.client.post('/api/v1/purchase-order/', format='json', data=modified_po)\n \n self.assertEqual(resp.status_code, 201, msg=resp)\n \n po_data = resp.data\n self.assertEqual(po_data['status'], 'AWAITING APPROVAL')\n\n item1 = po_data['items'][0]\n #self.assertEqual(item1['supply']['id'], 1)\n self.assertEqual(item1['status'], u'Ordered')\n\n item2 = po_data['items'][1]\n #self.assertEqual(item1['supply']['id'], 2)\n self.assertEqual(item1['status'], u'Ordered')\n \n #Test database values\n po = PurchaseOrder.objects.get(pk=resp.data['id'])\n self.assertEqual(po.status, 'AWAITING APPROVAL')\n for item in po.items.all():\n self.assertEqual(item.status, u\"Ordered\")\n \n supplier = Supplier.objects.get(pk=1)\n\n supply = Supply.objects.get(pk=1)\n self.assertEqual(supply.quantity, 10)\n self.assertEqual(supply.products.filter(supplier=supplier).count(), 1)\n\n supply = Supply.objects.get(pk=2)\n self.assertEqual(supply.quantity, 10)\n self.assertEqual(supply.products.filter(supplier=supplier).count(), 1)", "def donate(self):\n\n # Get item\n import converter\n self.hero.inventory_menu()\n item = prompt(\"Select a weapon, shield or armor to donate. Or \\\npress enter to exit. 
\").lower()\n item = converter.convert(item)\n\n # If item is a weapon, shield or armor, accept the donation\n if isinstance(item, items.Weapon) or isinstance(item, items.Shield) or isinstance(item, items.Armor):\n if item in self.hero.inventory:\n self.donations.append(item)\n self.hero.drop(item)\n self.sort_donations()\n prompt(\"\\\"Thank you for your donation.\\\"\")\n else:\n prompt(\"You don't have one!\")\n\n # If item is a real item but is not in the above classes, do not accept.\n elif item != False:\n prompt(\"That type of item is not needed.\")", "def test_deal_sufficient_cards(self):\n cards = self.deck._deal(10)\n self.assertEqual(len(cards), 10)\n self.assertEqual(self.deck.count(), 42)", "def purchase_places(self):\n places = randint(1, 50)\n self.client.post(f\"{host}/purchasePlaces\",\n {\"competition\": places, \"club\": CLUB, \"places\": COMPETITION},\n )", "def _validateSale(self, player: Player, company: PublicCompany, amount: int, kwargs: MutableGameState):\n my_purchases = kwargs.purchases[kwargs.stock_round_count].get(player, [])\n\n my_stock = player.hasStock(company)\n potential_owners = company.potentialPresidents()\n\n validations = [\n err(company not in my_purchases,\n \"You can't sell something you already bought: {} {}\",\n company.id, company.short_name),\n\n err(\n my_stock >= amount,\n \"You must have as much stock than you are trying to sell {}\",\n amount\n ),\n\n err(\n company.availableStock(StockPurchaseSource.BANK) + amount <= 60,\n \"You can't sell that much ({}); the bank can only have 50 shares max.\",\n amount\n ),\n\n err(\n len(company.potentialPresidents() - {player}) > 0 or my_stock - amount >= 20,\n \"There are no other potential presidents, so you can't sell your shares. {} / {} (original stock: {})\",\n \",\".join([p.id for p in company.potentialPresidents()]),\n company.name,\n str(company.owners.get(player))\n\n ),\n\n err(amount % STOCK_CERTIFICATE == 0,\n \"You can only sell in units of 10 stocks ({})\".format(amount),\n ),\n\n err(kwargs.stock_round_count > 1,\n \"You can only sell after the first stock round.\")\n ]\n\n return self.validate(validations)", "async def _vis_buy(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n number, item = ch.parse_number_and_name(args)\n if item:\n await ctx.send(vis_helpers.shop_buy(ctx.user_object, item, number))", "def take_turn(self):\n if self.fired:\n return None\n\n self.tick_needs()\n # TODO: Currently dropping Trash, stuff that doesn't satisfy, where ever\n # May want to look for Trash Can at some point\n # Dropping first Trash item found when inventory full\n if self.inventory_full():\n trash = filter(lambda x: any(s not in self.needs for s in x.satisfies), self.inventory)\n for t in trash:\n print(f\"{self.name} dropped {t.name}\")\n self.drop_item(t)\n break\n\n # If not preoccupied, check needs and do stuff\n if not self.occupied:\n self.check_needs()\n self.move_to_target()", "def check_prerequisites(self, prequisites):\n for prereq in prequisites:\n # check for origin requirements\n if prereq.origin: \n if prereq.origin not in self.origins:\n print(f\"ORIGIN WRONG\")\n return False\n # check for additional header requirements\n if prereq.additional_header:\n if (not prereq.additional_header.open_flag and \n prereq.additional_header not in self.headers.all()):\n print(f\"WE DONT HAVE THE RIGHT HEADER SELECTED\")\n return False \n # check for header/skill requirements\n # did the user purchase the required header, or is the header open?\n if prereq.header:\n 
if (not prereq.header.open_flag and \n prereq.header not in self.headers.all()):\n print(f\"WE DONT HAVE THE RIGHT HEADER SELECTED\")\n return False \n # check for the number of different skills in the header.\n purchased_skills = HeaderSkill.objects.filter(\n header=prereq.header,\n skill__id__in=self.skills.values_list('skill__skill_id', flat=True)\n )\n if prereq.number_of_different_skills > purchased_skills.count(): \n print(f\"NUMBER OF SKILLS WRONG:{prereq.number_of_different_skills}:{purchased_skills.count()}\")\n return False\n # figure out the total skill points\n total = 0\n for skill in purchased_skills:\n total += skill.header.cost * skill.characterskills_set.get(character=self).count\n # check for skill requirements\n if prereq.skill:\n try:\n result = self.skills.get(skill__skill=prereq.skill)\n return result.count >= prereq.number_of_purchases\n except CharacterSkills.DoesNotExist:\n return False\n # if we made it this far, we can assume all prerequisites\n # have been met.\n return True", "def sufficient_resources(menu, drink, resources):\r\n menu['espresso']['ingredients']['milk'] = 0 # because espresso doesn't need any milk\r\n if resources['Water'] < menu[drink]['ingredients']['water']:\r\n print(\"Sorry there isn't enough water to make the drink.\")\r\n return False\r\n elif resources['Milk'] < menu[drink]['ingredients']['milk']:\r\n print(\"Sorry there isn't enough milk to make the drink.\")\r\n return False\r\n elif resources['Coffee'] < menu[drink]['ingredients']['coffee']:\r\n print(\"Sorry there isn't enough coffee to make the drink.\")\r\n return False\r\n else:\r\n return True", "def check_up(order_ingredients):\n for item in order_ingredients:\n if order_ingredients[item] >= resources[item]:\n print(f\"Sorry there in not {item}\")\n return False\n return True", "def hook_buy_this_card(self, game, player):\n totrash = [c for c in player.piles[Piles.PLAYED] if c.isTreasure()]\n for c in totrash:\n player.output(f\"Mint trashing {c.name}\")\n player.trash_card(c)", "def trader(backpack):\n loot = [[\"gold coin\", \"other\", 1]]\n loot2 = [[\"corn\", \"food\", 1]]\n print(\"\\nTrader says: \")\n if \"corn\" in backpack:\n x = input(\"-Hey! So you want sell some corn mate?\\n(write yes or no): \")\n x = x.lower()\n if x == \"yes\":\n try:\n remove_corn = int(input(\"-How much u wanna sell?: \"))\n if remove_corn > backpack[\"corn\"][0]:\n print(\"-You dont have that much corn in ur backpack \")\n enter()\n else:\n print(\"-Thanks for corn :) \")\n inve.remove_item(backpack, loot2, remove_corn)\n inve.add_to_inventory(backpack, loot, remove_corn)\n enter()\n except ValueError:\n print(\"(U need to write a number): \")\n enter()\n elif x == \"no\":\n print(\"-Come to me when u wanna sell corn \")\n enter()\n else:\n print(\"(Your answer need to be yes or no) \")\n enter()\n else:\n print(\"-You dont have any corn, come to me when u get some \")\n enter()\n return backpack", "def purchase(self, item_type):", "def is_resource_sufficient(self, drink):\n can_make = True\n for item in drink.ingredients:\n if drink.ingredients[item] > self.resources[item]:\n print(f\"Sorry there is not enough {item}.\")\n can_make = False\n return can_make" ]
[ "0.5479375", "0.53681403", "0.5336852", "0.5133763", "0.51209956", "0.50936574", "0.5068974", "0.5063557", "0.4996775", "0.4974593", "0.49507624", "0.494863", "0.4935163", "0.49151012", "0.49076858", "0.4898226", "0.4866557", "0.48556313", "0.48369843", "0.48305112", "0.48296085", "0.47918698", "0.4768346", "0.47589257", "0.47412682", "0.47340015", "0.47250265", "0.47238478", "0.47207466", "0.47180384" ]
0.70759
0
Initialize with a QPrinter object and a list of pages. pageList may be a list of two-tuples (num, page). Otherwise, the pages are numbered from 1 in the progress message. The pages are copied.
def __init__(self, printer, pageList, parent=None): super().__init__(parent) self.printer = printer self.setPageList(pageList)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setPageList(self, pageList):\n self.pageList = []\n for n, page in enumerate(pageList, 1):\n if isinstance(page, tuple):\n pageNum, page = page\n else:\n pageNum = n\n page = page.copy()\n # set zoom to 1.0 so computations based on geometry() are\n # accurate enough\n page.updateSize(page.dpi, page.dpi, 1.0)\n self.pageList.append((pageNum, page))", "def work(self):\n p = self.printer\n p.setFullPage(True)\n painter = QPainter(p)\n for n, (num, page) in enumerate(self.pageList):\n if self.isInterruptionRequested():\n self.aborted = True\n return p.abort()\n self.progress.emit(num, n+1, len(self.pageList))\n if n:\n p.newPage()\n painter.save()\n # center on the page and use scale 100% (TEMP)\n r = p.pageRect()\n m = QTransform()\n m.translate(r.center().x(), r.center().y())\n m.scale(p.logicalDpiX() / page.dpi, p.logicalDpiY() / page.dpi)\n m.rotate(page.rotation * 90)\n m.scale(page.scaleX, page.scaleY)\n m.translate(page.pageWidth / -2, page.pageHeight / -2)\n painter.setTransform(m, True)\n page.print(painter)\n painter.restore()\n return painter.end()", "def __init__(\n self,\n page: int = 1,\n count: int = 100\n ):\n\n self.__page = page\n self.__count = count", "def __init__(self, job, parent=None):\n super().__init__(parent)\n self._job = job\n job.progress.connect(self.showProgress)\n job.finished.connect(self.jobFinished)\n self.canceled.connect(job.requestInterruption)\n self.setMinimumDuration(0)\n self.setRange(0, len(job.pageList))\n self.setLabelText(\"Preparing to print...\")", "def __init__(self, page_index, page_size, total_items, data):\n self.page_index = page_index\n self.page_size = page_size\n self.total_items = total_items\n self.data = data", "def setPageSequence(self, pageSequenceList):\r\n\r\n for index in range(self.pageCount() - 1, -1, -1):\r\n page = self.page(index)\r\n if page:\r\n self.removePage(page)\r\n\r\n count = 0\r\n for pageTitle in pageSequenceList:\r\n self.insertPage(self.pageDictionary[pageTitle],\r\n pageTitle,\r\n count)\r\n count = count + 1\r\n self.showPage(self.page(0))", "def set_new_page(self):\n self.num += 1\n c = self.canvas\n c.showPage()\n self.decorate()\n self.x = self.marginsides\n self.lastx = self.marginsides\n self.y = self.height - self.margintop\n #self.print_text([\"Page %s\" % unicode(self.num)], fontsize=8,\n # style=\"right\")\n self.put_page_num()\n #self.x = self.marginsides\n #self.lastx = self.x\n #self.y = self.y - 32\n self.pagebegin = 1", "def __init__(self, printer, output):\n\t\timport revitron\n\n\t\tif not printer or not output:\n\t\t\trevitron.Log().warning('PDF exporter is not configured!')\n\t\t\tsys.exit()\n\n\t\tself.printer = printer\n\t\tself.output = output\n\t\tself.manager = revitron.DOC.PrintManager\n\t\tself.sizes = dict()\n\n\t\tif self.manager.PrinterName.lower() != self.printer.lower():\n\t\t\tprint('Setting current printer to: ' + self.printer)\n\t\t\tprint('Please submit your sheets to be exported again ...')\n\t\t\tself.manager.SelectNewPrintDriver(self.printer)\n\t\t\tself.manager.Apply()\n\t\t\tsys.exit()\n\n\t\tself.manager.PrintRange = revitron.DB.PrintRange.Select\n\t\tself.manager.PrintToFile = True\n\t\tself.manager.CombinedFile = False\n\t\tself.manager.Apply()\n\n\t\tfor size in self.manager.PaperSizes:\n\t\t\tself.sizes[size.Name] = size", "def __init__(self, display_properties=None, document_properties=None, pages_count=None, default_page_config=None):\n\n self._display_properties = None\n self._document_properties = None\n self._pages_count = None\n self._default_page_config 
= None\n\n if display_properties is not None:\n self.display_properties = display_properties\n if document_properties is not None:\n self.document_properties = document_properties\n self.pages_count = pages_count\n if default_page_config is not None:\n self.default_page_config = default_page_config", "def create(dlg):\n page = PrinterPage()\n return page", "def __init__(self,parent,pageNames=[],**kw):\n Frame.__init__(self, parent, kw)\n self.grid_location(0,0)\n self.columnconfigure(0,weight=1)\n self.rowconfigure(1,weight=1)\n self.tabBar=Frame(self)\n self.tabBar.grid(row=0,column=0,sticky=EW)\n self.activePage=StringVar(self)\n self.defaultPage=''\n self.pages={}\n for name in pageNames:\n self.AddPage(name)", "def showProgress(self, page, num, total):\n self.setValue(num)\n self.setLabelText(\"Printing page {page} ({num} of {total})...\".format(\n page=page, num=num, total=total))", "def init_django_paginator(self, pages):\n self._npages = pages.num_pages\n self._data = pages", "def __init__(self, number, title, paragraphs):\n self.number = number\n self.title = title\n self.paragraphs = []\n for paragraph_lines in paragraphs:\n new_pragraph = Paragraph.Paragraph(paragraph_lines)\n self.paragraphs.append(new_pragraph)", "def __init__(self, *args):\n this = _ida_hexrays.new_vc_printer_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(\n self,\n caller,\n inp,\n always_page=False,\n session=None,\n justify=False,\n justify_kwargs=None,\n exit_on_lastpage=False,\n exit_cmd=None,\n page_formatter=str,\n **kwargs,\n ):\n self._caller = caller\n self._always_page = always_page\n\n if not session:\n # if not supplied, use the first session to\n # determine screen size\n sessions = caller.sessions.get()\n if not sessions:\n return\n session = sessions[0]\n self._session = session\n\n self._justify = justify\n self._justify_kwargs = justify_kwargs\n self.exit_on_lastpage = exit_on_lastpage\n self.exit_cmd = exit_cmd\n self._exit_msg = _(\"|xExited pager.|n\")\n self._kwargs = kwargs\n\n self._data = None\n\n self._pages = []\n self._npos = 0\n\n self._npages = 1\n self._paginator = self.paginator_index\n self._page_formatter = str\n\n # set up individual pages for different sessions\n height = max(4, session.protocol_flags.get(\"SCREENHEIGHT\", {0: _SCREEN_HEIGHT})[0] - 4)\n self.width = session.protocol_flags.get(\"SCREENWIDTH\", {0: _SCREEN_WIDTH})[0]\n # always limit number of chars to 10 000 per page\n self.height = min(10000 // max(1, self.width), height)\n\n # does initial parsing of input\n self.init_pages(inp)\n\n # kick things into gear\n self.start()", "def init_pages(self, inp):\n if inherits_from(inp, \"evennia.utils.evtable.EvTable\"):\n # an EvTable\n self.init_evtable(inp)\n self._paginator = self.paginator_index\n elif isinstance(inp, QuerySet):\n # a queryset\n self.init_queryset(inp)\n self._paginator = self.paginator_slice\n elif isinstance(inp, Paginator):\n self.init_django_paginator(inp)\n self._paginator = self.paginator_django\n elif not isinstance(inp, str):\n # anything else not a str\n self.init_iterable(inp)\n self._paginator = self.paginator_slice\n elif \"\\f\" in inp:\n # string with \\f line-break markers in it\n self.init_f_str(inp)\n self._paginator = self.paginator_index\n else:\n # a string\n self.init_str(inp)\n self._paginator = self.paginator_index", "def __init__(self, ctx, data: dict, title, page_num: int = 1, timeout = 60, size = 5):\n self.ctx = ctx\n self.size = size\n self.title = title\n self.timeout = timeout\n 
self.msg = None\n self.controls = {\"⬅\": self.prev_page, \"❌\": self.close_menu, \"➡\": self.next_page}\n\n self.data = list(txt_frmt.d_chunk(data, size))\n self.page_num = utils.clamp(page_num, len(self.pages), 1)", "def write_pages(page_range, pdf_read_object, pdf_write_object):\n for page_num in page_range:\n page = pdf_read_object.getPage(page_num)\n pdf_write_object.addPage(page)", "def page(self, number):\n bottom = (number - 1) * self.per_page\n if number > 1:\n bottom += 1\n top = bottom + self.per_page\n if top + self.orphans >= self.count:\n top = self.count\n elif number == 1:\n top += 1\n logger.debug(\"Bottom: %s; Top: %s\", bottom, top)\n return Page(\n self.name,\n self.url,\n self.object_list[bottom:top],\n number,\n self,\n self.settings,\n )", "def __init__(self, urls, pages=None, texts=None):\n assert isinstance(urls, list) and len(urls) > 1, (\n \"Not enough links to proceed!\")\n self.urls = urls", "def __init__(self, collector, callback=None, *args, **kw):\n if callable(callback):\n self.callback = callback\n self.callbackArgs = args\n self.callbackKeyword = kw\n else:\n self.callback = None\n self._stillPaging = 1\n self.collector = collector\n collector.broker.registerPageProducer(self)", "def __init__(self,\n first: 'PortsPaginatedCollectionFirst',\n limit: int,\n total_count: int,\n *,\n next: 'PortsPaginatedCollectionNext' = None,\n ports: List['Port'] = None) -> None:\n self.first = first\n self.limit = limit\n self.next = next\n self.total_count = total_count\n self.ports = ports", "def drawPages(self, pageSelection=None):\n doc = self.parent\n\n w, h, _ = doc.getMaxPageSizes(pageSelection)\n w2 = 2*w # Make spread width\n for pn, pages in doc.getSortedPages():\n #if pageSelection is not None and not page.y in pageSelection:\n # continue\n # Create a new DrawBot viewport page to draw template + page, if not already done.\n # In case the document is oversized, then make all pages the size of the document, so the\n # pages can draw their crop-marks. 
Otherwise make DrawBot pages of the size of each page.\n # Size depends on the size of the larges pages + optional decument padding.\n page = pages[0] # TODO: Make it work if there as multiple pages on the same page number.\n pw, ph = w, h # Copy from main (w, h), since they may be altered.\n if self.pl > self.MIN_PADDING and self.pt > self.MIN_PADDING and self.pb > self.MIN_PADDING and self.pr > self.MIN_PADDING:\n pw += self.pl + self.pr\n ph += self.pt + self.pb\n if self.originTop:\n origin = self.pl, self.pt, 0\n else:\n origin = self.pl, self.pb, 0\n else:\n pw = page.w # No padding defined, follow the size of the page.\n ph = page.h\n origin = (0, 0, 0)\n pw2 = 2*pw\n\n if (pn % 2 == 0): # Is even?\n newPage(pw2, ph) # Make page in DrawBot of self size, actual page may be smaller if showing cropmarks.\n # View may have defined a background\n if self.style.get('fill') is not None:\n setFillColor(self.style['fill'])\n rect(0, 0, pw2, ph)\n else: # Odd, shift origin to right\n origin = origin[0]+pw, origin[1], origin[2]\n\n if self.drawBefore is not None: # Call if defined\n self.drawBefore(page, origin, self)\n\n self.drawPageFrame(page, origin)\n\n # Use the (docW, docH) as offset, in case cropmarks need to be displayed.\n page.draw(origin, self)\n\n if self.drawAfter is not None: # Call if defined\n self.drawAfter(page, origin, self)\n\n # Self.infoElements now may have collected elements needed info to be drawn, after all drawing is done.\n # So the info boxes don't get covered by regular page content.\n for e in self.elementsNeedingInfo.values():\n self._drawElementsNeedingInfo()", "def create_page_ZU_P99_JA(self, template, page_number, lst_position, lst_data):\n # ログ\n log.debug(self, args=None, kwargs=constant.DEBUG_MODE)\n try:\n packet = io.BytesIO()\n # Create a new PDF with Report lab\n can_page = canvas.Canvas(packet, pagesize=letter)\n # Font definition\n pdfmetrics.registerFont(TTFont(constant.FONT_NAME, constant.FONT_PATH))\n can_page.setFont(constant.FONT_NAME, constant.FONT_SIZE_DEFAULT)\n # add the \"watermark\" (which is the new pdf) on the existing page\n page = template.getPage(page_number - 1)\n # write Label in debug mode\n self.write_label(can_page, page)\n if not lst_position or not lst_data:\n pass\n else:\n # create list mix position and data\n lst_mix = list(zip(lst_position, lst_data))\n # add information to the specified position\n for values in lst_mix:\n if values[0][\"TYPE\"] == \"DATA\":\n self.write_data_pdf(can_page, values[1], values[0].get(\"POSITION\")[0][0],\n values[0].get(\"POSITION\")[0][1],\n values[0].get(\"LEN_TEXT\", constant.LEN_TEXT_DEFAULT),\n values[0].get(\"CHAR_SPACE\", constant.CHAR_SPACE_DEFAULT),\n values[0].get(\"FONT_SIZE\", constant.FONT_SIZE_DEFAULT),\n values[0].get(\"ALIGNMENT\", constant.ALIGNMENT_DEFAULT),\n values[0].get(\"COLOR\", constant.COLOR_DEFAULT),\n values[0].get(\"BORDER\", constant.BORDER_DEFAULT),\n values[0].get(\"BORDER_COLOR\", constant.BORDER_COLOR_DEFAULT),\n values[0].get(\"BORDER_THICKNESS\", constant.BORDER_THICKNESS_DEFAULT),\n values[0].get(\"BORDER_STYLE\", constant.BORDER_STYLE_DEFAULT))\n elif values[0][\"TYPE\"] == \"CIRCLE\":\n circles_dict = dict(zip(values[0].get(\"VALUES\"), values[0].get(\"POSITION\")))\n self.draw_circles(can_page, values[1], circles_dict, values[0].get(\"OPTION\"))\n elif values[0][\"TYPE\"] == \"LINE\":\n xy_end = [values[0].get(\"POSITION\")[0][0] + self.get_width_text(values[1], constant.FONT_PATH),\n values[0].get(\"POSITION\")[0][1]]\n self.draw_line(can_page, 
values[0].get(\"POSITION\")[0], xy_end, values[0].get(\"OPTION\"))\n elif values[0][\"TYPE\"] == \"BOX\":\n self.draw_rectangle(can_page, values[0].get(\"POSITION\")[0][0],\n values[0].get(\"POSITION\")[0][1], values[0].get(\"DIMENSION\")[0][0],\n values[0].get(\"DIMENSION\")[0][1],\n values[0].get(\"LINE_WIDTH\", constant.LINE_WIDTH_DEFAULT),\n values[0].get(\"STROKE_COLOR\", constant.STROKE_COLOR_DEFAULT),\n values[0].get(\"FILL_COLOR\"),\n values[0].get(\"BORDER_DASH\", constant.BORDER_DASH_DEFAULT),\n values[0].get(\"MODE_STROKE\", constant.MODE_STROKE_DEFAULT),\n values[0].get(\"MODE_FILL\", constant.MODE_FILL_DEFAULT),\n values[0].get(\"TEXT_COLOR\", constant.TEXT_COLOR_DEFAULT))\n\n can_page.save()\n # move to the beginning of the StringIO buffer\n packet.seek(0)\n # Initialize PdfFileReader\n new_pdf = PdfFileReader(packet)\n # merge information to page\n # rotate angle is 0 degree\n if page['/Rotate'] == 0:\n page.mergePage(new_pdf.getPage(0))\n # rotate angle is 90 degree\n elif page['/Rotate'] == 90:\n page.mergeRotatedScaledTranslatedPage(new_pdf.getPage(0), page['/Rotate'], 1, 595, 0, expand=False)\n # rotate angle is 180 degree\n elif page['/Rotate'] == 180:\n page.mergeRotatedScaledTranslatedPage(new_pdf.getPage(0), page['/Rotate'], 1, 845, 595, expand=False)\n # rotate angle is 270 degree\n else:\n page.mergeRotatedScaledTranslatedPage(new_pdf.getPage(0), page['/Rotate'], 1, 0, 595, expand=False)\n # start rotate pdf file\n # get layout from .json file\n layout = self.json_data[\"CONFIG\"].get(\"layout\")\n # file direction is portrait and file direction on default mode\n if layout == \"portrait\" or layout is None:\n if page.mediaBox.upperRight[0] < page.mediaBox.upperRight[1]:\n if page['/Rotate'] == 90:\n page.rotateClockwise(270)\n elif page.mediaBox.upperRight[0] > page.mediaBox.upperRight[1]:\n if page['/Rotate'] != 270:\n page.rotateClockwise(270)\n # file direction is landscape\n elif layout == \"landscape\":\n if page.mediaBox.upperRight[0] > page.mediaBox.upperRight[1]:\n if page['/Rotate'] == 270:\n page.rotateClockwise(270)\n elif page.mediaBox.upperRight[0] < page.mediaBox.upperRight[1]:\n if page['/Rotate'] != 90:\n page.rotateClockwise(270)\n # file direction is not change\n elif layout == \"free\":\n pass\n # end rotate file pdf\n log.info(self)\n return page\n except:\n # 例外処理\n log.error(traceback.format_exc())", "def _generatePagesSelector(self, dParams, cItems, cItemsPerPage, iPage):\n\n if WuiDispatcherBase.ksParamPageNo in dParams:\n del dParams[WuiDispatcherBase.ksParamPageNo]\n\n sHrefPtr = '<a href=\"?%s&%s=' % (webutils.encodeUrlParams(dParams).replace('%', '%%'),\n WuiDispatcherBase.ksParamPageNo)\n sHrefPtr += '%d\">%s</a>'\n\n cNumOfPages = (cItems + cItemsPerPage - 1) / cItemsPerPage;\n cPagesToDisplay = 10\n cPagesRangeStart = iPage - cPagesToDisplay / 2 \\\n if not iPage - cPagesToDisplay / 2 < 0 else 0\n cPagesRangeEnd = cPagesRangeStart + cPagesToDisplay \\\n if not cPagesRangeStart + cPagesToDisplay > cNumOfPages else cNumOfPages\n # Adjust pages range\n if cNumOfPages < cPagesToDisplay:\n cPagesRangeStart = 0\n cPagesRangeEnd = cNumOfPages\n\n # 1 2 3 4...\n sHtmlPager = '&nbsp;\\n'.join(sHrefPtr % (x, str(x + 1)) if x != iPage else str(x + 1)\n for x in range(cPagesRangeStart, cPagesRangeEnd))\n if cPagesRangeStart > 0:\n sHtmlPager = '%s&nbsp; ... &nbsp;\\n' % (sHrefPtr % (0, str(1))) + sHtmlPager\n if cPagesRangeEnd < cNumOfPages:\n sHtmlPager += ' ... 
%s\\n' % (sHrefPtr % (cNumOfPages, str(cNumOfPages + 1)))\n\n # Prev/Next (using << >> because &laquo; and &raquo are too tiny).\n if iPage > 0:\n dParams[WuiDispatcherBase.ksParamPageNo] = iPage - 1\n sHtmlPager = ('<a title=\"Previous page\" href=\"?%s\">&lt;&lt;</a>&nbsp;&nbsp;\\n'\n % (webutils.encodeUrlParams(dParams), )) \\\n + sHtmlPager;\n else:\n sHtmlPager = '&lt;&lt;&nbsp;&nbsp;\\n' + sHtmlPager\n\n if iPage + 1 < cNumOfPages:\n dParams[WuiDispatcherBase.ksParamPageNo] = iPage + 1\n sHtmlPager += '\\n&nbsp; <a title=\"Next page\" href=\"?%s\">&gt;&gt;</a>\\n' % (webutils.encodeUrlParams(dParams),)\n else:\n sHtmlPager += '\\n&nbsp; &gt;&gt;\\n'\n\n return sHtmlPager", "def pages(self):\n # The page list comes in three sections. Given radius=3:\n # 0 1 2 ... n-2 n-1 n n+1 n+2 ... m-2 m-1 m\n # Alas, some caveats:\n # - These sections might overlap.\n # - The current page might not be integral.\n delta = self.radius - 1 # since the below two are off by one\n before_current = int(math.ceil(self.current_page - 1))\n after_current = int(math.floor(self.current_page + 1))\n pages = []\n\n # First through current\n if before_current - delta <= 1:\n pages.extend(range(0, before_current + 1))\n else:\n pages.append(None)\n pages.extend(range(\n before_current - delta, before_current + 1))\n\n # Current\n pages.append(self.current_page)\n\n # Current through end\n if self.last_page is None:\n # Don't know the last page. Show one more and ..., if appropriate\n if self.next_item and \\\n after_current * self.page_size <= self.maximum_skip:\n\n pages.append(after_current)\n pages.append(None)\n return pages\n\n if after_current + delta >= self.last_page - 1:\n pages.extend(range(\n after_current, self.last_page + 1))\n else:\n pages.extend(range(after_current, after_current + delta + 1))\n pages.append(None)\n\n return pages", "def page(self, number):\n self._page_number = number\n return super().page(number)", "def addPageNumber(canvas, doc):\n page_num = canvas.getPageNumber()\n text = \"Pag %s\" % page_num\n canvas.drawRightString(200*mm, 10*mm, text)", "def __init__(self, indent=1, width=80, depth=None, stream=None):\n\n PrettyPrinter.__init__(self,indent=1, width=80, depth=None, stream=None)" ]
[ "0.6433763", "0.6421175", "0.5657116", "0.5523885", "0.5351193", "0.5295668", "0.52508706", "0.52336335", "0.5163632", "0.5067127", "0.5064911", "0.50349927", "0.500849", "0.49356413", "0.4856026", "0.4848449", "0.48073077", "0.48051938", "0.48050612", "0.48048058", "0.47916797", "0.4774779", "0.47623438", "0.47571978", "0.47518763", "0.47432634", "0.46996945", "0.46910173", "0.4677464", "0.46609122" ]
0.7309939
0
Set the pagelist to print. pageList may be a list of two-tuples (num, page). Otherwise, the pages are numbered from 1 in the progress message. The pages are copied.
def setPageList(self, pageList):
    self.pageList = []
    for n, page in enumerate(pageList, 1):
        if isinstance(page, tuple):
            pageNum, page = page
        else:
            pageNum = n
        page = page.copy()
        # set zoom to 1.0 so computations based on geometry() are accurate enough
        page.updateSize(page.dpi, page.dpi, 1.0)
        self.pageList.append((pageNum, page))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setPageSequence(self, pageSequenceList):\r\n\r\n for index in range(self.pageCount() - 1, -1, -1):\r\n page = self.page(index)\r\n if page:\r\n self.removePage(page)\r\n\r\n count = 0\r\n for pageTitle in pageSequenceList:\r\n self.insertPage(self.pageDictionary[pageTitle],\r\n pageTitle,\r\n count)\r\n count = count + 1\r\n self.showPage(self.page(0))", "def __init__(self, printer, pageList, parent=None):\n super().__init__(parent)\n self.printer = printer\n self.setPageList(pageList)", "def work(self):\n p = self.printer\n p.setFullPage(True)\n painter = QPainter(p)\n for n, (num, page) in enumerate(self.pageList):\n if self.isInterruptionRequested():\n self.aborted = True\n return p.abort()\n self.progress.emit(num, n+1, len(self.pageList))\n if n:\n p.newPage()\n painter.save()\n # center on the page and use scale 100% (TEMP)\n r = p.pageRect()\n m = QTransform()\n m.translate(r.center().x(), r.center().y())\n m.scale(p.logicalDpiX() / page.dpi, p.logicalDpiY() / page.dpi)\n m.rotate(page.rotation * 90)\n m.scale(page.scaleX, page.scaleY)\n m.translate(page.pageWidth / -2, page.pageHeight / -2)\n painter.setTransform(m, True)\n page.print(painter)\n painter.restore()\n return painter.end()", "def set_new_page(self):\n self.num += 1\n c = self.canvas\n c.showPage()\n self.decorate()\n self.x = self.marginsides\n self.lastx = self.marginsides\n self.y = self.height - self.margintop\n #self.print_text([\"Page %s\" % unicode(self.num)], fontsize=8,\n # style=\"right\")\n self.put_page_num()\n #self.x = self.marginsides\n #self.lastx = self.x\n #self.y = self.y - 32\n self.pagebegin = 1", "def setPageOrder(self,value):\n self.PDFreactorConfiguration.in1[\"pageOrder\"] = value", "def showProgress(self, page, num, total):\n self.setValue(num)\n self.setLabelText(\"Printing page {page} ({num} of {total})...\".format(\n page=page, num=num, total=total))", "def progress(i, my_list, message=\"\"):\n\tmy_progress = (i / len(my_list)) * 100\n\tmy_progress = str(round(my_progress, 1)) + \"% \" + message\n\tsys.stdout.write('\\r')\n\tsys.stdout.write(my_progress)\n\tsys.stdout.flush()", "def number_of_pages(self, number_of_pages):\n self._number_of_pages = number_of_pages", "def addPageNumber(canvas, doc):\n page_num = canvas.getPageNumber()\n text = \"Pag %s\" % page_num\n canvas.drawRightString(200*mm, 10*mm, text)", "def setPageSize(x,y):\n dislin.page(x,y)", "def page(self, number):\n self._page_number = number\n return super().page(number)", "def set_page(self, page):\n self.page = int(page)\n\n if self.page <= 0:\n # set first page, which depends on a maximum set\n if self.get_max_per_page() > 0:\n self.page = 1\n else:\n self.page = 0", "def page_number_label(self, page_number_label):\n\n self._page_number_label = page_number_label", "def draw_page_number(self, page_count, position_x=285, position_y=5):\n if page_count > 0:\n self.setFillGray(0.2)\n self.setFont(\"Helvetica\", 8)\n self.drawRightString(\n position_x * mm, position_y * mm, \"Page %d/%d\" %\n (self._pageNumber, page_count))", "def print_num_list(self, l):\n self.print_newline()\n for num, item in enumerate(l):\n self._write(\" %i. 
%s\\n\" % (num + 1, item))\n num += 1\n self.print_newline()", "def write_pages(page_range, pdf_read_object, pdf_write_object):\n for page_num in page_range:\n page = pdf_read_object.getPage(page_num)\n pdf_write_object.addPage(page)", "def scrollPage(self, pageCount):\n self['value'] = self.guiItem.getValue() + self.guiItem.getPageSize() * pageCount", "def on_mscreenNotebook_switch_page(self, notebook, page, page_num):\n\n pool_t = POOL_TYPE_L[page_num]\n self._set_parameterGrid_values_to_pool(pool_t)", "def setCurrentListPosition(*args):", "def setCurrentListPosition(*args):", "def addPageNum(self, canvas, doc):\n canvas.saveState()\n canvas.setFont('Times-Roman', 10)\n page_num_txt = \"{}\".format(doc.page)\n canvas.drawCentredString(\n 0.75 * inch,\n 0.75 * inch,\n page_num_txt,\n )\n canvas.restoreState()", "def set_page_status(self, start, page_num, status):\n assert status in ['0', '1'], 'invalid status[%s] for page status '\\\n 'in allocator[%s]' % (status, str(self))\n start += self.s_allocator_header\n end = start + page_num\n assert start >= 0 and end <= self._header_size, 'invalid end[%d] of pages '\\\n 'in allocator[%s]' % (end, str(self))\n memcopy(self._base[start:end], str(status * page_num))", "def merge_files_in_order(pdf_list, list_only, output_file):\n output_file = output_file + \".pdf\"\n if not list_only:\n output = PdfFileWriter()\n outputStream = file(output_file, \"wb\")\n total_pages = 0 \n for pdf_in in pdf_list:\n try:\n pdf = PdfFileReader(file(pdf_in, \"rb\"))\n num_pages = pdf.getNumPages()\n except IOError:\n print \"skipping \", pdf_in\n continue\n if list_only:\n print pdf_in, ':', num_pages\n else:\n for i in range(num_pages):\n output.addPage(pdf.getPage(i))\n output.write(outputStream)\n total_pages += num_pages\n would_be = \"would be\"\n if not list_only:\n outputStream.close()\n would_be = \"\"\n print total_pages, \"pages\", would_be, \"written to\", output_file", "def setListDoc(self, list):\n if list is None: list__o = None\n else: list__o = list._o\n libxml2mod.xmlSetListDoc(list__o, self._o)", "def draw_page_number(self, page_count):\n self.setFont(\"Helvetica\", 9)\n label_str = \"Page {} of {}\".format(self._pageNumber, page_count)\n self.drawRightString(8.25 * INCH, 0.50 * INCH, label_str)", "def setup_list(self) -> None:\n style = self.current_line.next_line.line_parts[0].style.copy()\n\n if self.list_style is None:\n self.list_style = {}\n elif isinstance(self.list_style, str):\n self.list_style = process_style(self.list_style, self.pdf)\n\n if not isinstance(self.list_style, dict):\n raise TypeError(\n 'list_style must be a str or a dict. Value: {}'\n .format(self.list_style)\n )\n\n style.update(self.list_style)\n line_part = PDFTextLinePart(style, self.fonts)\n\n self.current_line_used_fonts.add(\n (line_part.state.font_family, line_part.state.font_mode)\n )\n\n if self.list_indent is None:\n self.list_indent = line_part.get_word_width(str(self.list_text))\n elif not isinstance(self.list_indent, (float, int)):\n raise TypeError(\n 'list_indent must be int or float. 
Value: {}'\n .format(self.list_indent)\n )\n\n self.list_state = line_part.state\n self.current_line.max_width -= self.list_indent", "def create_page_ZU_P99_JA(self, template, page_number, lst_position, lst_data):\n # ログ\n log.debug(self, args=None, kwargs=constant.DEBUG_MODE)\n try:\n packet = io.BytesIO()\n # Create a new PDF with Report lab\n can_page = canvas.Canvas(packet, pagesize=letter)\n # Font definition\n pdfmetrics.registerFont(TTFont(constant.FONT_NAME, constant.FONT_PATH))\n can_page.setFont(constant.FONT_NAME, constant.FONT_SIZE_DEFAULT)\n # add the \"watermark\" (which is the new pdf) on the existing page\n page = template.getPage(page_number - 1)\n # write Label in debug mode\n self.write_label(can_page, page)\n if not lst_position or not lst_data:\n pass\n else:\n # create list mix position and data\n lst_mix = list(zip(lst_position, lst_data))\n # add information to the specified position\n for values in lst_mix:\n if values[0][\"TYPE\"] == \"DATA\":\n self.write_data_pdf(can_page, values[1], values[0].get(\"POSITION\")[0][0],\n values[0].get(\"POSITION\")[0][1],\n values[0].get(\"LEN_TEXT\", constant.LEN_TEXT_DEFAULT),\n values[0].get(\"CHAR_SPACE\", constant.CHAR_SPACE_DEFAULT),\n values[0].get(\"FONT_SIZE\", constant.FONT_SIZE_DEFAULT),\n values[0].get(\"ALIGNMENT\", constant.ALIGNMENT_DEFAULT),\n values[0].get(\"COLOR\", constant.COLOR_DEFAULT),\n values[0].get(\"BORDER\", constant.BORDER_DEFAULT),\n values[0].get(\"BORDER_COLOR\", constant.BORDER_COLOR_DEFAULT),\n values[0].get(\"BORDER_THICKNESS\", constant.BORDER_THICKNESS_DEFAULT),\n values[0].get(\"BORDER_STYLE\", constant.BORDER_STYLE_DEFAULT))\n elif values[0][\"TYPE\"] == \"CIRCLE\":\n circles_dict = dict(zip(values[0].get(\"VALUES\"), values[0].get(\"POSITION\")))\n self.draw_circles(can_page, values[1], circles_dict, values[0].get(\"OPTION\"))\n elif values[0][\"TYPE\"] == \"LINE\":\n xy_end = [values[0].get(\"POSITION\")[0][0] + self.get_width_text(values[1], constant.FONT_PATH),\n values[0].get(\"POSITION\")[0][1]]\n self.draw_line(can_page, values[0].get(\"POSITION\")[0], xy_end, values[0].get(\"OPTION\"))\n elif values[0][\"TYPE\"] == \"BOX\":\n self.draw_rectangle(can_page, values[0].get(\"POSITION\")[0][0],\n values[0].get(\"POSITION\")[0][1], values[0].get(\"DIMENSION\")[0][0],\n values[0].get(\"DIMENSION\")[0][1],\n values[0].get(\"LINE_WIDTH\", constant.LINE_WIDTH_DEFAULT),\n values[0].get(\"STROKE_COLOR\", constant.STROKE_COLOR_DEFAULT),\n values[0].get(\"FILL_COLOR\"),\n values[0].get(\"BORDER_DASH\", constant.BORDER_DASH_DEFAULT),\n values[0].get(\"MODE_STROKE\", constant.MODE_STROKE_DEFAULT),\n values[0].get(\"MODE_FILL\", constant.MODE_FILL_DEFAULT),\n values[0].get(\"TEXT_COLOR\", constant.TEXT_COLOR_DEFAULT))\n\n can_page.save()\n # move to the beginning of the StringIO buffer\n packet.seek(0)\n # Initialize PdfFileReader\n new_pdf = PdfFileReader(packet)\n # merge information to page\n # rotate angle is 0 degree\n if page['/Rotate'] == 0:\n page.mergePage(new_pdf.getPage(0))\n # rotate angle is 90 degree\n elif page['/Rotate'] == 90:\n page.mergeRotatedScaledTranslatedPage(new_pdf.getPage(0), page['/Rotate'], 1, 595, 0, expand=False)\n # rotate angle is 180 degree\n elif page['/Rotate'] == 180:\n page.mergeRotatedScaledTranslatedPage(new_pdf.getPage(0), page['/Rotate'], 1, 845, 595, expand=False)\n # rotate angle is 270 degree\n else:\n page.mergeRotatedScaledTranslatedPage(new_pdf.getPage(0), page['/Rotate'], 1, 0, 595, expand=False)\n # start rotate pdf file\n # get layout from .json 
file\n layout = self.json_data[\"CONFIG\"].get(\"layout\")\n # file direction is portrait and file direction on default mode\n if layout == \"portrait\" or layout is None:\n if page.mediaBox.upperRight[0] < page.mediaBox.upperRight[1]:\n if page['/Rotate'] == 90:\n page.rotateClockwise(270)\n elif page.mediaBox.upperRight[0] > page.mediaBox.upperRight[1]:\n if page['/Rotate'] != 270:\n page.rotateClockwise(270)\n # file direction is landscape\n elif layout == \"landscape\":\n if page.mediaBox.upperRight[0] > page.mediaBox.upperRight[1]:\n if page['/Rotate'] == 270:\n page.rotateClockwise(270)\n elif page.mediaBox.upperRight[0] < page.mediaBox.upperRight[1]:\n if page['/Rotate'] != 90:\n page.rotateClockwise(270)\n # file direction is not change\n elif layout == \"free\":\n pass\n # end rotate file pdf\n log.info(self)\n return page\n except:\n # 例外処理\n log.error(traceback.format_exc())", "def pages_count(self, pages_count):\n if pages_count is None:\n raise ValueError(\"Invalid value for `pages_count`, must not be `None`\")\n\n self._pages_count = pages_count", "def pages(worklist):\n pagination = SortKeyPagination(size=2)\n facets = Facets(\n self._default_library, None, None, order=Facets.ORDER_TITLE\n )\n pages = []\n while pagination:\n pages.append(worklist.works(\n self._db, facets, pagination, self.search\n ))\n pagination = pagination.next_page\n\n # The last page should always be empty -- that's how we\n # knew we'd reached the end.\n assert [] == pages[-1]\n\n # Return all the other pages for verification.\n return pages[:-1]", "def reorder(self):\n self.npages = len(self)\n self.pageindex = []\n for i, page in enumerate(self):\n page.prev = self[i-1].namext\n if i == self.npages-1: i = -1\n page.next = self[i+1].namext\n page.first = self[0].namext\n page.last = self[-1].namext\n page.document = self\n self.pageindex.append(page)" ]
[ "0.661028", "0.64478004", "0.5916455", "0.56009513", "0.5516022", "0.5461271", "0.54369324", "0.52646035", "0.5221855", "0.51948494", "0.51564723", "0.5154594", "0.503851", "0.5032567", "0.49833974", "0.49527058", "0.4946429", "0.49033383", "0.49002624", "0.49002624", "0.48961437", "0.48935148", "0.4880909", "0.4868306", "0.48676014", "0.48358113", "0.48337466", "0.48137435", "0.47931558", "0.47719586" ]
0.7608234
0
Paint the pages to the printer in the background.
def work(self):
    p = self.printer
    p.setFullPage(True)
    painter = QPainter(p)
    for n, (num, page) in enumerate(self.pageList):
        if self.isInterruptionRequested():
            self.aborted = True
            return p.abort()
        self.progress.emit(num, n+1, len(self.pageList))
        if n:
            p.newPage()
        painter.save()
        # center on the page and use scale 100% (TEMP)
        r = p.pageRect()
        m = QTransform()
        m.translate(r.center().x(), r.center().y())
        m.scale(p.logicalDpiX() / page.dpi, p.logicalDpiY() / page.dpi)
        m.rotate(page.rotation * 90)
        m.scale(page.scaleX, page.scaleY)
        m.translate(page.pageWidth / -2, page.pageHeight / -2)
        painter.setTransform(m, True)
        page.print(painter)
        painter.restore()
    return painter.end()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_page(page, stream):\n bleed = {\n side: page.style[f'bleed_{side}'].value\n for side in ('top', 'right', 'bottom', 'left')}\n marks = page.style['marks']\n stacking_context = StackingContext.from_page(page)\n draw_background(\n stream, stacking_context.box.background, clip_box=False, bleed=bleed,\n marks=marks)\n draw_background(stream, page.canvas_background, clip_box=False)\n draw_border(stream, page)\n draw_stacking_context(stream, stacking_context)", "def drawPages(self, pageSelection=None):\n doc = self.parent\n\n w, h, _ = doc.getMaxPageSizes(pageSelection)\n w2 = 2*w # Make spread width\n for pn, pages in doc.getSortedPages():\n #if pageSelection is not None and not page.y in pageSelection:\n # continue\n # Create a new DrawBot viewport page to draw template + page, if not already done.\n # In case the document is oversized, then make all pages the size of the document, so the\n # pages can draw their crop-marks. Otherwise make DrawBot pages of the size of each page.\n # Size depends on the size of the larges pages + optional decument padding.\n page = pages[0] # TODO: Make it work if there as multiple pages on the same page number.\n pw, ph = w, h # Copy from main (w, h), since they may be altered.\n if self.pl > self.MIN_PADDING and self.pt > self.MIN_PADDING and self.pb > self.MIN_PADDING and self.pr > self.MIN_PADDING:\n pw += self.pl + self.pr\n ph += self.pt + self.pb\n if self.originTop:\n origin = self.pl, self.pt, 0\n else:\n origin = self.pl, self.pb, 0\n else:\n pw = page.w # No padding defined, follow the size of the page.\n ph = page.h\n origin = (0, 0, 0)\n pw2 = 2*pw\n\n if (pn % 2 == 0): # Is even?\n newPage(pw2, ph) # Make page in DrawBot of self size, actual page may be smaller if showing cropmarks.\n # View may have defined a background\n if self.style.get('fill') is not None:\n setFillColor(self.style['fill'])\n rect(0, 0, pw2, ph)\n else: # Odd, shift origin to right\n origin = origin[0]+pw, origin[1], origin[2]\n\n if self.drawBefore is not None: # Call if defined\n self.drawBefore(page, origin, self)\n\n self.drawPageFrame(page, origin)\n\n # Use the (docW, docH) as offset, in case cropmarks need to be displayed.\n page.draw(origin, self)\n\n if self.drawAfter is not None: # Call if defined\n self.drawAfter(page, origin, self)\n\n # Self.infoElements now may have collected elements needed info to be drawn, after all drawing is done.\n # So the info boxes don't get covered by regular page content.\n for e in self.elementsNeedingInfo.values():\n self._drawElementsNeedingInfo()", "def format_page(pdf, cfg, page_mapping):\n\n # pick a standard indent that almost every chunk will fit (except for intros and probably verse 10 and greater)\n STANDARD_LABEL_INDENT_LENGTH = myStringWidth('8) ', cfg.FONT_FACE, cfg.SONGLINE_SIZE)\n\n # REMEMBER: we are in the 1st Quadrant (like Math) ... 
lower left is (0,0)\n y = 0\n\n outline_level = 0\n\n # set clip region\n pdf.saveState() # so we can restore to no clip after this page\n\n if cfg.DEBUG_MARGINS:\n pdf.rect(page_mapping.startx, page_mapping.starty,\n page_mapping.endx-page_mapping.startx,page_mapping.endy-page_mapping.starty)\n\n # make a bounding box to keep from printing out of bounds\n p = pdf.beginPath()\n p.rect(page_mapping.startx, page_mapping.starty,\n page_mapping.endx-page_mapping.startx,page_mapping.endy-page_mapping.starty)\n pdf.clipPath(p, stroke=0)\n\n # draw page items\n for item in page_mapping.page:\n if isinstance(item, Songbook):\n # add to outline\n key = str(hash(('SONGBOOK ' + item.title)))\n pdf.bookmarkPage(key, left=page_mapping.startx, top=page_mapping.starty-y)\n outline_level = 0\n pdf.addOutlineEntry(item.title, key, level=outline_level)\n outline_level = 1\n\n # SONGBOOK TITLE\n if not cfg.HIDE_BOOKTITLE:\n y = print_line(pdf, font_face=cfg.FONT_FACE, font_size=cfg.BOOKTITLE_SIZE, y_offset=y,\n line_space=cfg.BOOKTITLE_SPACE, page_mapping=page_mapping, line=item.title)\n # SONG\n elif isinstance(item, Song):\n # add to outline\n key = str(hash('SONG(%d): %s' % (item.num, item.title)))\n pdf.bookmarkPage(key, left=page_mapping.startx, top=page_mapping.starty-y)\n pdf.addOutlineEntry(item.title, key, level=outline_level)\n #XXX: here we could add stuff to make index entries linkable\n\n # SONG TITLE\n for i, title_line in enumerate(item.title_wrapped):\n if i == 0: # first line\n indent = 0\n else:\n indent = item.num_width\n\n y = print_line(pdf, font_face=cfg.FONT_FACE, font_size=cfg.SONGTITLE_SIZE, y_offset=y,\n x_offset=indent, line_space=cfg.SONGTITLE_SPACE, page_mapping=page_mapping, line=title_line)\n\n # small_text after title\n for sm_line in item.small_text:\n y = print_line(pdf, font_face=cfg.FONT_FACE, font_size=cfg.SMALL_SIZE, y_offset=y,\n line_space=cfg.SMALL_SPACE, page_mapping=page_mapping, line=sm_line)\n\n # introduction if applicable -- not shown when chords are not shown\n if item.introduction and cfg.DISPLAY_CHORDS:\n y = print_line(pdf, font_face=cfg.FONT_FACE, font_size=cfg.SONGCHORD_SIZE, y_offset=y,\n line_space=cfg.SONGCHORD_SPACE, page_mapping=page_mapping, line=item.introduction)\n\n # VERSE OR CHORUS\n elif isinstance(item, Chunk):\n y += cfg.SONGCHUNK_B4\n\n # calulate prefix text for the chunk\n if item.type == 'chorus':\n label = 'Chorus:'\n elif item.type == 'verse':\n label = '%d)' % item.num\n elif item.type == 'bridge':\n label = 'Bridge:'\n elif item.type == 'pre-chorus':\n label = 'Pre-Chorus:'\n elif item.type == 'final chorus':\n label = 'Final Chorus:'\n elif item.type == 'ending':\n label = 'Ending:'\n elif item.type == 'introduction':\n label = 'Introduction:'\n else:\n label = ''\n\n\n if item.type in VARIABLE_INDENT: # these chunks are indented by num of chars in label\n label_length = max(myStringWidth(label+' ', cfg.FONT_FACE, cfg.SONGLINE_SIZE), STANDARD_LABEL_INDENT_LENGTH)\n # type indented no label gets an extra indent\n if item.type == INDENT_NO_LABEL:\n label_length *= 2\n else: # everything else gets a standard indent\n label_length = STANDARD_LABEL_INDENT_LENGTH\n\n # print the chunk lines\n if item.type == 'introduction' and not cfg.DISPLAY_CHORDS: # introduction is not shown when chords are not shown\n pass\n else:\n for count, line in enumerate(item.lines):\n if count == 0: # on the first line in the chunk write the label: chorus, 1), 2), 3) ...\n if cfg.DISPLAY_CHORDS and item.has_chords() and item.type == 'verse': #for verses with 
chords, we move the label down \n new_y = print_line(pdf, font_face=cfg.FONT_FACE, font_size=cfg.SONGLINE_SIZE, y_offset=y+cfg.SONGCHORD_SIZE+cfg.SONGCHORD_SPACE, x_offset=0, line_space=cfg.SONGLINE_SPACE, page_mapping=page_mapping, line=label)\n else: \n new_y = print_line(pdf, font_face=cfg.FONT_FACE, font_size=cfg.SONGLINE_SIZE, y_offset=y, x_offset=0,\n line_space=cfg.SONGLINE_SPACE, page_mapping=page_mapping, line=label)\n if item.type not in VARIABLE_INDENT: # standard indent, with chunk body on next line\n y = new_y # so we update y ... in other cases y not updated, so same line used\n #else: ignore new_y and we print on same line below\n\n\n # shrink font size, or wrap the line if that lets us fit\n # if resize != 0 we are shrinking, else we wrap\n font_size = cfg.SONGLINE_SIZE\n if cfg.RESIZE_PERCENT == 0:\n # font size does not change. \n font_size = font_size \n \n else:\n # reduce font size as much as needed but don't pass x% original\n while (label_length + myStringWidth(line.text, cfg.FONT_FACE, font_size)) > (page_mapping.endx - page_mapping.startx) and font_size > cfg.SONGLINE_SIZE * cfg.RESIZE_PERCENT:\n font_size = font_size * 0.99 # reduce 1%\n #print 'reducing from', cfg.SONGLINE_SIZE, 'to', font_size, '%2.2f%%' % (font_size / cfg.SONGLINE_SIZE)\n \n # we have a font -- lets use it\n #DBG:sav_y = y\n if cfg.DISPLAY_CHORDS and item.has_chords():\n y = print_chords(pdf, cfg, font_size=font_size, y_offset=y, x_offset=label_length, page_mapping=page_mapping, line=line)\n y = print_line(pdf, font_face=cfg.FONT_FACE, font_size=font_size, y_offset=y, x_offset=label_length,\n line_space=cfg.SONGLINE_SPACE, page_mapping=page_mapping, line=line.text)\n #DBG:pdf.setStrokeColor('green')\n #DBG:pdf.rect(page_mapping.startx+label_length, page_mapping.starty-(sav_y),\n #DBG: pdf.stringWidth(line.text, cfg.FONT_FACE, font_size), -line.height)\n #DBG:pdf.setStrokeColor('red')\n #DBG:pdf.rect(page_mapping.startx+label_length, page_mapping.starty-(sav_y),\n #DBG: pdf.stringWidth(line.text, cfg.FONT_FACE, font_size), sav_y-y)\n #DBG:# reset\n #DBG:pdf.setStrokeColor('black')\n #DBG:pdf.setFillColor('black')\n\n if item.last_chunk:\n y += cfg.SONGCHUNK_B4\n for line in item.copyright_footer:\n y = print_line(pdf, font_face=cfg.FONT_FACE, font_size=cfg.COPYRIGHT_SIZE, y_offset=y,\n line_space=0, page_mapping=page_mapping, line=line)\n y += cfg.COPYRIGHT_SPACE # COPYRIGHT SPACE is padding between copyright lines \n\n # any parting space\n y += item.height_after\n\n #DBG:pdf.rect(page_mapping.startx+5, page_mapping.starty - (starty+cfg.SONGLINE_SIZE), 20, starty-y)\n # INDEX\n elif isinstance(item, Index) and cfg.DISPLAY_INDEX != INDEX_OFF: # top-level index which contains index entries\n if cfg.DISPLAY_INDEX == INDEX_NO_PAGE_BREAK:\n y += cfg.INDEX_TITLE_B4 # only add space when index not starting on a new page\n y = print_line(pdf, font_face=cfg.INDEX_TITLE_FONT, font_size=cfg.INDEX_TITLE_SIZE, y_offset=y, \n line_space=cfg.INDEX_TITLE_SPACE, page_mapping=page_mapping, line=\"Alphabetical Index\")\n\n # SCRIP INDEX\n elif isinstance(item, ScripIndex) and cfg.DISPLAY_SCRIP_INDEX != INDEX_OFF: # top-level scrip_index which contains index entries\n if cfg.DISPLAY_SCRIP_INDEX == INDEX_NO_PAGE_BREAK:\n y += cfg.INDEX_TITLE_B4 # only add space when scrip index not starting on a new page\n y = print_line(pdf, font_face=cfg.INDEX_TITLE_FONT, font_size=cfg.INDEX_TITLE_SIZE, y_offset=y, \n line_space=cfg.INDEX_TITLE_SPACE, page_mapping=page_mapping, line=\"Scripture Index\")\n\n # CAT INDEX\n elif 
isinstance(item, CatIndex) and cfg.DISPLAY_CAT_INDEX != INDEX_OFF: # top-level cat_index which contains index entries\n if cfg.DISPLAY_CAT_INDEX == INDEX_NO_PAGE_BREAK:\n y += cfg.INDEX_TITLE_B4 # adding space because cat_index not starting on a new page\n y = print_line(pdf, font_face=cfg.INDEX_TITLE_FONT, font_size=cfg.INDEX_TITLE_SIZE, y_offset=y, \n line_space=cfg.INDEX_TITLE_SPACE, page_mapping=page_mapping, line=\"Category Index\")\n\n # CAT INDEX Category\n elif isinstance(item, Category) and cfg.DISPLAY_CAT_INDEX != INDEX_OFF: # Category inside cat_index\n y += cfg.INDEX_CAT_B4 # add space before the category\n y = print_line(pdf, font_face=cfg.INDEX_CAT_FONT, font_size=cfg.INDEX_CAT_SIZE, y_offset=y, \n line_space=cfg.INDEX_CAT_SPACE, page_mapping=page_mapping, line=item.category)\n\n # CAT INDEX ITEM\n elif isinstance(item, CatIndexEntry) and cfg.DISPLAY_CAT_INDEX != INDEX_OFF:\n # print only the song number at this time -- don't save y since we are going to print on the line again\n print_line(pdf, font_face=cfg.INDEX_SONG_FONT, font_size=cfg.INDEX_SONG_SIZE, y_offset=y, line_space=cfg.INDEX_SONG_SPACE,\n page_mapping=page_mapping, line=str(item.song.num))\n # now print the index text with a consistent x offset so everything lines up\n y = print_line(pdf, font_face=cfg.INDEX_SONG_FONT, font_size=cfg.INDEX_SONG_SIZE, y_offset=y, line_space=cfg.INDEX_SONG_SPACE,\n x_offset=max(cfg.INDEX_SONG_SIZE, cfg.INDEX_FIRST_LINE_SIZE)*2, page_mapping=page_mapping, line=item.index_text)\n\n # INDEX ITEMS (after CatIndexEntry because CatIndexEntry is a subclass of IndexEntry)\n elif isinstance(item, IndexEntry) and (cfg.DISPLAY_INDEX != INDEX_OFF or cfg.DISPLAY_SCRIP_INDEX != INDEX_OFF):\n if item.is_song_title:\n LINE_SIZE = cfg.INDEX_SONG_SIZE\n LINE_SPACE= cfg.INDEX_SONG_SPACE\n FONT = cfg.INDEX_SONG_FONT\n else:\n LINE_SIZE = cfg.INDEX_FIRST_LINE_SIZE\n LINE_SPACE= cfg.INDEX_FIRST_LINE_SPACE\n FONT = cfg.INDEX_FIRST_LINE_FONT\n\n # print only the song number at this time -- don't save y since we are going to print on the line again\n print_line(pdf, font_face=FONT, font_size=LINE_SIZE, y_offset=y, line_space=LINE_SPACE,\n page_mapping=page_mapping, line=str(item.song.num))\n # now print the index text with a consistent x offset so everything lines up\n y = print_line(pdf, font_face=FONT, font_size=LINE_SIZE, y_offset=y, line_space=LINE_SPACE,\n x_offset=max(cfg.INDEX_SONG_SIZE, cfg.INDEX_FIRST_LINE_SIZE)*2, page_mapping=page_mapping, line=item.index_text)\n \n # restore original clip settings\n pdf.restoreState()\n\n # debug -- print page (small page here) rect\n #DBG:print '%d x %d rect at (%d, %d)' % (page_mapping.endx-page_mapping.startx, page_mapping.endy-page_mapping.starty,\n #DBG: page_mapping.startx, page_mapping.starty)\n #XXX: uncomment last 2 lines to have a border around each page\n #pdf.rect(page_mapping.startx, page_mapping.starty,\n # page_mapping.endx-page_mapping.startx,page_mapping.endy-page_mapping.starty,\n # fill=0)\n if page_height(page_mapping.page) != y:\n print 'Page:', pdf.getPageNumber(), 'Expected page height:', page_height(page_mapping.page), 'not equal to actual page height:', y\n #DBG:pdf.rect(page_mapping.startx, page_mapping.starty,\n #DBG: page_mapping.endx-page_mapping.startx,-page_height(page_mapping.page),\n #DBG: fill=0)", "def OnPaint(self, event):\r\n \r\n dc = wx.PaintDC(self)\r\n dc.SetFont(self.GetFont())\r\n\r\n if self.GetPageCount() > 0:\r\n self.Render(dc, self)", "def page(self, lines, dpi):\r\n self.__setdpi(dpi)\r\n rows = len(lines)\r\n 
page = self.__newpage(rows)\r\n row = 0\r\n for line in lines:\r\n if (self.__debug): print(row, line, len(line))\r\n elongated = False\r\n y = row * self.__height + self.__margin\r\n column = 0\r\n for char in line:\r\n x = column % self.__columns * self.__width + self.__margin\r\n c = ord(char)\r\n if (char == self.__elongate):\r\n elongated = True\r\n continue\r\n if (c > 127 and c < 255): c = c & 0b10001111\r\n i = c - ord(' ')\r\n if (i > 0 and i < len(self.__chars)):\r\n clip = self.__chars[i]\r\n mask = self.__masks[i]\r\n clip_width = self.__width\r\n if (elongated):\r\n clip_width *= 2\r\n clip = clip.resize((clip_width, self.__height))\r\n mask = mask.resize((clip_width, self.__height))\r\n box = (x, y, x + clip_width, y + self.__height)\r\n page.paste(clip, box, mask)\r\n if (elongated):\r\n column += 2\r\n elongated = False\r\n else:\r\n column += 1\r\n row += 1\r\n return page", "def draw(self):\n self.screen.fill(WHITE)\n self.color_invalid()\n self.draw_selected()\n self.shade_locked_cells()\n self.draw_grid()\n self.draw_buttons()\n self.draw_numbers()", "def draw(self, screen: pygame.Surface) -> None:\n page = self.pages[self.current_page]\n # Draw background\n screen.blit(page.background, (0, 0))\n # Draw buttons to screen\n for button in page.buttons:\n if button.image is not None:\n screen.blit(button.image, button.rect)\n screen.blit(button.text, button.rect)\n # Draw highlights if mouse is hovering over button\n if button.tag not in ('display', 'output') and \\\n button.rect.collidepoint(self.mouse_pos):\n surf = create_trans_surf(button.rect.width, button.rect.height, 50, (100, 255, 100))\n screen.blit(surf, button.rect)", "def set_new_page(self):\n self.num += 1\n c = self.canvas\n c.showPage()\n self.decorate()\n self.x = self.marginsides\n self.lastx = self.marginsides\n self.y = self.height - self.margintop\n #self.print_text([\"Page %s\" % unicode(self.num)], fontsize=8,\n # style=\"right\")\n self.put_page_num()\n #self.x = self.marginsides\n #self.lastx = self.x\n #self.y = self.y - 32\n self.pagebegin = 1", "def draw(self):\n for section in self.sections:\n canvas_reset(self.canvas)\n section.draw(self.canvas)", "def cover_page(self) -> NoReturn:\n self.add_page()\n self.set_font_size(30)\n self.set_text_color(255, 0, 0)\n self.set_y(int(self.h / 2))\n self.set_x(self.l_margin + self.r_margin)\n self.cell(0, self.line_height, 'Report on Emission Data', align='C',\n ln=2)", "def draw(self, width, height):\n \n line_spacing = 20\n \n\n #TODO:Smart algorithm to map mouse position to the scrolling speed\n #zooming level should go here\n \n if self.scroll > 20:\n self.factor = self.scroll * 0.1\n\n elif self.scroll < -20:\n self.factor = abs(self.scroll) * 0.1\n \n elif abs(self.scroll) > 50:\n self.factor = 5\n self.scroll = 50\n\n else:\n self.factor = 0\n \n output_text = \"\"\n\n if self.text:\n l = self.min_text\n l1 = l\n l2 = l + 1\n \n tab_previous = self.tab_index[l]\n \n while l < self.max_text:\n \n #Find all the lines with the same indentation level\n while l < self.line_count - 2 and self.tab_index[l + 1] == tab_previous:\n l2 += 1 \n l += 1\n \n self.tab_cairo += tab_previous * 20\n font_size = int(self.zoom - (tab_previous * self.factor))*pango.SCALE\n \n #Set a minimum font size\n if font_size < 8000:\n font_size = 8000\n \n pango.FontDescription.set_size(self.desc, font_size)\n self.pg.set_font_description(self.desc)\n \n #Adjust line spacing as font size decreases\n line_spacing -= tab_previous * 0.5 \n\n self.cr.move_to(self.tab_cairo, 
self.max_cairo)\n \n output_text = '\\n'.join(self.text[l1:l2])\n \n self.pg.set_text(output_text)\n self.cr.show_layout(self.pg)\n\n self.max_cairo += line_spacing * (l2 - l1) \n \n #Reset all values\n self.tab_cairo = 20\n line_spacing = 20\n l += 1\n \n try:\n tab_previous = self.tab_index[l]\n \n except IndexError:\n tab_previous = self.tab_index[-1]\n \n l1 = l\n l2 = l + 1", "def psprint(self, filename):\n\n # The portrait A4 page is, in mm, WxH=210x297. Let's have a safety\n # margin of 7mm all around it, and the usable area becomes 196x283.\n W = 196.0\n H = 283.0\n x1, y1, x2, y2 = self._c.bbox(\"all\")\n options = {\n \"pageanchor\": \"sw\",\n \"x\": \"%fp\" % x1,\n \"y\": \"%fp\" % y1,\n \"height\": \"%fp\" % (y2-y1),\n \"width\": \"%fp\" % (x2-x1),\n \"pagex\": \"0\",\n \"pagey\": \"0\",\n \"file\": filename,\n \"colormode\": \"mono\",\n }\n # ??? I think I'm doing all this viewport math sensibly, BUT I\n # still get a weird asymmetric margin around the thing, and I\n # haven't got a clue how to get rid of it.\n yscale = (y2-y1) / H\n xscale = (x2-x1) / W\n # The direction with the greater scaling factor is the limiting one\n if xscale > yscale:\n options[\"pagewidth\"] = \"%fm\" % W\n else:\n options[\"pageheight\"] =\"%fm\" % H\n self._c.update()\n apply(self._c.postscript, (), options)", "def draw(self, screen):\n pg.draw.rect(screen, self.bg_color, self.rect)\n\n for y, surf in enumerate(self.images):\n # Don't blit below the rect area.\n if y * self.font_height + self.font_height > self.rect.h:\n break\n screen.blit(surf, (self.rect.x, self.rect.y+y*self.font_height))", "def draw(self):\n self.screen.fill(BACKGROUND_COLOR)\n self.cannon.draw(self.screen)\n self.objects.draw(self.screen)", "def draw(self):\n\t\tfor i in range(0, self.size):\n\t\t\tprint('\\n' + \"----\" * self.size)\n\t\t\tfor j in range(0, self.size):\n\t\t\t\tprint(self.grid[i][j] + ' |', end=\" \")\n\t\tprint('\\n'+ \"----\" * self.size + '\\n')", "def draw(self):\r\n self.screen.fill(self.color_bg) \r\n for t in self.thumbs: t.draw(self.screen) \r\n pygame.display.flip()\r\n self.clock.tick(60)", "def show(self):\n for page in self.pagemap:\n # Home cursor on the page\n # Set page\n self.write_cmd(self.CMD_SET_PAGE | self.pagemap[page])\n # Set lower bits of column\n self.write_cmd(self.CMD_SET_COLUMN_LOWER | (0 & 0xF))\n # Set upper bits of column\n self.write_cmd(self.CMD_SET_COLUMN_UPPER | ((0 >> 4) & 0xF))\n\n # Page start row\n row_start = page << 7\n # Page stop row\n row_stop = (page + 1) << 7\n # slice page from buffer and pack bits to bytes then send to display\n self._dc_pin.value = True\n with self.spi_device as spi:\n spi.write(self.buffer[row_start:row_stop]) # pylint: disable=no-member", "def draw(self):\n self.draw_occupied_cells()\n self.draw_open_cells()\n self.draw_edges()\n plt.xlabel(\"Red\")\n plt.ylabel(\"Black\")\n plt.title('Hex')\n self.camera.snap()", "def generate_pdf_background(pisafile, pagesize, is_portrait, context={}):\n # don't move up, we are preventing circular import\n from xhtml2pdf.xhtml2pdf_reportlab import PmlImageReader\n output = pisaFileObject(None, \"application/pdf\") # build temporary file\n img = PmlImageReader(\n WaterMarks.get_img_with_opacity(pisafile, context)\n )\n x, y, width, height = WaterMarks.get_size_location(img, context, pagesize, is_portrait)\n\n canvas = Canvas(output.getNamedFile(), pagesize=pagesize)\n canvas.drawImage(img, x, y, width, height, mask='auto')\n\n \"\"\"\n iw, ih = img.getSize()\n pw, ph = pagesize\n\n width = pw # min(iw, pw) 
# max\n wfactor = float(width) / iw\n height = ph # min(ih, ph) # max\n hfactor = float(height) / ih\n factor_min = min(wfactor, hfactor)\n factor_max = max(wfactor, hfactor)\n \n if is_portrait:\n w = iw * factor_min\n h = ih * factor_min\n canvas.drawImage(img, 0, ph - h, w, h)\n else:\n h = ih * factor_max\n w = iw * factor_min\n canvas.drawImage(img, 0, 0, w, h)\n \"\"\"\n canvas.save()\n\n return output", "def print(self, text, dpi):\r\n lines = self.__buildlines(text)\r\n return self.page(lines, dpi)", "def start(self):\n\t\t# Set printer\n\t\twin32print.SetDefaultPrinter(self.printer)\n\t\thandle = win32print.OpenPrinter(self.printer, None)\n\n\t\t# Get existing printer settings\n\t\tsettings = win32print.GetPrinter(handle, self._LEVEL)\n\t\tsettings['pDevMode'].PaperSize = self.papersize\n\t\tsettings['pDevMode'].Copies = self.copies\n\n\t\t# Update printer settings\n\t\t# Exceptions are raised, but changes are applied.\n\t\ttry:\n\t\t\twin32print.SetPrinter(handle, self._LEVEL, settings, 0)\n\t\texcept:\n\t\t\tpass\n\n\t\twin32api.ShellExecute(0, 'print', self.document, None, '.', 0)\n\t\twin32print.ClosePrinter(handle)", "def setPageFill(ncolor):\n dislin.pagfll(ncolor)", "def on_page(canvas, doc):\n today_str = datetime.date.today().isoformat()\n page_width = doc.pagesize[0]\n page_height = doc.pagesize[1]\n canvas.saveState()\n\n # Title.\n canvas.setFont('Helvetica-Bold', 18)\n canvas.drawCentredString(\n 3.65 * INCH,\n page_height - 0.75 * INCH,\n \"Art Mart Inventory Sheet\"\n )\n canvas.drawCentredString(\n 3.65 * INCH,\n page_height - 1.0 * INCH,\n \"Check In/Out\"\n )\n\n # Upper-right block.\n canvas.setFont('Helvetica', 10)\n canvas.drawString(\n 6.65 * INCH,\n page_height - 0.60 * INCH,\n \"Office Use Only\"\n )\n canvas.setFont('Helvetica', 9)\n canvas.drawRightString(7.15 * INCH, page_height - 0.80 * INCH, \"Recd. 
Via\")\n canvas.drawRightString(7.15 * INCH, page_height - 1.00 * INCH, \"Date Recd\")\n canvas.drawString(7.25 * INCH, page_height - 0.80 * INCH, \"_________\")\n canvas.drawString(7.25 * INCH, page_height - 1.00 * INCH, \"_________\")\n\n # Second block.\n canvas.setFont('Helvetica-Bold', 12)\n canvas.drawRightString(1.65 * INCH, page_height - 1.45 * INCH, \"Artist #\")\n canvas.drawRightString(1.65 * INCH, page_height - 1.75 * INCH, \"Store\")\n canvas.drawRightString(3.15 * INCH, page_height - 1.45 * INCH, \"Artist\")\n canvas.drawRightString(3.15 * INCH, page_height - 1.75 * INCH, \"Email\")\n canvas.drawRightString(6.75 * INCH, page_height - 1.45 * INCH, \"Date\")\n canvas.drawRightString(6.75 * INCH, page_height - 1.75 * INCH, \"Phone\")\n canvas.setFont('Helvetica', 12)\n email = \"[email protected]\"\n canvas.drawString(1.75 * INCH, page_height - 1.45 * INCH, \"2587\")\n canvas.drawString(1.75 * INCH, page_height - 1.75 * INCH, \"Boulder\")\n canvas.drawString(3.25 * INCH, page_height - 1.45 * INCH, \"Linda Schutter\")\n canvas.drawString(3.25 * INCH, page_height - 1.75 * INCH, email)\n canvas.drawString(6.85 * INCH, page_height - 1.45 * INCH, today_str)\n canvas.drawString(6.85 * INCH, page_height - 1.75 * INCH, \"720.318.4099\")\n\n # Bottom block.\n canvas.setFont('Helvetica', 9)\n canvas.drawCentredString(\n page_width / 2.0,\n 1.35 * INCH,\n \"All checkin and out sheets should be signed below after being\" +\n \" verified by the Artist and an Art Mart Employee\"\n )\n canvas.drawRightString(1.45 * INCH, 0.98 * INCH, \"Signed\")\n sig_line = \"____________________________________________\"\n canvas.drawString(1.55 * INCH, 0.98 * INCH, sig_line)\n canvas.drawString(4.80 * INCH, 0.98 * INCH, sig_line)\n canvas.drawCentredString(3.00 * INCH, 0.80 * INCH, \"Art Mart Employee\")\n canvas.drawCentredString(\n 6.10 * INCH,\n 0.80 * INCH,\n \"Artist or Representative\"\n )\n\n canvas.restoreState()", "def drawBoard(self):\r\n self.outer.draw(self.surface)\r\n self.background.draw(self.surface)\r\n for point in self.points:\r\n point.draw(self.surface)\r\n point.drawCheckers(self.surface)\r\n self.dice.draw(self.surface)\r\n self.message.draw(self.surface)\r\n self.checkerBox.draw(self.surface)\r\n self.checkerBox.drawCheckers(self.surface)\r\n for bar in self.bar:\r\n bar.draw(self.surface)\r\n bar.drawCheckers(self.surface)\r\n pygame.display.flip()", "def draw(self):\n self.drawLine()\n\n for l in range(0, self.height):\n print(\"|\", end='', flush=True)\n for c in range(0, self.width):\n print(\" \" + str(self.grid[l][c]) + \" |\", end='', flush=True)\n print(\"\\n\", end='', flush=True)\n\n self.drawLine()", "def save(self):\n num_pages = len(self._saved_page_states)\n for state in self._saved_page_states:\n self.__dict__.update(state)\n self.draw_page_number(num_pages)\n reportlab.pdfgen.canvas.Canvas.showPage(self)\n reportlab.pdfgen.canvas.Canvas.save(self)", "def _draw(self):\n\n self._set_percentage()\n spaces = \"\".join([' ' for _ in range(len(str(self.percentage)), 5)])\n porc = \"\\r\" + str(self.text) + spaces + str(self.percentage) + \"%[\"\n pos = (((self.step / (self.end - self.start) * 100) * (self.width - len(porc))) / 100)\n self._write(porc)\n for i in range(int(pos)):\n self._write(self.bar)\n self._write(next(self.pacman))\n for i in range(int(pos), len(self.candybar) - 18):\n self._write(self.candybar[i])\n self._write(\"] > \" if self.follower and self.step < self.len else \"]\")\n\n sys.stdout.flush()\n\n if self.step == self.len:\n self._write(\"\\n\")", 
"def graphics_loop(self, font):\n self.screen.blit(self.background, (0, 0))\n if not self.scroll:\n self.all_sprites.draw(self.screen)\n else:\n self.draw_onscreen()\n #display which step we're on\n if pygame.font:\n text = font.render(str(self.stepid), 1, (255, 255, 255))\n textpos = text.get_rect(centerx = int(\n (self.screen.get_width() * 0.5)))\n self.screen.blit(text, textpos)\n pygame.display.flip()\n #cap at x fps\n self.clock.tick(self.max_fps)", "def _render(self, gc, points):\n with gc:\n gc.set_antialias(True)\n self._draw_default_axes(gc)\n self._draw_default_grid(gc)\n if len(points)>0:\n gc.clip_to_rect(self.x, self.y, self.width, self.height)\n gc.set_stroke_color(self.color_)\n gc.set_line_width(self.line_width)\n gc.set_line_dash(self.line_style_)\n\n gc.begin_path()\n gc.lines(points)\n gc.stroke_path()\n\n return", "def draw(self, window):\n\n if(self.page == Page.Game):\n self.draw_game_page(window);\n elif(self.page == Page.Save):\n self.draw_save_page(window);\n\n pygame.display.flip();" ]
[ "0.6472516", "0.6322351", "0.6206114", "0.61876047", "0.61129534", "0.6107652", "0.60719824", "0.60593873", "0.59743327", "0.59252787", "0.5870063", "0.58362466", "0.5776248", "0.57747906", "0.575061", "0.5728778", "0.5693118", "0.5666412", "0.5624035", "0.56164974", "0.5597774", "0.5584313", "0.5573402", "0.55381703", "0.5512593", "0.55004936", "0.54847", "0.5459005", "0.5446109", "0.5441269" ]
0.718377
0
Initializes ourselves with the print job and optional parent widget.
def __init__(self, job, parent=None):
    super().__init__(parent)
    self._job = job
    job.progress.connect(self.showProgress)
    job.finished.connect(self.jobFinished)
    self.canceled.connect(job.requestInterruption)
    self.setMinimumDuration(0)
    self.setRange(0, len(job.pageList))
    self.setLabelText("Preparing to print...")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, printer, parent=None):\n QtGui.QWidget.__init__(self, printer, parent)", "def __init__(self, printer, pageList, parent=None):\n super().__init__(parent)\n self.printer = printer\n self.setPageList(pageList)", "def __init__(self, job):\n self.job = job\n\n self.info_widgets_list = [] # (sub)widget containing the job information\n self.file_widgets_list = [] # (sub)widget containing the last lines of certain files\n self.metadata_widgets_list = [] # (sub)widget containing the job information from the metadata\n self.update_info() # read job info\n\n self.handler = WeakMethod(self.update_info)\n self.job.state_change_handlers.append(self.handler) # add handler to update information when the job status changes\n\n self.widget = urwid.Padding(None, align='center')\n self.update() # add the job info to the widget\n\n BaseTimedWidgetWrap.__init__(self, self.widget)", "def __init__(self, printer):\n super(BashParentEnvironment, self).__init__()\n self._printer = printer", "def __init__(self, printer):\n super(PowershellParentEnvironment, self).__init__()\n self._printer = printer", "def __init__(self, printer, output):\n\t\timport revitron\n\n\t\tif not printer or not output:\n\t\t\trevitron.Log().warning('PDF exporter is not configured!')\n\t\t\tsys.exit()\n\n\t\tself.printer = printer\n\t\tself.output = output\n\t\tself.manager = revitron.DOC.PrintManager\n\t\tself.sizes = dict()\n\n\t\tif self.manager.PrinterName.lower() != self.printer.lower():\n\t\t\tprint('Setting current printer to: ' + self.printer)\n\t\t\tprint('Please submit your sheets to be exported again ...')\n\t\t\tself.manager.SelectNewPrintDriver(self.printer)\n\t\t\tself.manager.Apply()\n\t\t\tsys.exit()\n\n\t\tself.manager.PrintRange = revitron.DB.PrintRange.Select\n\t\tself.manager.PrintToFile = True\n\t\tself.manager.CombinedFile = False\n\t\tself.manager.Apply()\n\n\t\tfor size in self.manager.PaperSizes:\n\t\t\tself.sizes[size.Name] = size", "def __init__(self):\n\t\tself.walltime_edit = urwid.Edit( ('editcp',\"walltime=\"), \"200:00:00\" )\n\t\tself.nodes_edit = urwid.IntEdit( ('editcp', \"nodes=\"), 0 )\n\t\tself.myri_ppn_edit = urwid.IntEdit( ('editcp', \"myri:ppn=\"), 4)\n\t\tself.workdir_edit = urwid.Edit( (\"editcp\", \"WORKDIR(-d) \"), '~/qjob_output')\n\t\tself.runtime_output_checkbox = urwid.CheckBox(\"See output while running\")\n\t\tself.other_options_edit = urwid.Edit( (\"editcp\", \"others:\"), '-q cmb -j oe -S /bin/bash')\n\t\tself.source_bash_profile_checkbox = urwid.CheckBox(\"source ~/.bash_profile\")\n\t\tself.source_bash_profile_checkbox.set_state(True)\n\t\tself.just_write_down_checkbox = urwid.CheckBox(\"Write jobfile. 
No submission.\")\n\t\tself.jobname_prefix_edit = urwid.Edit( (\"editcp\", \"jobname_prefix:\"), '~/qjob/job')\n\t\tself.jobnumber_edit = urwid.IntEdit( (\"editcp\", \"job number:\"), 0)\n\t\tself.job_content_reset_button = urwid.Button(\"Job Content Reset\", self.job_content_reset)\n\t\tself.exit_button = urwid.Button(\"Exit\", self.program_exit)\n\t\tself.job_edit = urwid.Edit( ('editcp',\"\"), multiline=True )\n\t\t\n\t\tself.items = [\n\t\turwid.Padding(\n\t\t\turwid.Columns(\n\t\t\t\t[\n\t\t\t\turwid.AttrWrap( self.walltime_edit, 'editbx', 'editfc' ),\n\t\t\t\turwid.AttrWrap( self.nodes_edit, 'editbx', 'editfc'),\n\t\t\t\turwid.AttrWrap( self.myri_ppn_edit, 'editbx', 'editfc'),\n\t\t\t\t],\n\t\t\t\t2 ), \n\t\t\t('fixed left',2), ('fixed right',2)),\n\t\tblank,\n\t\turwid.Padding(\n\t\t\turwid.Columns(\n\t\t\t\t[\n\t\t\t\turwid.AttrWrap( self.workdir_edit, 'editbx', 'editfc' ), \n\t\t\t\turwid.AttrWrap( self.runtime_output_checkbox, 'buttn', 'buttnf'),\n\t\t\t\t],\n\t\t\t\t2),\n\t\t\t('fixed left',2), ('fixed right',2)),\n\t\tblank,\n\t\turwid.Padding(\n\t\t\turwid.AttrWrap( self.other_options_edit, 'editbx', 'editfc' ), ('fixed left',2), ('fixed right',2)),\n\t\tblank,\n\t\turwid.Padding(\n\t\t\turwid.GridFlow(\n\t\t\t\t[\n\t\t\t\turwid.AttrWrap( self.source_bash_profile_checkbox, 'buttn','buttnf'),\n\t\t\t\turwid.AttrWrap( self.just_write_down_checkbox, 'buttn', 'buttnf'),\n\t\t\t\turwid.AttrWrap( self.jobname_prefix_edit, 'editbx', 'editfc' ),\n\t\t\t\turwid.AttrWrap( self.jobnumber_edit, 'editbx', 'editfc' ),\n\t\t\t\turwid.AttrWrap(self.job_content_reset_button, 'buttn', 'buttnf'),\n\t\t\t\turwid.AttrWrap(self.exit_button, 'buttn', 'buttnf'),\n\t\t\t\t],\n\t\t\t\t34, 2, 1, 'left'),\n\t\t\t('fixed left',2), ('fixed right',2)),\n\t\tblank,\n\t\turwid.Padding(\n\t\t\turwid.Pile(\n\t\t\t[\n\t\t\turwid.Text('One line one job. One job with >1 commands put on one line, separated by ;'),\n\t\t\turwid.AttrWrap(self.job_edit, 'editbx', 'editfc'),\n\t\t\t], 1),\n\t\t\t('fixed left',2), ('fixed right',2) )\n\t\t\t\n\t\t]\n\t\t\n\t\tself.listbox = urwid.ListBox( self.items )\n\t\t\n\t\tinstruct = urwid.Text(\"Job submission program based on Urwid. 
F8 to submit, F12 to quit.\")\n\t\theader = urwid.AttrWrap( instruct, 'header' )\n\t\t\n\t\tself.footer_text = urwid.Text(\"Mar 15th, 2008 by Yu Huang\")\n\t\tfooter = urwid.AttrWrap(self.footer_text, 'footer')\n\t\t\n\t\tself.top_frame = urwid.Frame(urwid.AttrWrap(self.listbox, 'body'), header, footer)", "def __init__(self, execution):\n self.execution = execution\n\n self.form = {}\n if len(PARAMS) == 0:\n input_boxes = [] #[urwid.Text('Changing the default parameters not allowed')]\n else:\n input_boxes = [urwid.Text('Change the default parameters for the jobs:')]\n for k, v in PARAMS.items():\n edit_box = urwid.Edit(('edittxt', v + ': '), str(self.execution.job_params.get(k, PARAM_DEFAULT_VALUE)))\n input_boxes.append(urwid.AttrMap(edit_box, 'editbx', 'editfc'))\n self.form[k] = edit_box\n\n input_boxes.append(create_button('Change', self.resubmit))\n\n self.widget = urwid.Padding(urwid.Pile(input_boxes), align='center')\n\n BaseTimedWidgetWrap.__init__(self, self.widget)", "def __init__(self, job, callback=None):\n self.job = job\n\n self.callback = callback\n\n self.form = {}\n input_boxes = []\n for k, v in PARAMS.items():\n edit_box = urwid.Edit(('edittxt', v + ': '), str(self.job.params.get(k, PARAM_DEFAULT_VALUE)))\n input_boxes.append(urwid.AttrMap(edit_box, 'editbx', 'editfc'))\n self.form[k] = edit_box\n\n input_boxes.append(create_button('Resubmit', self.resubmit))\n\n self.widget = urwid.Padding(urwid.Pile(input_boxes), align='center')\n\n BaseTimedWidgetWrap.__init__(self, self.widget)", "def _init_display(self):\n raise NotImplementedError", "def __init__(self, parent, msg = \"\"):\n super().__init__()\n self._parent = parent\n self.isHidden = True\n self._progress = 0\n self._progressBar = None\n self.msg = msg\n\n pixmap = QtGui.QPixmap(380, 100)\n pixmap.fill(QtGui.QColor(\"darkgreen\"))\n\n self._splash = QSplashScreen(pixmap)\n self._splash.setParent(self._parent)\n\n self.add_progressbar()", "def __init__(self, parent):\n self.parent = parent\n self.dialog = None", "def __init__(self, *args):\n this = _ida_hexrays.new_qstring_printer_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, parent=None):\n self._window = None\n\n self.setup_ui()", "def start(self):\n\t\t# Set printer\n\t\twin32print.SetDefaultPrinter(self.printer)\n\t\thandle = win32print.OpenPrinter(self.printer, None)\n\n\t\t# Get existing printer settings\n\t\tsettings = win32print.GetPrinter(handle, self._LEVEL)\n\t\tsettings['pDevMode'].PaperSize = self.papersize\n\t\tsettings['pDevMode'].Copies = self.copies\n\n\t\t# Update printer settings\n\t\t# Exceptions are raised, but changes are applied.\n\t\ttry:\n\t\t\twin32print.SetPrinter(handle, self._LEVEL, settings, 0)\n\t\texcept:\n\t\t\tpass\n\n\t\twin32api.ShellExecute(0, 'print', self.document, None, '.', 0)\n\t\twin32print.ClosePrinter(handle)", "def create_job(self):\n job = Job()\n process = Process()\n process.process_graph = {\"load_collection1\": {\"process_id\": \"load_collection\", \"arguments\": {}}}\n\n job.process = process\n\n self.dlg = JobAdaptDialog(iface=self.iface, job=job, backend=self.backend, main_dia=self)\n self.dlg.manualButton.setIcon(QIcon(os.path.join(os.path.dirname(__file__),\n 'images/info_icon.png')))\n self.dlg.setWindowFlags(Qt.WindowStaysOnTopHint)\n self.dlg.show()", "def __init__(self, *args, **kwargs):\n _gdi_.PrinterDC_swiginit(self,_gdi_.new_PrinterDC(*args, **kwargs))", "def __init__(self, *args):\n this = _ida_hexrays.new_vc_printer_t(*args)\n try: self.this.append(this)\n 
except: self.this = this", "def __init__(self, parent=None):\n\n super().__init__(parent=parent)\n\n self.window_title = \"Goal Calibration\"\n\n screen_layout = QVBoxLayout()\n\n self.toolbar = ToolbarComponent(\n self.window_title, \"Back to Session\\n Record Check\")\n\n screen_layout.addWidget(self.toolbar)\n\n screen_layout.addWidget(ProfileLabel(\n \"Please set up the device your appropriate distance from the goal, then click Next\"))\n\n # Image frames shown in this\n self.image_label = QLabel()\n screen_layout.addWidget(self.image_label)\n\n # Initializing the thread to run the camera\n self.thread = None\n\n self.next_page_button = GenericButton(\"Next\")\n self.next_page_button.clicked.connect(self.cleanup_steps)\n screen_layout.addWidget(self.next_page_button)\n\n self.updated_temp_goal_image = None\n\n self.setLayout(screen_layout)", "def initUI(self):\n startbtn = QPushButton(\"Start Recroding\", self)\n startbtn.move(30, 50)\n\n stopbtn = QPushButton(\"Stop Recording\", self)\n stopbtn.move(150, 50)\n\n initbtn = QPushButton(\"Initilize\", self)\n initbtn.move(30, 100)\n\n plotbtn = QPushButton(\"Plot\", self)\n plotbtn.move(150, 100)\n\n startbtn.clicked.connect(self.start_recording)\n stopbtn.clicked.connect(self.stop_recording)\n initbtn.clicked.connect(self.init_recording)\n plotbtn.clicked.connect(self.plot_signals)\n\n self.statusBar()\n self.statusBar().showMessage('Click Init')\n\n self.setGeometry(300, 300, 290, 150)\n self.setWindowTitle('Recorder 1.0')\n self.setWindowIcon(QIcon(\"./Static/Images/icon.jpg\"))\n self.show()", "def __init__(self, parent=None):\n super(ProgressDlg, self).__init__(parent)\n self.setupUi(self)", "def initialize(self):\n self.setWindowTitle(\"Playlist Maker\")\n self.setGeometry(0,0, 800, 494)\n self.mbox = QVBoxLayout()\n self.hbox = QHBoxLayout()\n self.hbtnbox = QHBoxLayout()", "def __init__(self):\n # setup the database session\n engine = create_engine('sqlite:///%s'%os.path.join(APP_DIR,config.get('Inkcut','database_dir'),config.get('Inkcut','database_name')))\n\n Session.configure(bind=engine)\n self.session = Session()\n\n self.job = None\n self.ui = {\n 'main_window':MainWindow(self),\n 'device_dialog':DeviceDialog(self),\n }\n self.statusbar = self.ui['main_window'].widgets['statusbar']", "def __init__(self, parent, song: Song) -> None:\n\n self.parent: \"ProgressHandler\" = parent\n self.song = song\n\n # Clean up the song name\n # from weird unicode characters\n self.song_name = \"\".join(\n char\n for char in self.song.display_name\n if char not in [chr(i) for i in BAD_CHARS]\n )\n\n self.progress: int = 0\n self.old_progress: int = 0\n self.status = \"\"\n\n if not self.parent.simple_tui:\n self.task_id = self.parent.rich_progress_bar.add_task(\n description=escape(self.song_name),\n message=\"Download Started\",\n total=100,\n completed=self.progress,\n start=False,\n visible=(not self.parent.quiet),\n )", "def init_jobs(self):\n self.jobsTableWidget.clear()\n self.jobsTableWidget.setColumnCount(6)\n self.jobsTableWidget.setHorizontalHeaderLabels(['Job Id', 'Description/Error', 'Submission Date', 'Status',\n 'Execute', 'Display'])\n header = self.jobsTableWidget.horizontalHeader()\n header.setSectionResizeMode(0, QtWidgets.QHeaderView.ResizeToContents)\n header.setSectionResizeMode(1, QtWidgets.QHeaderView.Stretch)\n header.setSectionResizeMode(2, QtWidgets.QHeaderView.ResizeToContents)\n header.setSectionResizeMode(3, QtWidgets.QHeaderView.ResizeToContents)\n header.setSectionResizeMode(4, 
QtWidgets.QHeaderView.ResizeToContents)\n header.setSectionResizeMode(5, QtWidgets.QHeaderView.ResizeToContents)", "def __init__(self, parent, frame):\n\t\tself.frame = frame\n\n\t\t# Populate line edit with shot name\n\t\tself.frame.shot_lineEdit.setText(parent.self_name)", "def __init__( self, record, preview_pixmap, window_size=None ):\n\n self.record = record\n self.preview_pixmap = preview_pixmap\n\n super().__init__( window_size )", "def __init__(self, parent):\r\n\r\n pre = wx.PrePyWindow()\r\n \r\n self._tabs = None\r\n self._rect = wx.Rect(0, 0, 200, 200)\r\n self._tab_ctrl_height = 20\r\n self._tab_rect = wx.Rect() \r\n self._parent = parent\r\n \r\n self.PostCreate(pre)", "def __init__(self, parent):\r\n Frame.__init__(self, parent) \r\n \r\n self.parent = parent\r\n self.initUI()", "def __jobSelectedSetup(self):\n self.__jobSelectedLineEdit = QtWidgets.QLineEdit()\n self.__jobSelectedLineEdit.setMaximumWidth(300)\n self.__jobSelectedLineEdit.setFocusPolicy(QtCore.Qt.NoFocus)\n self.__jobSelectedLineEdit.setFont(cuegui.Constants.STANDARD_FONT)\n self.__toolbar.addWidget(self.__jobSelectedLineEdit)\n self.__monitorCue.single_click.connect(self.__jobSelectedHandle)" ]
[ "0.7487978", "0.67818165", "0.67569965", "0.64124125", "0.6201404", "0.6139698", "0.5940359", "0.5934324", "0.59159744", "0.5873766", "0.5867913", "0.58311474", "0.58015513", "0.5784232", "0.5742336", "0.57028717", "0.57011944", "0.5694068", "0.5670847", "0.56562907", "0.55983716", "0.55757254", "0.555624", "0.55528814", "0.55324", "0.55317026", "0.5520565", "0.55098045", "0.5508911", "0.54973793" ]
0.7860893
0
This method will enable delivery confirmations and schedule the first message to be sent to RabbitMQ
def start_publishing(self): print(f"{self._connection_param}: Issuing consumer related RPC commands") # self._channel.confirm_delivery(self.on_delivery_confirmation) self.schedule_next_message(self.SLOW_SEND)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepare_for_delivery(self, by=None):", "def prepare_for_delivery(self, by=None):", "def prepare_for_delivery(self, by=None):", "def keystone_amq(self):\n\n connection = pika.BlockingConnection(pika.ConnectionParameters(\n host=self.rabbit_host,\n port=int(self.rabbit_port),\n credentials=pika.PlainCredentials(\n self.rabbit_user,\n self.rabbit_pass))\n )\n channel = connection.channel()\n channel.exchange_declare(exchange='keystone', type='topic')\n channel.queue_declare(queue=\"zcp-keystone\", exclusive=True)\n channel.queue_bind(exchange='keystone',\n queue=\"zcp-keystone\",\n routing_key='notifications.#')\n channel.basic_consume(self.keystone_callback,\n queue=\"zcp-keystone\",\n no_ack=True)\n channel.start_consuming()", "def test_delivery_of_queued_messages(self):\n yield self.connect('127.0.0.1', self.pbPort)\n\n localConfig = copy.copy(self.defaultConfig)\n localConfig.id = str(randint(10, 99))\n localConfig.requeue_delay = 2\n localConfig.submit_sm_throughput = 20\n yield self.add(localConfig)\n\n # Send 150 messages to the queue\n submitCounter = 0\n submit_sm_pdu = copy.copy(self.SubmitSmPDU)\n while submitCounter < 150:\n submit_sm_pdu.params['short_message'] = '%s' % submitCounter\n yield self.submit_sm(localConfig.id, submit_sm_pdu, self.SubmitSmBill.user.uid)\n submitCounter += 1\n\n # Wait for 20 seconds\n yield waitFor(20)\n\n # Start the connector again\n yield self.start(localConfig.id)\n\n # Wait for 30 seconds, all the rest of the queue must be sent\n yield waitFor(50)\n\n yield self.stop(localConfig.id)\n\n # Wait for unbound state\n yield waitFor(20)\n\n # Assertions\n # Take the lastClient (and unique one) and assert received message\n self.assertEqual(len(self.SMSCPort.factory.lastClient.submitRecords), 150)", "def test_redelivery_of_rejected_messages_after_restart(self):\n yield self.connect('127.0.0.1', self.pbPort)\n\n localConfig = copy.copy(self.defaultConfig)\n localConfig.id = '#67-%s' % randint(10, 9999)\n localConfig.requeue_delay = 1\n localConfig.submit_sm_throughput = 1\n yield self.add(localConfig)\n yield self.start(localConfig.id)\n\n # Send 4 messages to the queue\n submitCounter = 0\n submit_sm_pdu = copy.copy(self.SubmitSmPDU)\n while submitCounter < 4:\n submit_sm_pdu.params['short_message'] = '%s' % submitCounter\n msgid = yield self.submit_sm(localConfig.id, submit_sm_pdu, self.SubmitSmBill.user.uid)\n submitCounter += 1\n\n # Wait for 5 seconds before stopping\n yield waitFor(5)\n\n yield self.stop(localConfig.id)\n\n # Wait for unbound state\n yield waitFor(5)\n\n # Save the count before starting the connector\n _submitRecordsCount = len(self.SMSCPort.factory.lastClient.submitRecords)\n\n # Wait for 5 seconds before starting again\n yield waitFor(5)\n\n # Start the connector again\n yield self.start(localConfig.id)\n\n # Wait for 10 seconds before stopping , all the rest of the queue must be sent\n yield waitFor(10)\n\n yield self.stop(localConfig.id)\n\n # Wait for unbound state\n yield waitFor(10)\n\n # Update the counter\n _submitRecordsCount += len(self.SMSCPort.factory.lastClient.submitRecords)\n\n # Assertions\n self.assertEqual(_submitRecordsCount, 4)", "def keystone_amq(self):\n\n connection = pika.BlockingConnection(pika.ConnectionParameters(host=self.rabbit_host,\n credentials=pika.PlainCredentials(\n username=self.rabbit_user,\n password=self.rabbit_pass)))\n channel = connection.channel()\n result = channel.queue_declare(exclusive=True)\n queue_name = result.method.queue\n channel.exchange_declare(exchange='keystone', 
type='topic')\n channel.queue_bind(exchange='openstack', queue=queue_name, routing_key='notifications.#')\n channel.queue_bind(exchange='keystone', queue=queue_name, routing_key='keystone.#')\n\n channel.basic_consume(self.keystone_callback, queue=queue_name, no_ack=True)\n channel.start_consuming()", "def start_amqp(self):\n try:\n self.conn = amqp.Connection(self.amqp['host'], self.amqp['user'],\n self.amqp['password'],\n virtual_host=self.amqp['vhost'])\n self.channel = self.conn.channel()\n self.channel.exchange_declare(self.amqp['routing_key'], 'fanout')\n except socket.error:\n return False\n return True", "def preProcess(self, msg):\n\n # open connection\n self.conn = Connection(\n user=self.user, password=self.password,\n vhost=self.vhost, host=self.host,\n heartbeat=self.heartbeat, debug=self.debug)\n\n # create AMQP channel\n self.channel = self.conn.channel()\n self.channel.exchange.declare(self.exchange, self.exchange_type)\n self.channel.queue.declare(self.queue, self.auto_delete)\n self.channel.queue.bind(self.queue, self.exchange, self.routing_key)", "def preProcess(self, msg):\n\n # open connection\n self.conn = Connection(\n user=self.user, password=self.password,\n vhost=self.vhost, host=self.host,\n heartbeat=self.heartbeat, debug=self.debug)\n\n # create AMQP channel\n self.channel = self.conn.channel()\n self.channel.exchange.declare(self.exchange, self.exchange_type)\n self.channel.queue.declare(self.queue, self.auto_delete)\n self.channel.queue.bind(self.queue, self.exchange, self.routing_key)", "def preProcess(self, msg):\n\n # open connection\n self.conn = Connection(\n user=self.user, password=self.password,\n vhost=self.vhost, host=self.host,\n heartbeat=self.heartbeat, debug=self.debug)\n\n # create AMQP channel\n self.channel = self.conn.channel()\n self.channel.exchange.declare(self.exchange, self.exchange_type)\n self.channel.queue.declare(self.queue, self.auto_delete)\n self.channel.queue.bind(self.queue, self.exchange, self.routing_key)", "def receive_confirmation(self):\n #print(\"(%d) receive_confirmation:\" % int(time.time()))\n #print(\" **> state:\", self.state)\n if self.state != KeyExchangeManager.STATE_CONFIRMING:\n return\n rand_time = int(KeyExchangeManager.KEY_REFRESH_INTERVAL*random.uniform(0.9, 1.1))\n self.set_invoke_timer(rand_time)\n self._set_delete_timer(self.key_name, KeyExchangeManager.KEY_OBSOLETE_TIMER)\n self.key_name = self.pending_key_name\n self._set_state(KeyExchangeManager.STATE_ESTABLISHED)\n #print(\"*STATE_ESTABLISHED\")", "def test_redelivery_of_rejected_messages(self):\n yield self.connect('127.0.0.1', self.pbPort)\n\n localConfig = copy.copy(self.defaultConfig)\n localConfig.id = '#67-%s' % randint(10, 99)\n localConfig.submit_sm_throughput = 3\n yield self.add(localConfig)\n yield self.start(localConfig.id)\n\n # Send 60 messages to the queue\n startAt = datetime.now()\n submitCounter = 0\n submit_sm_pdu = copy.copy(self.SubmitSmPDU)\n while submitCounter < 60:\n submit_sm_pdu.params['short_message'] = '%s' % submitCounter\n yield self.submit_sm(localConfig.id, submit_sm_pdu, self.SubmitSmBill.user.uid)\n submitCounter += 1\n\n receivedSubmits = self.SMSCPort.factory.lastClient.submitRecords\n counter = 0\n _receivedSubmitsCount = 0\n # Wait for 40 seconds before checking if all submits were delivered\n # It will check for throughput in each iteration\n while counter < 30:\n receivedSubmits = self.SMSCPort.factory.lastClient.submitRecords\n\n _receivedSubmitsCount = len(receivedSubmits)\n\n # Wait some time\n yield 
waitFor(1)\n\n counter += 1\n endAt = datetime.now()\n\n yield self.stop(localConfig.id)\n\n # Wait for unbound state\n yield waitFor(2)\n\n # Assertions\n # Take the lastClient (and unique one) and assert received message\n self.assertEqual(len(self.SMSCPort.factory.lastClient.submitRecords), 60)", "def on_message_received(ch, method, properties, body):\n # the body contains the command flag followed by a colon ':' and the message for the drone\n # decode the body to utf8\n received_bytes = body.decode('utf-8')\n # split the received_bytes to get the command _flag and message\n recieved_message = received_bytes.split(':')\n # since rabbit mq body is a byte\n if (str(recieved_message[0]) == \"c01\"):\n # c01 - command center orders the drone to deliver a item\n print(\"Order Received from the command center to deliver an item to the following address \\n\", str(\n recieved_message[1]))\n time.sleep(2)\n # print in the drone's console that the item has been lift off\n print('\\nLifting off the Item to the delivery address.')\n print('\\nUpdating Status to the command centre ......')\n # Assume the drone has reached the delivery address . Now send a\n # message to the warehouse command center that it has reached the\n # delivery area\n time.sleep(5)\n rpc_sendback(\"c02\")\n # Assume the drone has delivered the item and issue the status message\n # to the command center\n time.sleep(5)\n rpc_sendback(\"c03\")\n # #Assume the drone has reached the parking spot and issue the message to the command center that is available for next instruction\n time.sleep(5)\n rpc_sendback(\"c04\")\n\n else:\n print(\"Received Instruction from Warehouse \" +\n str(recieved_message[1]))\n channel.basic_ack(delivery_tag=method.delivery_tag)\n # channel.start_consuming()", "def ready_for_delivery(self, *args, **kwargs):\n return self.get_queryset().exclude(success=True).filter(\n success=None, # doen't re-send failed messages\n deliver_on__lte=timezone.now()\n )", "def send(self):\n if self._stopping:\n return\n\n mytype = 'text/plain'\n\n try:\n if isinstance(json.loads(self.message),dict):\n mytype = 'application/json'\n except (TypeError,json.JSONDecodeError):\n if (isinstance(self.message,dict)):\n mytype = 'application/json'\n self.message = json.dumps(self.message)\n else:\n self.message = str(self.message)\n\n properties = pika.BasicProperties(app_id='sender',\n content_type=mytype)\n\n self._channel.basic_publish(self.exchange, self.routing_key, self.message, properties)\n self._message_number += 1\n self._deliveries.append(self._message_number)\n self.logger.info('published message # %i', self._message_number)", "def perform_setup():\n global credentials, connection, channel\n credentials = pika.PlainCredentials('guest', 'guest') # AUTH via Default guest user on RabbitMQ\n connection = pika.BlockingConnection(pika.ConnectionParameters(\"127.0.0.1\", 5672, '/', credentials)) # Using rabbit-mq container name to access the RabbitMQ container from other containers\n channel = connection.channel()\n channel.queue_declare(queue='poll', durable=True)", "def on_delivery_confirmation(self, method_frame):\n confirmation_type = method_frame.method.NAME.split('.')[1].lower()\n\n self.logger.info('received %s for %s', confirmation_type, method_frame.method.delivery_tag)\n if confirmation_type == 'ack':\n self._acked += 1\n elif confirmation_type == 'nack':\n self._nacked += 1\n\n self._deliveries.remove(method_frame.method.delivery_tag)\n self.logger.info('published %i messages, %i yet to confirm, %i acked and %i 
nacked', self._message_number,\n len(self._deliveries), self._acked, self._nacked)\n self.stop()", "def publish_message(self):\n\n message_count = 0\n while message_count < self._messages:\n message_count += 1\n message_body = \"task number %i\" %(message_count)\n self._channel.basic_publish(exchange='',\n routing_key=self._queue_name,\n body=message_body,\n properties=pika.BasicProperties(\n delivery_mode=2 # make message persistant\n ))\n print(\"Published message %i\" %(message_count))\n time.sleep(self._message_interval)", "def on_confirmation(self, ch, method, header, body):\n print \" [x] Received confirmation %r\" % (body,)\n self.now_playing(body)\n ch.basic_ack(delivery_tag=method.delivery_tag)", "def handle_delivery(channel, method, header, body):\n print('Got task, calculating')\n channel_out.basic_publish(\n body=\"PENDING\",\n exchange='',\n routing_key='task_result',\n properties=pika.BasicProperties(headers={'task_id': header.headers.get('task_id')}),\n )\n result = bruteforce(body)\n if result:\n print(result)\n channel_out.queue_bind('tasks_result', 'tasks', 'tasks_result')\n channel_out.basic_publish(\n body=result,\n exchange='',\n routing_key='tasks_result',\n properties=pika.BasicProperties(headers={'task_id': header.headers.get('task_id')}),\n )\n # cr.execute(\"\"\"UPDATE tasks SET status=\"DONE\", result=%s WHERE id=%s\"\"\" % (result, header.headers.get('task_id')))\n # conn.commit()\n # else:\n # cr.execute(\n # \"\"\"UPDATE tasks SET status=\"FAILTURE\", result='undefined' WHERE id=%s\"\"\" % (header.headers.get('task_id'), ))\n # conn.commit()", "def amqp(self, **options):\n pass", "def test_sending_mail(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n reminders.Patient.objects.filter(\n pk__in=[self.test_patient.pk, self.other_patient.pk]\n ).update(next_visit=appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n self.startRouter()\n self.router.logger.setLevel(logging.DEBUG)\n\n # run email job\n from afrims.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertTrue(self.test_contact.email in message.to)\n self.stopRouter()", "def test_send_queued_mail(self):\n # Make sure that send_queued_mail with empty queue does not raise error\n call_command('send_queued_mail', processes=1)\n\n Email.objects.create(from_email='[email protected]',\n to=['[email protected]'], status=STATUS.queued)\n Email.objects.create(from_email='[email protected]',\n to=['[email protected]'], status=STATUS.queued)\n call_command('send_queued_mail', processes=1)\n self.assertEqual(Email.objects.filter(status=STATUS.sent).count(), 2)\n self.assertEqual(Email.objects.filter(status=STATUS.queued).count(), 0)", "def _auto_email_send(self):\n records = self.search([('send_by', '=', 'mail')])\n\n for supplier in records:\n send_at = datetime.combine(fields.Date.today(),\n float_to_time(supplier.automatic_email_time, supplier.moment, supplier.tz)).astimezone(pytz.UTC).replace(tzinfo=None)\n if supplier.available_today and fields.Datetime.now() > send_at:\n lines = self.env['lunch.order'].search([('supplier_id', '=', supplier.id),\n ('state', '=', 'ordered'), ('date', '=', fields.Date.today())])\n\n if lines:\n order = {\n 'company_name': lines[0].company_id.name,\n 'currency_id': lines[0].currency_id.id,\n 'supplier_id': supplier.partner_id.id,\n 'supplier_name': supplier.name,\n 'email_from': 
supplier.responsible_id.email_formatted,\n }\n\n _lines = [{\n 'product': line.product_id.name,\n 'note': line.note,\n 'quantity': line.quantity,\n 'price': line.price,\n 'toppings': line.display_toppings,\n 'username': line.user_id.name,\n } for line in lines]\n\n order['amount_total'] = sum(line.price for line in lines)\n\n self.env.ref('lunch.lunch_order_mail_supplier').with_context(order=order, lines=_lines).send_mail(supplier.id)\n\n lines.action_confirm()", "def do_start(self):\n threading.Thread(group = None, \n target = self._subscribe_message, name = \"RabbitMQSubscribeThread\") .start()\n threading.Thread(group = None, \n target = self._publish_message, name = \"RabbitMQPublishThread\").start()", "def testSendNextMessage(self):\n self.mgr.isGoproBusy = True\n self.mgr.lastRequestSent = monotonic.monotonic()\n self.mgr.queueMsg(3)\n self.mgr.queueMsg(2)\n self.mgr.queueMsg(1)\n self.mgr.processMsgQueue()\n self.v.send_mavlink.assert_called_with(3)\n self.assertEqual( self.mgr.msgQueue.qsize(), 2)", "def test_basic():\n client = CloudAMQPClient(CLOUDAMQP_URL, TEST_QUEUE_NAME)\n send_msg = {'test': 'ans'}\n client.send_message(send_msg)\n received_msg = client.get_message()\n assert send_msg == received_msg\n print 'test_basic() passed'", "def send_rabbit_message (params ):\n print \"sending message to rabbitmq exchange\"\n logging.basicConfig()\n rabbitmq_host = params.get( 'host' )\n rabbitmq_port = params.get( 'port' )\n rabbitmq_username = params.get( 'user-name' )\n rabbitmq_password = params.get( 'password' )\n exchange_name = params.get( 'exchange' )\n routing_key = params.get( 'routing' )\n message = params.get( 'message' )\n \n amqp_url='amqp://'+rabbitmq_username+':'+rabbitmq_password+'@'+rabbitmq_host+':'+rabbitmq_port+'/%2f'\n amqp_url = str(amqp_url)\n parameters = pika.URLParameters(amqp_url)\n connection = pika.BlockingConnection(parameters)\n channel = connection.channel()\n \n channel.basic_publish(exchange=exchange_name,routing_key=routing_key,body=message)\n ## close connection at the end \n connection.close()", "def send_reminder(self):\n pass" ]
[ "0.6132878", "0.6132878", "0.6132878", "0.6066696", "0.6043291", "0.59991413", "0.59169674", "0.58299065", "0.5776203", "0.5776203", "0.5776203", "0.5642102", "0.56042206", "0.55661607", "0.55595404", "0.5532968", "0.5508831", "0.5474107", "0.5426646", "0.54043806", "0.5402466", "0.5399324", "0.5394945", "0.53762454", "0.53657144", "0.53496844", "0.53287965", "0.53283465", "0.53133494", "0.53094745" ]
0.631214
0
r""" Computes the chisquare value of the sample data Notes
def _chisquare_value(self): x2 = np.sum((np.absolute(self.observed - self.expected) - (0.5 * self.continuity_correction)) ** 2 / self.expected) return x2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute(real_data, synthetic_data):\n f_obs, f_exp = get_frequencies(real_data, synthetic_data)\n if len(f_obs) == len(f_exp) == 1:\n pvalue = 1.0\n else:\n _, pvalue = chisquare(f_obs, f_exp)\n\n return pvalue", "def calculate_chi_squared(self):\n chi = 0\n obsVals, expVals = self.calculate_obs_and_exp()\n for i in range(4):\n if expVals[i] != 0:\n chi += (obsVals[i] - expVals[i])**2 / expVals[i]\n return chi", "def chisquare(obs, exp=None):\n obs = N.array(obs)\n\n # get total number of observations\n nobs = N.sum(obs)\n\n # if no expected value are supplied assume equal distribution\n if exp == None:\n exp = N.ones(obs.shape) * nobs / N.prod(obs.shape)\n\n # make sure to have floating point data\n exp = exp.astype(float)\n\n # compute chisquare value\n chisq = N.sum((obs - exp )**2 / exp)\n\n # return chisq and probability (upper tail)\n return chisq, stats.chisqprob(chisq, N.prod(obs.shape) - 1)", "def compare_sums_chi(array1, array2):\n return stats.chisquare(array1, array2)", "def chisq(self, expected=None):\n if expected is None:\n expected = self.indep()\n if self.y == expected.y and self.x == expected.x:\n tot = 0.0\n for y,x in self.coords(False):\n tot += float(self.retrieve(y,x)-expected.retrieve(y,x))**2.0/float(expected.retrieve(y,x))\n return tot\n else:\n raise IndexError(\"Matrix Chi Squared invalid for dimensions \"+str(self.y)+\"x\"+str(self.x)+\" and \"+str(other.y)+\"x\"+str(other.x))", "def chi_sq ( ) :\n \n # get the list of all files\n die_file_paths = _sort_all_apropriate_files(options.input)\n temp_tuples = [ ]\n \n # open the files and arrange the info into tuples\n for die_file_path in die_file_paths :\n \n #print\n #print (\"loading die information from file: \" + die_file_path)\n die_description, die_roll_dict = _read_die_file (die_file_path)\n temp_tuples.append((die_description, die_roll_dict.values()))\n \n # analyze the info from each file with a chi squared test\n chi_sq_results = _calculate_chi_squared(temp_tuples)\n \n # display the results\n print (\"-----\")\n for desc_text in sorted(chi_sq_results.keys()) :\n \n (chi_sq_stat, p_value) = chi_sq_results[desc_text]\n print (\"analysis of die: \" + desc_text.strip())\n print (\"chi squared stat: \" + str(chi_sq_stat))\n print (\"p value: \" + str(p_value))\n print (\"-----\")", "def chi_square(self, collocation):\n terms = collocation.split()\n\n # compute 2 X 2 table here\n array = [[], []]\n array[0] = [float(self.combined_freq[collocation]), float(self.combined_freq[terms[1]] - self.combined_freq[collocation])]\n array[1] = [float(self.combined_freq[terms[0]]) - float(self.combined_freq[collocation]),\n float(len(self.combined_freq) - ((2.0 * float(self.combined_freq[collocation])) +\n float(self.combined_freq[terms[0]]) + float(self.combined_freq[terms[1]])))]\n\n # convert to nd array\n array = np.array(array)\n\n # compute chi square statistic\n chi_square = (float(len(self.combined_freq)) * pow(((array[0, 0] * array[1, 1]) - (array[0, 1] * array[1, 0])), 2)) /\\\n ((array[0, 0] + array[0, 1]) * (array[0, 0] + array[1, 0]) * (array[0, 1] + array[1, 1]) *\n (array[1, 0] + array[1, 1]))\n return chi_square", "def get_chisqrs(prf,diff,nbins): \n off_pulse = np.zeros(39)\n off_pulse[:20] = prf[:20]\n off_pulse[20:] = prf[45:] #Making off pulse region\n # print(\"Off pulse Region \",off_pulse)\n op_rms = np.var(off_pulse) #Rms\n # print(\"Off pulse RMS \",op_rms)\n s = 0\n for d in diff:\n s += d**2/op_rms\n\n s = s/(nbins - 1)\n # print(\"Chisqr value = \",s)\n\n return s", "def 
chisq_test(observed):\n\tn, k = observed.shape\n\trow = observed.sum(axis=0).reshape(1,-1)\n\tcol = observed.sum(axis=1).reshape(-1,1)\n\texpected = np.dot(col, row)/observed.sum()\n\t#chi2, pvalue = scipy.stats.mstats.chisquare(observed.ravel(), expected.ravel(), ddof = n+k-2)\n\tchi2 = (((observed-expected)**2)/expected).sum()\n\tpvalue = 1-scipy.stats.chi2.cdf(chi2, (n-1)*(k-1))\n\tmessage = \"\"\"\n\tPerforming the test of independence in\ta contingency table.\n\ttest statistic: %(chi2)s\n\tdegrees of freedom: %(df)s\n\tp-value: %(pvalue)s\n\t\"\"\" % {'chi2': chi2, 'df': (n-1)*(k-1), 'pvalue': pvalue}\n\tprint(message)\n\twarning = \"\"\"\n\tWarning message:\n\tChi-squared approximation may be incorrect\n\t\"\"\"\n\tif expected.min() < 5:\n\t\tprint(warning)\n\treturn chi2, pvalue", "def afriedmanchisquare(*args):\r\n k = len(args)\r\n if k < 3:\r\n raise ValueError, '\\nLess than 3 levels. Friedman test not appropriate.\\n'\r\n n = len(args[0])\r\n data = apply(pstats.aabut,args)\r\n data = data.astype(N.float_)\r\n for i in range(len(data)):\r\n data[i] = arankdata(data[i])\r\n ssbn = asum(asum(args,1)**2)\r\n chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)\r\n return chisq, achisqprob(chisq,k-1)", "def lchisquare(f_obs,f_exp=None):\r\n k = len(f_obs) # number of groups\r\n if f_exp == None:\r\n f_exp = [sum(f_obs)/float(k)] * len(f_obs) # create k bins with = freq.\r\n chisq = 0\r\n for i in range(len(f_obs)):\r\n chisq = chisq + (f_obs[i]-f_exp[i])**2 / float(f_exp[i])\r\n return chisq, chisqprob(chisq, k-1)", "def pchisq(x, df):\n \n if df % 2 == 0:\n dchi = 0.5 * math.exp(-0.5 * x)\n f = 1.0 - 2.0 * dchi\n for i in range(4, df + 1, 2):\n dchi *= x / (i - 2)\n f -= 2.0 * dchi\n \n else:\n f = 2.0 * pnorm(math.sqrt(x), 0.0, 1.0) - 1.0\n dchi = math.exp(-0.5 * x) / math.sqrt(2.0 * math.pi * x)\n for i in range(3, df + 1, 2):\n dchi *= x / (i - 2)\n f -= 2.0 * dchi\n \n return f", "def my_chisq(ydata,ymod,deg=2,sd=None): \n # Chi-square statistic \n if sd==None:\n chisq=np.sum((ydata-ymod)**2) \n else:\n chisq=np.sum( ((ydata-ymod)/sd)**2 ) \n\n # Number of degrees of freedom assuming 2 free parameters \n nu=ydata.size-1-deg \n return chisq/nu", "def chi_square_analysis(obs_list):\r\n obs = np.array(obs_list)\r\n chi2, p, dof, expected = chi2_contingency(obs)\r\n return chi2, p, dof, expected", "def coi(self, s):\n return 2 ** 0.5 * s", "def schaffern3fcn(x: np.ndarray) -> np.ndarray:\n n = x.shape[1]\n assert n == 2, \"Schaffer function N. 
3 is only defined on a 2D space.\"\n\n X = x[:, 0]\n Y = x[:, 1]\n\n numeratorcomp = (np.sin(np.cos(np.abs(X**2 - Y**2))) ** 2) - 0.5\n denominatorcomp = (1 + 0.001 * (X**2 + Y**2)) ** 2\n scores = 0.5 + numeratorcomp / denominatorcomp\n\n return scores", "def test_chi2(y0, y1, level):\n if len(y0) == 0 or len(y1) == 0:\n return True\n l0 = np.argmax(y0, axis=1)\n l1 = np.argmax(y1, axis=1)\n v, c = np.unique(np.append(l0,l1), return_counts=True)\n v0, c0 = np.unique(l0, return_counts=True)\n v1, c1 = np.unique(l1, return_counts=True)\n p = np.zeros(len(y0[0]))\n p0 = p.copy()\n p1 = p.copy()\n p[v] = c / np.sum(c)\n p0[v0] = c0 / np.sum(c0)\n p1[v1] = c1 / np.sum(c1)\n p0[p0==0] = 0.05\n p1[p1 == 0] = 0.05\n p[p==0] = 0.05\n _, p0_value = stat.chisquare(p0, p)\n _, p1_value = stat.chisquare(p1, p)\n if 1-p0_value > level or 1-p1_value > level:\n return False\n else:\n return True", "def task_6_2_1():\n # TODO Task 6.2.1: Your code goes here\n x = []\n for i in range(100):\n xi = 0\n xi = numpy.random.normal(0, 1)\n x.append(xi)\n\n bins = 15\n emp_n, emp_x = numpy.histogram(x, bins=bins)\n Ch_2 = ChiSquare(emp_x, emp_n)\n Ch_2.test_distribution(0.05, 0, 1)\n # pass", "def lfriedmanchisquare(*args):\r\n k = len(args)\r\n if k < 3:\r\n raise ValueError, 'Less than 3 levels. Friedman test not appropriate.'\r\n n = len(args[0])\r\n data = apply(pstats.abut,tuple(args))\r\n for i in range(len(data)):\r\n data[i] = rankdata(data[i])\r\n ssbn = 0\r\n for i in range(k):\r\n ssbn = ssbn + sum(args[i])**2\r\n chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)\r\n return chisq, chisqprob(chisq,k-1)", "def euc_dist(self, squared=True):", "def chi(Mu, Y):\n return Y*(1-hg2f3(Mu,Y))", "def chi2s(h1s, h2s):\n return np.sum((h1s-h2s)**2/(h1s+h2s+1e-10), axis=1)", "def calc_chisq(func, xdata, ydata, yerrdata, *args):\n xdata = np.array(xdata)\n ydata = np.array(ydata)\n yerrdata = np.array(yerrdata)\n return np.sum(((ydata - func(xdata, *args)) / yerrdata) ** 2)", "def chi_squared(markers):\n new_markers = {}\n for marker in markers:\n line = markers[marker][0]\n a = line.count(\"a\")\n b = line.count(\"b\")\n length = a + b\n expect_a = length / 2\n expect_b = length / 2\n chisq = pow((a - expect_a), 2) / expect_a + pow((b - expect_b),\n 2) / expect_b\n if chisq <= 3.84:\n new_markers[marker] = markers[marker]\n new_markers[marker].append(chisq)\n else:\n print(f\"Marker discarded:\\t{marker}\\t{chisq}\")\n print(f\"Amount of markers:\\t{len(new_markers)}\")\n return new_markers", "def calculate_chi_square_p_value(A):\n nonzero_columns = np.where(A.any(axis=0))[0]\n A_nonzero_columns = A[:, nonzero_columns]\n _, p_value, _, _ = scipy.stats.chi2_contingency(A_nonzero_columns)\n return p_value", "def chi_c_real(params):\n Qi = Q_i(params)\n Qc = params['Q_e_real'].value\n return ((4 * Qc * Qi) /\n (Qc + Qi) ** 2)", "def calculate_chi2(target_data, source_data, q_min, q_max):\n\n matched_source_I = match_scatter_curves(target_data, source_data)\n\n # Get average I for experimental and calculated values over matched q\n # range\n matched_no = len(matched_source_I)\n expt_avg = np.mean(target_data[0:matched_no, 1])\n calc_avg = np.mean(matched_source_I)\n\n # Initial guess of the concentration:\n # ratio of experimental and calculated average intensities\n con = expt_avg / calc_avg\n\n if np.count_nonzero(target_data[:, 1]) == 0:\n print(\"Chi^2 calculations cannot proceed without target data\")\n sys.exit()\n else:\n \n if (target_data.shape[1] > 2) and (\n np.count_nonzero(target_data[:, 2]) != 0):\n \n # Call 
fortran code to calculate the reduced Chi2\n chi2 = sjp_util.calc_chi2(\n target_data[\n :, 0], target_data[\n :, 1], target_data[\n :, 2], matched_source_I, matched_no, q_min, q_max, con, False)\n \n else:\n #print \"For Chi^2 calculations an error column must be present\"\n #sys.exit()\n # Call fortran code to calculate the Pearson Chi2\n chi2 = sjp_util.calc_pearson(target_data[:,0],\n target_data[:,1],\n matched_source_I,\n matched_no,\n q_min,\n q_max,\n con,\n False)\n \n # 1/con is the scaling factor needed to multiply experimental I values\n # to compare with calculated data\n return chi2, 1.0 / con", "def _calculate_chi_squared (desc_rolls_tuples) :\n \n results = { }\n \n for desc_str, obs_roll_array in desc_rolls_tuples :\n \n # we aren't providing expected values because we expect the sides\n # of the die to be rolled with equal frequency,\n # and that's the default for the scipy version of chi squared;\n # scipy's chi squared method also calculates the degrees of\n # freedom based on the length of our array, which works in this case\n chi_sq_stat, p_value = chisquare(numpy.array(obs_roll_array))\n results[desc_str] = (chi_sq_stat, p_value)\n \n return results", "def test_coherency_regularized():\r\n\r\n for method in methods:\r\n f, c = tsa.coherency_regularized(tseries, 0.05, 1000,\r\n csd_method=method)\r\n npt.assert_array_almost_equal(c[0, 1], c[1, 0].conjugate())", "def schaffer(self, x):\r\n N = len(x);\r\n s = x[0:N-1]**2 + x[1:N]**2;\r\n return sum(s**0.25 * (np.sin(50*s**0.1)**2 + 1))" ]
[ "0.6880414", "0.6825153", "0.65589476", "0.63346523", "0.61934537", "0.6188989", "0.61722326", "0.61393964", "0.6095299", "0.6072217", "0.6061504", "0.60480016", "0.6028417", "0.600268", "0.59922135", "0.5937898", "0.59304774", "0.59027916", "0.588849", "0.5888303", "0.5886654", "0.58676213", "0.5854519", "0.5852623", "0.58337915", "0.582021", "0.5811886", "0.5791329", "0.578404", "0.57756805" ]
0.760204
0
r""" Finds the pvalue of the chisquare statistic. Notes
def _p_value(self): pval = chi2.sf(self.chi_square, self.degrees_of_freedom) return pval
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _chisquare_value(self):\n x2 = np.sum((np.absolute(self.observed - self.expected) - (0.5 * self.continuity_correction)) ** 2 /\n self.expected)\n\n return x2", "def _p_value(self):\n p_value = chi2.sf(self.test_statistic, 2)\n\n return p_value", "def compute(real_data, synthetic_data):\n f_obs, f_exp = get_frequencies(real_data, synthetic_data)\n if len(f_obs) == len(f_exp) == 1:\n pvalue = 1.0\n else:\n _, pvalue = chisquare(f_obs, f_exp)\n\n return pvalue", "def calculate_chi_square_p_value(A):\n nonzero_columns = np.where(A.any(axis=0))[0]\n A_nonzero_columns = A[:, nonzero_columns]\n _, p_value, _, _ = scipy.stats.chi2_contingency(A_nonzero_columns)\n return p_value", "def _calc_pval(self):\n t = self.beta / self.stderr_beta\n return (2. * (1. - stats.t.cdf(np.abs(t), self.n - 2)))[0]", "def calculate_chi_squared(self):\n chi = 0\n obsVals, expVals = self.calculate_obs_and_exp()\n for i in range(4):\n if expVals[i] != 0:\n chi += (obsVals[i] - expVals[i])**2 / expVals[i]\n return chi", "def evaluate(self, collocation, p_value_thresh=.1):\n cs = self.chi_square(collocation)\n p_value = self.cum_dist_func(cs)\n\n if p_value <= p_value_thresh:\n return str(cs), str(p_value), str(self.combined_freq[collocation])", "def cum_dist_func(self, chi_square_stat):\n # use 1 degree of freedom given df = (R-1) * (C-1); df == (2-1) * (2-1) == 1\n p_value = 1.0 - float(stats.chi2.cdf(chi_square_stat, 1))\n return p_value", "def get_pvalue_thd(self):\n terminals_values = []\n for terminal in self.feature_tree.get_terminals():\n temp = self.get_mannwitneyu_pvalue(terminal)\n terminals_values.append(temp)\n if temp == 1:\n print('non siginificant')\n while 0 in terminals_values:\n terminals_values.remove(0)\n self.pvalue_thd = min(self.pvalue_thd,np.mean(terminals_values))\n #print('pvalue_thd',self.pvalue_thd)", "def chisq_test(observed):\n\tn, k = observed.shape\n\trow = observed.sum(axis=0).reshape(1,-1)\n\tcol = observed.sum(axis=1).reshape(-1,1)\n\texpected = np.dot(col, row)/observed.sum()\n\t#chi2, pvalue = scipy.stats.mstats.chisquare(observed.ravel(), expected.ravel(), ddof = n+k-2)\n\tchi2 = (((observed-expected)**2)/expected).sum()\n\tpvalue = 1-scipy.stats.chi2.cdf(chi2, (n-1)*(k-1))\n\tmessage = \"\"\"\n\tPerforming the test of independence in\ta contingency table.\n\ttest statistic: %(chi2)s\n\tdegrees of freedom: %(df)s\n\tp-value: %(pvalue)s\n\t\"\"\" % {'chi2': chi2, 'df': (n-1)*(k-1), 'pvalue': pvalue}\n\tprint(message)\n\twarning = \"\"\"\n\tWarning message:\n\tChi-squared approximation may be incorrect\n\t\"\"\"\n\tif expected.min() < 5:\n\t\tprint(warning)\n\treturn chi2, pvalue", "def chi2_p_value(contingency_table: np.ndarray) -> List[float]:\n try:\n _, chi2_p_value, _, _ = stats.chi2_contingency(\n contingency_table, correction=False\n )\n except ValueError:\n chi2_p_value = np.nan\n return [chi2_p_value]", "def value(self):\n black, white = 0, 0\n for sq in Othello.squares():\n piece = self.__board[sq]\n if piece == BLACK: black += 1\n elif piece == WHITE: white += 1\n if black == white:\n return 0.5\n elif black > white:\n return 1\n else:\n return 0", "def pchisq(x, df):\n \n if df % 2 == 0:\n dchi = 0.5 * math.exp(-0.5 * x)\n f = 1.0 - 2.0 * dchi\n for i in range(4, df + 1, 2):\n dchi *= x / (i - 2)\n f -= 2.0 * dchi\n \n else:\n f = 2.0 * pnorm(math.sqrt(x), 0.0, 1.0) - 1.0\n dchi = math.exp(-0.5 * x) / math.sqrt(2.0 * math.pi * x)\n for i in range(3, df + 1, 2):\n dchi *= x / (i - 2)\n f -= 2.0 * dchi\n \n return f", "def chisq(self, expected=None):\n if expected is 
None:\n expected = self.indep()\n if self.y == expected.y and self.x == expected.x:\n tot = 0.0\n for y,x in self.coords(False):\n tot += float(self.retrieve(y,x)-expected.retrieve(y,x))**2.0/float(expected.retrieve(y,x))\n return tot\n else:\n raise IndexError(\"Matrix Chi Squared invalid for dimensions \"+str(self.y)+\"x\"+str(self.x)+\" and \"+str(other.y)+\"x\"+str(other.x))", "def chi(self):\n return self._chi", "def p_value(beta_hat_j, sigma_hat_j):\n if beta_hat_j > 0:\n return 2 - (1 * norm.cdf(beta_hat_j / sigma_hat_j))\n else:\n return 2 * norm.cdf(beta_hat_j / sigma_hat_j)", "def chi2(data, fdata, err):\n return sum(((data-fdata)/err)**2)", "def chi2P(chi, df):\n assert df & 1 == 0\n # If chi is very large, exp(-m) will underflow to 0.\n m = chi / 2.0\n sum = term = exp(-m)\n for i in range(1, df//2):\n term *= m / i\n sum += term\n # With small chi and large df, accumulated\n # roundoff error, plus error in\n # the platform exp(), can cause this to spill\n # a few ULP above 1.0. For\n # example, chi2P(100, 300) on my box\n # has sum == 1.0 + 2.0**-52 at this\n # point. Returning a value even a teensy\n # bit over 1.0 is no good.\n return min(sum, 1.0)", "def chi_sq ( ) :\n \n # get the list of all files\n die_file_paths = _sort_all_apropriate_files(options.input)\n temp_tuples = [ ]\n \n # open the files and arrange the info into tuples\n for die_file_path in die_file_paths :\n \n #print\n #print (\"loading die information from file: \" + die_file_path)\n die_description, die_roll_dict = _read_die_file (die_file_path)\n temp_tuples.append((die_description, die_roll_dict.values()))\n \n # analyze the info from each file with a chi squared test\n chi_sq_results = _calculate_chi_squared(temp_tuples)\n \n # display the results\n print (\"-----\")\n for desc_text in sorted(chi_sq_results.keys()) :\n \n (chi_sq_stat, p_value) = chi_sq_results[desc_text]\n print (\"analysis of die: \" + desc_text.strip())\n print (\"chi squared stat: \" + str(chi_sq_stat))\n print (\"p value: \" + str(p_value))\n print (\"-----\")", "def chauvenet_criterion(npoints, p=0.5):\n \n \n return np.abs(stats.norm.ppf(p/(2.*npoints), loc=0., scale=1.))", "def chisquare(obs, exp=None):\n obs = N.array(obs)\n\n # get total number of observations\n nobs = N.sum(obs)\n\n # if no expected value are supplied assume equal distribution\n if exp == None:\n exp = N.ones(obs.shape) * nobs / N.prod(obs.shape)\n\n # make sure to have floating point data\n exp = exp.astype(float)\n\n # compute chisquare value\n chisq = N.sum((obs - exp )**2 / exp)\n\n # return chisq and probability (upper tail)\n return chisq, stats.chisqprob(chisq, N.prod(obs.shape) - 1)", "def lambda_test(p_values, df=1):\n from scipy.stats import chi2\n assert np.max(p_values) <= 1 and np.min(p_values) >= 0, 'These do not appear to be p-values'\n\n chi_sq_scores = chi2.ppf(1 - p_values, df)\n return np.median(chi_sq_scores) / chi2.ppf(0.5, df)", "def _two_sided_p_value(t, df):\n return 2 * scipy.stats.t.cdf(-np.abs(t), df=df)", "def compute_pvalue(self):\n # Run permutation test\n self.PermutationTest()\n # TS obtained from the original B,T samples\n self.compute_obs_TS()\n \n # Mean and std of the TS distribution\n self.mu = np.mean(self.TS_tilde)\n self.sigma = np.std(self.TS_tilde)\n \n # Standardized test statistic (zero mean, unit variance)\n self.TS_prime = (self.TS_tilde - self.mu)/self.sigma\n self.TS_prime_obs = (self.TS_obs - self.mu)/self.sigma\n \n # Two-sided p-value from TS' distribution\n self.p_value = 2*(1 - 0.01 * 
stats.percentileofscore(self.TS_prime,\n abs(self.TS_prime_obs)) )\n \n # if 0, compute it from standard normal\n if self.p_value == 0.0:\n self.p_value = self.pvalue_gaussian()\n \n print(\"\")\n print(\"p-value = {:e}\".format(self.p_value))", "def test_chi2(y0, y1, level):\n if len(y0) == 0 or len(y1) == 0:\n return True\n l0 = np.argmax(y0, axis=1)\n l1 = np.argmax(y1, axis=1)\n v, c = np.unique(np.append(l0,l1), return_counts=True)\n v0, c0 = np.unique(l0, return_counts=True)\n v1, c1 = np.unique(l1, return_counts=True)\n p = np.zeros(len(y0[0]))\n p0 = p.copy()\n p1 = p.copy()\n p[v] = c / np.sum(c)\n p0[v0] = c0 / np.sum(c0)\n p1[v1] = c1 / np.sum(c1)\n p0[p0==0] = 0.05\n p1[p1 == 0] = 0.05\n p[p==0] = 0.05\n _, p0_value = stat.chisquare(p0, p)\n _, p1_value = stat.chisquare(p1, p)\n if 1-p0_value > level or 1-p1_value > level:\n return False\n else:\n return True", "def compare_sums_chi(array1, array2):\n return stats.chisquare(array1, array2)", "def chi2distance (obs_pval_hist,null_pval_hist):\n chi2 = (obs_pval_hist-null_pval_hist)**2/(obs_pval_hist+null_pval_hist) * 1/2\n\tchisum = np.sum(chi2)\n return chisum", "def ppf(self,x):\n ppfValue = self._distribution.inverseCdf(x,random())\n return ppfValue", "def ppf(self,x):\n ppfValue = self._distribution.inverseCdf(x,random())\n return ppfValue", "def _pvalues_all(self):\n return 2.0 * (1.0 - scs.t.cdf(np.abs(self._tstat_all), self._df_err))" ]
[ "0.7833191", "0.7402887", "0.71125257", "0.6845936", "0.6587544", "0.64726514", "0.64485335", "0.6297365", "0.6246761", "0.61838526", "0.61333144", "0.60871804", "0.60848254", "0.60680485", "0.6063416", "0.60562605", "0.59842485", "0.59655535", "0.59315586", "0.59314376", "0.59155834", "0.588386", "0.58626366", "0.584026", "0.5840012", "0.5822722", "0.5821515", "0.58090556", "0.58090556", "0.57959664" ]
0.752004
1
BibTeX comment explaining error
def bibtex(self): return "@comment{%(id)s: %(message)s}" % \ {'id': self.id, 'message': self.message}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def comment():", "def test_doc_with_comments():\n doc = CoNLL.conll2doc(input_str=RUSSIAN_SAMPLE)\n check_russian_doc(doc)", "def comment(self, content):\n pass", "def should_add_pr_comment(self):\n pass", "def test_issue_edit_comment_deprecated(self):\n pass", "def docstring_hack():\n pass", "def test_issue_get_comment(self):\n pass", "def test_issue_edit_comment(self):\n pass", "def test_double_comment(self):\n self.compare_tokens(\n \"## papān libbi[belly] (already in gloss, same spelling)\\n\",\n ['COMMENT', 'ID', 'NEWLINE']\n )", "def comment_for_run (ins, exp, runnum) :\n return dict_of_recs_for_run(ins, exp, runnum)['comment']", "def rtdComment(commentString, subarray=DEFAULT) :\n if commentString == None: return\n multiSubarray('comment', subarray, commentString)\n return", "def print_comment_v(text):\n print_comment(text, True)", "def cli(ctx, comment, metadata=\"\"):\n return ctx.gi.cannedcomments.add_comment(comment, metadata=metadata)", "def sys_comment(comment, is_error=False):\n COMMENT_STR = \"[*]\"\n if is_error:\n COMMENT_STR = \"[X]\"\n\n print_centered(\"{} {} {}\".format(COMMENT_STR, comment, COMMENT_STR), use_logo=True)\n\n return None", "def comment(self, comment):\r\n\r\n core.FW_conf['connection'].comment(comment)", "def test_like_a_comment(self):\n self.base_test()", "def test_dislike_a_comment(self):\n self.base_test()", "def comment(comment):\n return Effect(Comment(comment=comment))", "def supports_comment_book(self):\n return False", "def test_issue_create_comment(self):\n pass", "def triple_quote_docs():\n return", "def _generate_pr_comment_markdown(self, data):\n pass", "def comment(self, comment) :\n\t\ttry :\n\t\t\tself._comment = comment\n\t\texcept Exception as e:\n\t\t\traise e", "def testComment(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"comment\")\n\n self.util.stringPropertyTest(self, dis_meta, \"comment\")", "def test_issue_delete_comment_deprecated(self):\n pass", "def getPreComment(self, address: ghidra.program.model.address.Address) -> unicode:\n ...", "def supports_comment_lookup(self):\n return False", "def DocString():\n return", "def lab9_q3():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def set_lic_comment(self, doc, comment):\n if self.has_extr_lic(doc):\n if not self.extr_lic_comment_set:\n self.extr_lic_comment_set = True\n if validations.validate_is_free_form_text(comment):\n self.extr_lic(doc).comment = str_from_text(comment)\n return True\n else:\n raise SPDXValueError('ExtractedLicense::comment')\n else:\n raise CardinalityError('ExtractedLicense::comment')\n else:\n raise OrderError('ExtractedLicense::comment')" ]
[ "0.74034363", "0.6712097", "0.66532767", "0.63367194", "0.619787", "0.61909366", "0.60946673", "0.60149664", "0.59592676", "0.58991164", "0.5883395", "0.58689094", "0.5825342", "0.58130735", "0.5804278", "0.57896906", "0.57857484", "0.5733003", "0.57277036", "0.57164663", "0.57002527", "0.56791717", "0.5660783", "0.56421816", "0.56295174", "0.560997", "0.5603455", "0.55897707", "0.5572693", "0.55618346" ]
0.6803446
1
Corrects the BibTeX key because the MR API cannot get its act together
def correct_key(goodkey,code): db = pybtex.database.parse_string(code,"bibtex") keys = [key for key in db.entries.keys()] badkey = keys[0] return code.replace(badkey,goodkey)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mr_request(key):\n\n # reconstructing the BibTeX code block\n inCodeBlock = False\n code = \"\"\n\n # make the request\n payload = {\"fn\": 130, \"fmt\": \"bibtex\", \"pg1\": \"MR\", \"s1\": key}\n r = requests.get(path, params=payload)\n\n # 401 means not authenticated\n if r.status_code == 401:\n raise AuthenticationException()\n\n # anything but 200 means something else went wrong\n if not r.status_code == 200:\n raise Exception(\"Received HTTP status code \" + str(r.status_code))\n\n for line in r.text.split(\"\\n\"):\n if \"No publications results for\" in line:\n raise NotFoundError(\"No such publication\", key)\n\n if line.strip() == \"</pre>\": inCodeBlock = False\n\n if inCodeBlock:\n code = code + \"\\n\" + line\n\n if line.strip() == \"<pre>\": inCodeBlock = True\n\n return correct_key(key,code)", "def _validKey(entry):\n # be forward compatible to zope3 contained objects\n raw_id = getattr(entry, '__name__', '')\n if not raw_id:\n raw_id = entry.getId()\n\n # This substitution is based on the description of cite key restrictions at\n # http://bibdesk.sourceforge.net/manual/BibDesk%20Help_2.html\n return VALIDIDPAT.sub('', raw_id)", "def _get_raw_key(self, key_id):", "def safe_key(cls, db_key, transform_fn):\n cls._split_key(db_key.name())\n name = db_key.name().strip('()')\n unsafe_submission_key_name, unsafe_reviewer_id_or_name = name.split(\n ':', 1)[1].rsplit(':', 1)\n unsafe_reviewer_key = db.Key.from_path(\n models.Student.kind(), unsafe_reviewer_id_or_name)\n safe_reviewer_key = models.Student.safe_key(\n unsafe_reviewer_key, transform_fn)\n\n # Treating as module-protected. pylint: disable-msg=protected-access\n _, unit_id, unsafe_reviewee_key_name = (\n student_work.Submission._split_key(unsafe_submission_key_name))\n unsafe_reviewee_key = db.Key.from_path(\n models.Student.kind(), unsafe_reviewee_key_name)\n unsafe_submission_key = student_work.Submission.get_key(\n unit_id, unsafe_reviewee_key)\n safe_submission_key = student_work.Submission.safe_key(\n unsafe_submission_key, transform_fn)\n\n return db.Key.from_path(\n cls.kind(), cls.key_name(safe_submission_key, safe_reviewer_key))", "def make_citation_key(res):\n year = str(make_year(res))\n\n try:\n last_names = [x['family'] for x in res['author']]\n except KeyError as e:\n last_names = [\"Unknown\"]\n if len(last_names) >= 3:\n key = last_names[0].capitalize() + \"ETAL\" + year\n else:\n key = \"\".join(last_names) + year\n\n return clean_txt(key.replace(\" \", \"\"))", "def fix_document(key, value, _format, _meta):\n if key == \"Link\":\n url = value[2][0]\n if url.startswith(\"user-manual\") or url.startswith(\"developers-guide\"):\n # Return the link text\n return value[1]\n # Reformat the text inside block quotes\n elif key == \"BlockQuote\":\n try:\n first_string = value[0][\"c\"][0][\"c\"]\n if first_string == \"[!NOTE]\":\n value[0][\"c\"][0] = Strong([Str(\"Note:\")])\n return BlockQuote(value)\n elif first_string == \"[!INFO]\":\n value[0][\"c\"][0] = Strong([Str(\"Info:\")])\n return BlockQuote(value)\n elif first_string == \"[!TIP]\":\n value[0][\"c\"][0] = Strong([Str(\"Tip:\")])\n return BlockQuote(value)\n elif first_string == \"[!WARNING]\":\n value[0][\"c\"][0] = Strong([Str(\"Warning:\")])\n return BlockQuote(value)\n elif first_string == \"[!ATTENTION]\":\n value[0][\"c\"][0] = Strong([Str(\"Attention:\")])\n return BlockQuote(value)\n except Exception:\n return\n return", "def _add_key(self, key):\n key = key.strip().strip('@').lower()\n if self.homogenise_fields:\n if key in 
list(self.alt_dict.keys()):\n key = self.alt_dict[key]\n if not isinstance(key, ustr):\n return ustr(key, 'utf-8')\n else:\n return key", "def normalise_key(self, key):\n key = key.replace('-', '_')\n if key.startswith(\"noy_\"):\n key = key[4:]\n return key", "def _safe_key(self, key):\n if isinstance(key, str):\n key = key.encode('UTF-8')\n return key", "def prepare_key(self, key):\n return smart_str(key)", "def create_rel_doctitle_dict():\n claim_rel_docno_dict = {} #key is claim text, value is a set of doc_title that are relevant\n clm_sen_doc_title_dict = read_pickle(\"sen_doc_title_dict\")\n claim_sen_true_relevance_dict = read_pickle(\"claim_sen_relevance_dict_\"+curr_source)\n exclude = set(string.punctuation)\n docID_title_mapping_wiki_pickle = read_pickle(\"dicID_title_mapping_wiki_pickle\")\n \n title_docID_mapping_wiki_pickle = {}\n for (docID,doc_title) in docID_title_mapping_wiki_pickle.iteritems():\n non_asci_char = [c for c in doc_title if not 0 < ord(c) < 127]\n new_doc_title = doc_title\n for c in non_asci_char:\n new_doc_title = new_doc_title.replace(c,\"\")\n doc_title_no_punc = ''.join(ch for ch in new_doc_title if ch not in exclude)\n doc_title_no_space = doc_title_no_punc.replace(\" \",\"\")\n title_docID_mapping_wiki_pickle[doc_title_no_space] = docID\n# title_docID_mapping_wiki_pickle = dict((y,x) for x,y in docID_title_mapping_wiki_pickle.iteritems()) \n for (clm) in claim_sen_true_relevance_dict.keys():\n rel_docno_set = set()\n for (sen,rel_score) in claim_sen_true_relevance_dict[clm]:\n try: \n if rel_score == 1:\n sen_no_punc = ''.join(ch for ch in sen if ch not in exclude)\n sen_no_space = sen_no_punc.replace(\" \",\"\")\n curr_rel_doc_title = clm_sen_doc_title_dict[sen_no_space]\n non_asci_char = [c for c in curr_rel_doc_title if not 0 < ord(c) < 127]\n new_curr_doc_title = curr_rel_doc_title\n for c in non_asci_char:\n new_curr_doc_title = new_curr_doc_title.replace(c,\"\")\n curr_doc_title_no_punc = ''.join(ch for ch in new_curr_doc_title if ch not in exclude)\n curr_doc_title_no_space = curr_doc_title_no_punc.replace(\" \",\"\")\n rel_docno_set.add((title_docID_mapping_wiki_pickle[curr_doc_title_no_space],1))\n \n except Exception as err: \n sys.stderr.write('problem in sen:'+sen) \n print err.args\n \n rel_docno_list = [(docid,rel_score) for (docid,rel_score) in rel_docno_set]\n claim_rel_docno_dict[clm] = rel_docno_list\n save_pickle(\"claim_rel_docno_dict\", claim_rel_docno_dict)", "def make_bibtex(self):\n\n\t\t# bib = requests.request('GET', 'http://dx.doi.org/' + self.doi, ", "def mr2bib_dict(key_list):\n keys = []\n d = {}\n\n # validate keys\n for key in key_list:\n if is_valid(key):\n keys.append(key)\n else:\n d[key] = ReferenceErrorInfo(\"Invalid Mathematical Reviews identifier\", key)\n\n if len(keys) == 0:\n return d\n\n # make the api call\n entries = {}\n for key in keys:\n try:\n entry = mr_request(key)\n d[key] = Reference(entry)\n except NotFoundError as error:\n message, id = error.args\n d[key] = ReferenceErrorInfo(message, id)\n\n return d", "def key(key):\n return key", "def main(bib_fpath=None):\n\n if bib_fpath is None:\n bib_fpath = 'My Library.bib'\n\n # DEBUG = ub.argflag('--debug')\n # Read in text and ensure ascii format\n dirty_text = ut.readfrom(bib_fpath)\n\n from fixtex.fix_tex import find_used_citations, testdata_fpaths\n\n if exists('custom_extra.bib'):\n extra_parser = bparser.BibTexParser(ignore_nonstandard_types=False)\n parser = bparser.BibTexParser()\n ut.delete_keys(parser.alt_dict, ['url', 'urls'])\n 
print('Parsing extra bibtex file')\n extra_text = ut.readfrom('custom_extra.bib')\n extra_database = extra_parser.parse(extra_text, partial=False)\n print('Finished parsing extra')\n extra_dict = extra_database.get_entry_dict()\n else:\n extra_dict = None\n\n #udata = dirty_text.decode(\"utf-8\")\n #dirty_text = udata.encode(\"ascii\", \"ignore\")\n #dirty_text = udata\n\n # parser = bparser.BibTexParser()\n # bib_database = parser.parse(dirty_text)\n # d = bib_database.get_entry_dict()\n\n print('BIBTEXPARSER LOAD')\n parser = bparser.BibTexParser(ignore_nonstandard_types=False,\n common_strings=True)\n ut.delete_keys(parser.alt_dict, ['url', 'urls'])\n print('Parsing bibtex file')\n bib_database = parser.parse(dirty_text, partial=False)\n print('Finished parsing')\n\n bibtex_dict = bib_database.get_entry_dict()\n old_keys = list(bibtex_dict.keys())\n new_keys = []\n for key in ub.ProgIter(old_keys, label='fixing keys'):\n new_key = key\n new_key = new_key.replace(':', '')\n new_key = new_key.replace('-', '_')\n new_key = re.sub('__*', '_', new_key)\n new_keys.append(new_key)\n\n # assert len(ut.find_duplicate_items(new_keys)) == 0, 'new keys created conflict'\n assert len(ub.find_duplicates(new_keys)) == 0, 'new keys created conflict'\n\n for key, new_key in zip(old_keys, new_keys):\n if key != new_key:\n entry = bibtex_dict[key]\n entry['ID'] = new_key\n bibtex_dict[new_key] = entry\n del bibtex_dict[key]\n\n # The bibtext is now clean. Print it to stdout\n #print(clean_text)\n verbose = None\n if verbose is None:\n verbose = 1\n\n # Find citations from the tex documents\n key_list = None\n if key_list is None:\n cacher = ub.Cacher('texcite1', enabled=0)\n data = cacher.tryload()\n if data is None:\n fpaths = testdata_fpaths()\n key_list, inverse = find_used_citations(fpaths, return_inverse=True)\n # ignore = ['JP', '?', 'hendrick']\n # for item in ignore:\n # try:\n # key_list.remove(item)\n # except ValueError:\n # pass\n if verbose:\n print('Found %d citations used in the document' % (len(key_list),))\n data = key_list, inverse\n cacher.save(data)\n key_list, inverse = data\n\n # else:\n # key_list = None\n\n unknown_pubkeys = []\n debug_author = ub.argval('--debug-author', default=None)\n # ./fix_bib.py --debug_author=Kappes\n\n if verbose:\n print('Fixing %d/%d bibtex entries' % (len(key_list), len(bibtex_dict)))\n\n # debug = True\n debug = False\n if debug_author is not None:\n debug = False\n\n known_keys = list(bibtex_dict.keys())\n missing_keys = set(key_list) - set(known_keys)\n if extra_dict is not None:\n missing_keys.difference_update(set(extra_dict.keys()))\n\n if missing_keys:\n print('The library is missing keys found in tex files %s' % (\n ub.repr2(missing_keys),))\n\n # Search for possible typos:\n candidate_typos = {}\n sedlines = []\n for key in missing_keys:\n candidates = ut.closet_words(key, known_keys, num=3, subset=True)\n if len(candidates) > 1:\n top = candidates[0]\n if ut.edit_distance(key, top) == 1:\n # \"sed -i -e 's/{}/{}/g' *.tex\".format(key, top)\n import os\n replpaths = ' '.join([relpath(p, os.getcwd()) for p in inverse[key]])\n sedlines.append(\"sed -i -e 's/{}/{}/g' {}\".format(key, top, replpaths))\n candidate_typos[key] = candidates\n print('Cannot find key = %r' % (key,))\n print('Did you mean? 
%r' % (candidates,))\n\n print('Quick fixes')\n print('\\n'.join(sedlines))\n\n # group by file\n just = max([0] + list(map(len, missing_keys)))\n missing_fpaths = [inverse[key] for key in missing_keys]\n for fpath in sorted(set(ub.flatten(missing_fpaths))):\n # ut.fix_embed_globals()\n subkeys = [k for k in missing_keys if fpath in inverse[k]]\n print('')\n ut.cprint('--- Missing Keys ---', 'blue')\n ut.cprint('fpath = %r' % (fpath,), 'blue')\n ut.cprint('{} | {}'.format('Missing'.ljust(just), 'Did you mean?'), 'blue')\n for key in subkeys:\n print('{} | {}'.format(\n ut.highlight_text(key.ljust(just), 'red'),\n ' '.join(candidate_typos[key]))\n )\n\n # for key in list(bibtex_dict.keys()):\n\n if extra_dict is not None:\n # Extra database takes precidence over regular\n key_list = list(ut.unique(key_list + list(extra_dict.keys())))\n for k, v in extra_dict.items():\n bibtex_dict[k] = v\n\n full = ub.argflag('--full')\n\n for key in key_list:\n try:\n entry = bibtex_dict[key]\n except KeyError:\n continue\n self = BibTexCleaner(key, entry, full=full)\n\n if debug_author is not None:\n debug = debug_author in entry.get('author', '')\n\n if debug:\n ut.cprint(' --- ENTRY ---', 'yellow')\n print(ub.repr2(entry, nl=1))\n\n entry = self.fix()\n # self.clip_abstract()\n # self.shorten_keys()\n # self.fix_authors()\n # self.fix_year()\n # old_pubval = self.fix_pubkey()\n # if old_pubval:\n # unknown_pubkeys.append(old_pubval)\n # self.fix_arxiv()\n # self.fix_general()\n # self.fix_paper_types()\n\n if debug:\n print(ub.repr2(entry, nl=1))\n ut.cprint(' --- END ENTRY ---', 'yellow')\n bibtex_dict[key] = entry\n\n unwanted_keys = set(bibtex_dict.keys()) - set(key_list)\n if verbose:\n print('Removing unwanted %d entries' % (len(unwanted_keys)))\n ut.delete_dict_keys(bibtex_dict, unwanted_keys)\n\n if 0:\n d1 = bibtex_dict.copy()\n full = True\n for key, entry in d1.items():\n self = BibTexCleaner(key, entry, full=full)\n pub = self.publication()\n if pub is None:\n print(self.entry['ENTRYTYPE'])\n\n old = self.fix_pubkey()\n x1 = self._pubval()\n x2 = self.standard_pubval(full=full)\n # if x2 is not None and len(x2) > 5:\n # print(ub.repr2(self.entry))\n\n if x1 != x2:\n print('x2 = %r' % (x2,))\n print('x1 = %r' % (x1,))\n print(ub.repr2(self.entry))\n\n # if 'CVPR' in self.entry.get('booktitle', ''):\n # if 'CVPR' != self.entry.get('booktitle', ''):\n # break\n if old:\n print('old = %r' % (old,))\n d1[key] = self.entry\n\n if full:\n d1 = bibtex_dict.copy()\n\n import numpy as np\n import pandas as pd\n df = pd.DataFrame.from_dict(d1, orient='index')\n\n paged_items = df[~pd.isnull(df['pub_accro'])]\n has_pages = ~pd.isnull(paged_items['pages'])\n print('have pages {} / {}'.format(has_pages.sum(), len(has_pages)))\n print(ub.repr2(paged_items[~has_pages]['title'].values.tolist()))\n\n entrytypes = dict(list(df.groupby('pub_type')))\n if False:\n # entrytypes['misc']\n g = entrytypes['online']\n g = g[g.columns[~np.all(pd.isnull(g), axis=0)]]\n\n entrytypes['book']\n entrytypes['thesis']\n g = entrytypes['article']\n g = entrytypes['incollection']\n g = entrytypes['conference']\n\n def lookup_pub(e):\n if e == 'article':\n return 'journal', 'journal'\n elif e == 'incollection':\n return 'booksection', 'booktitle'\n elif e == 'conference':\n return 'conference', 'booktitle'\n return None, None\n\n for e, g in entrytypes.items():\n print('e = %r' % (e,))\n g = g[g.columns[~np.all(pd.isnull(g), axis=0)]]\n if 'pub_full' in g.columns:\n place_title = g['pub_full'].tolist()\n 
print(ub.repr2(ub.dict_hist(place_title)))\n else:\n print('Unknown publications')\n\n if 'report' in entrytypes:\n g = entrytypes['report']\n missing = g[pd.isnull(g['title'])]\n if len(missing):\n print('Missing Title')\n print(ub.repr2(missing[['title', 'author']].values.tolist()))\n\n if 'journal' in entrytypes:\n g = entrytypes['journal']\n g = g[g.columns[~np.all(pd.isnull(g), axis=0)]]\n\n missing = g[pd.isnull(g['journal'])]\n if len(missing):\n print('Missing Journal')\n print(ub.repr2(missing[['title', 'author']].values.tolist()))\n\n if 'conference' in entrytypes:\n g = entrytypes['conference']\n g = g[g.columns[~np.all(pd.isnull(g), axis=0)]]\n\n missing = g[pd.isnull(g['booktitle'])]\n if len(missing):\n print('Missing Booktitle')\n print(ub.repr2(missing[['title', 'author']].values.tolist()))\n\n if 'incollection' in entrytypes:\n g = entrytypes['incollection']\n g = g[g.columns[~np.all(pd.isnull(g), axis=0)]]\n\n missing = g[pd.isnull(g['booktitle'])]\n if len(missing):\n print('Missing Booktitle')\n print(ub.repr2(missing[['title', 'author']].values.tolist()))\n\n if 'thesis' in entrytypes:\n g = entrytypes['thesis']\n g = g[g.columns[~np.all(pd.isnull(g), axis=0)]]\n missing = g[pd.isnull(g['institution'])]\n if len(missing):\n print('Missing Institution')\n print(ub.repr2(missing[['title', 'author']].values.tolist()))\n\n # import utool\n # utool.embed()\n\n # Overwrite BibDatabase structure\n bib_database._entries_dict = bibtex_dict\n bib_database.entries = list(bibtex_dict.values())\n\n #conftitle_to_types_set_hist = {key: set(val) for key, val in conftitle_to_types_hist.items()}\n #print(ub.repr2(conftitle_to_types_set_hist))\n\n print('Unknown conference keys:')\n print(ub.repr2(sorted(unknown_pubkeys)))\n print('len(unknown_pubkeys) = %r' % (len(unknown_pubkeys),))\n\n writer = BibTexWriter()\n writer.contents = ['comments', 'entries']\n writer.indent = ' '\n writer.order_entries_by = ('type', 'author', 'year')\n\n new_bibtex_str = bibtexparser.dumps(bib_database, writer)\n\n # Need to check\n #jegou_aggregating_2012\n\n # Fix the Journal Abreviations\n # References:\n # https://www.ieee.org/documents/trans_journal_names.pdf\n\n # Write out clean bibfile in ascii format\n clean_bib_fpath = ub.augpath(bib_fpath.replace(' ', '_'), suffix='_clean')\n\n if not ub.argflag('--dryrun'):\n ut.writeto(clean_bib_fpath, new_bibtex_str)", "def format_bib_entry(e: BibDocument):\n if e.bibtex is not None:\n b = e.bibtex\n s = fix_string(b.get('title', b.get('ID', '?'))) + '\\n'\n s += format_author(b.get('author', b.get('editor', '?'))) + ' ' + b.get('year', '')\n if len(e.filepaths) > 0:\n s += ' [PDF]'\n return s\n else:\n return e.relpath()", "def bib_scalar(biblio, key):\n return biblio[key][0]", "def decode_key_from_mongo(fieldname):\r\n return urllib.unquote(fieldname)", "def getBibTeX(bibref,tag_suf,outFile):\n if bibref == '1988iras....1.....B':\n bibtex = ['>@article{1988iras....1.....B,\\n',\n ' title={Infrared astronomical satellite (IRAS) catalogs and atlases. 
Volume 1: Explanatory supplement},\\n',\n ' keywords = {All Sky Photography, Catalogs, Indexes (Documentation), Infrared Astronomy Satellite, Cosmology, Galaxies, Star Formation, Stellar Evolution, Astrophysics},\\n',\n ' author={Beichman, CA and Neugebauer, G and Habing, HJ and Clegg, PE and Chester, Thomas J},\\n',\n ' year=1988,\\n',\n ' volume = {1},\\n', \n ' month = jan,\\n', \n ' adsurl = {https://ui.adsabs.harvard.edu/abs/1988iras....1.....B},\\n'\n '}\\n']\n else:\n baseURL = 'https://ui.adsabs.harvard.edu/abs/'\n suf = '/exportcitation'\n lines = urllib.request.urlopen(baseURL+bibref+suf).readlines()\n lines = [l.decode('utf-8') for l in lines] # remove additional webpage encoding\n \n bibtex = []\n for l in range(0, len(lines)):\n if 'export-textarea ' in str(lines[l]):\n bibtex.append(str(lines[l]))\n t = l+1\n \n while '</textarea>' not in str(lines[t+1]):\n bibtex.append(str(lines[t])) \n t += 1\n \n for item in bibtex:\n if 'author' in item.split('=')[0]:\n auth = item.split('=')[1].split(',')[0]\n for i in string.punctuation:\n auth = auth.replace(i, '')\n auth = auth.replace(' ', '')\n if 'year' in item.split('=')[0]:\n yr = item.split('=')[1].split(',')[0]\n yr = yr.replace(' ', '')\n \n try:\n bibtex[0] = bibtex[0].split('>')[1].split('{')[0]+'{'+auth+yr+tag_suf+',\\n'\n except UnboundLocalError as ule:\n print(bibtex)\n print('')\n print(ule)\n sys.exit()\n \n with open(outFile, 'a') as o:\n for item in bibtex:\n item = item.replace('&#34;', '\"')\n item = item.replace('&#39;', \"'\")\n item = item.replace('&amp;', \"&\")\n o.write(item)\n o.write('\\n')\n \n return auth+yr+tag_suf", "def _key_func_2(entry: tuple[str, list]) -> str:\n key = unicodedata.normalize('NFD', entry[0].lower())\n if key.startswith('\\N{RIGHT-TO-LEFT MARK}'):\n key = key[1:]\n if key[0:1].isalpha() or key.startswith('_'):\n key = chr(127) + key\n return key", "def canonicalise(self, record):\n # only canonicalise DOIs (this function should only ever be called in the right context)\n # if bibjson_identifier.has_key(\"type\") and bibjson_identifier[\"type\"] != \"doi\":\n if record.has_type() and record.identifier_type != \"doi\":\n return\n \n # do we have enough information to canonicalise, raise an error\n # if not bibjson_identifier.has_key(\"id\"):\n if not record.has_id():\n raise models.LookupException(\"can't canonicalise an identifier without an 'id' property\")\n\n canonical = self.canonical_form(record.id)\n record.canonical = canonical", "def resolve_key(obj, _):\n return obj.key.decode()", "def get_key_id(self):", "def to_safe_annotation_key(key):\n safe_key = key.translate(str.maketrans(\"\", \"\", string.punctuation))\n return safe_key", "def _course_key(self):\r\n return \"slashes:{org}+{number}+{run}\".format(**self._course_dict)", "def get_bibtex_entry(doi, bibtext_cache={}, shortdoi_cache={}):\r\n bibtext = get_bibtext(doi, cache = bibtext_cache)\r\n if not bibtext:\r\n return None\r\n\r\n short_doi = shorten(doi, cache = shortdoi_cache)\r\n parser = BibTexParser()\r\n parser.ignore_nonstandard_types = False\r\n bibdb = bibtexparser.loads(bibtext, parser)\r\n entry, = bibdb.entries\r\n quoted_doi = urllib.request.quote(doi)\r\n entry['link'] = 'https://doi.org/{}'.format(quoted_doi)\r\n if 'author' in entry:\r\n entry['author'] = ' and '.join(entry['author'].rstrip(';').split('; '))\r\n entry['ID'] = short_doi[3:]\r\n return entry", "def get_clean_bib(bib):\n d = PyQuery(bib)\n div = d(\"div.csl-right-inline\").html()\n\n # zotero keeps the html escaped in the return value\n div 
= parser.unescape(div)\n\n return hyperlink_string(div)", "def keyify(content_type_pk, pk):\n return '%s:%s' % (content_type_pk, pk)", "def clean_as_inchikey(self):\n regexp = r\"[A-Z]{14}-[A-Z]{10}-[A-Z]\"\n found = re.search(regexp, self.dirty)\n if found is None:\n self.cleaned = \"\"\n else:\n self.cleaned = found[0]", "def clean(key):\n\treturn key.strip().replace('%','').replace(' ', '-')" ]
[ "0.61262125", "0.60784906", "0.58044267", "0.5669483", "0.5512459", "0.549996", "0.546403", "0.5428998", "0.54042065", "0.5403589", "0.53428215", "0.52821094", "0.52698106", "0.52677447", "0.52602893", "0.52487", "0.5242598", "0.52408123", "0.5232014", "0.51742405", "0.5154057", "0.5151143", "0.51312864", "0.50819904", "0.50779957", "0.5067962", "0.50673425", "0.50480324", "0.5046243", "0.5045921" ]
0.71422005
0
Fetches citations for keys in key_list into a dictionary indexed by key
def mr2bib_dict(key_list):
    keys = []
    d = {}

    # validate keys
    for key in key_list:
        if is_valid(key):
            keys.append(key)
        else:
            d[key] = ReferenceErrorInfo("Invalid Mathematical Reviews identifier", key)

    if len(keys) == 0:
        return d

    # make the api call
    entries = {}
    for key in keys:
        try:
            entry = mr_request(key)
            d[key] = Reference(entry)
        except NotFoundError as error:
            message, id = error.args
            d[key] = ReferenceErrorInfo(message, id)

    return d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_citations_ids_map(id_list):\n create_unverified_context()\n logging.debug('============== IN get_citations_ids_map: ================')\n logging.debug('============== ID LIST: ================')\n logging.debug(id_list)\n linked = {}\n for i in range(0, len(id_list)):\n handle = Entrez.elink(\n dbfrom=\"pubmed\", id=id_list[i], linkname=\"pubmed_pubmed_refs\")\n results = Entrez.read(handle)\n logging.debug('============== RESULTS: ================')\n logging.debug(results)\n handle.close()\n if len(results[0][\"LinkSetDb\"]) != 0:\n linked[id_list[i]] = [\n link[\"Id\"] for link in results[0][\"LinkSetDb\"][0][\"Link\"]\n ]\n logging.debug('============== LINKED ARTICLES: ================')\n logging.debug(linked)\n logging.debug('============== ARTICLE ID: ================')\n logging.debug(id_list[i])\n return linked", "def get_coauthors_dict(name_url_list, schl_name):\n\n\tpaper_info_dict = {}\n\n\tfor name_url in name_url_list:\n\t\t# Get a sub papers_info dictfor an individual author\n\t\tpapers_info = get_papers_info(name_url[1], paper_info_dict.keys())\n\t\t# add sub dict to full dict\n\t\tpaper_info_dict.update(papers_info)\n\t\n\tif \"Humanities\" in schl_name:\n\t\tschl_name = \"School of Humanities\"\n\n\treturn paper_info_dict", "def get_titles_dict(name_url_list):\n\tbib_dict = {}\n\t\n\tfor name_url in name_url_list:\n\t\ttitles = get_author_titles(name_url[1])\n\t\tbib_dict[name_url] = titles\n\n\treturn bib_dict", "def __generate_dict_of_keys_to_classification__(self):\n dict_of_assigned_citations = {}\n # duplicating citation dataset to filter as matches go on meaning\n # it should result in quicker allocation\n # can be removed to reduce memory load at expense of speed\n list_of_unassigned = []\n for key in self.dict_of_keywords:\n list_of_current_key = []\n for citation_instance in self.array_of_citations:\n if key == citation_instance.get_classification():\n list_of_current_key.append(citation_instance)\n if \"Unassigned\" == citation_instance.get_classification():\n list_of_unassigned.append(citation_instance)\n dict_of_assigned_citations[key] = list_of_current_key\n dict_of_assigned_citations[\"Unassigned\"] = list_of_unassigned\n return dict_of_assigned_citations", "def make_inchikey_dict(mol_list):\n inchikey_dict = {}\n for mol in mol_list:\n inchikey = mol.GetProp(ms_constants.SDF_TAG_INCHIKEY)\n if inchikey not in inchikey_dict:\n inchikey_dict[inchikey] = [mol]\n else:\n inchikey_dict[inchikey].append(mol)\n return inchikey_dict", "def build_dict(results, chunk):\n from math import inf\n from collections import defaultdict\n chunk = [int(au) for au in chunk]\n d = defaultdict(\n lambda: {\"first_year\": inf, \"pubs\": set(), \"coauth\": set(),\n \"n_coauth\": inf, \"n_pubs\": inf})\n for pub in results:\n if not pub.author_ids:\n continue\n authors = set([int(au) for au in pub.author_ids.split(\";\")])\n for focal in authors.intersection(chunk):\n d[focal][\"coauth\"].update(authors)\n d[focal][\"coauth\"].remove(focal)\n d[focal][\"pubs\"].add(pub.eid)\n d[focal][\"n_pubs\"] = len(d[focal][\"pubs\"])\n d[focal][\"n_coauth\"] = len(d[focal][\"coauth\"])\n if not pub.coverDate:\n continue\n first_year = min(d[focal][\"first_year\"], int(pub.coverDate[:4]))\n d[focal][\"first_year\"] = first_year\n return d", "def get_license_refs_dict(license_refs_list):\n license_ref_dict = {}\n if license_refs_list:\n for ref_dict in license_refs_list:\n license_ref_dict[ref_dict['licenseId']] = ref_dict['extractedText']\n return license_ref_dict", "def 
return_dict_of_assigned_citations_classifications(self):\n for citation_instance in self.array_of_citations:\n citation_string = \"\"\n citation_string = citation_string.join(\n citation_instance.get_author()) + \\\n citation_instance.get_title()\\\n + citation_instance.get_journal()\n max_keyword_matches = 0\n for key in self.dict_of_keywords:\n current_key_matches = 0\n for keyword in self.dict_of_keywords.get(key):\n pattern = re.compile(keyword)\n if pattern.search(citation_string):\n current_key_matches += 1\n if current_key_matches > max_keyword_matches:\n max_keyword_matches = current_key_matches\n citation_instance.set_classification(key)\n return self.__generate_dict_of_keys_to_classification__()", "def get_publications_by_author(cached_list, cached_set, author_name):\n publications = { 'dblp': [], 'cdblp': [] }\n author = DBLPQuery.author_distinct(cached_list, cached_set, author_name)\n\n if author['dblp'].__contains__('publications'):\n publications['dblp'] = author['dblp']['publications']\n# for pub in author['dblp']['publications']:\n# print(pub)\n\n if author['cdblp'].__contains__('publications'):\n publications['cdblp'] = author['cdblp']['publications']\n# for pub in author['cdblp']['publications']:\n# print(pub)\n return publications", "def return_dictionary_of_two_comparable_citations(self, dictionary_key_one,\n dictionary_key_two):\n list_of_one_citations = []\n list_of_two_citations = []\n list_of_unassigned_citations = []\n for citation in self.array_of_citations:\n citation_string = self.__generate_author_string__(\n citation.get_author()) + \" \" + citation.get_title() \\\n + \" \" + citation.get_journal()\n one_key_matches = self.__count_of_keyword_matches__(\n citation_string, dictionary_key_one)\n two_key_matches = self.__count_of_keyword_matches__(\n citation_string, dictionary_key_two)\n if one_key_matches > two_key_matches:\n citation.set_classification(dictionary_key_one)\n list_of_one_citations.append(citation)\n elif two_key_matches > one_key_matches:\n citation.set_classification(dictionary_key_two)\n list_of_two_citations.append(citation)\n else:\n citation.set_classification(\"Unassigned\")\n list_of_unassigned_citations.append(citation)\n\n return {dictionary_key_one: list_of_one_citations, dictionary_key_two:\n list_of_two_citations, \"Unassigned\":\n list_of_unassigned_citations}", "def connect_refs_to_species(species: list, citelist: list) -> dict:\n # create a dictionary with empty reference lists\n species_refs = {s.species: set() for s in species}\n # go through all citations\n # for c in tqdm(citelist):\n for c in citelist:\n if c.actual in species_refs:\n reflist = species_refs[c.actual]\n reflist |= {c.cite_key}\n return species_refs", "def build_query_dict(self, term_list, issn_list, year_list, jlist):\n journal_frame = self.make_jlist(jlist)\n\n search_terms = self.build_search_terms(term_list)\n dict1 = {}\n #This loop goes through and sets up a dictionary key with an ISSN number\n\n for issn in issn_list:\n\n issn_terms = ' AND ISSN(' + issn + ')'\n dict2 = {}\n #This loop goes and attaches all the years to the outer loop's key.\n for year in year_list:\n\n year_terms = \"AND PUBYEAR IS \" + str(year)\n querystring = search_terms + year_terms + issn_terms\n\n dict2[year] = querystring\n\n dict1[issn] = dict2\n\n return dict1", "def dict_filter(indict, key_list):\n \n return dict((key, value) for key, value in list(indict.items()) if key in key_list)", "def get_coauthored_publications_by_authors(cached_list, cached_set, author1_name, 
author2_name):\n publications = { 'cdblp': [], 'dblp': [] }\n pub1 = DBLPQuery.get_publications_by_author(cached_list, cached_set, author1_name)\n author2 = DBLPQuery.author_distinct(cached_list, cached_set, author2_name)\n #pub2 = DBLPQuery.get_publications_by_author(cached_list, cached_set, author2_name)\n for cdblp_pub in pub1.get('cdblp', []):\n authors = set(cdblp_pub.get('authors', []))\n authors_en = set(map(lambda a: CDBLPAuthor.getEnglishName(a)['full_name'], authors))\n if author2.get('cdblp', {}).get('author_name', {}).get('zh') in authors or author2.get('dblp', {}).get('author_name') in authors_en:\n publications['cdblp'].append(cdblp_pub)\n\n for dblp_pub in pub1.get('dblp', []):\n authors = set(map(lambda a: a.get('name'), dblp_pub.get('authors', [])))\n if author2.get('dblp', {}).get('author_name') in authors or author2.get('cdblp', {}).get('author_name', {}).get('full_name') in authors:\n publications['dblp'].append(dblp_pub)\n\n return publications", "def get_many_by_key(cache_key_f, item_keys, version=None):\r\n cache_key_to_item_key = {}\r\n for item_key in item_keys:\r\n cache_key = cache.make_key(cache_key_f(item_key), version=version)\r\n cache_key_to_item_key[cache_key] = item_key\r\n\r\n # request from cache\r\n from_cache = cache.get_many(cache_key_to_item_key.keys())\r\n\r\n results = {}\r\n for cache_key, value in from_cache.iteritems():\r\n item_key = cache_key_to_item_key[cache_key]\r\n results[item_key] = value\r\n return results", "def get_references(cls, pmids):\n\n references = cls.query.filter(cls.pmid.in_(pmids)).all()\n citations = {}\n\n for reference in references:\n citation_text = reference.authors + \". (\" + str(\n reference.year) + \"). \" + reference.title + \" \" + reference.journal + \".\"\n citations[reference.pmid] = citation_text\n\n return citations", "def get_commitments(s, clist):\n return dict((c.Resource, (c.Lower_bound, c.Upper_bound))\n for c in clist if (s in c))", "def recs_to_lookup(filename):\n d = {\"\": \"\"}\n for flds in nndb_recs(filename, [\"key\", \"val\"]):\n d[flds[\"key\"]] = flds[\"val\"]\n return d", "def filterKeys(document, keys):\n return {key: document[key] for key in keys}", "def getUniChemData(self, inchiKeyList):\n mapD = {\n 1: {\"name\": \"chembl\", \"baseUrl\": \"https://www.ebi.ac.uk/chembl/\", \"entryUrl\": \"https://www.ebi.ac.uk/chembldb/compound/inspect/\"},\n 3: {\"name\": \"pdb\", \"baseUrl\": \"http://www.ebi.ac.uk/pdbe/\", \"entryUrl\": \"http://www.ebi.ac.uk/pdbe-srv/pdbechem/chemicalCompound/show/\"},\n 2: {\"name\": \"drugbank\", \"baseUrl\": \"http://drugbank.ca/\", \"entryUrl\": \"http://www.drugbank.ca/drugs/\"},\n 5: {\"name\": \"pubchem_dotf\", \"baseUrl\": \"http://pubchem.ncbi.nlm.nih.gov/sources/sources.cgi\", \"entryUrl\": \"http://pubchem.ncbi.nlm.nih.gov/substance/\"},\n 4: {\"name\": \"gtopdb\", \"baseUrl\": \"http://www.guidetopharmacology.org\", \"entryUrl\": \"http://www.guidetopharmacology.org/GRAC/LigandDisplayForward?ligandId=\"},\n 11: {\"name\": \"ibm\", \"baseUrl\": \"http://www-935.ibm.com/services/us/gbs/bao/siip/nih/\", \"entryUrl\": \"http://www-935.ibm.com/services/us/gbs/bao/siip/nih/?sid=\"},\n 6: {\"name\": \"kegg_ligand\", \"baseUrl\": \"http://www.genome.jp/kegg/ligand.html\", \"entryUrl\": \"http://www.genome.jp/dbget-bin/www_bget?\"},\n 9: {\"name\": \"zinc\", \"baseUrl\": \"http://zinc15.docking.org\", \"entryUrl\": \"http://zinc15.docking.org/substances/\"},\n 8: {\"name\": \"nih_ncc\", \"baseUrl\": \"http://nihsmr.evotec.com/evotec/\", \"entryUrl\": \"\"},\n 10: 
{\"name\": \"emolecules\", \"baseUrl\": \"https://www.emolecules.com/\", \"entryUrl\": \"https://www.emolecules.com/cgi-bin/more?vid=\"},\n 12: {\"name\": \"atlas\", \"baseUrl\": \"http://www.ebi.ac.uk/gxa/home\", \"entryUrl\": \"http://www.ebi.ac.uk/gxa/query?conditionQuery=\"},\n 7: {\"name\": \"chebi\", \"baseUrl\": \"http://www.ebi.ac.uk/chebi/downloadsForward.do\", \"entryUrl\": \"http://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI%3A\"},\n 14: {\n \"name\": \"fdasrs\",\n \"baseUrl\": \"http://fdasis.nlm.nih.gov/srs/srs.jsp\",\n \"entryUrl\": \"http://fdasis.nlm.nih.gov/srs/ProxyServlet?mergeData=true&objectHandle=DBMaint&APPLICATION_NAME=fdasrs&actionHandle=default&nextPage=jsp/srs/ResultScreen.jsp&TXTSUPERLISTID=\",\n },\n 15: {\"name\": \"surechembl\", \"baseUrl\": \"https://www.surechembl.org/search/\", \"entryUrl\": \"https://www.surechembl.org/chemical/\"},\n 21: {\"name\": \"pubchem_tpharma\", \"baseUrl\": \"http://www.thomson-pharma.com/\", \"entryUrl\": \"http://pubchem.ncbi.nlm.nih.gov/substance/\"},\n 22: {\"name\": \"pubchem\", \"baseUrl\": \"http://pubchem.ncbi.nlm.nih.gov\", \"entryUrl\": \"http://pubchem.ncbi.nlm.nih.gov/compound/\"},\n 27: {\"name\": \"recon\", \"baseUrl\": \"https://vmh.uni.lu\", \"entryUrl\": \"https://vmh.uni.lu/\"},\n 28: {\"name\": \"molport\", \"baseUrl\": \"https://www.molport.com/shop/index\", \"entryUrl\": \"https://www.molport.com/shop/molecule-link/\"},\n 31: {\n \"name\": \"bindingdb\",\n \"baseUrl\": \"https://www.bindingdb.org/bind/index.jsp\",\n \"entryUrl\": \"http://www.bindingdb.org/bind/chemsearch/marvin/MolStructure.jsp?monomerid=\",\n },\n 41: {\"name\": \"swisslipids\", \"baseUrl\": \"http://www.swisslipids.org/\", \"entryUrl\": \"http://www.swisslipids.org/\"},\n 29: {\"name\": \"nikkaji\", \"baseUrl\": \"http://jglobal.jst.go.jp/en/\", \"entryUrl\": \"http://jglobal.jst.go.jp/en/redirect?Nikkaji_No=\"},\n 32: {\"name\": \"comptox\", \"baseUrl\": \"https://comptox.epa.gov/dashboard/\", \"entryUrl\": \"https://comptox.epa.gov/dashboard/\"},\n 33: {\"name\": \"lipidmaps\", \"baseUrl\": \"http://www.lipidmaps.org\", \"entryUrl\": \"http://www.lipidmaps.org/data/LMSDRecord.php?LMID=\"},\n 35: {\"name\": \"carotenoiddb\", \"baseUrl\": \"http://carotenoiddb.jp/index.html\", \"entryUrl\": \"http://carotenoiddb.jp/Entries/\"},\n 36: {\"name\": \"metabolights\", \"baseUrl\": \"http://www.ebi.ac.uk/metabolights/\", \"entryUrl\": \"http://www.ebi.ac.uk/metabolights/\"},\n 37: {\"name\": \"brenda\", \"baseUrl\": \"https://www.brenda-enzymes.org/index.php\", \"entryUrl\": \"https://www.brenda-enzymes.org/ligand.php?brenda_ligand_id=\"},\n 17: {\"name\": \"pharmgkb\", \"baseUrl\": \"https://www.pharmgkb.org\", \"entryUrl\": \"https://www.pharmgkb.org/drug/\"},\n 18: {\"name\": \"hmdb\", \"baseUrl\": \"http://www.hmdb.ca\", \"entryUrl\": \"http://www.hmdb.ca/metabolites/\"},\n 24: {\n \"name\": \"nmrshiftdb2\",\n \"baseUrl\": \"http://nmrshiftdb.nmr.uni-koeln.de/portal/media-type/html/user/anon/page/default.psml/js_pane/P-Home\",\n \"entryUrl\": \"http://nmrshiftdb.org/molecule/\",\n },\n 25: {\"name\": \"lincs\", \"baseUrl\": \"http://www.lincsproject.org/\", \"entryUrl\": \"http://identifiers.org/lincs.smallmolecule/\"},\n 39: {\"name\": \"chemicalbook\", \"baseUrl\": \"https://www.chemicalbook.com\", \"entryUrl\": \"https://www.chemicalbook.com/ChemicalProductProperty_EN_\"},\n 20: {\"name\": \"selleck\", \"baseUrl\": \"http://www.selleckchem.com\", \"entryUrl\": \"http://www.selleckchem.com/products/\"},\n 23: {\"name\": \"mcule\", 
\"baseUrl\": \"https://mcule.com\", \"entryUrl\": \"https://mcule.com/\"},\n 26: {\"name\": \"actor\", \"baseUrl\": \"https://actor.epa.gov\", \"entryUrl\": \"http://actor.epa.gov/actor/chemical.xhtml?casrn=\"},\n 34: {\"name\": \"drugcentral\", \"baseUrl\": \"http://drugcentral.org\", \"entryUrl\": \"http://drugcentral.org/drugcard/\"},\n 38: {\"name\": \"rhea\", \"baseUrl\": \"http://www.rhea-db.org\", \"entryUrl\": \"http://www.rhea-db.org/searchresults?q=CHEBI:\"},\n }\n oD = {}\n try:\n for ky in inchiKeyList:\n unc = unichem_client # pylint: disable=no-member\n # unc.set_format(\"json\")\n uDL = unc.get(ky)\n if uDL:\n qD = {}\n for uD in uDL:\n if \"src_id\" in uD and int(uD[\"src_id\"]) in mapD:\n qD[mapD[int(uD[\"src_id\"])][\"name\"]] = uD[\"src_compound_id\"]\n if qD:\n oD[ky] = qD\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n return oD", "def group_list_dict(matches, keys):\n target = collections.OrderedDict((key, []) for key in keys)\n for entry in matches:\n if entry is None:\n continue\n for key, value in entry.items():\n target[key].append(value)\n return target", "def build_course_dictionary(title_result_set, desc_result_set) -> Dict[str, List[str]]:\n\n course_dictionary = {} # placeholder dictionary\n\n for (tagged_title, tagged_description) in zip(title_result_set, desc_result_set): # iterate through multiple result sets\n full_title_desc_list = {}\n full_title_desc_list = [str(tagged_title.text)] + str(tagged_description.text).strip().splitlines() # remove trailing whitespace, then get list of lines\n course_dictionary[str(tagged_title.text)[:8]] = full_title_desc_list\n\n return course_dictionary", "def construct_occurrence_dico(data) :\n print('Constructing occurence dictionnaries...')\n\n p_kw_dico = dict()\n kw_p_dico = dict()\n full_stem_dico = {}\n for patent in data :\n patent_id = patent['id']\n #[keywords,stem_dico] = extract_keywords(patent[1]+\". \"+patent[2],patent_id)\n [keywords,stem_dico] = extract_keywords(patent['title']+\". 
\"+patent['abstract'],patent_id)\n #print(keywords)\n\n for k in keywords :\n # add to p_kw dico\n if k in kw_p_dico :\n kw_p_dico[k].append(patent_id)\n else :\n kw_p_dico[k]= [patent_id]\n #\n if patent_id in p_kw_dico :\n p_kw_dico[patent_id].append(k)\n else :\n p_kw_dico[patent_id] = [k]\n\n for k in stem_dico.keys():\n if k in full_stem_dico :\n full_stem_dico[k]=full_stem_dico[k].union(stem_dico[k])\n else :\n full_stem_dico[k] = stem_dico[k]\n\n return([p_kw_dico,kw_p_dico,full_stem_dico])", "def get_by_list_of_keys(dictionary: Dict, key_path: List[Any]) -> Dict:\n if len(key_path) == 1:\n return dictionary[key_path[0]]\n else:\n return get_by_list_of_keys(dictionary[key_path[0]], key_path[1:])", "def get_many(self, keys: Iterable, version: Optional[int] = None) -> Dict[str, Any]:\n d = {}\n for k in keys:\n val = self.get(k, version=version)\n if val is not None:\n d[k] = val\n return d", "def collect_citation_metadata(\n metadata: dict, references: List[pybtex.database.Entry]\n) -> dict:\n # Author list\n citation_authors = []\n for author_tier in [\"Core\", \"Developers\", \"Contributors\"]:\n for author in metadata[\"Authors\"][author_tier][\"List\"]:\n family_names, given_names = author[\"Name\"].split(\", \")\n citation_author = {\n \"family-names\": family_names,\n \"given-names\": given_names,\n }\n if \"Orcid\" in author:\n citation_author[\"orcid\"] = (\n \"https://orcid.org/\" + author[\"Orcid\"]\n )\n if \"Affiliations\" in author and len(author[\"Affiliations\"]) > 0:\n citation_author[\"affiliation\"] = \" and \".join(\n author[\"Affiliations\"]\n )\n citation_authors.append(citation_author)\n # References in CITATION.cff format\n citation_references = [to_cff_reference(entry) for entry in references]\n return {\n \"cff-version\": \"1.2.0\",\n \"message\": (\n \"Please cite SpECTRE in any publications that make use of its code\"\n \" or data. Cite the latest version that you use in your\"\n \" publication. 
The citation for this version is listed below.\"\n ),\n \"title\": metadata[\"Name\"],\n \"url\": metadata[\"Homepage\"],\n \"repository-code\": \"https://github.com/\" + metadata[\"GitHub\"],\n \"version\": metadata[\"Version\"],\n \"date-released\": metadata[\"PublicationDate\"],\n \"doi\": metadata[\"Doi\"],\n \"authors\": citation_authors,\n \"keywords\": metadata[\"Keywords\"],\n \"license\": metadata[\"License\"],\n \"references\": citation_references,\n }", "def collect_by_key(pair_iter):\n out = {}\n for (k, v) in pair_iter:\n out[k] = out.get(k, [])\n out[k].append(v)\n return out", "async def fetch_keyvals(\n self,\n client: OpenrCtrlCppClient.Async,\n areas: Set[Any],\n keyDumpParams: KeyDumpParams,\n ) -> Dict[str, Publication]:\n\n area_to_publication_dict = {}\n for area in areas:\n area_to_publication_dict[area] = await client.getKvStoreKeyValsFilteredArea(\n keyDumpParams, area\n )\n return area_to_publication_dict", "def formatting_cid_id_clusters(cid_id_list, other_id):\n # key: cid, value: list of ocns [ocn1, ocn2]\n cid_ids_dict = {}\n\n if cid_id_list:\n for cid_id in cid_id_list:\n cid = cid_id.get(\"cid\")\n id = cid_id.get(other_id)\n if cid in cid_ids_dict:\n cid_ids_dict[cid].append(id)\n else:\n cid_ids_dict[cid] = [id]\n\n return cid_ids_dict", "def get_self_citations(new_record_list, citationdic, initial_selfcitdict, config):\n i = 0 #just for debugging ..\n #get the tags for main author, coauthors, ext authors from config\n tags = ['first_author', 'additional_author', 'alternative_author_name']\n for t in tags:\n try:\n dummy = config.get(config.get(\"rank_method\", \"function\"), t)\n except:\n register_exception(prefix=\"attribute \"+t+\" missing in config\", alert_admin=True)\n return initial_selfcitdict\n\n r_mainauthortag = config.get(config.get(\"rank_method\", \"function\"), \"first_author\")\n r_coauthortag = config.get(config.get(\"rank_method\", \"function\"), \"additional_author\")\n r_extauthortag = config.get(config.get(\"rank_method\", \"function\"), \"alternative_author_name\")\n #parse the tags\n mainauthortag = tagify(parse_tag(r_mainauthortag))\n coauthortag = tagify(parse_tag(r_coauthortag))\n extauthortag = tagify(parse_tag(r_extauthortag))\n\n selfcites = initial_selfcitdict\n for k in new_record_list:\n if (i % 1000 == 0):\n mesg = \"Selfcites done \"+str(i)+\" of \"+str(len(new_record_list))+\" records\"\n write_message(mesg)\n task_update_progress(mesg)\n i = i+1\n #get the author of k\n authorlist = get_fieldvalues(k, mainauthortag)\n coauthl = get_fieldvalues(k, coauthortag)\n extauthl = get_fieldvalues(k, extauthortag)\n authorlist.append(coauthl)\n authorlist.append(extauthl)\n #author tag\n #print \"record \"+str(k)+\" by \"+str(authorlist)\n #print \"is cited by\"\n #get the \"x-cites-this\" list\n if citationdic.has_key(k):\n xct = citationdic[k]\n for c in xct:\n #get authors of c\n cauthorlist = get_fieldvalues(c, mainauthortag)\n coauthl = get_fieldvalues(c, coauthortag)\n extauthl = get_fieldvalues(c, extauthortag)\n cauthorlist.extend(coauthl)\n cauthorlist.extend(extauthl)\n #print str(c)+\" by \"+str(cauthorlist)\n for ca in cauthorlist:\n if (ca in authorlist):\n #found!\n if selfcites.has_key(k):\n val = selfcites[k]\n #add only if not there already\n if val:\n if not c in val:\n val.append(c)\n selfcites[k] = val\n else:\n #new key for selfcites\n selfcites[k] = [c]\n\n mesg = \"Selfcites done fully\"\n write_message(mesg)\n task_update_progress(mesg)\n\n return selfcites" ]
[ "0.63553035", "0.6239225", "0.6104354", "0.6002555", "0.58264047", "0.5761537", "0.5651292", "0.5648888", "0.5609229", "0.5563336", "0.5560674", "0.5547645", "0.55174834", "0.5494095", "0.5494074", "0.5486462", "0.5401576", "0.53930855", "0.5387903", "0.53725374", "0.53647405", "0.5363835", "0.5362577", "0.53601736", "0.5335622", "0.5303793", "0.5281166", "0.5266367", "0.52645236", "0.52560043" ]
0.62833595
1
Given the rates, add noise based on numreg
def add_white_noise(rates, numreg):
    rtemp = rates.copy().getA()
    sdrates = np.sqrt(rtemp * (1 - rtemp) / numreg) + 1e-10
    noise = np.random.normal(0, sdrates)
    rtemp += noise
    return np.matrix(rtemp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def noise(self, freq: int, /) -> None:", "def add_uniform_noise(rates, percent):\n raise 0 < percent < 1 or AssertionError\n rtemp = rates.copy().getA()\n noise = np.random.uniform(1 - percent, 1 + percent, np.shape(rtemp))\n rtemp = rtemp * noise\n return np.matrix(rtemp)", "def add_noise(self, noise):\n if noise > 0.0:\n for key in self.counts:\n self.counts[key] *= 1.0 + noise * np.random.random_sample()", "def add_noise(self, data):", "def noiseReduction(self):\n pass", "def addNoise(pure,snr):\r\n watts = pure**2\r\n # Calculate signal power and convert to dB \r\n sig_avg_watts = np.mean(watts)\r\n sig_avg_db = 10 * np.log10(sig_avg_watts)\r\n # Calculate noise according to [2] then convert to watts\r\n noise_avg_db = sig_avg_db - snr\r\n noise_avg_watts = 10 ** (noise_avg_db / 10)\r\n # Generate an sample of white noise\r\n mean_noise = 0\r\n noise = np.random.normal(mean_noise, np.sqrt(noise_avg_watts), len(watts))\r\n \r\n return pure+noise", "def add_noise(image, noise, rate=0.05):\n\n if noise == \"gaussian\":\n row, col = image.shape\n var = ndimage.laplace(image).var()\n sigma = (var*rate) ** 0.5\n print(var, sigma)\n gauss = np.random.normal(loc=0, scale=sigma, size=(row, col)) * rate\n noisy = image + gauss\n # noisy = image + gauss\n return noisy\n\n elif noise == \"salt_pepper\":\n output = image.copy()\n black = 0\n white = 255\n probs = np.random.random(image.shape[:2])\n output[probs < (rate / 2)] = black\n output[probs > 1 - (rate / 2)] = white\n\n return output\n\n else:\n return image", "def add_noise(spectra: np.ndarray, maxLevel: float = 0.1, seed: int = 42) -> np.ndarray:\n np.random.seed(seed)\n spectra = spectra.copy()\n spectra[:, 1:] *= (1-maxLevel/2) + np.random.rand(spectra.shape[0], spectra.shape[1]-1) * maxLevel\n return spectra", "def addNoise(data, amp, scale):\n lfnData = addLFNoise(data, amp, scale)\n noisyData = addHFNoise(hfnData, amp)\n\n return noisyData", "def noise(self, stddev):\n #add noise to weights\n pass", "def _addNoise(self):\n self.dispNoise = self.dispRaw.copy()\n self.dispNoise[:, 0] += self.sigmaEast * numpy.random.randn(self.numStations)\n self.dispNoise[:, 1] += self.sigmaNorth * numpy.random.randn(self.numStations)\n self.dispNoise[:, 2] += self.sigmaUp * numpy.random.randn(self.numStations)\n return", "def add_noise(spectrum,rms):\n noise = np.random.randn(spectrum.data.shape[0])*rms\n noisy_data = spectrum.data + noise\n noisy_spec = pyspeckit.Spectrum(xarr=spectrum.xarr,data=noisy_data)\n return noisy_spec", "def gen_noise(num_signals, sig_len):\n\n r_noise = np.random.normal(0, 1, (num_signals, sig_len))\n c_noise = np.random.normal(0, 1, (num_signals, sig_len)) * 1j\n noise = np.add(r_noise, c_noise) / np.sqrt(2)\n return noise/(np.var(noise, axis=1)**.5)[:, None]", "def add_noise(self):\n self.noise = np.random.poisson(lam=self.lam, size=self.image.shape)\n self.image += self.noise\n return", "def add_noise(Y, sigma):\r\n return Y + np.random.normal(0, sigma, Y.shape)", "def add_noise(self):\n self.noise = np.random.poisson(lam=self.lam, size=self.im.shape)\n self.im += self.noise\n return", "def _add_noise(signal: np.array, noise_power: float) -> np.array:\n noise = np.sqrt(noise_power / 2) * np.random.randn(signal.size)\n return signal + noise", "def _add_noise(signal: np.array, noise_power: float) -> np.array:\n noise = np.sqrt(noise_power / 2) * np.random.randn(signal.size)\n return signal + noise", "def add_noise(arr, sigma):\n dims = arr.shape\n arr += sigma * noise(*dims)", "def noise(self, xs, ys):\n raise 
NotImplementedError", "def noiseon(delay=2.0, reference=False, subarray=DEFAULT) :\n multiSubarray('noiseSource', subarray, True, reference)\n multiSubarray('rfPower', subarray, False)\n sleep(delay) # Temporary - to allow for delay in correlator", "def pink_noise():\n global curr_tick\n octave = octave_lookup[curr_tick]\n curr_noise[octave] = int(white_noise() / (5-octave))\n curr_tick += 1\n if curr_tick >= len(octave_lookup):\n curr_tick = 0\n return sum(curr_noise)", "def add_noise(image):\n image += 10e-10 * np.random.randn(image.shape[0], image.shape[1], 1)\n \n return image", "def add_noise_at_snr(channel_in, snr):\n\n rms_channel = np.sqrt(np.mean(channel_in ** 2.0))\n noise_std = rms_channel / np.sqrt(10.0 ** (snr/10.0))\n\n return channel_in + np.random.normal(size=channel_in.shape, scale=noise_std)", "def addNoise(array,counts):\r\n if array.dtype == 'complex' :\r\n arrayout = addNoise(np.real(array),counts) + 1.0J * addNoise(np.imag(array),counts)\r\n else :\r\n if np.float64(counts) == 0.0e0 :\r\n arrayout = np.zeros(array.shape, dtype=arrayout.dtype)\r\n elif np.float64(counts) < 0.0e0 :\r\n print 'bg.addNoise : warning counts < 0'\r\n elif np.float64(counts) > 1.0e9 :\r\n arrayout = np.zeros(array.shape)\r\n arrayout = normaliseInt(array)\r\n arrayout = np.random.normal(arrayout*np.float64(counts),np.sqrt(arrayout*np.float64(counts)))/np.float64(counts)\r\n tot = np.sum(array)\r\n arrayout = normaliseInt(arrayout,tot)\r\n else :\r\n arrayout = np.zeros(array.shape)\r\n arrayout = normaliseInt(array)\r\n arrayout = np.random.poisson(arrayout*np.float64(counts))/np.float64(counts)\r\n tot = np.sum(array)\r\n arrayout = normaliseInt(arrayout,tot)\r\n return arrayout", "def add_noise_m(self, data):\n return self.range_to_m(self.add_noise(self.m_to_range(data)))", "def add_signal_to_noise(self):\n\n # noise\n noise = lal.CreateREAL8TimeSeries('blah', self.epoch, 0,\n self.td_noise.delta_t, lal.StrainUnit, \n int(self.td_noise.duration / self.td_noise.delta_t))\n noise.data.data = self.td_noise.data\n\n # signal\n signal = lal.CreateREAL8TimeSeries('blah',\n self.ext_params.geocent_peak_time, 0, self.td_signal.delta_t,\n lal.StrainUnit, int(self.td_signal.duration /\n self.td_signal.delta_t))\n signal.data.data = self.td_signal.data\n\n win = lal.CreateTukeyREAL8Window(len(signal.data.data),0.1)\n win.data.data[len(signal.data.data):] = 1.0\n #signal.data.data *= win.data.data\n\n # --- Scale to a target snr\n print '---'\n if self.target_snr is not None:\n\n tmp_sig = pycbc.types.TimeSeries(signal.data.data,\n delta_t=self.td_signal.delta_t)\n\n current_snr = pycbc.filter.sigma(tmp_sig, psd=self.psd,\n low_frequency_cutoff=self.f_low,\n high_frequency_cutoff=0.5/self.delta_t)\n\n signal.data.data *= self.target_snr / current_snr\n # ----\n\n # sum\n noise_plus_signal = lal.AddREAL8TimeSeries(noise, signal)\n\n self.td_response = \\\n pycbc.types.timeseries.TimeSeries(\\\n initial_array=np.copy(noise_plus_signal.data.data),\n delta_t=noise_plus_signal.deltaT,\n epoch=noise_plus_signal.epoch)\n\n # Finally, zero-pad the signal vector to have the same length as the actual data\n # vector\n no_noise = lal.CreateREAL8TimeSeries('blah', self.epoch, 0,\n self.td_noise.delta_t, lal.StrainUnit, \n int(self.td_noise.duration / self.td_noise.delta_t))\n\n no_noise.data.data = np.zeros(\\\n int(self.td_noise.duration / self.td_noise.delta_t))\n\n signal = lal.AddREAL8TimeSeries(no_noise, signal)\n\n self.td_signal = \\\n 
pycbc.types.timeseries.TimeSeries(initial_array=np.copy(signal.data.data),\n delta_t=signal.deltaT, epoch=noise_plus_signal.epoch)\n\n del noise, signal, noise_plus_signal", "def ternary_noise(N_stimuli, Nx, Ny):\n return np.random.randint(-1, 2, size=(N_stimuli, Nx, Ny))", "def noise(self):\n # Extract parameters\n pzs = self.params[0]\n ngals = np.array([pz.gals_per_steradian for pz in pzs])\n return 1.0 / ngals", "def addNoise (image,noise_type=\"gauss\",var = .01):\n row,col,ch= image.shape\n if noise_type == \"gauss\": \n mean = 0.0\n #var = 0.001\n sigma = var**0.5\n gauss = np.array(image.shape)\n gauss = np.random.normal(mean,sigma,(row,col,ch))\n gauss = gauss.reshape(row,col,ch)\n #print(gauss)\n noisy = image + gauss*255\n return noisy.astype('uint8')\n elif noise_type == \"s&p\":\n s_vs_p = 0.5\n amount = 0.09\n out = image\n # Generate Salt '1' noise\n num_salt = np.ceil(amount * image.size * s_vs_p)\n coords = [np.random.randint(0, i - 1, int(num_salt))\n for i in image.shape]\n out[coords] = 255\n # Generate Pepper '0' noise\n num_pepper = np.ceil(amount* image.size * (1. - s_vs_p))\n coords = [np.random.randint(0, i - 1, int(num_pepper))\n for i in image.shape]\n out[coords] = 0\n return out\n elif noise_type == \"poisson\":\n vals = len(np.unique(image))\n vals = 2 ** np.ceil(np.log2(vals))\n noisy = np.random.poisson(image * vals) / float(vals)\n return noisy\n elif noise_type ==\"speckle\":\n gauss = np.random.randn(row,col,ch)\n gauss = gauss.reshape(row,col,ch) \n noisy = image + image * gauss\n return noisy\n else:\n return image" ]
[ "0.68984324", "0.6839585", "0.67851245", "0.6750113", "0.66716886", "0.66040987", "0.64807314", "0.63987464", "0.6348617", "0.6240534", "0.62197", "0.6102634", "0.60901725", "0.60885996", "0.60824186", "0.6081778", "0.6057069", "0.6057069", "0.6010699", "0.6005882", "0.6003883", "0.5974253", "0.5958111", "0.5928545", "0.5887459", "0.58775127", "0.58604974", "0.58585274", "0.5849963", "0.5834128" ]
0.8146284
0
Given the rates, sample a new rate uniformly between ((1-percent)*rates, (1+percent)*rates)
def add_uniform_noise(rates, percent):
    assert 0 < percent < 1
    rtemp = rates.copy().getA()
    noise = np.random.uniform(1 - percent, 1 + percent, np.shape(rtemp))
    rtemp = rtemp * noise
    return np.matrix(rtemp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_rate(self):\r\n interval = self.data.iloc[2, 0] - self.data.iloc[1, 0]\r\n self.rate = int(1 / interval)", "def mutate(chrom, rate=100):\n for i in range(len(chrom)):\n chance = randint(0, 100)\n if chance <= rate:\n chrom[i] = randint(0, 13)\n return chrom", "def update_rate(self):\n self._rate = (\n (self._received - self._samples[0]) / float(self.sample_size)\n )\n self._samples.append(self._received)", "def learning_rate(self, global_step, boundaries, rates, warmup=False):\n if any([b < 0 for b in boundaries]) or any(\n [not isinstance(b, int) for b in boundaries]):\n raise ValueError('boundaries must be a list of positive integers')\n if any([bnext <= b for bnext, b in zip(boundaries[1:], boundaries[:-1])]):\n raise ValueError('Entries in boundaries must be strictly increasing.')\n if any([not isinstance(r, float) for r in rates]):\n raise ValueError('Learning rates must be floats')\n if len(rates) != len(boundaries) + 1:\n raise ValueError('Number of provided learning rates must exceed '\n 'number of boundary points by exactly 1.')\n\n if boundaries and boundaries[0] == 0:\n raise ValueError('First step cannot be zero.')\n\n if warmup and boundaries:\n slope = (rates[1] - rates[0]) * 1.0 / boundaries[0]\n warmup_steps = list(range(boundaries[0]))\n warmup_rates = [rates[0] + slope * step for step in warmup_steps]\n boundaries = warmup_steps + boundaries\n rates = warmup_rates + rates[1:]\n else:\n boundaries = [0] + boundaries\n num_boundaries = len(boundaries)\n\n def eager_decay_rate():\n \"\"\"Callable to compute the learning rate.\"\"\"\n rate_index = tf.reduce_max(tf.where(\n tf.greater_equal(global_step, boundaries),\n list(range(num_boundaries)),\n [0] * num_boundaries))\n return tf.reduce_sum(rates * tf.one_hot(rate_index, depth=num_boundaries),\n name='learning_rate')\n\n if tf.executing_eagerly():\n return eager_decay_rate\n else:\n return eager_decay_rate()", "def test_init_out_with_rate(self):\n n = 3\n rates = [10, 50, 90]\n\n G = RandomLayer(n, ini_rate=rates)\n G.prepare(10.0, 0.1)\n\n self.assertLess(np.max(np.abs(G.out - rates)), 1e-9)", "def mutate(self, probability, rate):\n for i in range(self.number_of_transitions):\n shape = np.shape(self.weights[i])\n size = self.weights[i].size\n weights = self.weights[i].flatten()\n for j in range(len(weights)):\n if np.random.uniform(0, 1) < probability:\n weights[j] = weights[j] + rate * np.random.normal(0, 1 / np.sqrt(shape[0]))\n self.weights[i] = weights.reshape(shape)\n for j in range(len(self.biases[i])):\n if np.random.uniform(0, 1) < probability:\n self.biases[i][j] = self.biases[i][j] + rate * np.random.normal(0, 1)", "def rates(self, rates):\n\n self._rates = rates", "def rates(self, rates):\n\n self._rates = rates", "def sample_rate(P1, P2):\n v = (P1[0] - P2[0], P1[1] - P2[1], P1[2] - P2[2])\n # Project v onto the xy plane\n # xvect is a unit vector on that plane\n normalized = (1. / np.sqrt(2), 1. / np.sqrt(2), 0.)\n \n angle = np.dot(normalized, v) / modulus(v)\n \n # We need 1 / cosA\n return 1. 
/ np.cos(angle)", "def sample_poisson_rate_pymc(rate_multiplier, num_events,\n iters=1000, nchains=2):\n\n pmodel = pm.Model()\n\n with pmodel:\n # Prior for rate, at the moment, use a transformation trick\n # I want rate distributed as rate**-0.5\n # So if u ~ Uniform, then v = Cu**2 is v ~ v**-0.5.\n # Should recheck the maths.\n u = pm.Uniform('u', 0, 10)\n rate = u**2 * rate_multiplier\n events = pm.Poisson('events', mu=rate, observed=num_events)\n\n # u = pm.Uniform('u', 0, 10)\n\n # events = pm.Poisson('events', mu=u, observed=num_events)\n\n map_estimate = pm.find_MAP(model=pmodel)\n\n with pmodel:\n trace = pm.sample(iters, chains=nchains)\n\n return trace['u']**2", "def pick_policy(Qs, rates, num_of_servers):\n new_rates = copy.deepcopy(rates)\n Q = np.random.choice(Qs)\n srv = np.random.randint(1, num_of_servers + 1)\n state = pick_a_state(Q)\n scale = np.random.uniform(0, 2)\n new_rates[srv][state] *= scale\n return new_rates, srv", "def set_sample_rate(self, rate):\n self.check_validity()\n\n rate = int(rate)\n\n self.ipcon.send_request(self, BrickletIndustrialDualAnalogInV2.FUNCTION_SET_SAMPLE_RATE, (rate,), 'B', 0, '')", "def sample_b_to_rate(R):\n b_to_rate = {}\n v_to_source = Ftree.R_to_v_to_source(R)\n for v in Ftree.R_to_preorder(R):\n p = v_to_source.get(v, None)\n if p is None:\n continue\n # sample a coefficient regardless of whether we use it\n # this is an obsolete method\n #log_coeff = (random.random() - 0.5) * epsrate\n #coeff = math.exp(log_coeff)\n curr_branch = frozenset([v, p])\n gp = v_to_source.get(p, None)\n if gp is None:\n parent_rate = 1.0\n else:\n prev_branch = frozenset([p, gp])\n parent_rate = b_to_rate[prev_branch]\n b_to_rate[curr_branch] = random.expovariate(1/parent_rate)\n return b_to_rate", "def get_sample_rate(self):\n return 1", "def Probability(rating1, rating2):\n return 1.0 * 1.0 / (1 + 1.0 * math.pow(10, 1.0 * (rating1 - rating2) / 400))", "def randomInverseTimeLearningRate(rate):\n def function(t):\n return random() * float(rate)/t\n return function", "def rate(self, newrate):\n command = 'rate ' + str(newrate)\n self.run_command(command)", "def rate(self, rate):\n # Get the sign of the rates before calculating\n x_sign = copysign(1, self.x_rate)\n y_sign = copysign(1, self.y_rate)\n self._rate = rate\n # Multiply by the original sign to retain direction\n self.x_rate = x_sign * fabs(rate * cos(self._angle))\n self.y_rate = y_sign * fabs(rate * sin(self._angle))", "def conversion_rate(self, init, new_currency):\r\n\r\n curr = CurrencyRates()\r\n curr_conv_rate = curr.get_rate(init, new_currency)\r\n return curr_conv_rate", "def get_sample_rate(rate_string):\n if rate_string.endswith(\"%\"):\n rate = float(rate_string[:-1])/100\n elif '/' in rate_string:\n x, y = rate_string.split('/')\n rate = Decimal(x) / (Decimal(y) * Decimal('1.0'))\n else:\n rate = float(rate_string)\n if rate < 0 or rate > 1:\n raise ValueError('rate %r (=%.3f) must be 1%% <= rate <= 100%% ' % (rate_string, rate))\n return int(rate * 1000)", "def randomRateRelease(self, period):\n rate = np.random.choice(self.parameters)\n return rate", "def sample(probs):\n\n probs = probs / probs.sum()\n return np.random.choice(np.arange(len(probs)), p=probs.flatten())", "def _do_set_rate(self, rate):\n self.set_remote_status(1)\n if rate == 0:\n self.set_to_slow()\n elif rate == 1:\n self.set_to_fast()\n self.set_remote_status(3)\n print(self._do_get_rate())", "def set_current_rate(self, rate_to_set):\n pass", "def get_uniformization_sample(initial_state, terminal_state, states, 
path_length, rate_matrix):\n # map states to indices\n state_to_index = dict((state, i) for i, state in enumerate(states))\n # find the maximum rate away from a state\n max_rate = max(-rate_matrix[(a, a)] for a in states)\n # create a uniformized discrete transition matrix in convenient dictionary form\n discrete_transition_matrix = {}\n for (a, b), r in rate_matrix.items():\n discrete_transition_matrix[(a, b)] = r / max_rate\n if a == b:\n discrete_transition_matrix[(a, b)] += 1.0\n # create a discrete transition matrix in the numpy format,\n # and create the rate matrix in the numpy format\n R = np.zeros((len(states), len(states)))\n numpy_rate_matrix = np.zeros((len(states), len(states)))\n for (a, b), r in rate_matrix.items():\n ia = state_to_index[a]\n ib = state_to_index[b]\n numpy_rate_matrix[ia, ib] = r\n R[ia, ib] = discrete_transition_matrix[(a, b)]\n # convert initial and terminal states to indices\n initial_index = state_to_index[initial_state]\n terminal_index = state_to_index[terminal_state]\n # get the probability of the terminal state given the initial state and the path length\n rate_matrix_exponential = scipy.linalg.matfuncs.expm(numpy_rate_matrix * path_length)\n Pab = rate_matrix_exponential[initial_index, terminal_index]\n # draw the number of state changes\n cumulative_probability = 0\n n = 0\n matrix_powers = MatrixPowerCache(R)\n cutoff = random.uniform(0, Pab)\n #print 'cutoff =', cutoff\n #print 'initial_index =', initial_index\n #print 'terminal_index =', terminal_index\n #print matrix_powers.get_power(0)\n while 1:\n poisson_factor = scipy.stats.poisson.pmf(n, max_rate * path_length)\n discrete_transition_factor = matrix_powers.get_power(n)[initial_index, terminal_index]\n cumulative_probability += poisson_factor * discrete_transition_factor\n #print 'cumulative probability =', cumulative_probability\n if cutoff < cumulative_probability:\n break\n n += 1\n #print 'n =', n\n # deal with degenerate cases\n if n == 0:\n return []\n elif n == 1:\n if initial_state == terminal_state:\n return []\n else:\n return [(random.uniform(0, path_length), terminal_state)]\n # Simulate a discrete path given the number of changes and the initial and terminal states.\n # The path is called virtual because some changes may be from a state to itself.\n virtual_path = get_discrete_path_sample(initial_state, terminal_state, states, n+1, discrete_transition_matrix)[1:]\n virtual_times = list(sorted(random.uniform(0, path_length) for i in range(n)))\n events = []\n last_state = initial_state\n last_time = 0\n for current_state, current_time in zip(virtual_path, virtual_times):\n if current_state == last_state:\n continue\n events.append((current_state, current_time))\n last_state = current_state\n last_time = current_time\n return events", "def fv(rate, n_years):\n return pow(1 + rate, n_years)", "def set_learning_rate(self, rates):\n\n for (layer, rate) in zip(self.layers, rates):\n layer.set_learning_rate(rate)", "def sample_distribution(numbers, probabilities, num_samples):\n intervals = []\n intervals.append(probabilities[0])\n new_interval = probabilities[0]\n\n for i in range(1, len(probabilities)):\n new_interval += probabilities[i]\n intervals.append(new_interval)\n\n counter = 0\n new_numbers = []\n while counter <= num_samples:\n for i in range(len(intervals)):\n # Generate a random num between 0 - 1\n # i.e. 
flip a coin.\n rand_prob = np.random.random_sample((1,))\n if rand_prob <= [intervals[i]]:\n new_numbers.append(numbers[i])\n counter += 1\n\n return new_numbers", "def test_simple_rd_1():\n dist = Distribution(['0', '1'], [1 / 2, 1 / 2])\n rd = RDCurve(dist, beta_num=10)\n for r, d in zip(rd.rates, rd.distortions):\n assert r == pytest.approx(1 - entropy(d))", "def relative_rate(self) -> \"double\":\n return _beamforming_swig.randomsampler_sptr_relative_rate(self)" ]
[ "0.6379363", "0.6271895", "0.6191277", "0.61757326", "0.615608", "0.6149897", "0.6067622", "0.6067622", "0.59839284", "0.5963225", "0.59294873", "0.59209937", "0.5910566", "0.5888665", "0.58422065", "0.58108896", "0.5792376", "0.573272", "0.57085705", "0.5676844", "0.5660222", "0.56561714", "0.56416494", "0.5626547", "0.56212205", "0.56164545", "0.5591175", "0.55767584", "0.55758333", "0.5560048" ]
0.687847
0
This function runs the estimation procedure for the first time slice for a given number of demes and repeats the process reps times. The values of the mean population size and migration rates are preset but will be changed in future versions. The third parameter controls the amount of noise in the estimates of coalescent intensities, i.e. the number of regions that contributed to the estimate itself.
def run_Over_Grid(numdemes = 2, reps = 10, numreg = 100, t = 1000):
    Nmean = 2000
    Nsd = 100
    migMean = 0.0001
    migsd = 1e-06
    ndc2 = numdemes * (numdemes - 1) / 2
    rows = ndc2 + numdemes + 1
    I = np.matrix(np.eye(rows))
    Ck = I[0:rows - 1, :]
    Dk = I[rows - 1, :]
    output = []
    for r in xrange(reps):
        N = np.random.normal(Nmean, Nsd, (numdemes,))
        mtemp = np.random.normal(migMean, migsd, (ndc2,))
        xtrue = np.hstack((N, mtemp))
        m = np.zeros((numdemes, numdemes))
        cnt = 0
        for i in xrange(numdemes):
            for j in xrange(i + 1, numdemes):
                m[i, j] = m[j, i] = mtemp[cnt]
                cnt += 1
        Ninv = [ 1.0 / x for x in N ]
        Qtrue = comp_pw_coal_cont(m, Ninv)
        Ptrue = expM(t * Qtrue)
        obs_rates = Ck * Ptrue * Dk.T
        if numreg > 0:
            sd_rates = np.real(np.sqrt(obs_rates.getA() * (1 - obs_rates).getA() / numreg))
            noise = np.random.normal(0.0, sd_rates)
            print 'Noise:\n', noise
        N0 = np.random.normal(Nmean / 2.0, Nsd * 3.0, (numdemes,))
        m0 = np.random.normal(migMean / 2.0, migsd * 3.0, (ndc2,))
        x0 = np.hstack((N0, m0))
        xopt = opt.fmin(compute_Frob_norm_mig, x0, (t, obs_rates), maxfun=1000000, maxiter=100000)
        output.append((xtrue, xopt, linalg.norm(xopt - xtrue)))
    return output
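A minimal usage sketch for the function above (an illustrative addition, not part of the dataset row). It assumes numpy, scipy.optimize and scipy.linalg are imported under the aliases the function uses, and that the project helpers comp_pw_coal_cont, expM and compute_Frob_norm_mig are importable from the surrounding code; the call arguments shown are made up.

# Illustrative sketch only -- the imports and helper module below are assumptions,
# not confirmed by this row.
import numpy as np
import scipy.optimize as opt
from scipy import linalg
# from project_module import comp_pw_coal_cont, expM, compute_Frob_norm_mig  # assumed helpers

results = run_Over_Grid(numdemes=2, reps=3, numreg=100, t=1000)
for xtrue, xopt, dist in results:
    print 'true parameters:     ', xtrue   # preset population sizes and migration rates
    print 'estimated parameters:', xopt    # optimizer output for the same quantities
    print 'L2 distance:         ', dist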
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sample(\n self,\n repetitions,\n nChains=3,\n burnIn=100,\n thin=1,\n convergenceCriteria=0.8,\n variables_of_interest=None,\n DEpairs=2,\n adaptationRate=\"auto\",\n eps=5e-2,\n mConvergence=True,\n mAccept=True,\n ):\n\n self.set_repetiton(repetitions)\n print(\n \"Starting the DEMCz algotrithm with \" + str(repetitions) + \" repetitions...\"\n )\n\n self.min_bound, self.max_bound = (\n self.parameter()[\"minbound\"],\n self.parameter()[\"maxbound\"],\n )\n repetitions = int(repetitions / nChains)\n ndraw_max = repetitions * nChains\n maxChainDraws = int(ndraw_max / nChains)\n\n dimensions = len(self.parameter()[\"random\"])\n\n # minbound,maxbound=self.find_min_max()\n # select variables if necessary\n if variables_of_interest is not None:\n slices = []\n for var in variables_of_interest:\n slices.append(self.slices[var])\n else:\n slices = [slice(None, None)]\n\n # make a list of starting chains that at least span the dimension space\n # in this case it will be of size 2*dim\n nSeedIterations = max(int(np.ceil(dimensions * 2 / nChains)), 2)\n\n # init a simulationhistory instance\n history = _SimulationHistory(\n maxChainDraws + nSeedIterations, nChains, dimensions\n )\n history.add_group(\"interest\", slices)\n\n ### BURN_IN\n burnInpar = [np.zeros((nChains, dimensions))] * nSeedIterations\n for i in range(nSeedIterations):\n self._logPs = []\n simulationlist = []\n old_like = np.empty(nChains)\n param_generator = (\n (rep, self.parameter()[\"random\"]) for rep in range(int(nChains))\n )\n\n for rep, vector, simulations in self.repeat(param_generator):\n burnInpar[i][rep] = vector\n likelist = self.postprocessing(i, vector, simulations, chains=rep)\n simulationlist.append(simulations)\n self._logPs.append(likelist)\n old_like[rep] = likelist\n burnInpar[i][rep] = vector\n if self.status.stop:\n break\n if not self.status.stop:\n history.record(burnInpar[i], self._logPs, 1)\n\n gamma = None\n self.accepts_ratio = 0.000001\n\n # initilize the convergence diagnostic object\n grConvergence = _GRConvergence()\n covConvergence = _CovarianceConvergence()\n\n # get the starting log objectivefunction and position for each of the\n # chains\n currentVectors = burnInpar[-1]\n currentLogPs = self._logPs[-1]\n\n # 2)now loop through and sample\n cur_iter = 0\n accepts_ratio_weighting = 1 - np.exp(-1.0 / 30)\n lastRecalculation = 0\n # continue sampling if:\n # 1) we have not drawn enough samples to satisfy the minimum number of iterations\n # 2) or any of the dimensions have not converged\n # 3) and we have not done more than the maximum number of iterations\n\n while cur_iter < maxChainDraws:\n print(cur_iter, burnIn)\n if cur_iter == burnIn:\n print(\"starting\")\n history.start_sampling()\n\n # every5th iteration allow a big jump\n if np.random.randint(5) == 0.0:\n gamma = np.array([1.0])\n else:\n gamma = np.array([2.38 / np.sqrt(2 * DEpairs * dimensions)])\n\n if cur_iter >= burnIn:\n proposalVectors = _dream_proposals(\n currentVectors,\n history,\n dimensions,\n nChains,\n DEpairs,\n gamma,\n 0.05,\n eps,\n )\n for i in range(len(proposalVectors)):\n proposalVectors[i] = self.check_par_validity(proposalVectors[i])\n # print proposalVectors\n else:\n proposalVectors = []\n for i in range(nChains):\n proposalVectors.append(self.parameter()[\"random\"])\n proposalVectors[i] = self.check_par_validity(proposalVectors[i])\n\n # if self.bounds_ok(minbound,maxbound,proposalVectors,nChains):\n proposalLogPs = []\n old_simulationlist = simulationlist\n old_likelist = self._logPs[-1]\n 
new_simulationlist = []\n new_likelist = []\n\n param_generator = (\n (rep, list(proposalVectors[rep])) for rep in range(int(nChains))\n )\n for rep, vector, simulations in self.repeat(param_generator):\n new_simulationlist.append(simulations)\n like = self.postprocessing(\n cur_iter + nSeedIterations,\n list(vector),\n simulations,\n chains=rep,\n )\n self._logPs.append(like)\n new_likelist.append(like)\n proposalLogPs.append(like)\n if self.status.stop:\n cur_iter = maxChainDraws\n break\n\n if not self.status.stop:\n # apply the metrop decision to decide whether to accept or reject\n # each chain proposal\n decisions, acceptance = self._metropolis_hastings(\n currentLogPs, proposalLogPs, nChains\n )\n self._update_accepts_ratio(accepts_ratio_weighting, acceptance)\n # choose from list of possible choices if 1d_decision is True at\n # specific index, else use default choice\n # np.choose(1d_decision[:,None], (list of possible choices, default\n # choice)\n save_likes = []\n save_pars = []\n save_sims = []\n\n for curchain in range(nChains):\n if decisions[curchain]:\n save_likes.append(float(new_likelist[curchain]))\n old_like[curchain] = float(new_likelist[curchain])\n save_pars.append(proposalVectors[curchain])\n save_sims.append(new_simulationlist[curchain])\n else:\n save_likes.append(old_like[curchain])\n save_pars.append(currentVectors[curchain])\n save_sims.append(old_simulationlist[curchain])\n\n currentVectors = np.choose(\n decisions[:, np.newaxis], (currentVectors, proposalVectors)\n )\n currentLogPs = np.choose(decisions, (currentLogPs, proposalLogPs))\n\n simulationlist = [\n [new_simulationlist, old_simulationlist][int(x)][ix]\n for ix, x in enumerate(decisions)\n ]\n\n likelist = list(\n np.choose(\n decisions[:, np.newaxis], (new_likelist, old_likelist)\n )\n )\n\n # we only want to recalculate convergence criteria when we are past\n # the burn in period\n\n if cur_iter % thin == 0:\n\n historyStartMovementRate = adaptationRate\n # try to adapt more when the acceptance rate is low and less\n # when it is high\n if adaptationRate == \"auto\":\n historyStartMovementRate = min(\n (0.234 / self.accepts_ratio) * 0.5, 0.95\n )\n\n history.record(\n currentVectors,\n currentLogPs,\n historyStartMovementRate,\n grConvergence=grConvergence.R,\n )\n\n if (\n history.nsamples > 0\n and cur_iter > lastRecalculation * 1.1\n and history.nsequence_histories > dimensions\n ):\n lastRecalculation = cur_iter\n grConvergence.update(history)\n covConvergence.update(history, \"all\")\n covConvergence.update(history, \"interest\")\n if all(grConvergence.R < convergenceCriteria):\n cur_iter = maxChainDraws\n print(\n \"All chains fullfil the convergence criteria. 
Sampling stopped.\"\n )\n cur_iter += 1\n\n # 3) finalize\n # only make the second half of draws available because that's the only\n # part used by the convergence diagnostic\n self.history = history.samples\n self.histo = history\n self.iter = cur_iter\n self.burnIn = burnIn\n self.R = grConvergence.R\n text = \"Gelman Rubin R=\" + str(self.R)\n print(text)\n self.status.rep = self.status.repetitions\n self.final_call()", "def multi_run(replications: int, iters: List, n: int):\n global call_count\n kwargs = {\n # 'alpha': 0.75,\n # 'rho': 'VaR',\n 'alpha': 0.75,\n 'rho': 'CVaR',\n 'x0': 2,\n 'n0': n,\n 'mu_1': -15,\n 'mu_2': 10,\n 'sigma_1': 4,\n 'sigma_2': 2\n }\n\n out_dict = {\n 'SA': dict(),\n 'SA_SAA': dict(),\n 'NM': dict(),\n 'NM_SAA': dict(),\n 'LBFGS': dict(),\n 'LBFGS_SAA': dict(),\n 'EI': dict(),\n 'EI_SAA': dict()\n }\n total_calls = dict()\n for key in out_dict.keys():\n total_calls[key] = dict()\n for it_count in iters:\n kwargs['iter_count'] = it_count\n for key in out_dict.keys():\n out_dict[key][it_count] = dict()\n total_calls[key][it_count] = 0\n i = 0\n while i < replications:\n try:\n out_dict['SA'][it_count][i] = SA_run(seed=i, **kwargs)\n total_calls['SA'][it_count] += call_count\n call_count = 0\n out_dict['SA_SAA'][it_count][i] = SA_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['SA_SAA'][it_count] += call_count\n call_count = 0\n out_dict['NM'][it_count][i] = NM_run(seed=i, **kwargs)\n total_calls['NM'][it_count] += call_count\n call_count = 0\n out_dict['NM_SAA'][it_count][i] = NM_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['NM_SAA'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS'][it_count][i] = LBFGS_run(seed=i, **kwargs)\n total_calls['LBFGS'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS_SAA'][it_count][i] = LBFGS_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['LBFGS_SAA'][it_count] += call_count\n call_count = 0\n out_dict['EI'][it_count][i] = EI_run(seed=i, **kwargs)\n total_calls['EI'][it_count] += call_count\n call_count = 0\n out_dict['EI_SAA'][it_count][i] = EI_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['EI_SAA'][it_count] += call_count\n call_count = 0\n i += 1\n except:\n continue\n np.save('call_counts_cvar_%d.npy' % n, total_calls)\n evaluate(out_dict, n)", "def repeat_expt(epsilon, gamma,\n result_nonprivate, \n repetitions=10,\n outfile_singles=None, outfile_aggregates=None, \n data_blocker=1, windsorized=False):\n \n \n blocker = gupt.GuptRunTime.get_data_blockers()[data_blocker-1]\n # 1 NaiveDataBlocker\n # 2 ResamplingDataBlockerConstantSize \n # 3 ResamplingDataBlockerConstantBlocks\n\n if not windsorized:\n DP_mode=\"standard_DP\"\n else:\n DP_mode=\"windsorized_DP\"\n\n logger.info(\"Running %d repetitions with data_blocker=%s\" % (repetitions, blocker))\n logger.info(\"epsilon=%s gamma=%s, in mode %s\" % (epsilon, gamma, DP_mode))\n \n results, starttime = [], time.clock()\n \n # results = pickle.load( open( \"res.pickle\", \"rb\" ))\n \n \n for i in range(repetitions):\n\n # TODO: Perhaps they DO or DO NOT have to be recreated in each run?\n blocker = gupt.GuptRunTime.get_data_blockers()[data_blocker-1]\n reader = censusdatadriver.get_reader()\n runtime = gupt.GuptRunTime(MeanComputer, reader, epsilon, \n blocker_name=blocker, blocker_args=gamma)\n # end TODO\n\n if not windsorized:\n res=runtime.start()\n else:\n res=runtime.start_windsorized()\n \n # artificial 2nd dimension, just for testing these routines:\n # res = res + res\n \n print report_results(res, result_nonprivate, DP_mode, blocker, \n 
epsilon, gamma, outfile_singles)\n sleep_short()\n \n results.append(res)\n\n # pickle.dump(results, open( \"res.pickle\", \"wb\" ) )\n \n \n duration = time.clock() - starttime\n logger.info(\"%d repetitions took %.2f seconds\" % (repetitions, duration))\n \n mean, std = analyze_results(results) # , result_nonprivate)\n \n print report_results_repeated(mean, std, DP_mode, blocker,\n epsilon, gamma, repetitions,\n outfile=outfile_aggregates)", "def simulate_finite_sample(settings):\n # Create a dictionary to save the finale results.\n output = {}\n\n # Create array with x values we want to consider.\n x_range = np.linspace(\n settings['x_min'],\n settings['x_max'],\n settings['x_gridpoints']\n )\n\n # Save x_range to dictionary as we want to plot the results later.\n output['x_range'] = x_range\n\n # Iterate over the list of sample sizes.\n for sample_size in settings['n_list']:\n # Create Arrays to save the results for the given sample size.\n mse_array_bagging = np.ones(settings['x_gridpoints']) * np.nan\n mse_array_unbagged = np.ones(settings['x_gridpoints']) * np.nan\n\n # Iterate over the range of x values.\n for i_x, x_value in enumerate(x_range):\n\n # Create Arrays to save the simulated results.\n y_se_bagged = np.ones(settings['n_repeat']) * np.nan\n y_se_unbagged = np.ones(settings['n_repeat']) * np.nan\n\n # Set random state s.t. for each grid point we draw the same\n # sequence. A larger explanation why we define RandomStates\n # can be found in the documentation.\n random_state = np.random.RandomState(settings['random_seed'])\n\n # Calculate the true prediction for the given x.\n true_prediction = indicator(x_value, settings['mu'])\n\n # Simulate the Expected MSPE for given x.\n for i_repeat in range(settings['n_repeat']):\n\n # Draw a new sample and make a prediction for bagging and\n # without bagging.\n y_sample = (\n random_state.normal(\n settings['mu'],\n settings['sigma'],\n size=sample_size\n )\n )\n\n # Make a prediction with the unbagged predictor.\n prediction_unbagged = indicator(x_value, y_sample.mean())\n\n # Make a prediction with the bagged predictor.\n prediction_bagged = (\n bagged_indicator(\n x_value,\n y_sample,\n b_iterations=settings['b_iterations']\n )\n )\n\n # Calculate the Squared Error for the given repetition.\n y_se_bagged[i_repeat] = (\n (true_prediction - prediction_bagged) ** 2\n )\n y_se_unbagged[i_repeat] = (\n (true_prediction - prediction_unbagged) ** 2\n )\n\n # Calculate the MSPE for bagging and the normal predictor.\n mse_array_bagging[i_x] = (\n y_se_bagged.sum(axis=0) / settings['n_repeat']\n )\n mse_array_unbagged[i_x] = (\n y_se_unbagged.sum(axis=0) / settings['n_repeat']\n )\n\n # Save the results of the given sample size.\n output[sample_size] = {}\n output[sample_size]['mse_bagging'] = mse_array_bagging\n output[sample_size]['mse_unbagged'] = mse_array_unbagged\n\n return output", "def precalc_all(REPS):\n for sigma in [0.25, 1.5]:\n print('-'*60)\n\n N_RANGE = arange(5,105,5)\n\n filename = f'categorical_K2_C{C}_sigma{sigma:.2f}_M{M}'.replace('.','_')\n with Timer(f'{filename} ({REPS} repetitions)'):\n run_precalc(filename, [(n,n,n) for n in N_RANGE], C, sigma, M, REPS)\n\n filename = f'categorical_LOO_C{C}_sigma{sigma:.2f}_M{M}'.replace('.','_')\n with Timer(f'{filename} ({REPS} repetitions)'):\n run_precalc(filename, [(n,1,n) for n in N_RANGE], C, sigma, M, REPS)", "def sampleall(nruns=2, nMC=(3000, 100000), useNormalizedBeam=True, irun=0,\n noCorrelations=True, fac=None, doBlock=True, nhits=None):\n \n \n \n plotOne = False 
### can't get this to work yet!\n if plotOne:\n pylab.axes()\n pylab.xlim(-100,100)\n pylab.ylim(-100,100)\n \n #dets = [13, 14, 15, 23, 24, 25] ## LUIS\n\n if irun==0:\n dets = [12, 13, 14, 15] # brad, day\n nrow=2; ncol=2\n # dets = [13, 14] # brad, both\n # nrow=1; ncol=2\n DayNight=0\n elif irun==1:\n dets = [13, 14, 23, 24, 25, 33, 34, 35, 43, 44, 45 ] # brad, night\n nrow=3; ncol=4\n # dets = [13, 14] # brad, both\n # nrow=1; ncol=2\n DayNight = 1\n elif irun==2: \n dets = [13, 14] # brad, both\n nrow=1; ncol=2\n DayNight=2\n \n reslist = []\n nfig=2\n ntotrun = nruns*nfig\n for run in range(nruns):\n res={}\n for ib, det in enumerate(dets):\n print('Detector: %d' % det)\n fig=pylab.figure(irun*ntotrun+nfig*run)\n if not plotOne:\n ax=fig.add_subplot(nrow, ncol, ib+1)\n ax.cla()\n\n ## need to explicitly read the data here, now -- how to make generic?\n data, xyrange = read_data_MAXI(num=det, DayNight=DayNight, sigcut=sigcut, ctscut=ctscut)\n like, prop_sigmas, start_params = setup_sampler(data, xyrange,\n useNormalizedBeam=useNormalizedBeam)\n \n res[det] = sample1beam(like, nMC=nMC, fac=fac,\n prop_sigmas=prop_sigmas, start_params=start_params,\n noCorrelations=noCorrelations,\n doBlock=doBlock)\n if plotOne: \n pylab.xlim(-100,100)\n pylab.ylim(-100,100)\n\n fig=pylab.figure(irun*ntotrun+nfig*run+1)\n ax=fig.add_subplot(nrow, ncol, ib+1)\n samples = cat([ s.samples for s in res[det][0] ])\n #samples.transpose()\n ## nb. with numpy, a.transpose doesn't change the array, just gives a new view.\n for var in samples.transpose(): ax.plot(var)\n reslist.append(res)\n\n return reslist", "def simulationWithDrug(numViruses, maxPop, maxBirthProb, clearProb, resistances,\n mutProb, numTrials):\n \n #create viruses list\n viruses = []\n for i in range(numViruses):\n viruses.append(ResistantVirus(maxBirthProb, clearProb, resistances, mutProb))\n \n #create test patient P1\n results = np.zeros(numTrials*300).reshape(300,numTrials)\n resultsPopResist = np.zeros(numTrials*300).reshape(300,numTrials)\n \n #runs numTrials of 300 steps, putting results in an array of 300 lines, \n # numTrials columns\n for t in range(numTrials) :\n P1 = TreatedPatient(viruses, maxPop)\n for s in range(150):\n P1.update()\n results[s][numTrials-1] += P1.getTotalPop()\n resultsPopResist[s][numTrials-1] += P1.getResistPop(['guttagonol'])\n \n P1.addPrescription('guttagonol')\n for s in range(150,300):\n P1.update()\n results[s][numTrials-1]+=P1.getTotalPop()\n resultsPopResist[s][numTrials-1] += P1.getResistPop(['guttagonol'])\n \n \n #calculating average of virus population size at each step \n yValues1 = []\n for i in range(300):\n a = sum(results[i].tolist())/len(results[i])\n yValues1.append(a)\n \n yValues2 = []\n for i in range(300):\n a = sum(resultsPopResist[i].tolist())/len(resultsPopResist[i])\n yValues2.append(a)\n\n pylab.plot(yValues1,label='pop average')\n pylab.plot(yValues2,'r--',label = 'resistant virus population')\n pylab.title('virus pop average at each step')\n pylab.legend()\n pylab.xlabel('Time Steps')\n pylab.ylabel('pop #')\n pylab.show()", "def chunk(wb_run,sample_run,ei_guess,rebin,mapingfile,nchunk,**kwargs):\n global reducer,rm_zero,inst_name,van_mass,bleed_switch,rate,pixels\n print 'DGreduce run for ',inst_name,'run number ',sample_run\n try:\n n,r=lhs('both')\n wksp_out=r[0]\n except:\n if sample_run == 0:\n #deal with the current run being parsed as 0 rather than 00000\n sample_run='00000'\n wksp_out=inst_name+str(sample_run)+'.spe'\n if kwargs.has_key('sum') and 
kwargs.get('sum')==True:\n wksp_out=inst_name+str(sample_run[0])+'sum'+'.spe'\n \n start_time=time.time()\n \n if sample_run=='00000' and mtd.doesExist(inst_name+'00000.raw')==True:\n print 'Deleteing previous instance of temp data'\n DeleteWorkspace(Workspace=inst_name+'00000.raw')\n \n \n reducer.energy_bins = rebin\n \n mon_list1=reducer.ei_mon_spectra\n mon_list2=reducer.mon1_norm_spec\n mon_list1.append(mon_list2)\n #mon_list1.sort()\n print 'Monitors for this chunk are: ',mon_list1\n # monitors for merlin[69634,69638]\n \n if inst_name == 'MER':\n #number of spectrums per instrument and where the detectors start (i.e. 5 for mari but 1 for merlin)\n numspec=69632\n spectrum_start=1\n if inst_name == 'MAP':\n #number of spectrums per instrument and where the detectors start (i.e. 5 for mari but 1 for merlin)\n numspec=41472\n spectrum_start=1\n \n if kwargs.has_key('det_cal_file'):\n cal_file = kwargs.get('det_cal_file') \n else:\n print 'Setting detector calibration to detector block info from ', sample_run\n \n reducer.det_cal_file =None\n reducer.relocate_dets = False\n nums=range(spectrum_start,numspec,nchunk)\n output_wkspName=wksp_out\n for i in nums:\n print '=========================================================================='\n print 'start spectra for this chunk',i\n chunk=range(i,i+nchunk)\n endIndex=nchunk-1\n if i+nchunk > numspec:\n chunk=range(i,numspec+1)\n endIndex=len(chunk)-1\n print 'end spectra for this chunk ', i+endIndex\n \n speclist=mon_list1+chunk\n #print speclist\n LoadRaw(Filename=wb_run,OutputWorkspace=\"wb_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n LoadRaw(Filename=sample_run,OutputWorkspace=\"run_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n tmp=arb_units(\"wb_wksp\",\"run_wksp\",ei_guess,rebin,'none_for_this_run_type',one2one=True,bleed=False,**kwargs)\n \n \n DeleteWorkspace(Workspace=\"wb_wksp\")\n DeleteWorkspace(Workspace=\"run_wksp\")\n #DeleteWorkspace(\"_wksp.spe\")\n #DeleteWorkspace(\"_wksp.spe-white\")\n \n if i == spectrum_start:\n #crop the workspace to remove the monitors, the workpsace seems sorted on specnumber so this is ok for instruments where the monitors are at the end of the \n # spectrum list\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=wksp_out,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n else:\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=tmp,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n ConjoinWorkspaces(InputWorkspace1=wksp_out,InputWorkspace2=tmp,CheckOverlapping='0')\n print int(((float(i+endIndex))/float(numspec))*100),'% complete'\n print '===============================================================================' \n \n GroupDetectors(InputWorkspace=output_wkspName,OutputWorkspace=output_wkspName,MapFile=mapingfile)\n\n \n \n print 'Elapsed time =',time.time()-start_time, 's'\n return mtd[wksp_out]", "def run_sims(output_filepath, pop_sizes, trials, error_rate_from, error_rate_to, start_allele_freq, ending_num_reads, use_norm_approx, s):\n fileout = open(output_filepath, \"w\")\n #write header for output file\n fileout.write('ending_allele_frequency_with_start_freq_of_' + str(start_allele_freq) + '\\n')\n #for each simulation\n for i in xrange(trials):\n allele_freq = start_allele_freq\n if allele_freq != 0.0 and allele_freq != 1.0:\n for j in xrange(len(pop_sizes)-1): #for each gen\n if use_norm_approx == True:\n #since pop is super big, use normal distribution to\n #approximate the binomial\n var = pop_sizes[j+1] * allele_freq * (1-allele_freq)\n stdev = 
math.sqrt(var)\n if stdev <=0:\n print j, allele_freq\n print stdev, var\n mean = pop_sizes[j+1] * allele_freq * (1+s)\n next_gen_allele_count = round(numpy.random.normal(mean, stdev), 0)\n elif use_norm_approx == False:\n #use binomial if computation time is managable\n allele_freq = allele_freq * (1+s)\n next_gen_allele_count = float(numpy.random.binomial(pop_sizes[j+1], allele_freq))\n allele_freq = next_gen_allele_count / pop_sizes[j+1]\n if allele_freq <= 0.0:\n allele_freq = 0.0\n break\n elif allele_freq >=1.0:\n allele_freq = 1.0\n break\n #introduce noise from subsampling when sequencing\n #and also from mutation/sequencing error\n sub_samp_seq_error_allele_count = sub_sample_add_seq_error_population(ending_num_reads, allele_freq, error_rate_from, error_rate_to)\n allele_freq = sub_samp_seq_error_allele_count / ending_num_reads\n fileout.write(str(allele_freq) + '\\n')\n fileout.close()\n return", "def refugia_adj_5_simsplit_4epochs_iter1 (params, ns):\n #28 parameters \n nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def simulationDelayedTreatment(numTrials):\n \n numViruses = 100\n maxPop = 1000\n maxBirthProb = 0.1\n clearProb = 0.05\n resistances = {'guttagonol': False}\n mutProb = 0.005\n delays = [300, 150, 75, 0]\n f, axarr = pylab.subplots(2, 2)\n x_plot = []\n\n for delay in delays:\n FinalPopSize = [0.0 for x in range(numTrials)]\n for trial in range(numTrials):\n viruses = [ResistantVirus(maxBirthProb, clearProb, resistances, mutProb) for n in range(numViruses)]\n patient = TreatedPatient(viruses, maxPop)\n for i in range(delay):\n patient.update()\n patient.addPrescription('guttagonol')\n for j in range(delay, delay+150):\n patient.update()\n FinalPopSize[trial] = patient.getTotalPop()\n x_plot.append(FinalPopSize)\n\n axarr[0, 0].hist(x_plot[0])\n axarr[0, 1].hist(x_plot[1])\n axarr[1, 0].hist(x_plot[2])\n axarr[1, 1].hist(x_plot[3])\n pylab.show()\n\n # pylab.plot(avgPopSize, label = 'avg pop size')\n # pylab.plot(avgGuttagonolResistantPop, label = 'avg pop size guttagonol-resistant')\n # pylab.xlabel(\"Time\")\n # pylab.ylabel(\"Average Population Size\")\n # pylab.title(\"Average Size of the Virus Populations\")\n # pylab.legend(loc = 'best')\n # pylab.show()", "def refugia_adj_5_simsplit_4epochs_iter5 (params, ns):\n #28 parameters \n nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, 
nu3c, nu1d, nu2d, nu3d, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def refugia_adj_5_simsplit_4epochs_iter2 (params, ns):\n #28 parameters \n nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def split_simsplit_3epochs_iter5(params, ns):\n #24 parameters \n nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m2_12, m2_13, m2_21, m2_23, m2_31, m2_32, T1, T2, T3 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2\n nu_T2 = [nu1b, nu2b, nu3b]\n mig2 = numpy.array([[0, m2_12, m2_13],[m2_21, 0, m2_23], [m2_31, m2_32, 0]]) \n fs.integrate(nu_T2, T2, m=mig2)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T3 = [nu1c, nu2c, nu3c]\n fs.integrate(nu_T3, T3) \n return 
fs", "def refugia_adj_5_simsplit_4epochs_iter4 (params, ns):\n #28 parameters \n nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def refugia_adj_5_simsplit_4epochs_iter3 (params, ns):\n #28 parameters \n nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def run_Simulation2(k,N=100,T=10,start = 1,p=0.5,q=0.08,startcenter = False,startcorner=False):\n recover = [0]\n infect = [start]\n suspect = [N-start]\n pop = [Person() for i in range(N)]\n ##we need to change the code for the case start people infected\n for i in range(start):\n pop[i].get_infected();\n if(startcenter):\n resetcenter(start,pop)\n if(startcorner):\n resetcorner(start,pop)\n np.random.seed(10)\n for i in range(T):\n for j in range(N):\n pop[j].movepos(p)\n X = calculatedistance(pop)\n tree = cKDTree(X)\n for j in range(N):\n if pop[j].is_infected():\n addvalue = np.array([X[j]])\n inds = tree.query_ball_point(addvalue, q)\n inds = inds[0]\n #may have problem here\n for l in inds:\n if pop[l].is_willinfected():\n pop[l].get_infected()\n\n for j in range(N):\n if pop[j].is_infected():\n if np.random.rand()< k:\n pop[j].get_recovered()\n\n 
recover.append(count_recover(pop))\n infect.append(count_infect(pop))\n suspect.append(count_suspectial(pop))\n newrecover = [i/N for i in recover]\n newsuspect = [s/N for s in suspect]\n newinfect = [i/N for i in infect]\n plt.plot(range(T+1),newrecover,label = \"r: percentage of removed \")\n plt.plot(range(T+1),newsuspect,label = \"s: percentage of susceptible\")\n plt.plot(range(T+1),newinfect,label = \"i: percentage of infected\")\n plt.xlabel(\"T\")\n plt.ylabel(\"percentage\")\n plt.title(\"Percentage of Population, Discrete\")\n plt.legend()\n plt.show()", "def run_experiment(x_loops=15, max_steps=0, display_on=True, max_fps=10,\n garden_size=8, tako_number=1, pop_max=30, max_width=1800,\n max_height=900, collect_data=True, export_all=False,\n rand_nets=False, max_gen = 505, genetic_mode=\"Plain\",\n learning_on=False, seeds=None, garden_mode=\"Diverse Static\",\n family_detection=None, family_mod=0, record_inbreeding=True,\n inbreed_lim = 1.1, hla_genes=0, binary_health=0,\n carrier_percentage=40, two_envs=False, diff_envs=False,\n migration_rate=0, phen_pref=False, filename=\"\"): \n #round width/height down to nearest multiple of 50 if need be\n if max_width % 50 != 0:\n max_width = max_width - (max_width % 50)\n if max_height % 50 != 0:\n max_height = max_height - (max_height % 50)\n\n i = 0\n #create csv files if they don't already exist\n if collect_data or export_all:\n if filename == \"\":\n filename = str(int(time.time())) + \".csv\"\n elif len(filename) < 4:\n filename = filename + \".csv\"\n elif filename[-4:] != \".csv\":\n filename = filename + \".csv\"\n\n if not os.path.exists(\"Data\"):\n os.makedirs(\"Data\")\n\n if collect_data:\n if not os.path.exists(os.path.join(\"Data\", filename)):\n with open(os.path.join(\"Data\", filename), 'a', newline='') as\\\n csvfile:\n writ = csv.writer(csvfile)\n writ.writerow(['iteration', 'env #', 'ID', 'parent1',\n 'parent2', 'age', 'generation', '# children',\n 'mating attempts', 'accum pain',\n 'cause of death', 'timestep', 'mutations',\n 'parent_degree', 'parent_genoverlap',\n '# disorders',\n 'health a', 'health b', 'preference'])\n else:\n with open(os.path.join(\"Data\", filename), newline='') as\\\n csvfile:\n reader = csv.DictReader(csvfile)\n row = None\n for row in reader: pass\n if row != None:\n i = int(row[\"iteration\"]) + 1\n\n if export_all:\n h = make_headers()\n f = os.path.join('Data', (filename[:-4] + ' gene data.csv'))\n if not os.path.exists(f):\n with open(f, 'a') as file:\n writ = csv.writer(file)\n writ.writerow(h)\n\n tako.rand_nets = rand_nets\n tako.family_mod = family_mod\n tako.family_detection = family_detection\n gt.family_detection = family_detection\n tako.record_inbreeding = record_inbreeding\n tako.inbreed_lim = inbreed_lim\n tako.hla_genes = hla_genes\n tako.binary_health = binary_health\n tako.carrier_percentage = carrier_percentage\n tako.phen_pref = phen_pref\n gt.phen_pref = phen_pref\n \n loop_limit = x_loops\n if loop_limit < 1:\n loop_limit = 1\n\n if seeds == None:\n seeds = [None for i in range(x_loops)]\n\n while loop_limit > 0:\n #check if seeds is long enough\n if len(seeds) < loop_limit + i:\n for j in range(loop_limit + i - len(seeds)):\n seeds.append(seeds[j])\n if seeds[0] != None:\n tako.set_seed(seeds[i])\n g = garden_game(garden_size, tako_number, pop_max, max_width,\n max_height, display_on, max_fps, learning_on,\n genetic_mode, rand_nets, garden_mode, filename,\n export_all, family_mod, family_detection,\n two_envs, diff_envs, migration_rate,\n seeds[i])\n if display_on:\n 
main_window = g\n main_window.main_loop(max_steps, max_gen, display_on,\n collect_data, garden_mode, i)\n else:\n g.main_loop(max_steps, max_gen, display_on, collect_data,\n garden_mode, i)\n loop_limit -= 1\n i += 1", "def run_expt_many_times(result_nonprivate, repetitions=10,\n data_blocker=1, windsorized=False,\n outfile_singles=None, outfile_aggregates=None):\n for epsilon in numpy.arange(0.1, 2.2, 0.2):#(0.2, 10, 0.2):\n for gamma in range(1, 10, 1): # (1, 7, 1):\n \n repeat_expt(epsilon, gamma,\n data_blocker=data_blocker, windsorized=windsorized,\n result_nonprivate=result_nonprivate,\n repetitions=repetitions,\n outfile_singles=outfile_singles, \n outfile_aggregates=outfile_aggregates)", "def run(self):\n evaluateAllRuns = False\n while True:\n if self.host == \"\":\n # respond to clients\n self.respond2Clients()\n else:\n print(\"Next...\")\n # randomly choose experiment + run\n if not evaluateAllRuns:\n print(\"Randomly fetching run\")\n self.exp, self.runnum, self.detname = randExpRunDet()\n else:\n\t\t try:\n print(\"Fecthing next run in experiment\")\n self.exp, self.runnum, self.detname = nextExpRunDet(self.goodExp, self.runList[0])\n if self.exp is None:\n self.runList.pop(0)\n continue\n except:\n evaluateAllRuns = False\n continue\n if not self.checkStatus(self.exp, self.runnum, self.detname):\n print \"trying: exp %s, run %s, det %s\"%(self.exp,self.runnum,self.detname)\n try: #temp\n self.ds = safeDataSource(self.exp, self.runnum)\n except: #temp\n continue #temp\n self.run = self.ds.runs().next()\n self.times = self.run.times()\n #Start temp code\n if self.detname is None:\n continue\n #End temp code\n self.det = psana.Detector(self.detname)\n self.det.do_reshape_2d_to_3d(flag=True)\n try:\n self.iX = np.array(self.det.indexes_x(self.run), dtype=np.int64)\n self.iY = np.array(self.det.indexes_y(self.run), dtype=np.int64)\n self.ipx, self.ipy = self.det.point_indexes(self.run, pxy_um=(0, 0))\n self.alg = PyAlgos()\n self.alg.set_peak_selection_pars(npix_min=2, npix_max=30, amax_thr=300, atot_thr=600, son_min=10)\n mask = self.det.mask(self.runnum, calib=True, status=True, edges=True, central=True, unbond=True, unbondnbrs=True)\n\n samples = np.linspace(0, len(self.times), num=100, endpoint=False, retstep=False, dtype='int')\n offset = np.floor(np.random.uniform(0, len(self.times)-samples[-1])).astype('int')\n mysamples = samples + offset\n numCrystals = 0\n for self.eventNum in mysamples:\n self.evt = self.run.event(self.times[self.eventNum])\n calib = self.det.calib(self.evt)\n if calib is not None:\n peaks = self.alg.peak_finder_v3r3(calib, rank=3, r0=3, dr=2, nsigm=10, mask=mask.astype(np.uint16))\n if self.likelihood(peaks) >= self.goodLikelihood:\n numCrystals += 1\n if numCrystals >= self.minCrystals:\n self.numSaved +=1\n self.updateStatus(self.exp, self.runnum, self.detname, self.numSaved)\n self.lastGood = True\n break\n except:\n print \"Could not analyse this run\"\n #If an experiment has not had all of its runs evaluated yet\n # and if the last randomly selected run in this experiment was good\n # then all the runs in this experiment should be evaluated\n if (self.exp not in self.goodList) and self.lastGood:\n self.goodExp = self.exp #Save the name of this experiment\n self.goodRun = self.runnum #Save the run that has already been evaluated\n self.lastGood = False #Reset the condition that the last run was \"good\"\n self.goodList.append(self.goodExp) #Add this experiment name to the list of experiments that have had all runs evaluated\n self.runList = 
returnRunList(self.goodExp, self.goodRun) #save list of all runs in this good exp\n evaluateAllRuns = True #rerun loop with new algorithm that evaluates each run in an experiment\n continue\n if evaluateAllRuns: #If the loop is currently evaluating all of the runs in an experiment\n if(len(self.runList) > 1):\n self.runList.pop(0) #Remove runs from the list of runs each time they are evaluated\n else:\n self.runList.pop(0)#Remove runs until the list is completely empty\n evaluateAllRuns = False #Stop evaluated all the runs of an experiment, go back to random fetching", "def split_simsplit_3epochs_iter4(params, ns):\n #24 parameters \n nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m2_12, m2_13, m2_21, m2_23, m2_31, m2_32, T1, T2, T3 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2\n nu_T2 = [nu1b, nu2b, nu3b]\n mig2 = numpy.array([[0, m2_12, m2_13],[m2_21, 0, m2_23], [m2_31, m2_32, 0]]) \n fs.integrate(nu_T2, T2, m=mig2)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T3 = [nu1c, nu2c, nu3c]\n fs.integrate(nu_T3, T3) \n return fs", "def test_est_popsize(self):\n\n k = 50\n rho = 1.5e-8\n mu = 2.5e-8\n length = int(1e6)\n times = arghmm.get_time_points(ntimes=30, maxtime=200000)\n popsize = 1e4\n refine = 0\n\n util.tic(\"sim ARG\")\n arg = arghmm.sample_arg_dsmc(k, 2 * popsize,\n rho, start=0, end=length, times=times)\n #arg = arglib.sample_arg_smc(k, 2 * popsize,\n # rho, start=0, end=length)\n #arg = arglib.sample_arg(k, 2 * popsize, rho, start=0, end=length)\n util.toc()\n\n x = []\n for tree in arglib.iter_marginal_trees(arg):\n arglib.remove_single_lineages(tree)\n x.append(mle_popsize_tree(tree, mintime=0))\n \n p = plot(x, ymin=0)\n p.plot([0, len(x)], [popsize, popsize], style='lines')\n \n pause()", "def split_simsplit_3epochs_iter1(params, ns):\n #24 parameters \n nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m2_12, m2_13, m2_21, m2_23, m2_31, m2_32, T1, T2, T3 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2\n nu_T2 = [nu1b, nu2b, nu3b]\n mig2 = numpy.array([[0, m2_12, m2_13],[m2_21, 0, m2_23], [m2_31, m2_32, 0]]) \n fs.integrate(nu_T2, T2, m=mig2)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T3 = [nu1c, nu2c, nu3c]\n fs.integrate(nu_T3, T3) \n return fs", "def simulateUpTo2016FullPop3params(params, extraParams):\n beta, betaW, mu = params\n\n aseason, delta, gamma, gammaA, gammaE, k, m, nInfected, pseason, Pop, red_beta, red_mu, rlen, rep, \\\n sigma, tau, tspan_length, V, vrate, 
waterMat, wRate = extraParams\n\n beta_A = red_beta*beta\n mu_A = red_mu*mu\n\n repMat = [rep]*10\n\n numDep = 10\n\n travelMat = formTravelMat1(numDep, rlen, tau, vrate, Pop)\n #set initial conditions:\n S0 = np.copy(Pop) - nInfected\n E0 = np.zeros(np.size(Pop))\n I0 = map(lambda x: x / reporting_rate, nInfected)#nInfected/rep # np.zeros(np.size(Pop))\n A0 = np.zeros(np.size(Pop))\n R0 = np.zeros(np.size(Pop))\n RA0 = np.zeros(np.size(Pop))\n W0 = np.zeros(np.size(Pop)) # 15*Pop*365\n C0 = I0\n\n\n\n initCond = np.array([S0, E0, I0, A0, R0, RA0, W0, C0]).reshape((8 * numDep))\n # print initCond\n tspan = range(tspan_length)\n\n paramsODE = [aseason, beta, beta_A, betaW, delta, gamma, gammaA, gammaE, k, m, mu, mu_A, numDep, \\\n pseason, sigma, travelMat, V, wRate, waterMat]\n\n\n sol, info = odeint(choleraEqs10WithoutVaccinationNetwork, initCond, tspan,\n args=(paramsODE,), full_output=True)\n\n\n if info['message'] == 'Integration successful.':\n temp = sol[:, 70:]\n newcases = np.zeros((np.size(sol[:, 0]), 10))\n\n newcases[0, :] = sol[0, 70:80]\n\n for jvals in range(10):\n for t in range(1, len(tspan)):\n newcases[t, jvals] = (np.sum(temp[t, jvals]) - np.sum(temp[t - 1, jvals]))\n\n # newcases[:, jvals] = repMat[jvals] * newcases[:, jvals]\n\n\n # print np.shape(newcases)\n return [sol, newcases]\n else:\n # print 'hola'\n return [0, np.ones((tspan_length, 10))*10**8]", "def batch_anneal(self, times=10):\n for i in range(1, times + 1):\n print(f\"Iteration {i}/{times} -------------------------------\")\n self.T = self.T_save\n self.iteration = 1\n self.cur_solution, self.cur_fitness = self.initial_solution()\n self.anneal()", "def FigA7(case):\n \n #set the parameter, arrays\n \n n_array=np.array([1,2,3])\n\n #set the result arrays\n if case==0:\n class_number=5\n elif case==1:\n class_number=6\n fate=np.zeros([class_number])#number of evolutionary fate\n fate_matrix=np.zeros([np.size(n_array),np.size(fate)])\n \n time=np.linspace(0,100000, 1000000)\n loop=10**6\n \"\"\"\n 0 Co and/or Ch cannot survive in mono-culture\n 1 Co cannot invade\n 2 Only equilibrium of exclusion is stable\n 3 Only equilibrium of coexistence is stable\n 4 Two equilibria are UNstable\n 5 two Equilibrium are stable (which may occur only when sCO vs rCh)\n \"\"\"\n for tri in range(np.size(n_array)):\n counter=0\n n=n_array[tri]\n print(str(\"Hill coefficient is %d\" %(n)))\n fate=np.zeros([class_number])#number of evolutionary fate should be reset\n if case==0 or case==1:\n fname=str('parameter-sweep-MC-n%d-case%d' %(n, case))\n else:\n print(\"Error in case\")\n return 1\n \n for i in range(loop):\n if(i+1)%10000==0:\n print(i+1)\n Ks,cd,T0, alpha,=np.random.uniform(0,1,4)\n Kr,cr=np.random.uniform([Ks,0],[1,1],2)#Kr>Ks and cr.cd\n #check whether r is positive or not\n if case==0:\n r1=rmax*(1-cr-cd)#rCO\n r2=rmax#sCH\n W0Co=r1-dmax*T0**n/(T0**n+Kr**n)-alpha#initial growth of Cooperator\n W0Ch=r2-dmax*T0**n/(T0**n+Ks**n)-alpha#initial growth of Cheater\n elif case==1:\n r1=rmax*(1-cd)#sCo\n r2=rmax*(1-cr)#rCh\n W0Co=r1-dmax*T0**n/(T0**n+Ks**n)-alpha\n W0Ch=r2-dmax*T0**n/(T0**n+Kr**n)-alpha\n stab_e=0#initialize the falgs of stability\n stab_c=0\n if W0Co<0 or W0Ch<0:\n fate[0]+=1\n res=0\n else:\n #succeed in mono-culture \n init=np.array([T0,10**(-6)])\n if case==0: \n solCo=odeint(DyCoop, init, time, args=(T0, r1, Kr, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Ks, alpha, n))\n x2s=solCh[-1,1]\n else:\n solCo=odeint(DyCoop, init, time, args=(T0, r1, Ks, 
alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Kr, alpha, n))\n x2s=solCh[-1,1]\n \n #Evolutionary dynamics \n if case==0:\n K=Kr\n else:\n K=Ks\n if r1*(1-x2s)-dmax*T0**n/(T0**n+K**n)<alpha:\n #Co cannot invade\n fate[1]+=1\n res=1\n else:\n #Co can invade\n #calculate Tdagger Td and check whether coexist or exclude\n if case==0:\n #rCo vs sCh\n #in this case, at most one equilbrium is stable\n tau=Quad(case,alpha,cr+cd,0,Kr, Ks, n)\n Td=tau**(1/n)\n if Td<Ts:\n #Co exclude Ch\n fate[2]+=1\n res=2\n else:\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #unstable coexistence nor exclusion\n fate[4]+=1\n res=4\n print(Td, x1d, x2d)\n else:\n #sCo vs rCh\n # in this case two equilibria can be stable at the same time\n [tau_p,tau_m]=Quad(case,alpha,cd,cr,Ks, Kr, n)\n if tau_m>Ts**n or tau_p<Ts**n:\n # cexclusion is stable\n stab_e=1\n # stability in coexistence \n if tau_p<0:\n stab_c=0\n else:\n Td=tau_p**(1/n)\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n stab_c=1\n #classify\n if stab_e==1 and stab_c==1:\n # two stable equilbria\n fate[5]+=1\n res=5\n elif stab_e==1 and stab_c==0:\n #only stable cexclusion\n fate[2]+=1\n res=2\n elif stab_e==0 and stab_c==1:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #both unstable\n fate[4]+=1\n res=4\n \n #save the results\n if counter==0:\n result=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n #save the result with parameter values\n \n else:\n #add array of results\n R=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n result=np.concatenate((result, R), axis=0)\n counter+=1\n \n #save csv file and graph\n np.savetxt(fname+'.csv',result, delimiter=',', header='Ks, Kr, cr, cd, alpha, T0, class', fmt='%.6f') \n print(fate)\n fate_matrix[tri,:]=fate \n if case==0: \n np.savetxt('parameter_sweep_MC_total_case0.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4', fmt='%d')\n else:\n np.savetxt('parameter_sweep_MC_total_case1.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4,cl5', fmt='%d')\n Plot(case)", "def run_experiment(m1,m2,m3,N):\r\n bandits = [Bandit(m1),Bandit(m2),Bandit(m3)]\r\n \r\n #for PLOTTING ONLY\r\n #this is not needed for functioning\r\n data = np.empty(N)\r\n #play game N times\r\n for i in range(N):\r\n j = np.argmax([b.mean for b in bandits])\r\n #pull the one we chose\r\n x = bandits[j].pull()\r\n #whether explore or exploit, we update our knowledge\r\n bandits[j].update(x)\r\n #for PLOTTING\r\n data[i] = x\r\n #this is basically the rate your agent sees at the win rate per bandit\r\n #once the experiment is over\r\n cumulative_average = np.cumsum(data)/(np.arange(N)+1)\r\n plt.plot(cumulative_average)\r\n #just lines, like grid lines\r\n for b in bandits:\r\n plt.plot(b.signal)\r\n plt.title(\"Sin Bandits\")\r\n #plt.xscale('log')\r\n plt.show()\r\n return cumulative_average", "def infer(self, niter, reps):\n # Start containers to hold the optimized parameters\n self.p_init = []\n self.hot_params = []\n self.cold_params = []\n self.opt_params = []\n self.theta = []\n self.mod_like = []\n self.opt_like = []\n self.aic = []\n # Get the sample sizes from the SFS\n sample_sizes = self.sfs.sample_sizes\n # 
Generate the points of the grid for the optimization\n grid = 50\n # Apply mask\n # Calculate the model SFS\n mod_sfs = self.modelfunc(self.params['Values'], sample_sizes, grid)\n # Calculate the likelihood of the data given the model SFS that we just\n # generated\n mod_like = dadi.Inference.ll_multinom(mod_sfs, self.sfs)\n # Start with hot annealing, then cold annealing, then BFGS\n r = 0\n while r < reps:\n p_init = dadi.Misc.perturb_params(\n self.params['Values'],\n fold=1,\n lower_bound=self.params['Lower'],\n upper_bound=self.params['Upper'])\n # Get some hot-optimized parameters\n p_hot = dadi_custom.optimize_anneal(\n p_init,\n self.sfs,\n self.modelfunc,\n grid,\n lower_bound=self.params['Lower'],\n upper_bound=self.params['Upper'],\n maxiter=niter,\n Tini=100,\n Tfin=0,\n learn_rate=0.005,\n schedule=\"cauchy\")\n p_cold = dadi_custom.optimize_anneal(\n p_hot,\n self.sfs,\n self.modelfunc,\n grid,\n lower_bound=self.params['Lower'],\n upper_bound=self.params['Upper'],\n maxiter=niter,\n Tini=50,\n Tfin=0,\n learn_rate=0.01,\n schedule=\"cauchy\")\n p_bfgs = dadi.Inference.optimize_log(\n p_cold,\n self.sfs,\n self.modelfunc,\n grid,\n lower_bound=self.params['Lower'],\n upper_bound=self.params['Upper'],\n maxiter=niter)\n self.p_init.append(p_init)\n self.hot_params.append(p_hot)\n self.cold_params.append(p_cold)\n self.opt_params.append(p_bfgs)\n opt_sfs = self.modelfunc(p_bfgs, sample_sizes, grid)\n opt_like = dadi.Inference.ll_multinom(opt_sfs, self.sfs)\n # Estimate theta\n self.theta.append(dadi.Inference.optimal_sfs_scaling(opt_sfs, self.sfs))\n # And calculate the AIC\n aic = 2 * len(self.params) - 2 * opt_like\n self.mod_like.append(mod_like)\n self.opt_like.append(opt_like)\n self.aic.append(aic)\n r += 1\n # Set these as class variables for printing later\n self.model_sfs = opt_sfs\n return", "def simulationTwoDrugsDelayedTreatment(numTrials):\n \n numViruses = 100\n maxPop = 1000\n maxBirthProb = 0.1\n clearProb = 0.05\n resistances = {'guttagonol': False, 'grimpex': False}\n mutProb = 0.005\n delays = [300, 150, 75, 0]\n f, axarr = pylab.subplots(2, 2)\n x_plot = []\n\n for delay in delays:\n FinalPopSize = [0.0 for x in range(numTrials)]\n for trial in range(numTrials):\n viruses = [ResistantVirus(maxBirthProb, clearProb, resistances, mutProb) for n in range(numViruses)]\n patient = TreatedPatient(viruses, maxPop)\n for i in range(150):\n patient.update()\n patient.addPrescription('guttagonol')\n for j in range(150, 150+delay):\n patient.update()\n patient.addPrescription('grimpex')\n for k in range(150+delay, 300+delay):\n patient.update()\n FinalPopSize[trial] = patient.getTotalPop()\n x_plot.append(FinalPopSize)\n\n axarr[0, 0].hist(x_plot[0])\n axarr[0, 1].hist(x_plot[1])\n axarr[1, 0].hist(x_plot[2])\n axarr[1, 1].hist(x_plot[3])\n pylab.show()\n return x_plot", "def run_psavg_sims(bursttimefile):\n\n nfolder = [5,6,8,12]\n datadirs = [\"P20165/20165-01-01-000\", \"P20165/20165-01-01-001\", \"P20165/20165-01-01-002\",\n \"P10223/10223-01-03-01\", \"P10223/10223-01-03-010\" ]\n\n data_all, unbary_all, tstart_all, tend_all, t0_all, pcus_all, std1dir_all = [], [], [], [], [], [], []\n\n for d in datadirs:\n print(\"I am on directory %s\" %d)\n files = rxteburst.search_filenames_recursively(\"./%s/\"%d, \"*1div8192*.asc\")\n if len(files) == 0:\n files = rxteburst.search_filenames_recursively(\"./%s/\"%d, \"*1div-32768s*.asc\")\n if len(files) == 0:\n files = rxteburst.search_filenames_recursively(\"./%s/\"%d, \"*1div8*.asc\")\n #print(\"File to use %s\" 
%files[0])\n data = rxte.RXTEData(times=None, channels=None, datafile=files[0], npcus=None, ra=None, dec=None, emid=None, emiddir=None, bary=True)\n\n len_datafile = len(files[0].split(\"/\")[-1])\n len_processed = len(files[0].split(\"/\")[-2])\n std1dir_all.append(files[0][:-(len_datafile+len_processed+1)])\n\n data_all.append(np.array([p.time for p in data.photons])+data.t0)\n unbary_all.append(np.array([p.unbary for p in data.photons])+data.t0)\n tstart_all.append(data.photons[0].unbary+data.t0)\n tend_all.append(data.photons[-1].unbary+data.t0)\n t0_all.append(data.t0)\n pcus_all.append(data.pcus)\n\n t0_sorted, tstart_sorted, tend_sorted, data_sorted, pcus_sorted, std1dir_sorted, unbary_sorted = \\\n zip(*sorted(zip(t0_all, tstart_all, tend_all, data_all, pcus_all, std1dir_all, unbary_all)))\n t0_sorted = np.array(t0_sorted)\n\n psno = [5,6,8,12]\n m_all = [30, 23, 23, 50]\n\n for n,m in zip(psno, m_all):\n psavg_all = sgr1900_results.make_randomly_sampled_periodograms(datadirs, bursttimefile, m, n=1000,\n save_step=100, fileroot=\"sgr1806_psavg%i\"%n,\n data_sorted=data_sorted, t0_sorted=t0_sorted,\n pcus_sorted=pcus_sorted, tend_sorted=tend_sorted,\n tstart_sorted=tstart_sorted,\n unbary_sorted=unbary_sorted)\n\n return" ]
[ "0.5957784", "0.59119064", "0.5818777", "0.57929796", "0.5779033", "0.57222825", "0.5718269", "0.566151", "0.5655278", "0.5591914", "0.5582998", "0.5532946", "0.5525634", "0.55158126", "0.5478543", "0.5461477", "0.54515594", "0.544741", "0.5431594", "0.54281795", "0.5378584", "0.5368079", "0.5365735", "0.53490806", "0.5343979", "0.53418756", "0.5323399", "0.5322874", "0.53190917", "0.53159887" ]
0.6416229
0
Given the true and the estimated parameter values, this function computes the error in the parameter estimates. The order controls the norm used; by default it is the maximum, i.e. the sup norm.
def compute_error(true, estimate, order = np.inf):
    print true
    print estimate
    errs = []
    for i in xrange(len(true)):
        estError = abs(true[i] - estimate[i])
        for j in xrange(len(true[i])):
            if true[i][j] != 0:
                estError[j] = estError[j] / true[i][j]
        errs.append(linalg.norm(estError, order))
    return errs
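A minimal worked example for the function above (an illustrative addition, not part of the dataset row). The numeric values are made up, and the linalg import shown is an assumption about which linalg the function refers to.

import numpy as np
from scipy import linalg  # assumed to be the linalg used by compute_error

# Two replicates of (population sizes, migration rate) parameter vectors -- made-up values.
true_params = [np.array([2000.0, 2000.0, 1e-4]), np.array([1900.0, 2100.0, 1.2e-4])]
est_params  = [np.array([2050.0, 1950.0, 1.1e-4]), np.array([1850.0, 2150.0, 1.0e-4])]

sup_errs = compute_error(true_params, est_params)           # max relative error per replicate (sup norm)
l2_errs  = compute_error(true_params, est_params, order=2)  # Euclidean norm of the relative errors instead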
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_error(self, params):\n return self.endog - self.predict(params)", "def OF1_CalcErrorEstimation(param_list, args):\n #return (sum( \\\n #( OF1_SumOfGauss(param_list, classNum, g_lvls) - histogram ) ** 2) / g_lvls.size) + \\\n #(abs(sum(param_list[:classNum]) - 1) * o)\n return (sum( \\\n ( OF1_SumOfGauss(param_list, args[0], args[1]) - args[2] ) ** 2) / args[1].size) + \\\n (abs(sum(param_list[:args[0]]) - 1) * args[3])", "def exo1():\n randu = randn(N/ 2, N/ 2, 2); % a random vector field\n b = 2\n for i in 1: 4:\n LLs_u = Li{i}(LiS{i}(randu))\n % relative error should be very small\n norm(abs(LLs_u(: ) - b*randu(: )))/ norm(randu(: ))", "def convergence_order(N, err):\n import numpy as np\n\n if len(N) != len(err):\n raise ValueError('Convergence order args do not have same length')\n\n A = np.ones([len(err), 2])\n B = np.ones([len(err), 1])\n # ERR = A*N + B\n for i in range( len(N) ) :\n A[i,0] = np.log(N[i])\n B[i] = np.log(err[i])\n\n x, residuals, rank, singval = np.linalg.lstsq(A, B, rcond=None)\n\n return x[0]", "def Error_estimation(params, qu=None, qu_err=None, p_maxL=None, par=False):\n \n if par is True:\n print('Estimate uncertainties for the parameters')\n sigma = par_err(params)\n return(sigma)\n elif qu is not None:\n N = len(qu[0,:])\n model_err = np.zeros((3, N))\n star_err = np.zeros((3,N))\n msg = 'Estimate model uncertainties'\n print(msg)\n if qu_err is None:\n print(' -> by varying the model')\n star_err[:-1,:] = np.std(QU_func(params, qu, star=True), axis=2)\n model_err[:-1,:] = np.std(QU_func(params, qu), axis=2)\n bkgr_err = np.std(background(params[N:,:]), axis=2) #?\n \n else:\n print('-> using data and parameters')\n err = par_err(params)\n s_ax = (p_maxL[:N]*qu_err)**2 + (qu*err[:N])**2\n s_b = np.array([(err[N:-1]*np.cos(2*p_maxL[-1]))**2 +\\\n (2*p_maxL[N:-1]*err[-1]*np.sin(2*p_maxL[-1]))**2,\\\n (err[N:-1]*np.sin(2*p_maxL[-1]))**2 +\\\n (2*p_maxL[N:-1]*err[-1]*np.sin(2*p_maxL[-1]))**2])\n model_err[:-1,:] = np.sqrt(s_ax + s_b)\n star_err[:-1,:] = np.sqrt(s_ax)\n bkgr_err = np.sqrt(s_b)\n star_err[-1,:] = np.sqrt(Cov(star_err[0,:], star_err[1,:]))\n model_err[-1,:] = np.sqrt(Cov(model_err[0,:], model_err[1,:]))\n return(model_err, star_err, bkgr_err)", "def calc_error_parameter(X, y, target, dimension): #change if more parameters\n\n pos_max = np.argmax(y)\n best_parameters = X[pos_max, 0:dimension]\n best_parameters = np.reshape(best_parameters, (-1, 1))\n\n l2_errors = (\n np.power(best_parameters[0, :] - target[0], 2) +\n np.power(best_parameters[1, :] - target[1], 2) +\n np.power(best_parameters[2, :] - target[2], 2))\n\n return l2_errors.tolist(), best_parameters.tolist()", "def calc_errors(problem, points):\n original = problem.getp()\n try:\n ret = plugin.calc_errors(problem, points)\n except:\n import traceback\n print(\"error calculating distribution on model\")\n traceback.print_exc()\n ret = None\n finally:\n problem.setp(original)\n return ret", "def ssq_error(correct, estimate):\n assert correct.ndim == 2\n if np.sum(estimate ** 2) > 1e-5:\n alpha = np.sum(correct * estimate) / np.sum(estimate ** 2)\n else:\n alpha = 0.\n return np.sum((correct - alpha * estimate) ** 2)", "def error_compute(self):\n self.tt_error = np.linalg.norm(self.rel_error)\n if self.global_rank==0:print('Overall error is::',self.tt_error)\n return {'NMF': self.rel_error, 'tt': self.tt_error}", "def linear_combination_plus_error(X, num_dependent_cols=5, parameter_mean=0, parameter_std=1, error_mean=0, error_std=1):\n length = X.shape[0]\n param = 
np.random.normal(loc=parameter_mean,\n scale=parameter_std,\n size=(num_dependent_cols,))\n error = np.random.normal(loc=error_mean,\n scale=error_std,\n size=(length,))\n result = np.zeros(length,)\n for i in range(num_dependent_cols):\n result += param[i] * X[:, i]\n return result + error", "def find_error(p_s, p_t, A_d,\n A, b):\n def T(x):\n return(A.dot(x) + b)\n\n# TODO: add in w_j here\n second_sum = np.array([np.sqrt(np.linalg.norm(T(p_s[i]) - p_t[i]))\n for i in A_d])\n #error = second_sum.sum() / len(A_d)\n# TODO: the below is temprorary!! Need to figure out something not a hack!!\n# the 1/det(A) is to prevent us from pushing A towards zero\n error = second_sum.sum() / len(A_d) + 1 / np.linalg.det(A) + np.linalg.det(A)\n return(error)", "def solve(self, values, errors, const={}, combo=None, check=True, stdev=False):\n if check:\n self.check(values, errors, combo)\n\n val, err = self.used_vars(values, errors, combo)\n n, m = len(val), len(err)\n\n xk = np.array([errors[k] for k in err], dtype='float').reshape(-1, 1)\n jac = self.jacobian(values, errors, combo)\n ju, jk = jac[:,:n], jac[:,n:n+m]\n jui = np.abs(np.linalg.inv(ju))\n ju, jk = np.abs(ju), np.abs(jk)\n\n if const:\n ck = np.array([const.get(k, 0) for k in err], dtype='float').reshape(-1, 1)\n cu = np.array([const.get(k, 0) for k in val], dtype='float').reshape(-1, 1)\n\n if stdev:\n xk **= 2\n ck **= 2\n cu **= 2\n xk += ck\n else:\n xk = np.abs(xk)\n ck = np.abs(ck)\n cu = np.abs(cu)\n xk += ck\n\n xu = jui.dot(jk.dot(xk) + ju.dot(cu))\n del jac, jui, ju, jk, cu, ck\n\n else:\n if stdev:\n xk **= 2\n else:\n xk = np.abs(xk)\n\n xu = jui.dot(jk.dot(xk))\n del jac, jui, ju, jk\n\n if stdev:\n xu **= 0.5\n xk **= 0.5\n\n xu = xu.ravel()\n xk = xk.ravel()\n\n # Create data frame of results\n df = pd.DataFrame()\n df['var'] = val + err\n df['value'] = [values[k] for k in df['var']]\n df['error'] = np.concatenate([xu, xk])\n df['pct_error'] = 100 * abs(df['error'] / df['value'])\n df['is_calc'] = np.concatenate([np.ones(n), np.zeros(m)]).astype('bool')\n\n df.sort_values('var', inplace=True)\n df.replace(float('inf'), np.nan, inplace=True)\n df.set_index('var', inplace=True)\n\n return df", "def get_error_estimates(self, Y, M1, M2=None):\n # First K0 and K1\n Mminus = M1\n if M2 is None:\n Mplus = M1\n else:\n Mplus = M2\n if self.Cp0 != 0 and self.Cp1 != 0 and self.Cm != 0:\n Cp0 = self.Cp0\n Cp1 = self.Cp1\n Cm = self.Cm\n else:\n PP = self.principal_part()\n Cmax = max(PP.values())\n Kmax = 0\n for t in PP.keys():\n if isinstance(t, tuple):\n (c, l) = t\n elif isinstance(t, (int, Integer)):\n (c, l) = rn_from_D(self._space.multiplier(), t)\n else:\n raise ValueError(\"Incorrect principal part: t={0}\".format(t))\n if c in self._space.multiplier().D():\n tmp = l + self._space.multiplier().Qv[self._space.index_set().index(c)]\n elif c in range(len(self._space.multiplier().Qv)):\n tmp = l + self._space.multiplier().Qv[c]\n else:\n raise ValueError(\"Incorrect principal part: c,l={0},{1}\".format(c, l))\n if(abs(tmp) > Kmax):\n Kmax = abs(tmp)\n [Cp0, Cp1] = self._space.get_Cp(Cmax)\n Cm = self._space.get_Cm(Kmax, Cmax)\n self.Cp0 = Cp0\n self.Cp1 = Cp1\n self.Cm = Cm\n\n fak = len(self._space.index_set())\n # print \"Cp0,Cp1,Cm=\",Cp0,Cp1,Cm\n # print \"fak=\",fak\n\n er1 = fak * self._space.err_est_vv_hwmf_neg(Y, Mminus, Cm)\n er2 = fak * self._space.err_est_vv_hwmf_pos(Y, Mplus, Cp0, Cp1)\n return [er1, er2]", "def compute_errors(u_exact, u):\n\n # Compute error norm (for very small errors, the value can be\n # negative so 
we run abs(assemble(error)) to avoid failure in sqrt\n\n V = u.function_space()\n\n # Function - Expression\n error = (u - u_exact)**2*dx\n E1 = sqrt(abs(assemble(error)))\n\n # Explicit interpolation of u_e onto the same space as u:\n u_e = interpolate(u_exact, V)\n error = (u - u_e)**2*dx\n E2 = sqrt(abs(assemble(error)))\n\n # Explicit interpolation of u_exact to higher-order elements,\n # u will also be interpolated to the space Ve before integration\n Ve = FunctionSpace(V.mesh(), 'P', 5)\n u_e = interpolate(u_exact, Ve)\n error = (u - u_e)**2*dx\n E3 = sqrt(abs(assemble(error)))\n\n # fenics.errornorm interpolates u and u_e to a space with\n # given degree, and creates the error field by subtracting\n # the degrees of freedom, then the error field is integrated\n # TEMPORARY BUG - doesn't accept Expression for u_e\n #E4 = errornorm(u_e, u, normtype='l2', degree=3)\n # Manual implementation errornorm to get around the bug:\n def errornorm(u_exact, u, Ve):\n u_Ve = interpolate(u, Ve)\n u_e_Ve = interpolate(u_exact, Ve)\n e_Ve = Function(Ve)\n # Subtract degrees of freedom for the error field\n e_Ve.vector()[:] = u_e_Ve.vector().array() - u_Ve.vector().array()\n # More efficient computation (avoids the rhs array result above)\n #e_Ve.assign(u_e_Ve) # e_Ve = u_e_Ve\n #e_Ve.vector().axpy(-1.0, u_Ve.vector()) # e_Ve += -1.0*u_Ve\n error = e_Ve**2*dx(Ve.mesh())\n return sqrt(abs(assemble(error))), e_Ve\n E4, e_Ve = errornorm(u_exact, u, Ve)\n\n # Infinity norm based on nodal values\n u_e = interpolate(u_exact, V)\n E5 = abs(u_e.vector().array() - u.vector().array()).max()\n\n # H1 seminorm\n error = dot(grad(e_Ve), grad(e_Ve))*dx\n E6 = sqrt(abs(assemble(error)))\n\n # Collect error measures in a dictionary with self-explanatory keys\n errors = {'u - u_exact': E1,\n 'u - interpolate(u_exact,V)': E2,\n 'interpolate(u,Ve) - interpolate(u_exact,Ve)': E3,\n 'errornorm': E4,\n 'infinity norm (of dofs)': E5,\n 'grad(error) H1 seminorm': E6}\n\n return errors", "def calc_error_dist(self):\n pass", "def error_metrics(self, q=None, tol=1.e-14):\n if q is None:\n q = self.order(tol=tol)\n print('main method has order {}'.format(q))\n tau_1 = self.error_coeffs(q+1)\n tau_2 = self.error_coeffs(q+2)\n\n A_qp1 = snp.norm(tau_1)\n A_qp1_max = max([abs(tau) for tau in tau_1])\n A_qp2 = snp.norm(tau_2)\n A_qp2_max = max([abs(tau) for tau in tau_2])\n\n D = max(np.max(np.abs(self.A)), np.max(np.abs(self.b)), np.max(np.abs(self.c)))\n return A_qp1, A_qp1_max, A_qp2, A_qp2_max, D", "def report_errors(params, modelpars=None):\n parnames = sorted(params)\n print('-------------------------------------')\n print( 'Best Fit Values and Standard Errors:')\n namelen = max([len(n) for n in parnames])\n\n for name in parnames:\n par = params[name]\n space = ' '*(namelen+2 - len(name))\n nout = \" %s: %s\" % (name, space)\n initval = 'inital= % .6f' % par.init_value\n if modelpars is not None and name in modelpars:\n initval = '%s, model_value=% .6f' % (initval, modelpars[name].value)\n if par.vary:\n print(\" %s % .6f +/- %.6f (%s)\" % (nout, par.value,\n par.stderr, initval))\n\n elif par.expr is not None:\n print(\" %s % .6f == '%s'\" % (nout, par.value,\n par.expr))\n else:\n print(\" %s fixed\" % (nout))\n\n print( 'Correlations:')\n correls = {}\n for i, name in enumerate(parnames):\n par = params[name]\n if not par.vary:\n continue\n if hasattr(par, 'correl') and par.correl is not None:\n for name2 in parnames[i+1:]:\n if name != name2 and name2 in par.correl:\n correls[\"%s, %s\" % (name, name2)] = 
par.correl[name2]\n\n sort_correl = sorted(correls.items(), key=lambda it: abs(it[1]))\n sort_correl.reverse()\n for name, val in sort_correl:\n lspace = max(1, 25 - len(name))\n print(' C(%s)%s = % .3f ' % (name, (' '*30)[:lspace], val))\n print('-------------------------------------')", "def _epsilon_eval(z, A, ord=2):\n z=np.array(z)\n A=np.array(A)\n zc = complex(z[0], z[1])\n try :\n ep = 1/spl.norm(spl.inv(zc*np.eye(*A.shape)-A),ord=ord)\n # ep = spl.norm(zc*np.eye(*A.shape)-A,ord=ord)\n except TypeError:\n if ord==\"svd\":\n ep = np.min(spl.svdvals(zc*np.eye(*A.shape)-A))\n else: raise Exception(\"invalid method\")\n return ep", "def converged(M,L,S, verbose = True, tol=10e-6):\n error = frobeniusNorm(M - L - S) / frobeniusNorm(M)\n if verbose:\n print (\"error =\", error)\n return error <= tol", "def err_func(x,rv,valore,specn,lcrop,models='da2014'):\n tmp = tmp_func(x[0], x[1], rv, specn, lcrop, models)\n if tmp != 1: return abs(tmp[3]-(valore+1.)) #this is quantity that gets minimized \n else: return 1E30", "def compute_error(data, user_features, item_features, nz):\n sum_err = 0\n for d, n in nz:\n err = data[d,n] - np.dot(item_features[d,:],user_features[:,n])\n sum_err += err**2\n rmse = 0.5*sum_err/len(nz)\n return rmse", "def simulationTestGaussian2(params):\r\n x = gaussian(params[0], params[1], mu-3.5*sigma, mu+3.5*sigma)\r\n error = np.sum(np.power(optimal - x, 2))/optimal.shape[0]\r\n return 1/error", "def error(preferences, true_preferences, normalised = True):\n\n # Sum all of the inconsistent pairs of preference relations as 1s.\n differences = sum(1 for (x,y) in preferences if (y,x) in true_preferences)\n\n # Sum all of the missing pairs of preference relations as 1/2s.\n differences += 0.5 * (abs(len(true_preferences) - len(preferences)))\n\n # If normalising the result (default) then divide the sum of the differences\n # by the length of the maximum number of pairs of relations.\n if normalised:\n return differences / len(true_preferences)\n\n return differences", "def hangerqs_old(fitparams):\n return abs(fitparams[1]/fitparams[2]), abs(fitparams[1])/(1-abs(fitparams[2]))", "def error(self,pt,eta):\n return self._data[self.__ptBin(pt)][self.__etaBin(eta)][1]", "def __error(self, R, P, Q, K, beta):\n e = 0\n for i in xrange(len(R)):\n for j in xrange(len(R[i])):\n if R[i][j] > 0:\n\n # loss function error sum( (y-y_hat)^2 )\n e = e + pow(R[i][j]-numpy.dot(P[i,:],Q[:,j]), 2)\n\n # add regularization\n for k in xrange(K):\n\n # error + ||P||^2 + ||Q||^2\n e = e + (beta/2) * ( pow(P[i][k], 2) + pow(Q[k][j], 2) )\n return e", "def get_error_independence_p_values(self):\n eps = self.residuals_\n psi0 = self._adjacency_matrices[0][0]\n E = np.dot(np.eye(psi0.shape[0]) - psi0, eps.T).T\n n_samples = E.shape[0]\n n_features = E.shape[1]\n\n p_values = np.zeros([n_features, n_features])\n for i, j in itertools.combinations(range(n_features), 2):\n _, p_value = hsic_test_gamma(\n np.reshape(E[:, i], [n_samples, 1]), np.reshape(E[:, j], [n_samples, 1])\n )\n p_values[i, j] = p_value\n p_values[j, i] = p_value\n\n return p_values", "def standardError2(self):\n if self.count<=self.n:\n return float('inf')\n return self.residualNorm2()/self.sumWeight*(self.count / (self.count-self.n))", "def var_parameters(jd,mag,err):\n\n mean = np.mean(mag)\n nepochs = float(len(jd))\n\n chi = np.sum( (mag - mean)**2. / err**2. 
)\n p_chi = chi2.cdf(chi,(nepochs-1))\n\n\n a = (mag-mean)**2\n ex_var = (np.sum(a-err**2)/((nepochs*(mean**2))))\n sd = np.sqrt((1./(nepochs-1))*np.sum(((a-err**2)-ex_var*(mean**2))**2))\n ex_verr = sd/((mean**2)*np.sqrt(nepochs))\n\n\n return p_chi, ex_var, ex_verr", "def MSE_all_errors(real, estimate):\n error = mean_squared_error(real.T, estimate.T, multioutput='raw_values')\n return error" ]
[ "0.5976348", "0.5961685", "0.59206486", "0.59203446", "0.58393806", "0.58081555", "0.5629805", "0.561695", "0.5568433", "0.55431527", "0.5517937", "0.5446575", "0.54422283", "0.5426394", "0.54178244", "0.54091245", "0.54078066", "0.53778136", "0.5358041", "0.53563577", "0.53175193", "0.53122514", "0.52701604", "0.52674073", "0.52594674", "0.52572525", "0.5242519", "0.5235284", "0.5230534", "0.5230312" ]
0.6888212
0
This function processes the timestring from PSMC and converts it to a list of time slice lengths
def process_time_string(timestr):
    # Split the PSMC pattern string on '+' and expand every "n*m" term
    # into n copies of the slice length m.
    timestr = timestr.strip()
    toks = timestr.split('+')
    timeslices = []
    for t in toks:
        tm = t.strip()
        mobj = re.search('\\*', tm)
        if mobj == None:
            timeslices += [int(tm)]
        else:
            tms = tm.split('*')
            timeslices += int(tms[0]) * [int(tms[1])]
    return timeslices
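For example, feeding in the PSMC default '-p' pattern (a hypothetical input here; the function itself assumes `re` has been imported) expands as follows:

# assumed example input: the PSMC default pattern string
print(process_time_string("4+25*2+4+6"))
# -> [4, 2, 2, ..., 2, 4, 6]   (1 + 25 + 1 + 1 = 28 slices, summing to 64 atomic intervals)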
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_times(time_str):\n warnings = []\n days, interval = time_str.split(',')\n assert int(days) == float(days)\n days = int(days)\n assert int(interval) == float(interval)\n interval = int(interval)\n if interval < 3:\n warnings.append('Minimum interval is 3 hours')\n if days > 14:\n warnings.append('Maximum spot forecast period is 14 days')\n hours = np.arange(days * 24 + 1)[::interval]\n return hours.tolist(), warnings", "def time_convert(intime):\n Nt = intime.shape[0]\n outtime = []\n for t in range(Nt):\n timestr = ''.join([intime[t,:][~intime[t,:].mask].data[i].decode('utf-8') for i in range(len(intime[t,:][~intime[t,:].mask].data))])\n outtime.append(datetime.strptime(timestr, '%Y-%m-%d_%H:%M:%S'))\n return outtime", "def _parse_ps_output(string):\n t = string.replace('-', ':').split(':')\n t = [0] * (4 - len(t)) + [int(i) for i in t]\n seconds = t[0] * 86400 + t[1] * 3600 + t[2] * 60 + t[3]\n return seconds", "def vector_clock_from_string(vector_clock_str):\n return map(int, vector_clock_str.split())", "def get_time_strs(self):\n\n log(\"Getting time strings starting at {}\".format(self._t0))\n tz = dt.timezone.utc\n mkdt = lambda n: dt.datetime.fromtimestamp(\n self._t0 - (self._delta * n),\n tz=tz\n )\n ns = range(self._frames, 0, -1)\n return [mkdt(n).strftime('%Y%m%d%H%M') for n in ns]", "def get_times(traj_num_str):\n times = []\n # Get timestamps of sequence\n times_file_path = \"./dataset/\" + traj_num_str.zfill(2) + \"/times.txt\"\n with open(times_file_path, \"r\") as fid:\n for i, line in enumerate(fid):\n times.append(float(line))\n return times", "def get_times(ts_full, ts_system, len_state, sys_position, sys_length):\n ts = list(ts_full) + list(ts_system)\n subsystems = [[0, len_state]] * len(ts_full) + \\\n [[sys_position, sys_position + sys_length]] * len(ts_system)\n return ts, subsystems", "def OPCtimetransform(data, to):\n \n remove_times = []\n outtimes = []\n times = {'ms':[],'SS':[],'MM':[],'HH':[]}\n\n for i in range(0, len(data)):\n times['HH'] = 0\n times['MM'] = 0\n times['SS'] = 0\n times['ms'] = 0\n\n item = data[i]\n \n try:\n if len(item.split('.')[1]) < 2:\n item += '0'\n except IndexError:\n item += '.00'\n if len(item) < 9:\n item = item.zfill(9)\n if int(item[:2]) > 23:\n item = '0' + item\n \n # remove items with extra zero (2319010.00 to 231910)\n if len(item) > 9:\n olditem = item\n newitem = item[:4] + item[5:]\n print( ('Repairing strange value %s into %s')%(olditem, newitem) )\n item = newitem\n else:\n pass\n try:\n md = dt.datetime.strptime(item, \"%H%M%S.%f\")\n \n # round off items which exceed 59 minutes or 59 seconds \n # (i.e. 
146001 to 150001.)\n except ValueError:\n \n try:\n times['HH'] = int(item[0:2])\n times['MM'] = int(item[2:4])\n times['SS'] = int(item[4:6])\n times['ms'] = int(item[7:9])\n except ValueError:\n print(i, item)\n\n if times['SS'] > 59:\n times['MM'] += 1\n times['SS'] = 0\n if times['MM'] > 59:\n times['HH'] += 1\n times['MM'] = 0\n # discard items which exceed 23 hours\n if times['HH'] > 23:\n times['HH'] = 23\n print( ('resetting value %s')%(item) )\n \n\n md = dt.datetime(1900,1,1,times['HH'], times['MM'], times['SS']) \n\n \n outtimes.append( dt.datetime.strftime(md, to) )\n\n return outtimes", "def str_to_secs(table):\n out = np.zeros([len(table), 2])\n for i in range(len(table)):\n t1, t2 = table[i].split()\n out[i, 0] = Time.DateTime(t1).secs\n out[i, 1] = Time.DateTime(t2).secs\n return out", "def get_dur(self):\n return [char.get_dur() for char in self.string]", "def read_times(self, slices=None):\n times = netCDF4.num2date(\n datetime.strptime(\n self.get_handler().SOURCE_START_DATE.split('.')[0],\n '%Y%m%d%H%M%S'\n )\n )\n return numpy.ma.array([times])", "def id2segtimes(sid, ann_type=\"uppercase\", salamipath=dpath.SALAMI):\n files = id2filenames(sid, ann_type=ann_type, salamipath=salamipath)\n times = []\n for i in range(len(files)):\n events, _ = mir_eval.io.load_labeled_events(files[i])\n times = times + events[1:-1].tolist()\n return times", "def OPCtimetransformOld(data, to):\n outtimes = []\n \n times = {\n 'ms':[],\n 'SS':[],\n 'MM':[],\n 'HH':[]\n }\n for i in range(0, len(data)):\n item = data[i]\n try: \n times['HH'].append(int(item[0:2]))\n times['MM'].append(int(item[2:4]))\n times['SS'].append(int(item[4:6]))\n times['ms'].append(int(item[7:9]))\n except ValueError:\n # strange value 2319010.00 in 201129 file...\n olditem = item\n newitem = item[:4] + item[4+1:]\n print( ('Repairing strange value %s into %s')%(olditem, newitem) )\n try:\n times['HH'].append(int(newitem[0:2]))\n times['MM'].append(int(newitem[2:4]))\n times['SS'].append(int(newitem[4:6]))\n times['ms'].append(int(newitem[7:9]))\n except ValueError:\n print(newitem)\n\n # OPC times go up to 60 minutes. 
This is corrected by moving one minute\n times['MM'] = [max(0,x-1) for x in times['MM']]\n times['SS'] = [max(0,x-1) for x in times['SS']]\n\n for i in range(0, len(data)):\n md = dt.datetime(1900,1,1,times['HH'][i], times['MM'][i], times['SS'][i]) \n outtimes.append( dt.datetime.strftime(md, to))\n\n return outtimes", "def _parse_interval_str(cls, s):\n\n start, stop = s.split(':')\n if start == '':\n start = 0\n else:\n start = int(start)\n if stop == '':\n stop = None\n else:\n stop = int(stop)\n return slice(start, stop)", "def collapse_using_timeStr(self):\n if self.modified == True:\n raise Exception('Probabilities already modified.\\nCollapsing after modification will lead to incorrect results.')\n timeUnits = np.array(process_time_string(self.timeStr))\n if len(self.timeslices) + 1 == np.sum(timeUnits):\n if timeUnits[-1] == 1:\n timeUnits = timeUnits[:-1]\n else:\n timeUnits[-1] -= 1\n if len(self.timeslices) != np.sum(timeUnits):\n raise Exception('Total number of timeslices is different.')\n ind = 0\n cnt = 0\n curr_rates = np.matrix(np.zeros((np.shape(self.obsRates)[0], len(timeUnits))))\n curr_times = []\n for i in timeUnits:\n curr_rates[:, cnt] = np.sum(self.obsRates[:, ind:ind + i], axis=1)\n curr_times.append(np.sum(self.timeslices[ind:ind + i]))\n ind += i\n cnt += 1\n\n self.obsRates = curr_rates\n self.timeslices = curr_times", "def get_timescale_stringlist(self):\n return text_timescale", "def list_times(self, start: int = None, end: int = None) -> List:\n return [i.time for i in self.data[start:end]]", "def timestamp_decode(e: Encoding) -> List[int]:\n return _decode(e, Decoder)", "def parse_time(time_string):\n times = time_string.split(\"\\n\")\n\n user_time_str = times[-2].split(\"\\t\")[-1]\n sys_time_str = times[-1].split(\"\\t\")[-1]\n\n #print user_time_str, sys_time_str\n\n user_time = parse_m_s(user_time_str)\n sys_time = parse_m_s(sys_time_str)\n\n return user_time + sys_time", "def lengths(strings):\n # fill in this function's definition to make the test pass.\n return list(map(lambda x:len(x),strings))", "def parseTTL(self):\n rep = ''\n lastChannels = numpy.zeros(self.channelTotal)\n powerArray = 2**numpy.arange(self.channelTotal, dtype = numpy.uint64)\n for key,newChannels in sorted(self.switchingTimes.iteritems()):\n channels = lastChannels + newChannels #computes the action of switching on the state\n if (channels < 0).any(): raise Exception ('Trying to switch off channel that is not already on')\n channelInt = numpy.dot(channels,powerArray)\n rep = rep + self.numToHex(key) + self.numToHex(channelInt) #converts the new state to hex and adds it to the sequence\n lastChannels = channels\n rep = rep + 2*self.numToHex(0) #adding termination\n return rep", "def get_time_slices(self):\n tot = []\n for clu in self._clusters:\n tot.extend(self._clusters[clu].to_dict()[:])\n #tot.sort()\n return tot", "def __parse(self):\n lines = self.data.readlines()\n for i in range(0, len(lines)):\n line = lines[i]\n if line[0] == '#':\n continue\n tokens = line.split(\"\\t\")\n time_str = tokens[self.timecol]\n if time_str.find('start:') != -1:\n time_str = time_str.split()[1] + \" \" + time_str.split()[2]\n self.calls.append((0, 0, 0))\n self.durations.append(0.0)\n elif time_str.find('end:') != -1:\n time_str = time_str.split()[1] + \" \" + time_str.split()[2]\n time = datetime.strptime(time_str, \"%Y-%m-%d %H:%M:%S\")\n self.times.append(time)\n self.calls.append((0, 0, 0))\n self.durations.append(0.0)\n break\n else:\n duration = float(tokens[6])\n fms = 
int(tokens[2])\n hfms = int(tokens[3])\n svs = int(tokens[4])\n self.calls.append((fms, hfms, svs))\n self.durations.append(duration)\n time = datetime.strptime(time_str, \"%Y-%m-%d %H:%M:%S\")\n self.times.append(time)\n self.length = (self.times[len(self.times) - 1] -\\\n self.times[0]).seconds", "def _parse_time_metadata(self, data, kwargs):\n try:\n time = self._get_time_range(data)\n except KeyError:\n time = []\n try:\n time_steps = data.coords[self.time_field].size\n except KeyError:\n time_steps = kwargs.get('limit')\n return time, time_steps", "def lengths(strings):\n # fill in this function's definition to make the test pass.\n lst=[]\n for i in strings:\n lst.append(len(i))\n return lst", "def calc_run_lengths(sequence: List[int]) -> List[Run]:\n return [Run(object=g[0], length=len(list(g[1])))\n for g in itertools.groupby(sequence)]", "def readAll():\n result = _rc.readAttribute(OPTYPE.TIME_OF_FLIGHT)\n mm = bytesToIntArray(result, 2, signed=False)\n return [mm[0] / 10, mm[1] / 10]", "def get_timestamped_strings(self):\n ret_list = []\n i = 0\n while i < len(self.__string_list):\n ret_list.append(self.__timestamp_list[i].strftime(\"%Y-%m-%d %H:%M:%S\")+\" \"+self.__string_list[i])\n i += 1\n return ret_list", "def get_subphase_durations(\n data: pd.DataFrame, subphases: Dict[str, Union[int, Tuple[int, int]]]\n) -> Sequence[Tuple[int, int]]:\n subphase_durations = np.array(list(subphases.values()))\n if subphase_durations.ndim == 1:\n # 1d array => subphase values are integer => they are consecutive and each entry is the duration\n # of the subphase, so the start and end times of each subphase must be computed\n times_cum = np.cumsum(subphase_durations)\n if subphase_durations[-1] == 0:\n # last subphase has duration 0 => end of last subphase is length of dataframe\n times_cum[-1] = len(data)\n subphase_times = list(zip([0, *list(times_cum)], times_cum))\n else:\n # 2d array => subphase values are tuples => start end end time of each subphase are already provided and do\n # not need to be computed\n subphase_times = subphase_durations\n return subphase_times", "def parseBeatLength(length):\n return [int(x) for x in \"{:07b}\".format(int(length*64))]" ]
[ "0.60004324", "0.5779885", "0.56531847", "0.5637275", "0.5626897", "0.5624664", "0.55409265", "0.5522083", "0.54359984", "0.5405842", "0.53607774", "0.5295241", "0.5267315", "0.526679", "0.524625", "0.52031076", "0.5193402", "0.5178458", "0.5174391", "0.5154917", "0.51378095", "0.51328677", "0.5116863", "0.5107312", "0.5094468", "0.5079773", "0.5075039", "0.506999", "0.5060183", "0.5041779" ]
0.7197089
0
The coalescence matrix C as a vectorization of the upper triangular matrix and npop, the number of demes.
def mkCoalMatrix(C, npop):
    # Unpack the flattened upper triangle (row-major, diagonal included)
    # into a symmetric npop x npop matrix.
    C = np.array(C).flatten()
    M = np.zeros((npop, npop))
    cnt = 0
    for i in range(npop):
        for j in range(i, npop):
            M[i, j] = C[cnt]
            if i != j:
                M[j, i] = M[i, j]
            cnt += 1
    return M
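A quick check with made-up numbers (assuming numpy is imported): for npop = 3 the six packed values [c00, c01, c02, c11, c12, c22] fill the symmetric matrix row by row:

M = mkCoalMatrix([1.0, 0.2, 0.1, 2.0, 0.3, 3.0], 3)
# M == [[1.0, 0.2, 0.1],
#       [0.2, 2.0, 0.3],
#       [0.1, 0.3, 3.0]]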
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_C(n_c,CV_matrix):\n C = np.zeros((n_c, n_c), dtype=np.float32)\n for i in range(3):\n C += np.asfortranarray(CV_matrix[:, :, i]) @ np.asfortranarray(CV_matrix[:, :, np.mod(i + 2, 3)].T)\n C = (C != 0).astype(np.int32)\n return C", "def Cijkl(C):\n c = np.zeros(shape=(3, 3, 3, 3))\n CC = np.zeros(shape=(9, 9))\n CC[0:6, 0:6] = C[0:6, 0:6]\n CC[6:9, 6:9] = C[3:6, 3:6]\n CC[0:6, 6:9] = C[0:6, 3:6]\n CC[6:9, 0:6] = C[3:6, 0:6]\n\n c[0, 0, 0, 0] = CC[0, 0]\n c[0, 0, 1, 1] = CC[0, 1]\n c[0, 0, 2, 2] = CC[0, 2]\n c[0, 0, 1, 2] = CC[0, 3]\n c[0, 0, 2, 0] = CC[0, 4]\n c[0, 0, 0, 1] = CC[0, 5]\n c[0, 0, 2, 1] = CC[0, 6]\n c[0, 0, 0, 2] = CC[0, 7]\n c[0, 0, 1, 0] = CC[0, 8]\n\n c[1, 1, 0, 0] = CC[1, 0]\n c[1, 1, 1, 1] = CC[1, 1]\n c[1, 1, 2, 2] = CC[1, 2]\n c[1, 1, 1, 2] = CC[1, 3]\n c[1, 1, 2, 0] = CC[1, 4]\n c[1, 1, 0, 1] = CC[1, 5]\n c[1, 1, 2, 1] = CC[1, 6]\n c[1, 1, 0, 2] = CC[1, 7]\n c[1, 1, 1, 0] = CC[1, 8]\n\n c[2, 2, 0, 0] = CC[2, 0]\n c[2, 2, 1, 1] = CC[2, 1]\n c[2, 2, 2, 2] = CC[2, 2]\n c[2, 2, 1, 2] = CC[2, 3]\n c[2, 2, 2, 0] = CC[2, 4]\n c[2, 2, 0, 1] = CC[2, 5]\n c[2, 2, 2, 1] = CC[2, 6]\n c[2, 2, 0, 2] = CC[2, 7]\n c[2, 2, 1, 0] = CC[2, 8]\n\n c[1, 2, 0, 0] = CC[3, 0]\n c[1, 2, 1, 1] = CC[3, 1]\n c[1, 2, 2, 2] = CC[3, 2]\n c[1, 2, 1, 2] = CC[3, 3]\n c[1, 2, 2, 0] = CC[3, 4]\n c[1, 2, 0, 1] = CC[3, 5]\n c[1, 2, 2, 1] = CC[3, 6]\n c[1, 2, 0, 2] = CC[3, 7]\n c[1, 2, 1, 0] = CC[3, 8]\n\n c[2, 0, 0, 0] = CC[4, 0]\n c[2, 0, 1, 1] = CC[4, 1]\n c[2, 0, 2, 2] = CC[4, 2]\n c[2, 0, 1, 2] = CC[4, 3]\n c[2, 0, 2, 0] = CC[4, 4]\n c[2, 0, 0, 1] = CC[4, 5]\n c[2, 0, 2, 1] = CC[4, 6]\n c[2, 0, 0, 2] = CC[4, 7]\n c[2, 0, 1, 0] = CC[4, 8]\n\n c[0, 1, 0, 0] = CC[5, 0]\n c[0, 1, 1, 1] = CC[5, 1]\n c[0, 1, 2, 2] = CC[5, 2]\n c[0, 1, 1, 2] = CC[5, 3]\n c[0, 1, 2, 0] = CC[5, 4]\n c[0, 1, 0, 1] = CC[5, 5]\n c[0, 1, 2, 1] = CC[5, 6]\n c[0, 1, 0, 2] = CC[5, 7]\n c[0, 1, 1, 0] = CC[5, 8]\n\n c[2, 1, 0, 0] = CC[6, 0]\n c[2, 1, 1, 1] = CC[6, 1]\n c[2, 1, 2, 2] = CC[6, 2]\n c[2, 1, 1, 2] = CC[6, 3]\n c[2, 1, 2, 0] = CC[6, 4]\n c[2, 1, 0, 1] = CC[6, 5]\n c[2, 1, 2, 1] = CC[6, 6]\n c[2, 1, 0, 2] = CC[6, 7]\n c[2, 1, 1, 0] = CC[6, 8]\n\n c[0, 2, 0, 0] = CC[7, 0]\n c[0, 2, 1, 1] = CC[7, 1]\n c[0, 2, 2, 2] = CC[7, 2]\n c[0, 2, 1, 2] = CC[7, 3]\n c[0, 2, 2, 0] = CC[7, 4]\n c[0, 2, 0, 1] = CC[7, 5]\n c[0, 2, 2, 1] = CC[7, 6]\n c[0, 2, 0, 2] = CC[7, 7]\n c[0, 2, 1, 0] = CC[7, 8]\n\n c[1, 0, 0, 0] = CC[8, 0]\n c[1, 0, 1, 1] = CC[8, 1]\n c[1, 0, 2, 2] = CC[8, 2]\n c[1, 0, 1, 2] = CC[8, 3]\n c[1, 0, 2, 0] = CC[8, 4]\n c[1, 0, 0, 1] = CC[8, 5]\n c[1, 0, 2, 1] = CC[8, 6]\n c[1, 0, 0, 2] = CC[8, 7]\n c[1, 0, 1, 0] = CC[8, 8]\n return c", "def reduce_C(self, C_on_basis_vecs):\n self.C_reduced = np.mat(np.array(C_on_basis_vecs, ndmin=2).T)\n return self.C_reduced", "def reduce_C(self, C_on_basis_vecs):\n self.C_reduced = np.mat(np.array(C_on_basis_vecs, ndmin=2))\n return self.C_reduced", "def nC(self):\n return int(self.vnC.prod())", "def c_matrix(x1,x2,x3):\n\tC = np.array([\t[\t2*(x2-x1), \t\t(x2-x1), \t\t\t0\t\t\t], \\\n\t\t\t\t\t[\t(x2-x1), \t\t2*(x3-x1), \t\t(x3-x2)\t\t], \\\n\t\t\t\t\t[\t0,\t\t\t\t(x3-x2),\t\t2*(x3-x2)\t] \t], \\\n\t\t\t\t\tfloat)\n\treturn(C)", "def get_C_boundary(n_c,CV_matrix):\n C = np.zeros((n_c, n_c), dtype=np.float32)\n for i in range(3):\n C += np.asfortranarray(CV_matrix[:, :, i]) @ np.asfortranarray(CV_matrix[:, :, np.mod(i + 2, 3)].T)\n C += np.asfortranarray(CV_matrix[:, :, i]) @ np.asfortranarray(CV_matrix[:, :, np.mod(i + 1, 3)].T)\n C = (C != 0).astype(np.int32)\n return C", "def 
c_():\r\n c = np.array([[0, 0], [0, 100], [100, 100], [100, 80], [20, 80],\r\n [20, 20], [100, 20], [100, 0], [0, 0]])\r\n return c", "def cont_c(self, percent=0.9, N=None): # bug? check axis number 0 vs 1 here\n\n\t\tif not hasattr(self, 'G'): self.fs_c(N=self.rank) # generate G\n\t\treturn numpy.apply_along_axis(lambda _: _/self.L[:N], 1, \n\t\t\tnumpy.apply_along_axis(lambda _: _*self.c, 0, self.G[:,:N]**2))", "def _C(self):\n\n # Find the local x and y coordinates at each node\n xi = 0\n yi = 0\n xj = self.width()\n yj = 0\n xm = xj\n ym = self.height()\n xn = 0\n yn = ym\n\n # Calculate the [C] coefficient matrix\n C = array([[1, xi, yi, xi**2, xi*yi, yi**2, xi**3, xi**2*yi, xi*yi**2, yi**3, xi**3*yi, xi*yi**3],\n [0, 0, 1, 0, xi, 2*yi, 0, xi**2, 2*xi*yi, 3*yi**2, xi**3, 3*xi*yi**2],\n [0, -1, 0, -2*xi, -yi, 0, -3*xi**2, -2*xi*yi, -yi**2, 0, -3*xi**2*yi, -yi**3],\n \n [1, xj, yj, xj**2, xj*yj, yj**2, xj**3, xj**2*yj, xj*yj**2, yj**3, xj**3*yj, xj*yj**3],\n [0, 0, 1, 0, xj, 2*yj, 0, xj**2, 2*xj*yj, 3*yj**2, xj**3, 3*xj*yj**2],\n [0, -1, 0, -2*xj, -yj, 0, -3*xj**2, -2*xj*yj, -yj**2, 0, -3*xj**2*yj, -yj**3],\n\n [1, xm, ym, xm**2, xm*ym, ym**2, xm**3, xm**2*ym, xm*ym**2, ym**3, xm**3*ym, xm*ym**3],\n [0, 0, 1, 0, xm, 2*ym, 0, xm**2, 2*xm*ym, 3*ym**2, xm**3, 3*xm*ym**2],\n [0, -1, 0, -2*xm, -ym, 0, -3*xm**2, -2*xm*ym, -ym**2, 0, -3*xm**2*ym, -ym**3],\n\n [1, xn, yn, xn**2, xn*yn, yn**2, xn**3, xn**2*yn, xn*yn**2, yn**3, xn**3*yn, xn*yn**3],\n [0, 0, 1, 0, xn, 2*yn, 0, xn**2, 2*xn*yn, 3*yn**2, xn**3, 3*xn*yn**2],\n [0, -1, 0, -2*xn, -yn, 0, -3*xn**2, -2*xn*yn, -yn**2, 0, -3*xn**2*yn, -yn**3]])\n \n # Return the coefficient matrix\n return C", "def vnC(self):\n return np.array(\n [x for x in [self.nCx, self.nCy, self.nCz] if x is not None],\n dtype=int\n )", "def compCGP_C(M):\n # we store the coedd as a lower triangular matrix\n # random polynomial coefficients\n c = 0.5 * np.random.uniform(-1.0, -0.45, size=(M + 1, M + 1)) +\\\n 0.5 * np.random.uniform(0.45, 1.0, size=(M + 1, M + 1))\n for i in np.arange(M + 1):\n c[i, :] /= 2**(np.arange(M + 1) + i)\n c /= 1.5\n c = np.tril(c)\n c[0, 0] = 0\n c[1, 0] = 0\n c[1, 1] = 1\n\n return c", "def nC(self):\n return int(self._n.prod())", "def expansion_matrix_c(self):\n row = np.zeros(0)\n nnz = 0\n col = np.arange(nnz, dtype=np.int)\n data = np.zeros(nnz)\n return csr_matrix((data, (row, col)), shape=(self.ng, nnz))", "def Nmatrix(init_par, alpha, delta, obs, sigma_obs, ccoef, N):\n\tparallax, v, sigma_v = init_par[:-4], init_par[-4:-1], init_par[-1] \n\tplx_obs, mualpha_obs, mudelta_obs = obs[:, 0], obs[:, 1], obs[:, 2]\n\n\tp, q, r = normalTriad(alpha, delta)\n\tmualpha_mod = np.dot(np.transpose(p),v)*parallax/_A\n\tmudelta_mod = np.dot(np.transpose(q),v)*parallax/_A\n\t\n\tplx_mod, mualpha_mod, mudelta_mod = parallax, mualpha_mod, mudelta_mod\n\tsigma_plx, sigma_mualpha, sigma_mudelta = np.transpose(sigma_obs)\n\ta,like, expo, detD = np.ones(N),np.ones(N),np.ones(N), np.ones(N) \n\tC = np.zeros((3,3,N),dtype=np.float64)\n\tC[0,0,:],C[1,1,:],C[2,2,:] = sigma_plx**2.,sigma_mualpha**2., sigma_mudelta**2.\n\tcorr_coefficient_plx_mualpha, corr_coefficient_plx_mudelta, corr_coefficient_mualpha_mudelta = np.zeros(N), np.zeros(N), np.zeros(N)\n\tcorr_coefficient_plx_mualpha[:], corr_coefficient_plx_mudelta[:], corr_coefficient_mualpha_mudelta[:] = ccoef[:, 0], ccoef[:, 1], ccoef[:, 2] \n\t\n\tC[0,1,:], C[0,2,:] = corr_coefficient_plx_mualpha*sigma_plx*sigma_mualpha, corr_coefficient_plx_mudelta*sigma_plx*sigma_mudelta\n\tC[1,0,:], C[1,2,:] = 
corr_coefficient_plx_mualpha*sigma_plx*sigma_mualpha, corr_coefficient_mualpha_mudelta*sigma_mualpha*sigma_mudelta\n\tC[2,0,:], C[2,1,:] = corr_coefficient_plx_mudelta*sigma_plx*sigma_mudelta, corr_coefficient_mualpha_mudelta*sigma_mualpha*sigma_mudelta\n\tE = np.zeros((3,3,N),dtype=np.float64)\n\tE[1,1,:],E[2,2,:] = (sigma_v**2.)*(parallax/_A)**2., (sigma_v**2.)*(parallax/_A)**2.\n\tD,invD = np.zeros((3,3,N),dtype=np.float64),np.zeros((3,3,N),dtype=np.float64)\n\tD = np.add(E,C)\n\tfor i in range(N):\n\t\tdetD[i] = matrix_det(D[:,:,i]) \n\t\tinvD[:,:,i] = matrix_inv(D[:,:,i])\n\t\t\n\ta_c = np.ones((3,N))\n\ta_c = [plx_obs - plx_mod, mualpha_obs - mualpha_mod, mudelta_obs-mudelta_mod]\n\t\n\t\n\n\t\n\tcprime_pi, cprime_vx, cprime_vy, cprime_vz, = np.ones((3,N)), np.ones((3,N)), \\\n\t\t\t\t\t\t\tnp.ones((3,N)), np.ones((3,N)), \n\tcprime_pi[0,:] = 1.\n\tcprime_pi[1,:] = np.dot(np.transpose(p),v)/_A\n\tcprime_pi[2,:] = np.dot(np.transpose(q),v)/_A\n\t\n\tcprime_vx[0,:] = 0.\n\tcprime_vx[1,:] = -np.sin(alpha)*plx_mod/_A \n\tcprime_vx[2,:] = -np.sin(delta)*np.cos(alpha)*plx_mod/_A\n\n\t\n\tcprime_vy[0,:] = 0.\n\tcprime_vy[1,:] = np.cos(alpha)*plx_mod/_A \n\tcprime_vy[2,:] = -np.sin(delta)*np.sin(alpha)*plx_mod/_A\n\n\tcprime_vz[0,:] = 0.\n\tcprime_vz[1,:] = 0. \n\tcprime_vz[2,:] = np.cos(delta)*plx_mod/_A\n\n\tdlnd_dpi, dlnd_dsigmav = np.zeros(N), np.zeros(N)\n\tde_dpi, de_dsigmav = np.zeros(N), np.zeros(N)\n\t\n\n\t### See formula A.5 \n\tde_dpi[:] = ((sigma_v/_A)**2.)*2.*plx_mod[:]\n\tde_dsigmav[:] = ((plx_mod[:]/_A)**2.)*2.*sigma_v\n\t\n\tdlnd_dpi[:] = (invD[1,1,:] + invD[2,2,:])*de_dpi[:] \n\tdlnd_dsigmav[:] = (invD[1,1,:] + invD[2,2,:])*de_dsigmav[:]\n\t\n\t\n\t\n\t### See formula A.7\n\thess = np.zeros((N+4, N+4))\n\n\thess_diag_pi, hess_diag_pi_1, hess_diag_pi_2 = np.zeros(N), np.zeros(N), np.zeros(N)\n\thess_diag_pi_1[:] = invD[0, 0, :]*cprime_pi[0, :]*cprime_pi[0, :] + invD[0, 1, :]*cprime_pi[0, :]*cprime_pi[1, :] + invD[0, 2, :]*cprime_pi[0, :]*cprime_pi[2, :] + \\\n\t\t\t invD[1, 0, :]*cprime_pi[1, :]*cprime_pi[0, :] + invD[1, 1, :]*cprime_pi[1, :]*cprime_pi[1, :] + invD[1, 2, :]*cprime_pi[1, :]*cprime_pi[2, :] + \\\n\t\t \t invD[2, 0, :]*cprime_pi[2, :]*cprime_pi[0, :] + invD[2, 1, :]*cprime_pi[2, :]*cprime_pi[1, :] + invD[2, 2, :]*cprime_pi[2, :]*cprime_pi[2, :]\t\n\t\n\t\n\t#hess_diag_pi_2[:] = np.sum(0.5*(invD[1, 1, :]**2. + 2.*invD[1, 2, :]**2. + invD[2, 2, :]**2.)*de_dpi[:]*de_dpi[:]) ### Check if it's with or without sum: without!\n\t# So correct formula is below.\n\thess_diag_pi_2[:] = (0.5*(invD[1, 1, :]**2. + 2.*invD[1, 2, :]**2. 
+ invD[2, 2, :]**2.)*de_dpi[:]*de_dpi[:])\n\thess_diag_pi[:] = hess_diag_pi_1[:] + hess_diag_pi_2[:]\t\n\n\t\n\thess_diag_vx, hess_diag_vy, hess_diag_vz, hess_diag_sigmav = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N)\n\thess_pi_vx, hess_pi_vy, hess_pi_vz, hess_pi_sigmav = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N)\n\thess_diag_vxi, hess_diag_vyi, hess_diag_vzi = np.zeros(N), np.zeros(N), np.zeros(N)\n\t\n\thess_diag_vxi[:] = invD[0, 0, :]*cprime_vx[0, :]*cprime_vx[0, :] + invD[0, 1, :]*cprime_vx[0, :]*cprime_vx[1, :] + invD[0, 2, :]*cprime_vx[0, :]*cprime_vx[2, :] + \\\n\t\t\t invD[1, 0, :]*cprime_vx[1, :]*cprime_vx[0, :] + invD[1, 1, :]*cprime_vx[1, :]*cprime_vx[1, :] + invD[1, 2, :]*cprime_vx[1, :]*cprime_vx[2, :] + \\\n\t\t\t invD[2, 0, :]*cprime_vx[2, :]*cprime_vx[0, :] + invD[2, 1, :]*cprime_vx[2, :]*cprime_vx[1, :] + invD[2, 2, :]*cprime_vx[2, :]*cprime_vx[2, :] \t\t\n\t\n\thess_diag_vyi[:] = invD[0, 0, :]*cprime_vy[0, :]*cprime_vy[0, :] + invD[0, 1, :]*cprime_vy[0, :]*cprime_vy[1, :] + invD[0, 2, :]*cprime_vy[0, :]*cprime_vy[2, :] +\\\n\t\t\t invD[1, 0, :]*cprime_vy[1, :]*cprime_vy[0, :] + invD[1, 1, :]*cprime_vy[1, :]*cprime_vy[1, :] + invD[1, 2, :]*cprime_vy[1, :]*cprime_vy[2, :] +\\\n\t\t\t invD[2, 0, :]*cprime_vy[2, :]*cprime_vy[0, :] + invD[2, 1, :]*cprime_vy[2, :]*cprime_vy[1, :] + invD[2, 2, :]*cprime_vy[2, :]*cprime_vy[2, :] \t\n\n\n\thess_diag_vzi[:] = invD[0, 0, :]*cprime_vz[0, :]*cprime_vz[0, :] + invD[0, 1, :]*cprime_vz[0, :]*cprime_vz[1, :] + invD[0, 2, :]*cprime_vz[0, :]*cprime_vz[2, :] +\\\n\t\t\t invD[1, 0, :]*cprime_vz[1, :]*cprime_vz[0, :] + invD[1, 1, :]*cprime_vz[1, :]*cprime_vz[1, :] + invD[1, 2, :]*cprime_vz[1, :]*cprime_vz[2, :] +\\\n\t\t\t invD[2, 0, :]*cprime_vz[2, :]*cprime_vz[0, :] + invD[2, 1, :]*cprime_vz[2, :]*cprime_vz[1, :] + invD[2, 2, :]*cprime_vz[2, :]*cprime_vz[2, :] \t\t\n\t\n\n\thess_pi_vx[:] = invD[0, 0, :]*cprime_pi[0,:]*cprime_vx[0, :] + invD[0, 1, :]*cprime_pi[0,:]*cprime_vx[1, :] + invD[0, 2, :]*cprime_pi[0,:]*cprime_vx[2, :] +\\\n\t\t\tinvD[1, 0, :]*cprime_pi[1,:]*cprime_vx[0, :] + invD[1, 1, :]*cprime_pi[1,:]*cprime_vx[1, :] + invD[1, 2, :]*cprime_pi[1,:]*cprime_vx[2, :] +\\\n\t\t\tinvD[2, 0, :]*cprime_pi[2,:]*cprime_vx[0, :] + invD[2, 1, :]*cprime_pi[2,:]*cprime_vx[1, :] + invD[2, 2, :]*cprime_pi[2,:]*cprime_vx[2, :] \n\n\thess_pi_vy[:] = invD[0, 0, :]*cprime_pi[0,:]*cprime_vy[0, :] + invD[0, 1, :]*cprime_pi[0,:]*cprime_vy[1, :] + invD[0, 2, :]*cprime_pi[0,:]*cprime_vy[2, :] +\\\n\t\t\tinvD[1, 0, :]*cprime_pi[1,:]*cprime_vy[0, :] + invD[1, 1, :]*cprime_pi[1,:]*cprime_vy[1, :] + invD[1, 2, :]*cprime_pi[1,:]*cprime_vy[2, :] +\\\n\t\t\tinvD[2, 0, :]*cprime_pi[2,:]*cprime_vy[0, :] + invD[2, 1, :]*cprime_pi[2,:]*cprime_vy[1, :] + invD[2, 2, :]*cprime_pi[2,:]*cprime_vy[2, :] \n\n\thess_pi_vz[:] = invD[0, 0, :]*cprime_pi[0,:]*cprime_vz[0, :] + invD[0, 1, :]*cprime_pi[0,:]*cprime_vz[1, :] + invD[0, 2, :]*cprime_pi[0,:]*cprime_vz[2, :] +\\\n\t\t\tinvD[1, 0, :]*cprime_pi[1,:]*cprime_vz[0, :] + invD[1, 1, :]*cprime_pi[1,:]*cprime_vz[1, :] + invD[1, 2, :]*cprime_pi[1,:]*cprime_vz[2, :] +\\\n\t\t\tinvD[2, 0, :]*cprime_pi[2,:]*cprime_vz[0, :] + invD[2, 1, :]*cprime_pi[2,:]*cprime_vz[1, :] + invD[2, 2, :]*cprime_pi[2,:]*cprime_vz[2, :] \n\n\t\t\t\t\t\t\n\thess_diag_vx = np.sum(hess_diag_vxi)\n\thess_diag_vy = np.sum(hess_diag_vyi)\n\thess_diag_vz = np.sum(hess_diag_vzi)\t\n\t\n\thess_diag_sigmav = np.sum(0.5*(invD[1, 1, :]**2. + 2.*invD[1, 2, :]**2. 
+ invD[2, 2, :]**2.)*de_dsigmav[:]*de_dsigmav[:])\n\thess_pi_sigmav[:] = 0.5*(invD[1, 1, :]**2. + 2.*invD[1, 2, :]**2. + invD[2, 2, :]**2.)*de_dpi[:]*de_dsigmav[:] \n\n\thess_diag = np.concatenate((hess_diag_pi, np.array([hess_diag_vx, hess_diag_vy, hess_diag_vz, hess_diag_sigmav])))\n\t\n\tfor i in range(N+4):\n\t\thess[i, i] = hess_diag[i]\n\t\t\n\t\n\tfor j in range(N):\n\t\t\thess[j, -4] = hess_pi_vx[j]\n\t\t\thess[j, -3] = hess_pi_vy[j]\n\t\t\thess[j, -2] = hess_pi_vz[j]\n\t\t\thess[j, -1] = hess_pi_sigmav[j]\n\t\t\thess[-4, j] = hess_pi_vx[j]\n\t\t\thess[-3, j] = hess_pi_vy[j] \n\t\t\thess[-2, j] = hess_pi_vz[j]\n\t\t\thess[-1, j] = hess_pi_sigmav[j]\n\t\t\t\n\n\t\n\t\n\tpart_12, part_13, part_23 = np.zeros(N),np.zeros(N),np.zeros(N)\n\tfor ia in range(3):\n\t\tfor ib in range(3):\n\t\t\tpart_12[:] += invD[ia, ib, :]*cprime_vx[ia, :]*cprime_vy[ib, :] \n\t\t\tpart_13[:] += invD[ia, ib, :]*cprime_vx[ia, :]*cprime_vz[ib, :] \n\t\t\tpart_23[:] += invD[ia, ib, :]*cprime_vy[ia, :]*cprime_vz[ib, :] \t\t\t\t\n\n\thess[-4, -3] = np.sum(part_12)\n\thess[-3, -4] = hess[-4, -3]\n\t\n\thess[-4, -2] = np.sum(part_13)\n\thess[-2, -4] = hess[-4, -2]\n\n\thess[-3, -2] = np.sum(part_23)\n\thess[-2, -3] = hess[-3, -2]\n\n\t#### I am returning here the matrix Njk, which is defined as -E(H),\n\t#### where H is the hessian of the likelihood: therefore to obtain the real hessian, one\n\t#### should multiply this by '-1' (see function below.)\n\treturn hess ### See eq. 18", "def test_cmatrix_reduction(self):\n cmat = np.array([[1,4,5,0,0],\n [1,4,5,0,0],\n [6,2,2,0,0],\n [0,0,0,3,7],\n [0,0,0,7,3]])\n reduced_matrix = get_connected_count_matrix(cmat)\n difference = reduced_matrix - np.array([[1, 4, 5], [1, 4, 5], [6, 2, 2]])\n self.assertTrue(np.sum(difference) == 0)", "def N_max_matriz_covarianza(C):\n # valores auxiliares\n n_filas_cova = np.shape( C )[0]\n n_cols_cova = np.shape( C )[1]\n # valores y vectores propios\n eig_val, eig_vec = la.eig( C )\n eig_vals = eig_val.real # valores propios (lambda_k)\n eig_vecs = eig_vec.real # vectores propios (v_k) (columnas de eig_vecs)\n # vector con indices de valores propios\n idx_vec = np.arange((np.size(eig_vals)))\n # transformamos los valores propios y los indices a un espacio log-log\n x = idx_vec+1\n y = np.abs(eig_vals)\n # Triangular/circumscribed circle simple approximation to curvature \n # (after Roger Stafford)\n\n # the series of points used for the triangle/circle\n x1 = x[:-2]\n x2 = x[1:-1]\n x3 = x[2:]\n y1 = y[:-2]\n y2 = y[1:-1]\n y3 = y[2:]\n\n # the side lengths for each triangle\n a = np.sqrt(np.square(x3-x2)+np.square(y3-y2))\n b = np.sqrt(np.square(x1-x3)+np.square(y1-y3))\n c = np.sqrt(np.square(x2-x1)+np.square(y2-y1))\n # semi perimetro\n s = (a+b+c)/2.\n # radio de cada circulo\n R = (a*b*c)/(4*np.sqrt((s*(s-a)*(s-b)*(s-c))))\n # The curvature for each estimate for each value which is\n # the reciprocal of its circumscribed radius. 
Since there aren't circles for \n # the end points they have no curvature\n kappa = np.ones((n_filas_cova))\n kappa[0] = 0.\n kappa[-1] = 0.\n kappa[1:-1] = np.reciprocal(R)\n idx_max = np.where(kappa == np.max(kappa))[0][0] - 1\n return idx_max", "def nCz(self):\n if self.dim < 3:\n return None\n return int(self._n[2])", "def compCoeff_CGP(i, A, c, N):\n Ap = np.copy(A)\n out = c[i, 0] * np.eye(N)\n j = 1\n while j <= i:\n # compute A to the power p\n if j > 1:\n Ap = Ap.dot(A)\n\n # add to the polynome\n out += c[i, j] * Ap\n j += 1\n\n return out", "def build_cooc_matrix(users):\n nprods = constants.N_PRODUCTS\n M = scipy.sparse.dok_matrix((nprods, nprods), dtype=np.int32)\n i = 0\n for user in users:\n order = user.orders[-1]\n for pid in user.sorted_pids:\n focal_ix = pid-1\n prevs = paired_pids(user, pid)\n for prev in prevs:\n key = (focal_ix, prev-1)\n #n = M.get(key, 0)\n # further centi-optimization\n n = dict.get(M, key, 0)\n M.update({key:n+1})\n # Above is like 5x faster than below (and this inner loop is current bottleneck)\n #M[focal_ix, prev-1] += 1\n i += 1\n if i % 10000 == 0:\n logging.info('Processed {} users'.format(i))\n\n return M", "def muscovite():\n\n rho = 2834.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 181.; C[0,1] = 48.8; C[0,2] = 25.6; C[0,3] = 0.; C[0,4] = -14.2; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 178.4; C[1,2] = 21.2; C[1,3] = 0.; C[1,4] = 1.1; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 58.6; C[2,3] = 0.; C[2,4] = 1.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 16.5; C[3,4] = 0.; C[3,5] = -5.2\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 19.5; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 72.\n\n return C, rho", "def appearance_space(state_data, C):\n return np.dot(C, state_data)", "def compute_nc(self, X, G):\n N = X.shape[0]\n M = G.shape[0]\n nc = np.zeros(N)\n\n # Convolution on the diagonal\n for i in range(M // 2, N - M // 2 + 1):\n nc[i] = np.sum(X[i - M // 2:i + M // 2, i - M // 2:i + M // 2] * G)\n\n # Normalize\n # TODO: Why normalizing here ??\n nc += nc.min()\n nc /= nc.max()\n return nc", "def cofactorMatrix(self):\n returnvalue = Matrix()\n for i in range(self._height):\n newRow = list()\n for j in range(self._width):\n newRow.append(self.cofactor(i, j))\n returnvalue.addRow(*newRow)\n return returnvalue", "def gridder_to_C(gridder, W):\n M = len(gridder) // W\n C = np.zeros((W, M), dtype=float)\n for r in range(0, W):\n ell = r - (W / 2) + 1\n indx = (np.arange(M) - 2 * M * ell).astype(int)\n # Use symmetry to deal with negative indices\n indx[indx < 0] = -indx[indx < 0] - 1\n C[r, :] = gridder[indx]\n return C", "def _build_c_phi_matrices(self, t: tf.Tensor) -> tf.Tensor:\n c_phi_matrices = self.kernel.compute_c_phi(t, t)\\\n + tf.expand_dims(tf.eye(self.n_points_int, dtype=tf.float64), 0)\\\n * self.likelihood_variances\n return c_phi_matrices", "def _dgp_cov_matrix(Nt, snr2=100, clen2=1):\n f = lambda x: np.exp(-(x**2)/clen2)\n C = snr2 * (2*f(np.arange(Nt)) - f(1+np.arange(Nt))- f(-1+np.arange(Nt)))\n C[0] += 2 + 0.01 # noise, add a small number to regularize\n C[1] += -1\n return scipy.linalg.toeplitz(C)", "def _gp_cov_matrix(Nt, snr2, clen2):\n f = lambda x: np.exp(-(x**2)/clen2)\n C = snr2 * f(np.arange(Nt))\n C[0] += 1 # noise\n return scipy.linalg.toeplitz(C)", "def _mn_cov_ ( self , size = -1 , root = False ) :\n #\n if size <= 0 : size = len ( self )\n size = min ( size , len ( self ) 
) \n #\n from array import array\n matrix = array ( 'd' , [ 0 for i in range(0, size * size) ] )\n self.mnemat ( matrix , size )\n #\n import ostap.math.linalg\n from ostap.core.core import Ostap \n mtrx = Ostap.Math.SymMatrix ( size )() \n for i in range ( 0 , size ) :\n for j in range ( i , size ) : \n mtrx [ i , j ] = matrix [ i * size + j ]\n \n return mtrx", "def _cswap(i, j, S):\n N = _rswap(i, j, S.transpose()).transpose()\n return N" ]
[ "0.69439816", "0.65459836", "0.6285086", "0.6249481", "0.6161368", "0.6148797", "0.6126897", "0.61106193", "0.60805976", "0.60785055", "0.60363877", "0.5989471", "0.59715444", "0.59453815", "0.5887798", "0.58766955", "0.5835838", "0.57483953", "0.57132053", "0.56960946", "0.56527144", "0.56332844", "0.56199783", "0.56016874", "0.5592048", "0.55803126", "0.5562512", "0.55449474", "0.5543058", "0.54806983" ]
0.73259985
0
The rates obtained from PSMC are the prob of coal in that timeslice, not the prob of coal in that timeslice AND not coalescing in any other timeslice. We need the conditional probability of coal in that timeslice given lines have not coalesced in any of the previous timeslices. This function converts the PSMC values into our values.
def modify_rates(self):
    if self.modified:
        print 'Already Modified Probabilities'
    elif self.varGiven:
        print 'You must enter the conditional coalescent probabilities if you want to supply variance of'
        print 'the coalescent probabilities. Required since we cannot compute the variance of the conditionals'
        print 'given the variance of the marginals. Assuming that you gave the conditional probs.'
    else:
        # Conditional prob of coalescing in slice k, given no coalescence in
        # any earlier slice: marginal_k / (1 - sum of the earlier marginals).
        testrates = self.obsRates.copy()
        tratesum = testrates.cumsum(1)
        nocoal = 1 - tratesum
        nocoal = nocoal[:, :-1]
        nocoal = np.hstack((np.ones((np.shape(nocoal)[0], 1)), nocoal))
        testrates = testrates.getA() / (nocoal.getA() + 1e-200)
        self.modified = True
        # clamp to [0, 1] before storing
        self.obsRates = np.matrix(np.max([np.min([testrates, np.ones(np.shape(testrates))], 0), np.zeros(np.shape(testrates))], 0))
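The same marginal-to-conditional conversion on a single row, as a standalone sketch (the variable names and numbers here are illustrative only and assume just numpy, not the original class):

import numpy as np

marginal = np.array([0.2, 0.3, 0.3, 0.2])                           # P(coal in slice k)
not_yet = np.concatenate(([1.0], 1.0 - np.cumsum(marginal)[:-1]))   # P(no coal before slice k)
conditional = np.clip(marginal / (not_yet + 1e-200), 0.0, 1.0)      # P(coal in k | no coal before k)
# conditional -> [0.2, 0.375, 0.6, 1.0]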
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def proba(c_pred,m_pred,f_pred, dataset):\n p = np.zeros(10)\n if dataset == 'cifar10':\n for i in range(10):\n if i <4:\n if i <2:\n p[i] = c_pred[0]*(m_pred[0]/(m_pred[0]+m_pred[1]))*(f_pred[i]/np.sum(f_pred[0:2]))\n elif i <4:\n p[i] = c_pred[0]*(m_pred[1]/(m_pred[0]+m_pred[1]))*(f_pred[i]/np.sum(f_pred[2:4]))\n if i >=4:\n if i <6:\n p[i] = c_pred[1]*(m_pred[2]/(m_pred[2]+m_pred[3]+m_pred[4]))*(f_pred[i]/np.sum(f_pred[4:6]))\n elif i <8:\n p[i] = c_pred[1]*(m_pred[3]/(m_pred[2]+m_pred[3]+m_pred[4]))*(f_pred[i]/np.sum(f_pred[6:8]))\n elif i <10:\n p[i] = c_pred[1]*(m_pred[4]/(m_pred[2]+m_pred[3]+m_pred[4]))*(f_pred[i]/np.sum(f_pred[8:10]))\n else :\n for i in range(10):\n if i <5:\n if i <3:\n p[i] = c_pred[0]*(m_pred[0]/(m_pred[0]+m_pred[1]))*(f_pred[i]/np.sum(f_pred[0:3]))\n elif i <5:\n p[i] = c_pred[0]*(m_pred[1]/(m_pred[0]+m_pred[1]))*(f_pred[i]/np.sum(f_pred[3:5]))\n if i >=5:\n if i <8:\n p[i] = c_pred[1]*(m_pred[2]/(m_pred[2]+m_pred[3]))*(f_pred[i]/np.sum(f_pred[5:8]))\n elif i <10:\n p[i] = c_pred[1]*(m_pred[3]/(m_pred[2]+m_pred[3]))*(f_pred[i]/np.sum(f_pred[8:]))\n return(p)", "def find_all_cps(xs, cp_prob=1./250, plot=False):\r\n prior_params = mu0, kappa0, alpha0, beta0 = np.mean(xs), 1., 1.01, 1.\r\n post_params = mu_t, kappa_t, alpha_t, beta_t = map(lambda f: np.array([f]), prior_params)\r\n\r\n T = len(xs)\r\n R, M, V = np.zeros((T, T)), np.zeros((T, T)), np.zeros((T, T))\r\n R[0, 0] = 1\r\n M[0, 0] = mu0\r\n V[0, 0] = xs.var()\r\n\r\n mu_pred, sigma2_pred, dof_pred = compute_t_params(mu_t, kappa_t, alpha_t, beta_t)\r\n for t, x in enumerate(xs[1:], start=1):\r\n pred_prob = np.array([nct(x, m, v, d) for m, v, d in zip(mu_pred, sigma2_pred, dof_pred)])\r\n\r\n R[:t + 1, t] = compute_rt(R[:t, t - 1], pred_prob, cp_prob)\r\n\r\n post_params = mu_t, kappa_t, alpha_t, beta_t = update_params(x, prior_params, post_params)\r\n mu_pred, sigma2_pred, dof_pred = compute_t_params(mu_t, kappa_t, alpha_t, beta_t)\r\n\r\n M[:t + 1, t] = mu_pred\r\n V[:t + 1, t] = compute_t_var(sigma2_pred, dof_pred)\r\n\r\n if plot:\r\n mu_hat = np.sum(M*R, axis=0)\r\n var_hat = np.sum(V*R, axis=0)\r\n plot_results(xs, mu_hat, var_hat)\r\n\r\n return R, M, V", "def cumprobs(self, values):\n values = np.asarray(values)\n index = np.searchsorted(self.xs, values, side='right')\n ps = self.ps[index-1]\n ps[values < self.xs[0]] = 0.0\n return ps", "def computeProbs(psDf, add_masked_seqs=True, filterOut=False, max_cdr3_length=30, allow_stop_codons=False, allow_X=False):\n \n out = []\n for rowi, row in psDf.iterrows():\n \"\"\"If iterrows is slow there are potentially ways to speed this up using psDf.apply()\"\"\"\n vals = {}\n vals['ind'] = rowi\n \n if filterOut:\n fo = filterOutRow(row,\n max_cdr3_length=max_cdr3_length,\n allow_stop_codons=allow_stop_codons,\n allow_X=allow_X)\n if fo:\n \"\"\"vals will be missing keys, which will be assigned Nan in outDf\"\"\"\n continue\n \n aprob_nucseq, aprob_protseq = samplerProb(row, 'a')\n va_rep_prob, ja_rep_prob = rearrangementProb(row, 'a')\n \n vals['a_protseq_prob' ] = aprob_protseq * va_rep_prob * ja_rep_prob\n vals['cdr3a_protseq_prob'] = aprob_protseq\n vals['va_rep_prob' ] = va_rep_prob\n vals['ja_rep_prob' ] = ja_rep_prob\n vals['a_nucseq_prob' ] = aprob_nucseq * va_rep_prob * ja_rep_prob\n \n bprob_nucseq, bprob_protseq = samplerProb(row, 'b')\n vb_rep_prob, jb_rep_prob = rearrangementProb(row, 'b')\n \n vals['b_protseq_prob' ] = bprob_protseq * vb_rep_prob * jb_rep_prob\n vals['cdr3b_protseq_prob'] = bprob_protseq\n vals['vb_rep_prob' ] = 
vb_rep_prob\n vals['jb_rep_prob' ] = jb_rep_prob\n vals['b_nucseq_prob' ] = bprob_nucseq * vb_rep_prob * jb_rep_prob\n \n if add_masked_seqs:\n cdr3a_protseq_masked, ita, cdr3a_new_nucseq = getMaskedSeqs(row, 'a')\n cdr3b_protseq_masked, itb, cdr3b_new_nucseq = getMaskedSeqs(row, 'b')\n\n vals[ 'cdr3a_protseq_masked'] = cdr3a_protseq_masked\n vals[ 'a_indels'] = ita\n vals[ 'cdr3a_new_nucseq' ] = cdr3a_new_nucseq\n vals[ 'cdr3b_protseq_masked'] = cdr3b_protseq_masked\n vals[ 'b_indels'] = itb\n vals[ 'cdr3b_new_nucseq' ] = cdr3b_new_nucseq\n out.append(vals)\n \n outDf = pd.DataFrame(out).set_index('ind')\n assert outDf.shape[0] == psDf.shape[0]\n return outDf", "def _snrenv_to_pc(snrenv, k=None, q=None, sigma_s=None, m=None):\n un = norm.ppf(1.0 - 1.0 / m)\n sn = 1.28255 / un\n un += 0.577 / un\n dp = k * snrenv ** q\n return norm.cdf(dp, un, np.sqrt(sigma_s ** 2 + sn ** 2)) * 100", "def cps_vals(self):\n\n return unumpy.nominal_values(self.cps)", "def cumprob(self):\r\n return self.probabilities.cumsum(-1)", "def _calc_ppcc(self):\n\n res = self._model.fit()\n normal_quantile = self._calc_res_normal_quantile()\n\n ppcc, _ = stats.pearsonr(normal_quantile, res.resid)\n\n return ppcc", "def NPV(B,C,BV,CV,d,pb,pc):\n b=[BV[0] if x=='L' else BV[1] for x in B] #decoding revenue\n c=[CV[0] if x=='L' else CV[1] for x in C] #decoding cost\n z=[b_i - c_i for b_i, c_i in zip(b, c)] #profit at each time\n npv=np.npv(d, z)\n pnpv=pb*pc\n return (npv,pnpv)", "def get_conditional_probs_asbf( p01, p10, v):\n\n ns,L =v.shape\n if ns != 2**L:\n raise ValueError\n #matrix of conitional probs.\n pi = np.zeros((ns,ns))\n p00 = 1-p10\n p11 = 1-p01\n for i in range(ns):\n #final state\n si = v[i,:]\n for j in range(i,ns):\n #initial state\n sj = v[j, :]\n #number of sites where 1->1 transition occurs, etc\n n11 = np.sum((si==1)*(sj==1))\n n10 = np.sum((si==1)*(sj==0))\n n01 = np.sum((si==0)*(sj==1))\n n00 = np.sum((si==0)*(sj==0))\n pi[i, j] = (p11**n11)*(p10**n10)*(p01**n01)*(p00**n00)\n pi[j,i] = (p11**n11)*(p10**n01)*(p01**n10)*(p00**n00)\n return pi", "def calc_t_chan(tpbpc):\n t_chan_new = []\n for j in range(len(tpbpc)):\n tmp = 0.0\n for k in range(len(tpbpc[j])):\n if np.isfinite(tpbpc[j][k]):\n tmp += tpbpc[j][k]\n t_chan_new.append(tmp)\n t_chan_new = np.array(t_chan_new)\n return t_chan_new", "def cloudModel(PsPs, PcPc):\n PsPc = 1 - PsPs\n PcPs = 1 - PcPc\n \"\"\"\n transition matrix -- don't really need...\n \"\"\"\n P = np.array([[PsPs, PsPc],\n [PcPs, PcPc]])\n \"\"\"\n initial condintion\n \"\"\"\n sunny = True\n t0 = np.random.choice([False, True])\n \"\"\"\n run realisation\n \"\"\"\n clear = []\n t = t0\n for k in xrange(365):\n # predict tomorrow\n if t == sunny:\n t1 = bool(scipy.stats.bernoulli.rvs(PsPs))\n else:\n t1 = not bool(scipy.stats.bernoulli.rvs(PcPc))\n clear.append(t1)\n t = t1\n clear = np.array(clear)\n return clear", "def MAP(cpts, obs, terms):\r\n\r\n # a list to store the computed probabilities\r\n all_sums = []\r\n # initialize all terms to false\r\n for value in range(len(terms)):\r\n terms[value] = [terms[value], '0']\r\n search_array = terms + obs\r\n # if all terms are being watched, just call MPE\r\n if len(search_array) == len(cpts):\r\n return MPE(cpts, obs)\r\n # we need to know what terms we aren't interested in so we start with \r\n # or terms and observations and note the variables that appear in CPT but\r\n # not in those\r\n dont_count = []\r\n for var in cpts:\r\n if [var[0], '0'] not in search_array and [var[0], '1'] not in search_array:\r\n 
dont_count.append(var[0])\r\n terms.append([var[0],'1'])\r\n # sort the terms to ensure correct ordering\r\n terms.sort()\r\n # creates a list of all possible bit strings\r\n # just an easy way to create all possible truth assignments\r\n seq = [\"\".join(seq) for seq in itertools.product(\"01\", repeat=len(terms))]\r\n # loop through all possible truth assignments\r\n for j in range(len(seq)):\r\n # we initialize at probability = 100%\r\n chance = 1\r\n # assign the truth values\r\n for k in range(len(seq[j])):\r\n terms[k][1] = seq[j][k]\r\n # this computes the probability using the chaining rule\r\n for i in range(len(terms)):\r\n new_terms = terms[:-i-1] + obs\r\n new_terms.sort()\r\n chance *= probability(cpts,terms[-i-1], new_terms)\r\n # add the probabilities to our list\r\n all_sums.append(chance)\r\n combine = []\r\n # note all variables which weren't in obs or Vs\r\n for i in dont_count:\r\n combine.append(terms.index([i,'1']))\r\n # this will store the final probabilities\r\n final_array = [0] * len(seq)\r\n # another complicated looking loop, it just serves to combine probabilities\r\n # for example, if we have a CPT with x_1, x_2, x_3, x_4 and we observe \r\n # x_1 to be true and have Vs = [x_3, x_4] then we need to combine the \r\n # probabilities that are the same except for x_2 = true vs false\r\n for loc in combine:\r\n for sequence in range(len(seq)):\r\n for alt_sequence in range(sequence+1,len(seq)):\r\n if (seq[sequence][:loc] + seq[sequence][loc+1:]) == (seq[alt_sequence][:loc] + seq[alt_sequence][loc+1:]):\r\n final_array[sequence] = all_sums[sequence] + all_sums[alt_sequence]\r\n\r\n # get the truth assignment for the highest probability\r\n location = seq[final_array.index(max(final_array))]\r\n truth_assignment = []\r\n # place the truth assignment in a more readable fashion\r\n for value in range(len(terms)):\r\n if terms[value] in search_array:\r\n if location[value] == '0':\r\n truth_assignment.append(terms[value][0]+ ' = False')\r\n else:\r\n truth_assignment.append(terms[value][0]+ ' = True')\r\n return (truth_assignment)", "def SCP(counter, t1, t2, alpha=0.5, delta=0.05):\n prob1 = conditionalProb(counter, t1, t2, alpha, delta)\n prob2 = conditionalProb(counter, t2, t1, alpha, delta)\n return prob1*prob2", "def layer_probs(wind_cells,calc_risk):\n probs = []\n this_conf = (0.0, 1.0)\n probs.append(this_conf)\n wind_value = 0.25\n for risk in calc_risk:\n conf = 1.0 - risk\n # cut off at wind value 25 BUT leave a small possibility of getting through\n if wind_value > 25.0:\n conf = 0.02\n this_conf = (wind_value, conf)\n probs.append(this_conf)\n wind_value += 0.5\n # add final value at wind value 1000 -> all values above 30.25 at 0\n this_conf = (1000.0, 0.0)\n probs.append(this_conf)\n # calc new celle values with confidence levels instead of wind values\n new_cells = wind_cells\n for r in range(len(wind_cells)):\n for x in range(len(wind_cells[r])):\n for y in range(len(wind_cells[r][x])):\n for p in range(len(probs)-1):\n if wind_cells[r][x][y] >= probs[p][0] and wind_cells[r][x][y] < probs[p+1][0] :\n new_cells[r][x][y] = probs[p][1] + \\\n (abs(probs[p+1][1] - probs[p][1]) * abs(wind_cells[r][x][y] - probs[p][0]) / abs(probs[p+1][0] - probs[p][0]))\n return new_cells", "def prect(precc, precl):\n var = precc + precl\n var = convert_units(var, \"mm/day\")\n var.long_name = \"Total precipitation rate (convective + large-scale)\"\n return var", "def findpc(self):\n u = -(-can.C.len() + self.P.len() + can.R)/self.V.len()\n if u >= 0:\n return self.P + 
self.V.scale(u), u\n else:\n u = (can.C.len() - self.P.len() + can.R)/self.V.len()\n return self.P + self.V.scale(u), u", "def getCummulativeValues(self):\n self.cumulativePhaseHeightInRing1 = np.cumsum(self.phaseHeightInRing1)\n self.cumulativePhaseHeightInRing2 = np.cumsum(self.phaseHeightInRing2)\n self.cumulativeLeftCriticalPointsRing1 = np.cumsum(self.leftCriticalPointsRing1)\n self.cumulativeRightCriticalPointsRing1 = np.cumsum(self.rightCriticalPointsRing1)\n self.cumulativeLeftCriticalPointsRing2 = np.cumsum(self.leftCriticalPointsRing2)\n self.cumulativeRightCriticalPointsRing2 = np.cumsum(self.rightCriticalPointsRing2)\n\n if(self.init1 > 0):\n for index, value in enumerate(self.cumulativeLeftCriticalPointsRing1):\n self.cumulativeLeftCriticalPointsRing1[index] = value + self.init1\n for index, value in enumerate(self.cumulativeRightCriticalPointsRing1):\n self.cumulativeRightCriticalPointsRing1[index] = value + self.init1\n\n if(self.init2 > 0):\n for index, value in enumerate(self.cumulativeLeftCriticalPointsRing2):\n self.cumulativeLeftCriticalPointsRing2[index] = value + self.init2\n for index, value in enumerate(self.cumulativeRightCriticalPointsRing2):\n self.cumulativeRightCriticalPointsRing2[index] = value + self.init2\n\n self.cumulativePhaseHeightInRing1 = np.insert(self.cumulativePhaseHeightInRing1, 0, 0.0)\n self.cumulativePhaseHeightInRing2 = np.insert(self.cumulativePhaseHeightInRing2, 0, 0.0)\n self.cumulativeLeftCriticalPointsRing1 = np.insert(self.cumulativeLeftCriticalPointsRing1, 0, 0.0)\n self.cumulativeRightCriticalPointsRing1 = np.insert(self.cumulativeRightCriticalPointsRing1, 0, 0.0)\n self.cumulativeLeftCriticalPointsRing2 = np.insert(self.cumulativeLeftCriticalPointsRing2, 0, 0.0)\n self.cumulativeRightCriticalPointsRing2 = np.insert(self.cumulativeRightCriticalPointsRing2, 0, 0.0)", "def get_CPs(rates, pref_msk, actn, dt, smoothwin=100e-3, step=5):\n # params\n timepoints = rates.shape[2]\n this_time = np.linspace(0, timepoints, int(timepoints / step), dtype=int)[:-1]\n nselect = actn.shape[0]\n newdt = dt * step\n kernel = np.ones((int(smoothwin / newdt)))\n prefrates = rates[pref_msk == True, :, :]\n nprefrates = rates[pref_msk == False, :, :]\n\n # allocate variable to save CP\n aucs = np.zeros((nselect, this_time.shape[0]))\n smoothauc = aucs.copy()\n\n # for each neuron that is active\n for i, n in tqdm(enumerate(actn)):\n\n # define max rate of neuron\n maxrate = max(2, rates[:, n, :].max() + 1)\n\n # for each timepoint\n for j, t in enumerate(this_time):\n # get this rate across all trials\n pref = prefrates[:, n, t:t + step]\n npref = nprefrates[:, n, t:t + step]\n\n # hist\n x1, e1 = np.histogram(pref, bins=np.arange(maxrate), density=True)\n x2, e2 = np.histogram(npref, bins=np.arange(maxrate), density=True)\n\n # cumulative distribution\n cx1 = np.concatenate(([0], np.cumsum(x1)))\n cx2 = np.concatenate(([0], np.cumsum(x2)))\n\n # auc\n aucs[i, j] = mtr.auc(cx1, cx2) # reversed because pref > npref\n\n smoothauc[i] = np.convolve(aucs[i], kernel, mode='same') / (smoothwin/newdt)\n\n return smoothauc", "def pensions(cps):\n # Head of unit\n mask = cps['tc6_p'] > 0\n cps_valid = cps[mask]\n rand = np.random.uniform(size=len(cps_valid))\n new_vals = np.exp(10.5 + 1. * rand)\n new_vals = np.where(new_vals < 45000., 45000., new_vals)\n cps.loc[mask, 'pensionsp'] = new_vals\n # spouse of unit\n mask = cps['tc6_s'] > 0\n cps_valid = cps[mask]\n rand = np.random.uniform(size=len(cps_valid))\n new_vals = np.exp(10.5 + 1. 
* rand)\n new_vals = np.where(new_vals < 45000., 45000., new_vals)\n cps.loc[mask, 'pensionss'] = new_vals", "def proba_middle(c_pred,m_pred,dataset):\n if dataset =='cifar10':\n p = np.zeros(5)\n for i in range(5):\n if i in [0,1]:\n p[i] = c_pred[0]*(m_pred[i]/(m_pred[0]+m_pred[1]))\n if i in [2,3,4]:\n p[i] = c_pred[1]*(m_pred[i]/(m_pred[2]+m_pred[3]++m_pred[4]))\n else :\n p = np.zeros(4)\n for i in range(4):\n if i in [0,1]:\n p[i] = c_pred[0]*(m_pred[i]/(m_pred[0]+m_pred[1]))\n if i in [2,3]:\n p[i] = c_pred[1]*(m_pred[i]/(m_pred[2]+m_pred[3]))\n return(p)", "def proba_fm(m_pred,f_pred, dataset):\n p = np.zeros(10)\n if dataset == 'cifar10':\n for i in range(10):\n if i <4:\n if i <2:\n p[i] = (m_pred[0])*(f_pred[i]/np.sum(f_pred[0:2]))\n else:\n p[i] = (m_pred[1])*(f_pred[i]/np.sum(f_pred[2:4]))\n else:\n if i <6:\n p[i] = (m_pred[2])*(f_pred[i]/np.sum(f_pred[4:6]))\n elif i <8:\n p[i] = (m_pred[3])*(f_pred[i]/np.sum(f_pred[6:8]))\n else:\n p[i] = (m_pred[4])*(f_pred[i]/np.sum(f_pred[8:]))\n else :\n for i in range(10):\n if i <5:\n if i <3:\n p[i] = (m_pred[0])*(f_pred[i]/np.sum(f_pred[0:3]))\n else:\n p[i] = (m_pred[1])*(f_pred[i]/np.sum(f_pred[3:5]))\n else:\n if i <8:\n p[i] = (m_pred[2])*(f_pred[i]/np.sum(f_pred[5:8]))\n else:\n p[i] = (m_pred[3])*(f_pred[i]/np.sum(f_pred[8:]))\n return(p)", "def DPc(R,Pc):\n return r2*(K2**B2/(K2**B2 + (A)**B2))*(S/(S + R*Pc + Pc)) \\\n *(R*M)/(K3 + R*M)*Pc - gc*Pc", "def calc_cop():\n df = pp.load_csv_file('COP_in.csv', 'metrics_data') \n df = pp.clean_dataframe(df, 5)\n\n df_cop = df['LP01LM01_QQ'] / df['SJ01_SM01']\n df_cop = df_cop.replace(to_replace=np.nan, value = 0, inplace=False)\n \n return df_cop", "def compute_CRBs(self, ytemp, PD, nbnoises=100):\r\n\t\t# I select the parameters PD, m0s, T1, T2, R and T2s (the parameter T1s is not selected). PD is selected only if it is not considered constant in our experiments.\r\n\t\ty = copy.deepcopy(ytemp)\r\n\t\ty[:,:5] *= PD #i.e. 
if the derivative wrt the proton density has not to be multiply by the proton density\r\n\t\t\t\r\n\t\tif self.noise_type == 'Standard':\r\n\t\t\tI = np.dot(y[:,self.parasCRB].T,y[:,self.parasCRB]) \r\n\t\t\tIm1 = np.linalg.inv(I)\r\n\t\t # Optimize for the average of all parameters\r\n\t\t\tC = Im1.diagonal()\r\n\t\t\tC = C[self.trparas.params]\r\n\t\t # Normalize the cost; the abs is just in case the inversion failed\r\n\t\t\tC = abs(C)\r\n\t\treturn C / nbnoises", "def C_P(self):\n return self.generic_getter(\n get_heat_capacity_pressure, \"C_P\", \"convert_heat_capacity\"\n )", "def conditional_probability(data, attr, cp_table):\n # gets class names for dataframe manipulation\n classes = attr.tail(1)['vars'].tolist()\n classlist = [classes[0][0], classes[0][1]]\n class0 = classlist[0]\n class1 = classlist[1]\n # number of instances beloning to each class\n nclass0 = cp_table.loc[0, class0].sum()\n nclass1 = cp_table.loc[0, class1].sum()\n total = nclass0 + nclass1\n # all probabilities include a laplace est of 1\n prior0 = (nclass0 + 1) / (total + 2)\n prior1 = (nclass1 + 1) / (total + 2)\n list0 = []\n list1 = []\n for index, row in cp_table.iterrows():\n numattr = len(attr.loc[index, 'vars'])\n numer0 = row[class0] + 1\n numer1 = row[class1] + 1\n denom0 = nclass0 + (1 * numattr)\n denom1 = nclass1 + (1 * numattr)\n cp0 = numer0 / denom0\n cp1 = numer1 / denom1\n list0.append(cp0)\n list1.append(cp1)\n # replacing columns in previous table with cond probs\n del cp_table[class0]\n del cp_table[class1]\n cp_table[class0] = list0\n cp_table[class1] = list1\n \n return cp_table, prior0, prior1", "def probability(cpts, term, obs):\r\n \r\n \r\n # term is a list e.g., ['x_1', '0']\r\n # flip refers to the assignment either '0' false or '1' true\r\n flip = term[1]\r\n # the term itself\r\n term = term[0]\r\n # accumulator variable\r\n answer = 0\r\n # this loop locates where in the CPT we're looking\r\n for clause in range(len(cpts)):\r\n if cpts[clause][0] == term:\r\n index = clause\r\n # focus on our term\r\n cpt = cpts[index]\r\n # this loop checks if there are no preconditions\r\n # if not, then we immediately know the probability and can return\r\n for m in range(len(cpt[1])):\r\n if cpt[1][m][-2][1] == '1':\r\n if cpt[1][m][0] == [[]]:\r\n answer = cpt[1][m][-1]\r\n # list of the variables we have observed\r\n have = []\r\n if obs != []:\r\n for k in obs:\r\n have.append(k[0])\r\n # list of variables we need to know in order to calculate the probability\r\n needed = []\r\n for prob in range(len(cpt[1])):\r\n for j in cpt[1][prob][0]:\r\n if j != []:\r\n if j[0] not in needed:\r\n needed.append(j[0])\r\n # conditional logic based on the known variables\r\n for required in needed:\r\n if required not in have:\r\n # deep copy our observations list\r\n obs2 = []\r\n obs3 = []\r\n for observs in obs:\r\n obs2.append(observs)\r\n obs3.append(observs)\r\n # if we need to know a variable but don't have it\r\n # then we allow it to be either 0 or 1\r\n obs3.append([required,'1'])\r\n obs2.append([required,'0'])\r\n # computes probability if the unknown term is true, times \r\n # the probability that the unknown term is true, plus the\r\n # probability if the unknown term is false, times the \r\n # probability that the unknown term is false\r\n answer = (probability(cpts, [term,flip], obs3) * probability(cpts, [required,'1'], obs)) + (probability(cpts, [term,flip], obs2) * (probability(cpts, [required,'0'], obs)))\r\n # this loop looks complicated but all it's doing is finding the correct\r\n 
# line in the CPT\r\n if cpt[1][prob][-2][1] == '1':\r\n count = 1\r\n for i in range(len(cpt[1][prob][0])):\r\n if cpt[1][prob][0][i] in obs:\r\n count *= 1\r\n else:\r\n count = 0\r\n if count == 1:\r\n answer += cpt[1][prob][-1]\r\n\r\n\r\n # this computes the probability that the term is true, so if we asked \r\n # for the probability that it is false, just return 1 - answer\r\n if flip == '0':\r\n return 1 - answer\r\n return answer", "def cps(self):\n\n if self._cps is not None:\n return self._cps\n else:\n try:\n return self.counts / self.livetime\n except TypeError:\n raise SpectrumError(\n 'Unknown livetime; cannot calculate CPS from counts')", "def predict_proba(self, x_ts):\n self.yprob = self.cpe_model.predict_proba(x_ts)\n return self.yprob" ]
[ "0.5494266", "0.543836", "0.539172", "0.5361028", "0.5348872", "0.53372526", "0.53356594", "0.53264946", "0.5319149", "0.5312941", "0.52899146", "0.5248596", "0.5166682", "0.51662475", "0.5162503", "0.5162355", "0.5160059", "0.51591235", "0.51554865", "0.51400554", "0.5133518", "0.511867", "0.51174027", "0.5116336", "0.5108395", "0.5073537", "0.50607395", "0.50595355", "0.5052883", "0.50485283" ]
0.5660929
0
This function collapses the time slices and the coalescent probabilities using the time string
def collapse_using_timeStr(self): if self.modified == True: raise Exception('Probabilities already modified.\nCollapsing after modification will lead to incorrect results.') timeUnits = np.array(process_time_string(self.timeStr)) if len(self.timeslices) + 1 == np.sum(timeUnits): if timeUnits[-1] == 1: timeUnits = timeUnits[:-1] else: timeUnits[-1] -= 1 if len(self.timeslices) != np.sum(timeUnits): raise Exception('Total number of timeslices is different.') ind = 0 cnt = 0 curr_rates = np.matrix(np.zeros((np.shape(self.obsRates)[0], len(timeUnits)))) curr_times = [] for i in timeUnits: curr_rates[:, cnt] = np.sum(self.obsRates[:, ind:ind + i], axis=1) curr_times.append(np.sum(self.timeslices[ind:ind + i])) ind += i cnt += 1 self.obsRates = curr_rates self.timeslices = curr_times
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_time_slices(time_slices, apt_no, exp_no):\n # Removing the extraneous time slices\n if apt_no == '102A' and exp_no == '3':\n discard_ts = time_slices[\n (time_slices.phase == 'Not Found') & (time_slices.magnitude < 100)]\n time_slices = time_slices.ix[time_slices.index - discard_ts.index]\n\n elif apt_no == '603':\n print \"here\"\n # Likely power consumption of fridge is 110-150\n time_slices = time_slices[(time_slices.magnitude < 110) | (time_slices.magnitude > 150) &\n (time_slices.type == 'power')]\n # 25-26Nov\n if exp_no == '25-26Nov':\n time_slices = time_slices[time_slices.end_time < 1385404505]\n elif exp_no == '26-27Nov':\n time_slices = time_slices[time_slices.end_time < 1385492334]\n\n elif apt_no == '703':\n # Likely power consumption of fridge is 130-152\n fridge_ts = time_slices[(time_slices.magnitude >= 130) & (time_slices.magnitude <= 170) &\n (time_slices.type == 'power')]\n time_slices = time_slices.ix[time_slices.index - fridge_ts.index]\n\n # Likely power consumption of geyser > 2000 but on light phase > 1000\n geyser_ts = time_slices[(time_slices.magnitude > 1000) & (time_slices.type == 'light')]\n time_slices = time_slices.ix[time_slices.index - geyser_ts.index]\n\n # 26-27Nov\n if exp_no == '26-27Nov':\n washing_ts = time_slices[\n (time_slices.start_time >= 1385470967) & (time_slices.end_time <= 1385471880)]\n time_slices = time_slices.ix[time_slices.index - washing_ts.index]\n\n # 28-29Nov\n if exp_no == '28-29Nov':\n time_slices = time_slices[\n (time_slices.start_time < 1385646060) | (time_slices.end_time > 1385648143)]\n\n # Removing time slices with duration less than 30 seconds\n idx_list = []\n for idx in time_slices.index:\n start_time = time_slices.ix[idx]['start_time']\n end_time = time_slices.ix[idx]['end_time']\n magnitude = time_slices.ix[idx]['magnitude']\n\n time_diff = end_time - start_time\n\n if time_diff < 30 and magnitude < 80:\n print \"idx\", idx, \"time_diff\", time_diff, \"magnitude\", magnitude\n # Qualified for filtering it\n idx_list.append(idx)\n time_slices = time_slices.ix[time_slices.index - idx_list]\n\n return time_slices", "def _set_window_time(slices, times):\n t_idx_ = [t[-1] for t in slices]\n return times[t_idx_]", "def part2(puzzle_input):\n\n puzzle_input_arr = puzzle_input.split('\\n')\n seconds_to_action = {} # {timestamp: (datetime, action)\n for line in puzzle_input_arr:\n m = re.match(r'\\[(\\d+)-(\\d+)-(\\d+) (\\d+):(\\d+)\\](.*)', line)\n dt = datetime(1970, int(m.group(2)), int(m.group(3)), int(m.group(4)), int(m.group(5)))\n seconds = dt.timestamp()\n seconds_to_action[seconds] = (dt, m.group(6))\n seconds_sorted = sorted(seconds_to_action.keys())\n guard_to_minutes_slept = {} # {Guard ID: number of minutes slept}\n guard_to_minute = {} # {Guard Id: [5, 6, 7, 8, 9...24, 30, 31...54, 24, 25, 26, 27, 28]}\n minute_to_guard_slept= {} # {minute: [guard IDs]}\n guard_id = 0\n sleep_time = None\n for second in seconds_sorted:\n dt, action = seconds_to_action[second]\n if \"begins\" in action:\n guard_id = int(re.match(r' Guard #(\\d+)', action).group(1))\n if guard_id not in guard_to_minutes_slept:\n guard_to_minutes_slept[guard_id] = 0\n guard_to_minute[guard_id] = []\n elif \"falls\" in action:\n sleep_time = dt\n elif \"wakes\" in action:\n difference_in_minutes = int((dt.timestamp() - sleep_time.timestamp()) // 60)\n guard_to_minutes_slept[guard_id] += difference_in_minutes\n for i in range(difference_in_minutes):\n if (sleep_time.minute + i) % 60 not in minute_to_guard_slept:\n 
minute_to_guard_slept[(sleep_time.minute + i) % 60] = [guard_id]\n else:\n minute_to_guard_slept[(sleep_time.minute + i) % 60].append(guard_id)\n most_frequent_number_of_occurrences, sleepiest_guard_id = (0, 0)\n sleepiest_minute = 0\n for minute in minute_to_guard_slept:\n c = collections.Counter(minute_to_guard_slept[minute])\n if c.most_common(1)[0][1] > most_frequent_number_of_occurrences:\n sleepiest_guard_id, most_frequent_number_of_occurrences = c.most_common(1)[0]\n sleepiest_minute = minute\n return sleepiest_guard_id * sleepiest_minute", "def part1(puzzle_input):\n\n puzzle_input_arr = puzzle_input.strip().split('\\n')\n seconds_to_action = {} # {timestamp: (datetime, action)}\n for line in puzzle_input_arr:\n m = re.match(r'\\[(\\d+)-(\\d+)-(\\d+) (\\d+):(\\d+)\\](.*)', line)\n dt = datetime(1970, int(m.group(2)), int(m.group(3)), int(m.group(4)), int(m.group(5)))\n seconds = dt.timestamp()\n seconds_to_action[seconds] = (dt, m.group(6))\n seconds_sorted = sorted(seconds_to_action.keys())\n guard_to_minutes_slept = {} # {guard ID: number of minutes slept}\n guard_to_minute = {} # {Guard ID: [5, 6, 7, 8, 9...24, 30, 31...54, 24, 25, 26, 27, 28]}\n for second in seconds_sorted:\n dt, action = seconds_to_action[second]\n if \"begins\" in action:\n guard_id = int(re.match(r' Guard #(\\d+)', action).group(1))\n if guard_id not in guard_to_minutes_slept: # I could use a default dictionary for this\n guard_to_minutes_slept[guard_id] = 0\n guard_to_minute[guard_id] = []\n elif \"falls\" in action:\n sleep_time = dt\n elif \"wakes\" in action:\n difference_in_minutes = int((dt.timestamp() - sleep_time.timestamp()) // 60)\n guard_to_minutes_slept[guard_id] += difference_in_minutes\n guard_to_minute[guard_id] += [(sleep_time.minute + i) % 60 for i in range(difference_in_minutes)]\n guard_with_longest_sleep = max(guard_to_minutes_slept, key=guard_to_minutes_slept.get)\n most_common_minute = max(guard_to_minute[guard_with_longest_sleep],\n key=guard_to_minute[guard_with_longest_sleep].count)\n return guard_with_longest_sleep * most_common_minute", "def reset_time(data_dict, key = 'p1', cutoff = 0.01, grace = 10):\n assert set({'p1', 'p2', 'time'}).issubset(set((data_dict.keys()))), \"data_dict keys ({}) do not match required keys: {}\".format(set(data_dict.keys()), set({'p1', 'p2', 'time'}))\n \n key = key.lower()\n \n assert key in set({'p1', 'p2'}), \"key {} is not in allowed. must be 'p1' or 'p2'\".format(key)\n assert key in set(data_dict.keys()), \"key {} does not exist in data_dict keys ({})\".format(key, data_dict.keys())\n \n time_ida = iterable_data_array(data_dict, 'time')\n p1_ida = iterable_data_array(data_dict, 'p1')\n p2_ida = iterable_data_array(data_dict, 'p2')\n\n tmpout = {'time':data_array_builder(), 'p1': data_array_builder(), 'p2': data_array_builder()}\n \n for time, p1, p2 in zip(time_ida, p1_ida, p2_ida):\n assert time.shape == p1.shape and time.shape == p2.shape, \"shapes do not match: time - {}, p1 - {}, p2 - {}\".format(time.shape, p1.shape, p2.shape)\n if key == 'p1':\n checker = p1\n elif key == 'p2':\n checker == p2\n else:\n raise KeyError('key {}. Not allowed. 
Must be \"p1\" or \"p2\"'.format(key))\n arg = _get_startarg_1d(checker, cutoff = cutoff)\n start_time = _get_starttime_from_startarg_1d(time, arg)\n tmpout['time'].append(time[arg - grace:] - start_time)\n tmpout['p1'].append(p1[arg - grace:]) \n tmpout['p2'].append(p2[arg - grace:])\n\n\n out = {'time':data_array_builder(), 'p1': data_array_builder(), 'p2': data_array_builder()}\n\n #handle the fact that after this removal not all will have the same number of data points \n\n max_number_of_timestamps = max([len(x) for x in tmpout['time']])\n for i in range(len(tmpout['time'])):\n #import pdb; pdb.set_trace()\n l = len(tmpout['time'][i])\n nnans_to_add_to_front = max_number_of_timestamps - l\n\n ttime = tmpout['time'][i]\n tp1 = tmpout['p1'][i]\n tp2 = tmpout['p2'][i]\n\n for ticker in range(nnans_to_add_to_front):\n ttime = np.concatenate(([np.nan], ttime))\n tp1 = np.concatenate(([np.nan], tp1))\n tp2 = np.concatenate(([np.nan], tp2))\n\n out['time'].append(ttime)\n out['p1'].append(tp1)\n out['p2'].append(tp2)\n\n return {key: out[key].build() for key in out}", "def process_time_string(timestr):\n timestr = timestr.strip()\n toks = timestr.split('+')\n timeslices = []\n for t in toks:\n tm = t.strip()\n mobj = re.search('\\\\*', tm)\n if mobj == None:\n timeslices += [int(tm)]\n else:\n tms = tm.split('*')\n timeslices += int(tms[0]) * [int(tms[1])]\n\n return timeslices", "def OPCtimetransform(data, to):\n \n remove_times = []\n outtimes = []\n times = {'ms':[],'SS':[],'MM':[],'HH':[]}\n\n for i in range(0, len(data)):\n times['HH'] = 0\n times['MM'] = 0\n times['SS'] = 0\n times['ms'] = 0\n\n item = data[i]\n \n try:\n if len(item.split('.')[1]) < 2:\n item += '0'\n except IndexError:\n item += '.00'\n if len(item) < 9:\n item = item.zfill(9)\n if int(item[:2]) > 23:\n item = '0' + item\n \n # remove items with extra zero (2319010.00 to 231910)\n if len(item) > 9:\n olditem = item\n newitem = item[:4] + item[5:]\n print( ('Repairing strange value %s into %s')%(olditem, newitem) )\n item = newitem\n else:\n pass\n try:\n md = dt.datetime.strptime(item, \"%H%M%S.%f\")\n \n # round off items which exceed 59 minutes or 59 seconds \n # (i.e. 146001 to 150001.)\n except ValueError:\n \n try:\n times['HH'] = int(item[0:2])\n times['MM'] = int(item[2:4])\n times['SS'] = int(item[4:6])\n times['ms'] = int(item[7:9])\n except ValueError:\n print(i, item)\n\n if times['SS'] > 59:\n times['MM'] += 1\n times['SS'] = 0\n if times['MM'] > 59:\n times['HH'] += 1\n times['MM'] = 0\n # discard items which exceed 23 hours\n if times['HH'] > 23:\n times['HH'] = 23\n print( ('resetting value %s')%(item) )\n \n\n md = dt.datetime(1900,1,1,times['HH'], times['MM'], times['SS']) \n\n \n outtimes.append( dt.datetime.strftime(md, to) )\n\n return outtimes", "def tidy_time_string(time):\n\n # TODO - :return date_range: Where date_status is \"centred\", date_range is a tuple (`first_date`, `last_date`) of\n # `datetime64[D]` objects. Otherwise will return a tuple of Not a Time objects.\n # TODO - warnings/logging\n # TODO - change date offsets to rounding using MonthEnd/MonthBegin\n # https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html\n # TODO - allow mulitple `date_status`es (circa and centred).\n\n date_status = 'not_converted'\n date = pd.NaT\n original_time_string = str(time)\n\n # IS THE STRING ALREADY PARSABLE AS AN EXACT TIME:\n if '-' not in time: # to avoid accidentally parsing ranges as exact times. e.g. 
\"25-27 june\".\n\n try:\n date = pd.to_datetime(time)\n date_status = 'exact'\n return date, date_status\n except:\n pass\n\n # IS THE STRING \"CIRCA\" SOMETHING:\n if (('c' in time) or (('[' in time) or (']' in time))):\n if 'c' in time: # contains 'c' (not in a month, e.g. Dec), so \" c. \", \"c \", t\n time = re.sub(r'(?<!\\w)(c[.]?\\s?)', '', time)\n\n if ('[' in time) and (']' in time): # contains square brackets\n\n # We don't attempt to fix multiple pairs of brackets with one missing bracket\n num_sq_brackets = time.count(']') + time.count(']')\n if num_sq_brackets >= 3 and (num_sq_brackets % 2) != 0:\n logging.info(\"Cannot fix multiple pairs of brackets with one missing bracket.\")\n return date, date_status\n\n reg2 = re.findall(r'\\[(.*?)\\]', time)\n if reg2 is not None:\n # remove square brackets\n for in_brackets in reg2:\n time = time.replace(f\"[{in_brackets}]\", in_brackets)\n elif '[' in time:\n time = time.replace('[', '')\n elif ']' in time:\n time = time.replace(']', '')\n\n time = time.strip()\n\n try:\n date = pd.to_datetime(time)\n date_status = 'circa'\n return date, date_status\n except:\n pass\n\n # IS THE STRING A RANGE OF DATES? WHICH WE CAN AVERAGE OR CENTRE:\n # We are assuming an '[1,2]\\d{2}0)s' pattern (e.g. 1970s, 1980s, 1730s, 1900s) implies a decade.\n if ('s' in time) or ('-') in time:\n if ('s' in time) and ('-' not in time):\n reg3 = re.findall(r'([1,2]\\d{2}0)s', time)\n for reg in reg3:\n time = time.replace(f\"{reg}s\", str(int(reg) + 5)) # centre is 5 years later\n date = pd.to_datetime(time, format='%Y')\n date_status = 'centred'\n\n elif ('-' in time):\n if time.count('-') > 1:\n print('many hyphens', original_time_string)\n # Not attempting to deal with multiple hyphens at the moment.\n pass\n else:\n time = re.sub(r'\\s?-\\s?', '-', time)\n reg4 = re.match(r'(.*?)-(.*)$', time)\n\n first = time.replace(reg4.group(0), reg4.group(1))\n last = time.replace(reg4.group(0), reg4.group(2))\n\n if 's' in first:\n reg5 = re.findall(r'([1,2]\\d{2}0)s', time)\n for reg in reg5:\n first = first.replace(f\"{reg}s\", reg)\n\n if not re.search(r'[1,2]\\d{3}', first): # no year:\n if not re.search(r'\\d+', first): # no days in `first` => varying month:\n # Take the year from last and add it on\n reg5 = re.findall(r'[1,2]\\d{3}', last)\n first = f\"{first} {reg5[0]}\"\n else: # days in `first` => varying days:\n # Take the month and year from last and add it on.\n reg6 = re.findall(r'\\w+ [1,2]\\d{3}', last)\n if len(reg6) > 0:\n first = f\"{first} {reg6[0]}\"\n\n if 's' in last:\n reg7 = re.findall(r'([1,2]\\d{2}0)s', time)\n for reg in reg7:\n last = last.replace(f\"{reg}s\", str(int(reg) + 10)) # end is 10 years later.\n\n if re.match(r'\\w+\\s\\d+', last): # assuming month and year\n time_delta = pd.tseries.offsets.DateOffset(months=1)\n elif re.match(r'[a-zA-Z]', last): # assuming it's a month\n time_delta = pd.tseries.offsets.DateOffset(months=1)\n elif re.match(r'[1,2]\\d{3}', last): # assuming it's a year\n time_delta = pd.tseries.offsets.DateOffset(months=12)\n elif re.match(r'\\d+', last).span()[1] - re.match(r'\\d+', last).span()[0] <= 2: # assuming it's a day:\n time_delta = pd.tseries.offsets.DateOffset(months=0)\n else:\n logging.info(f\"Can't guess format of {last} from {original_time_string}\")\n return date, date_status\n\n try:\n last = pd.to_datetime(last)\n except:\n logging.info(f\"Could not parse `last` ({last}) into `datetime` format.\")\n\n return date, date_status\n\n last = last + time_delta\n\n try:\n first = 
pd.to_datetime(first)\n except:\n logging.info(f\"Could not parse `first` ({first}) into `datetime` format.\")\n\n return date, date_status\n\n centre_date = first + (last - first) / 2\n date_status = 'centred'\n return centre_date, date_status\n\n return date, date_status", "def split_before_after(time, columns): \n if time is None or np.isnan(time):\n return [], [], []\n \n currentday = int(time)//24\n before = columns[:currentday]\n current = columns[currentday]\n after = columns[currentday+1:]\n return (before, current, after)", "def discrete_trajectory_to_wait_times(data, t_col='t', state_col='state'):\n\n states = data[state_col].values\n times = data[t_col].values\n num_measurements = len(data)\n\n # now iterate through valid part of trajectory to establish wait times\n start_times = []\n end_times = []\n earliest_st = [] # bounds on start time\n latest_st = []\n earliest_et = [] # bounds on end time\n latest_et = []\n wait_state = []\n wait_type = []\n k0 = 0 # index at which current state began\n state = states[k0]\n state_has_changed = False\n for k in range(num_measurements):\n # if no state change, continue\n if states[k] == state:\n continue\n # otherwise, store change\n start_times.append(times[k0])\n end_times.append(times[k])\n wait_state.append(state)\n # bounds on true wait time value\n if k0 == 0: # left exterior times have exactly determined \"start\"\n earliest_st.append(times[k0])\n else:\n earliest_st.append(times[k0-1])\n latest_st.append(times[k0])\n earliest_et.append(times[k-1])\n latest_et.append(times[k])\n # if this is the first state change, we store it separately\n if not state_has_changed:\n wait_type.append('left exterior')\n state_has_changed = True\n # otherwise, a normal state change\n else:\n wait_type.append('interior')\n # either way, state has changed\n state = states[k]\n k0 = k\n # also store the time spent in final state\n start_times.append(times[k0])\n end_times.append(times[k])\n if k0 == 0: # full exterior times also have exactly determined \"start\"\n earliest_st.append(times[k0])\n else:\n earliest_st.append(times[k0-1])\n latest_st.append(times[k0])\n # right/full exterior times have exactly determined \"end\"\n earliest_et.append(times[k])\n latest_et.append(times[k])\n # state type stored specially for final state\n wait_state.append(state)\n if not state_has_changed:\n wait_type.append('full exterior')\n else:\n wait_type.append('right exterior')\n start_times = np.array(start_times)\n end_times = np.array(end_times)\n wait_times = end_times - start_times\n min_waits = np.array(earliest_et) - np.array(latest_st)\n max_waits = np.array(latest_et) - np.array(earliest_st)\n df = pd.DataFrame({'start_time': start_times, 'end_time': end_times,\n 'wait_time': wait_times, 'state': wait_state,\n 'min_waits': min_waits, 'max_waits': max_waits,\n 'wait_type': wait_type})\n df.index.name = 'rank_order'\n df['window_size'] = times[-1] - times[0]\n return df", "def time_slices(field=['uu1'], datadir='data/', proc=-1, extension='xz',\n format='native', tmin=0., tmax=1.e38, amin=0., amax=1.,\n transform='plane[0]', dtstep=1, deltat=0,\n oldfile=False, outfile=\"\"):\n\n import pylab as plt\n\n datadir = os.path.expanduser(datadir)\n if outfile != \"\":\n outslice = open(outfile, \"w\")\n filename = []\n if proc < 0:\n for i in field:\n filename += [datadir + '/slice_' + i + '.' + extension]\n else:\n for i in field:\n filename += [datadir + '/proc' +\n str(proc) + '/slice_' + i + '.' 
+ extension]\n\n # Read the global dimensions.\n dim = read_dim(datadir, proc)\n if dim.precision == 'D':\n precision = 'd'\n else:\n precision = 'f'\n\n # Set up slice plane.\n if extension == 'xy' or extension == 'Xy':\n hsize = dim.nx\n vsize = dim.ny\n if extension == 'xz':\n hsize = dim.nx\n vsize = dim.nz\n if extension == 'yz':\n hsize = dim.ny\n vsize = dim.nz\n plane = []\n infile = []\n for i in filename:\n plane += [np.zeros((vsize, hsize), dtype=precision)]\n\n infile += [npfile(i, endian=format)]\n\n ifirst = True\n islice = 0\n plotplane = []\n dt = 0\n nextt = tmin\n while True:\n try:\n raw_data = []\n for i in infile:\n raw_data += [i.fort_read(precision)]\n except ValueError:\n break\n except TypeError:\n break\n\n if oldfile:\n t = raw_data[0][-1]\n for i in range(len(raw_data)):\n plane[i] = raw_data[i][:-1].reshape(vsize, hsize)\n else:\n t = raw_data[0][-2]\n for i in range(len(raw_data)):\n plane[i] = raw_data[i][:-2].reshape(vsize, hsize)\n\n exec('tempplane =' + transform)\n\n if t > tmin and t < tmax:\n if dt == 0:\n plotplane += tempplane.tolist()\n\n if ifirst:\n #print \"----islice----------t---------min-------max-------delta\" # Python 2\n print(\"----islice----------t---------min-------max-------delta\")\n #print \"%10i %10.3e %10.3e %10.3e %10.3e\" % \\ # Python 2\n #(islice, t, tempplane.min(), tempplane.max(), # Python 2\n #tempplane.max() - tempplane.min()) # Python 2\n print(\"{0:10} {1:10.3e} {2:10.3e} {3:10.3e} {4:10.3e}\".format(islice, t, tempplane.min(), tempplane.max(), tempplane.max() - tempplane.min()))\n if outfile != \"\":\n outslice.write(\n #\"%10i %10.3e %10.3e %10.3e %10.3e\" % # Python 2\n #(islice, # Python 2\n #t, # Python 2\n #tempplane.min(), # Python 2\n #tempplane.max(), # Python 2\n #tempplane.max() - # Python 2\n #tempplane.min())) # Python 2\n \"{0:10} {1:10.3e} {2:10.3e} {3:10.3e} {4:10.3e}\".format(\n islice,\n t,\n tempplane.min(),\n tempplane.max(),\n tempplane.max() -\n tempplane.min())) \n outslice.write(\"\\n\")\n\n ifirst = False\n islice += 1\n nextt = t + deltat\n if deltat == 0:\n dt = (dt + 1) % dtstep\n elif t >= nextt:\n dt = 0\n nextt = t + deltat\n else:\n dt = 1\n\n ax = plt.axes()\n ax.set_xlabel('t')\n ax.set_ylabel('y')\n ax.set_ylim\n plt.imshow(np.array(plotplane).reshape(islice, vsize).transpose(),\n vmin=amin, vmax=amax)\n manager = plt.get_current_fig_manager()\n manager.show()\n\n for i in infile:\n i.close()\n if outfile != \"\":\n outslice.close()", "def split(self, time: float) -> Tuple['Trajectory','Trajectory']:\n if time <= self.times[0]:\n #split before start of trajectory\n return self.constructor()([time],[self.milestones[0]]),self.constructor()([time]+self.times,[self.milestones[0]]+self.milestones)\n elif time >= self.times[-1]:\n #split after end of trajectory\n return self.constructor()(self.times+[time],self.milestones+[self.milestones[-1]]),self.constructor()([time],[self.milestones[-1]])\n i,u = self.getSegment(time)\n assert i >= 0,\"getSegment returned -1? 
something must be wrong with the times\"\n #split in middle of trajectory\n splitpt = self.interpolate_state(self.milestones[i],self.milestones[i+1],u,self.times[i+1]-self.times[i])\n front = self.constructor()(self.times[:i+1],self.milestones[:i+1])\n back = self.constructor()(self.times[i+1:],self.milestones[i+1:])\n if u > 0:\n front.times.append(time)\n front.milestones.append(splitpt)\n if u < 1:\n back.times = [time] + back.times\n back.milestones = [splitpt] + back.milestones\n return (front,back)", "def _splitTime(self, time): \n if (time):\n x = re.split(\"[-\\/\\s:]\", time)\n else:\n x = []\n # Pad the list to four elements (year,month,day,hour)\n while (len(x) < 4):\n x.append(None)\n return x", "def _prep_times(self):\n self.test_times = 'diagonal'\n if hasattr(self, 'times'):\n self.train_times = self.times\n if hasattr(self, 'times_'):\n self.train_times_ = self.times_\n self.test_times_ = _DecodingTime()\n self.test_times_['slices'] = [[slic] for slic in\n self.train_times_['slices']]\n self.test_times_['times'] = [[tim] for tim in\n self.train_times_['times']]\n if hasattr(self, 'scores_'):\n self.scores_ = [[score] for score in self.scores_]\n if hasattr(self, 'y_pred_'):\n self.y_pred_ = [[y_pred] for y_pred in self.y_pred_]", "def sanitize(time_string): # Fix non-uniformity in the athletes data to enable sorting\n if '-' in time_string:\n splitter = '-'\n (mins, secs) = time_string.split(splitter)\n elif ':' in time_string:\n splitter = ':'\n (mins, secs) = time_string.split(splitter)\n else:\n return time_string\n return '{0}.{1}'.format(mins, secs)", "def OPCtimetransformOld(data, to):\n outtimes = []\n \n times = {\n 'ms':[],\n 'SS':[],\n 'MM':[],\n 'HH':[]\n }\n for i in range(0, len(data)):\n item = data[i]\n try: \n times['HH'].append(int(item[0:2]))\n times['MM'].append(int(item[2:4]))\n times['SS'].append(int(item[4:6]))\n times['ms'].append(int(item[7:9]))\n except ValueError:\n # strange value 2319010.00 in 201129 file...\n olditem = item\n newitem = item[:4] + item[4+1:]\n print( ('Repairing strange value %s into %s')%(olditem, newitem) )\n try:\n times['HH'].append(int(newitem[0:2]))\n times['MM'].append(int(newitem[2:4]))\n times['SS'].append(int(newitem[4:6]))\n times['ms'].append(int(newitem[7:9]))\n except ValueError:\n print(newitem)\n\n # OPC times go up to 60 minutes. 
This is corrected by moving one minute\n times['MM'] = [max(0,x-1) for x in times['MM']]\n times['SS'] = [max(0,x-1) for x in times['SS']]\n\n for i in range(0, len(data)):\n md = dt.datetime(1900,1,1,times['HH'][i], times['MM'][i], times['SS'][i]) \n outtimes.append( dt.datetime.strftime(md, to))\n\n return outtimes", "def available_timing(filename,day,venue):\n #reading the file\n f = open(filename,\"r\")\n incsv = f.readlines()\n #removing affixes\n incsv[:] = [i.rstrip('\\n') for i in incsv]\n #lines into lists\n tempstr = \"\"\n templist = []\n for j in range(len(incsv)):\n #enters each line into temporary string variable\n tempstr = incsv[j]\n #enters the split string into a temporary list variable\n templist.append(tempstr.split(\",\"))\n #modify original line in original list with split list\n incsv[j] = templist\n #reset temporary variables\n tempstr = \"\"\n templist = []\n #finding occupied hours\n brlist = []\n #for all lines in file\n for k in range(len(incsv)):\n #if venue in line matches desired venue and day in line matches desired day\n if incsv[k][0][7] == venue and int(incsv[k][0][3]) == day:\n #add time range of line into brlist\n brlist.append([int(incsv[k][0][5]),int(incsv[k][0][6])])\n #pruning\n #tlist stands for timelist. stores remaining hours for synthesis\n tlist = []\n #list of hours\n tlist = [600,700,800,900,1000,1100,1200,1300,1400,1500,1600,1700,1800,1900,2000,2100,2200,2300,2400]\n #for line in brlist\n for l in range(len(brlist)):\n #for the range of hours of the line\n for m in range(int((brlist[l][1]-brlist[l][0])/100)):\n #if hours in range still in tlist\n if (brlist[l][0] + m*100) in tlist:\n #remove from tlist\n tlist.remove(brlist[l][0] + m*100)\n #plist for partition list. range of available timings appended here\n plist = []\n #check is for the start time of each available time ranges\n check = 0\n #formation of time ranges\n #for hours in tlist\n for n in range(len(tlist)):\n #if code is checking element 2. 
Could have used range(1,len(tlist)) but nevermind\n if n >= 1:\n #if 2 adjacent hours are not consecutive\n if tlist[n] != (tlist[n-1]+100):\n #add time range to plist\n plist.append((tlist[check],tlist[n-1]+100))\n #set check to next minimum available start time\n check = n\n #adding range with last hour\n #if last hour in tlist is 2400 and precedent hour in tlist is 2300\n if tlist[n] == 2400 and tlist[n-1] == 2300:\n #add time range\n plist.append((tlist[check],2400))\n return plist", "def split_by_timegap(X, timename='time', hours=1):\n \n time = X[timename].values\n dt = np.diff(time)\n\n # print(len(time))\n \n i = [int(ii) for ii in np.where(dt>np.timedelta64(hours, 'h'))[0]] + [len(time)-1]\n i = [-1] + list(set(i))\n i.sort()\n \n # print('Split index')\n # print(i)\n \n Xs = [X.isel({timename: np.arange(i[j]+1, i[j+1])}) for j in np.arange(0, len(i)-1)]\n \n return Xs", "def process_timecards(self):\n timecard = open('timecards.txt','r')\n time_temp = []\n time = []\n for line in timecard:\n time_temp.append(line)\n for i in time_temp:\n time.append(i.split(','))\n for i in time:\n for q in range(len(i)):\n if q == 0:\n pass\n else:\n i[q] = float(i[q])\n for i in time:\n for q in range(len(i)):\n self.timecard[i[0]] = i[1:]\n #print(self.timecard)\n return self.timecard", "def convert_clocks_time_to_time(\n scan_key: dict,\n time_boundaries,\n source_type: str,\n target_type: str,\n return_interpolate: bool = False,\n drop_single_idx: bool = True,\n debug: bool = True,\n):\n\n ##\n ## Fetch source and target times, along with converting between Stimulus or Behavior clock if needed\n ##\n\n source_times, target_times = fetch_timing_data(\n scan_key, source_type, target_type, debug\n )\n\n ##\n ## Check if None is used to set to full length of signal or fix common error of not having a list of lists\n ##\n\n if time_boundaries is None:\n time_start = np.nanmin(source_times)\n time_stop = np.nanmax(source_times)\n time_boundaries = [[time_start, time_stop]]\n elif isinstance(time_boundaries[0], numbers.Number):\n time_boundaries = [time_boundaries]\n\n ##\n ## Convert source indices to time boundaries, then convert time boundaries into target indices\n ##\n\n target_indices = []\n single_idx_count = 0\n\n ## Loop through start & end times and create list of indices corresponding to that block of time\n for [start, end] in time_boundaries:\n target_idx = np.where(\n np.logical_and(target_times >= start, target_times <= end)\n )[0]\n if len(target_idx) < 2:\n if drop_single_idx:\n single_idx_count += 1\n else:\n msg = (\n f\"Event of length {len(target_idx)} found. \"\n f\"Set drop_single_idx to True to suppress these errors.\"\n )\n raise PipelineException(msg)\n else:\n target_indices.append(target_idx)\n\n if debug:\n print(f\"Indices converted. 
{single_idx_count} events of length 0 or 1 dropped.\")\n\n ##\n ## Interpolate related signal if requested, else return target times.\n ##\n\n if return_interpolate:\n\n ## Create full interpolated signal\n interpolated_signal = interpolate_signal_data(\n scan_key, source_type, target_type, source_times, target_times, debug=debug\n )\n\n ## Split indices given into fragments based on which ones are continuous (incrementing by 1)\n source_signal_fragments = []\n for idx_fragment in target_indices:\n source_signal_fragments.append(interpolated_signal[idx_fragment])\n\n ## If full signal is converted, remove wrapping list\n if len(source_signal_fragments) == 1:\n source_signal_fragments = source_signal_fragments[0]\n\n return source_signal_fragments\n\n else:\n\n ## Convert indices to times and return\n source_times_to_target_times = []\n\n for target_idx_list in target_indices:\n source_times_to_target_times.append(target_times[target_idx_list])\n\n return source_times_to_target_times", "def infer_parts_to_slice(start,end,skip,sequence):\n\ttry:\n\t\t# protect from ambiguous step names which occurs when the time stamp starts at zero on a new step \n\t\t# ... and the original method cannot tell which step to use. typically the last step is the only \n\t\t# ... relevant one since preceding steps are usualy preparatory e.g. with restraints. users who wish\n\t\t# ... to have more control are welcome to code up something more specific. the slicer is due for an\n\t\t# ... overhaul anyway. for now, we just try to get the right sequence by restricting attention to\n\t\t# ... the last step. since the toc is sorted this is easy.\n\t\t# all steps have the same sn and they should be ordered from the toc so we filter by the last one\n\t\t#! note that this breaks v563. 
fixed it by linking things in and moving s01\n\t\tlast_step = sequence[-1][0][1]\n\t\tsequence_alt = [s for s in sequence if s[0][1]==last_step]\n\t\tslice_target = infer_parts_to_slice_legacy(start,end,skip,sequence_alt)\n\t# fall back to the original method\n\texcept Exception as e: \n\t\traise \n\t\tslice_target = infer_parts_to_slice_legacy(start,end,skip,sequence)\n\treturn slice_target", "def test_time_lapse(self):\n t0 = time.time()\n time.sleep(2)\n lap = time_lapse(t0)\n self.assertEqual(lap, '00:00:02')", "def at(self, time_slices):\n\n if self.base is not None:\n return self.base.at(time_slices)\n\n if isinstance(time_slices, TimeSlice):\n time_slices = [time_slices]\n\n # join the time slice values\n timed_data = pd.DataFrame(columns=self.data.columns)\n\n # make the new data\n for slice_t in time_slices:\n slice_index = (slice_t.time <= self.data.index) & (\n self.data.index < slice_t.time + slice_t.duration\n )\n timed_data.loc[slice_t.time] = self.aggregate(\n self.data[slice_index], axis=0\n )\n\n # return the new feature object\n return Feature(\n data=timed_data,\n aggregate=self.aggregate,\n base=self,\n time_slices=time_slices,\n )", "def fix_annotation(csv_data, time_offset = 0):\n # step 1: eliminate rows with same starttime and endtime\n csv_data = csv_data[csv_data.STARTTIME != csv_data.ENDTIME]\n\n # step 2: elminate nan in starttime and endtime\n csv_data = csv_data.dropna(axis=0,subset=[st_col,et_col])\n\n # step 3: fill \"blank\" cells\n csv_data = csv_data.reset_index(drop=True)\n csv_data[puff_col] = csv_data[puff_col].fillna(value='no-puff')\n csv_data[activity_col] = csv_data[activity_col].fillna(value='no-activity')\n csv_data[post_col] = csv_data[post_col].fillna(method='backfill')\n csv_data[post_col] = csv_data[post_col].fillna(method='ffill')\n csv_data[smoke_col] = csv_data[smoke_col].fillna(value='not-smoking')\n \n # step 4: fill 'no-activity' cells whose length is less than 3s with backfill\n csv_data = csv_data.reset_index(drop=True)\n filt = csv_data.apply(lambda x: x[et_col] - x[st_col] <= timedelta(seconds=2) and x[activity_col] == 'no-activity', axis=1)\n csv_data.ix[csv_data[filt].index, activity_col] = csv_data.ix[csv_data[filt].index+1, activity_col].values\n csv_data[activity_col] = csv_data[activity_col].fillna(value='no-activity')\n # step 5: change isolated single \"smoking\" cells into proper label\n bshift_smoke = csv_data[smoke_col].shift(1).fillna(method='backfill')\n fshift_smoke = csv_data[smoke_col].shift(-1).fillna(method='ffill')\n filt = np.logical_and(csv_data[smoke_col] != bshift_smoke, csv_data[smoke_col] != fshift_smoke)\n # print csv_data[filt]\n # ind = csv_data[filt].index\n filt1 = np.logical_and(filt, csv_data[smoke_col] == 'smoking')\n csv_data.ix[filt1, smoke_col] = 'not-smoking'\n filt = np.logical_and(csv_data[smoke_col] != bshift_smoke, csv_data[smoke_col] != fshift_smoke)\n filt2 = np.logical_and(np.logical_and(filt, csv_data[smoke_col] == 'not-smoking'), csv_data.apply(lambda x: x[et_col] - x[st_col] < timedelta(minutes=1),axis=1))\n csv_data.ix[filt2, smoke_col] = 'smoking'\n # print csv_data.iloc[ind]\n\n # step 6: turn smoking sequence without puffs into \"not smoking\"\n st_filt = np.logical_and(csv_data[smoke_col] != csv_data[smoke_col].shift(1), csv_data[smoke_col] == 'smoking')\n et_filt = np.logical_and(csv_data[smoke_col] != csv_data[smoke_col].shift(-1), csv_data[smoke_col] == 'smoking')\n cig_st = csv_data[st_filt]\n cig_et = csv_data[et_filt]\n for i in range(0,len(cig_st.index)):\n puff_flag = 
csv_data[cig_st.index[i]:cig_et.index[i]+1][puff_col] == 'no-puff'\n if puff_flag.all():\n csv_data[cig_st.index[i]:cig_et.index[i]+1][smoke_col] = 'not-smoking'\n\n # step 7: add offset to starttime and endtime\n # print csv_data.head()\n csv_data[et_col] = csv_data[et_col] + timedelta(seconds=time_offset)\n csv_data[st_col] = csv_data[st_col] + timedelta(seconds=time_offset)\n # print csv_data.head()\n\n # step 8: reindex from 0\n csv_data = csv_data.reset_index(drop=True)\n return csv_data", "def _parse_hpr_time_series(self, offset, rules):\n # Unpack the unpacking rules\n (hpr_num_name, beam_angle_name, spare_name, hpr_time_names),\\\n (hpr_num_fmt, beam_angle_fmt, spare_fmt, hpr_time_fmt) = zip(*rules)\n\n # First unpack the array length and single length value, no need to unpack spare\n (hpr_num_data, beam_angle_data) = struct.unpack_from(\n '<%s%s' % (hpr_num_fmt, beam_angle_fmt), self.raw_data, offset)\n\n # Then unpack the array using the retrieved lengths value\n next_offset = offset + struct.calcsize(hpr_num_fmt) + struct.calcsize(beam_angle_fmt) + \\\n struct.calcsize(spare_fmt)\n hpr_time_list_data = struct.unpack_from(\n '<%s%s' % (hpr_num_data * HPR_TIME_SERIES_ARRAY_SIZE, hpr_time_fmt), self.raw_data, next_offset)\n\n # convert to numpy array and reshape the data to a 2d array per IDD spec\n transformed_hpr_time_data = numpy.array(hpr_time_list_data).reshape(\n (hpr_num_data, HPR_TIME_SERIES_ARRAY_SIZE)).transpose().tolist()\n\n # Add to the collected parameter data\n self.final_result.extend(\n ({DataParticleKey.VALUE_ID: hpr_num_name, DataParticleKey.VALUE: hpr_num_data},\n {DataParticleKey.VALUE_ID: beam_angle_name, DataParticleKey.VALUE: beam_angle_data},\n {DataParticleKey.VALUE_ID: hpr_time_names[HEADING_TIME_SERIES_IDX],\n DataParticleKey.VALUE: transformed_hpr_time_data[HEADING_TIME_SERIES_IDX]},\n {DataParticleKey.VALUE_ID: hpr_time_names[PITCH_TIME_SERIES_IDX],\n DataParticleKey.VALUE: transformed_hpr_time_data[PITCH_TIME_SERIES_IDX]},\n {DataParticleKey.VALUE_ID: hpr_time_names[ROLL_TIME_SERIES_IDX],\n DataParticleKey.VALUE: transformed_hpr_time_data[ROLL_TIME_SERIES_IDX]}))", "def timeSlice(requestContext, seriesList, startSliceAt, endSliceAt=\"now\"):\n\n results = []\n start = time.mktime(parseATTime(startSliceAt).timetuple())\n end = time.mktime(parseATTime(endSliceAt).timetuple())\n\n for slicedSeries in seriesList:\n slicedSeries.name = 'timeSlice(%s, %s, %s)' % (slicedSeries.name, int(start), int(end))\n\n curr = time.mktime(requestContext[\"startTime\"].timetuple())\n for i, v in enumerate(slicedSeries):\n if v is None or curr < start or curr > end:\n slicedSeries[i] = None\n curr += slicedSeries.step\n\n results.append(slicedSeries)\n\n return results", "def decodeSpaceTime(self, result):\r\n if self.case == 1:\r\n if self.granularity == 'day':\r\n return map(lambda x: [reader.formatTime(reader.inverseDaySinceEpoch(int(x[0]/self.scale))),\r\n reader.morton2coordsX2D(x[1], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY2D(x[1], self.offy, self.scaley, self.roundNum), x[2]], result)\r\n else:\r\n return map(lambda x: [int(x[0]/self.scale), reader.morton2coordsX2D(x[1], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY2D(x[1], self.offy, self.scaley, self.roundNum), x[2]], result)\r\n elif self.case == 2:\r\n if self.granularity == 'day':\r\n return map(lambda x: [reader.formatTime(reader.inverseDaySinceEpoch(int(x[0]/self.scale))), \r\n reader.morton2coordsX3D(x[1], self.offx, self.scalex, self.roundNum), \r\n 
reader.morton2coordsY3D(x[1], self.offy, self.scaley, self.roundNum), \r\n reader.morton2coordsZ3D(x[1], self.offz, self.scalez, self.roundNum)], result)\r\n else:\r\n return map(lambda x: [int(x[0]/self.scale), reader.morton2coordsX3D(x[1], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY3D(x[1], self.offy, self.scaley, self.roundNum), \r\n reader.morton2coordsZ3D(x[1], self.offz, self.scalez, self.roundNum)], result)\r\n elif self.case == 3:\r\n if self.granularity == 'day':\r\n return map(lambda x: [reader.formatTime(reader.inverseDaySinceEpoch(int(reader.morton2coordst3D(x[0])/self.scale))), \r\n reader.morton2coordsY3D(x[0], self.offx, self.scalex, self.roundNum),\r\n reader.morton2coordsZ3D(x[0], self.offy, self.scaley, self.roundNum), x[1]], result)\r\n else:\r\n return map(lambda x: [int(reader.morton2coordst3D(x[0])/self.scale), \r\n reader.morton2coordsY3D(x[0], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsZ3D(x[0], self.offy, self.scaley, self.roundNum), x[1]], result)\r\n elif self.case == 4:\r\n if self.granularity == 'day':\r\n return map(lambda x: [reader.formatTime(reader.inverseDaySinceEpoch(int(reader.morton2coordst4D(x[0])/self.scale))), \r\n reader.morton2coordsX4D(x[0], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY4D(x[0], self.offy, self.scaley, self.roundNum), \r\n reader.morton2coordsZ4D(x[0], self.offz, self.scalez, self.roundNum)], result)\r\n else:\r\n return map(lambda x: [int(reader.morton2coordst4D(x[0])/self.scale), \r\n reader.morton2coordsX4D(x[0], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY4D(x[0], self.offy, self.scaley, self.roundNum), \r\n reader.morton2coordsZ4D(x[0], self.offz, self.scalez, self.roundNum)], result)", "def condense_meeting_times_2(arr):\n\n # sort the meeting times by start time (this will be O(lg(n)), at least)\n # without sorting by start times, the random order will make this O(n^2)\n arr.sort()\n\n # make a list to store output\n output = [arr[0]]\n\n # iterate over all the time blocks and check for merges\n for time_block in arr[1:]:\n # get the times to compare against from the latest block in output\n first_start, first_stop = output[-1]\n # unpack the current time block being assessed for overlap\n second_start, second_stop = time_block\n # if the current time block overlaps with most recent, condense the two\n # by updating the entire tuple in the output list with latest time\n if second_start <= first_stop:\n output[-1] = (first_start, max(first_stop, second_stop))\n # else, there was no overlap. 
Add current to output and continue loop\n else:\n output.append((second_start, second_stop))\n\n return output", "def processPhaseDuration(self, line, criticalPointList1, criticalPointList2):\n list1, list2 = ([] for i in range(2))\n \n phaseDurationOfP1, phaseDurationOfP2, phaseDurationOfP3, phaseDurationOfP4, phaseDurationOfP5, phaseDurationOfP6, phaseDurationOfP7, phaseDurationOfP8 = line.split()\n\n self.phaseDurationList = [float(phaseDurationOfP1), float(phaseDurationOfP2), float(phaseDurationOfP3), float(phaseDurationOfP4),\n float(phaseDurationOfP5), float(phaseDurationOfP6), float(phaseDurationOfP7), float(phaseDurationOfP8)]\n\n [list1.append(value) for index, value in enumerate(self.phaseDurationList) if value > 0.0 and index < 4]\n [list2.append(value) for index, value in enumerate(self.phaseDurationList) if value > 0.0 and index >= 4]\n\n [criticalPointList1.append(value) for value in list1 if len(list1) > 0 and len(list2) > 0]\n [criticalPointList2.append(value) for value in list2 if len(list1) > 0 and len(list2) > 0]", "def isolate_self_reporting_cases(self, time: int):" ]
[ "0.58654743", "0.557243", "0.54334253", "0.5312071", "0.53107566", "0.5259074", "0.5191302", "0.5137161", "0.5035805", "0.49849787", "0.49590302", "0.49435392", "0.4941344", "0.48714745", "0.48218355", "0.48059455", "0.47934845", "0.47744334", "0.47464475", "0.47226936", "0.472224", "0.47167215", "0.4698919", "0.46935612", "0.46690962", "0.46668243", "0.4661576", "0.46453714", "0.4634165", "0.46112075" ]
0.5932365
0
Authenticate a request. Returns a `User` if a valid token has been supplied using HTTP Basic authentication. Otherwise returns `None`.
def authenticate(self, request): auth = get_authorization_header(request).split() if not auth or auth[0].lower() != b"basic": return None if len(auth) == 1: raise AuthenticationFailed( "Invalid Basic authorization header. No credentials provided." ) elif len(auth) > 2: raise AuthenticationFailed( "Invalid Basic authorization header. Credentials string should not contain spaces." ) try: auth_parts = ( base64.b64decode(auth[1]).decode(HTTP_HEADER_ENCODING).split(":") ) except (TypeError, UnicodeDecodeError, binascii.Error): raise AuthenticationFailed( "Invalid Basic authorization header. Credentials not correctly base64 encoded." ) username, password = ( auth_parts if len(auth_parts) >= 2 else (auth_parts[0], None) ) if password: if settings.API_BASIC_AUTH: return DRFBasicAuthentication().authenticate_credentials( username, password, request ) else: raise AuthenticationFailed( "Basic authorization with a password is not allowed; use an API token instead." ) else: # Treat the username as a token; pass it on to `knox.TokenAuthentication` token = username.encode("utf-8") return TokenAuthentication().authenticate_credentials(token)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def authenticate(self, request):\n\n if \"Authorization\" not in request.headers:\n return\n\n auth = request.headers[\"Authorization\"]\n\n scheme, token = auth.split()\n if scheme.lower() != 'bearer':\n raise AuthenticationError(\n \"Please use Bearer as authorization scheme.\")\n\n try:\n payload = await self._auth.validate_token(token, audience=\n tedious.config.CONFIG[\"TOKEN\"][\"audience\"])\n requester = Requester(uuid=payload['uid'],\n username=payload['name'],\n role=payload['role'])\n return AuthCredentials([\"authenticated\"]), RequestUser(requester)\n except InvalidToken as e:\n raise AuthenticationError(str(e))", "def authenticateRequest(request, storeSessionCookie=False):\n if SESSION_KEY in request.session:\n user = ezidapp.models.getUserById(request.session[SESSION_KEY])\n if user != None and user.loginEnabled:\n return user\n else:\n return None\n elif \"HTTP_AUTHORIZATION\" in request.META:\n h = request.META[\"HTTP_AUTHORIZATION\"].split()\n try:\n assert len(h) == 2 and h[0] == \"Basic\"\n s = base64.decodestring(h[1])\n assert \":\" in s\n except:\n return \"error: bad request - malformed Authorization header\"\n return authenticate(\n *s.split(\":\", 1),\n request=(request if storeSessionCookie else None),\n coAuthenticate=False\n )\n else:\n return None", "def load_user_from_request(request):\n credentials = request.headers.get('Credentials')\n if not credentials:\n credentials = request.headers.get('Authorization')\n if not credentials:\n return None\n\n # Cases where the header may be of the form `Authorization: Basic api_key`\n credentials = credentials.replace('Basic ', '', 1)\n\n try:\n credentials = base64.b64decode(credentials).decode('utf-8')\n except (UnicodeDecodeError, binascii.Error):\n return None\n username, password = credentials.split('|')\n user = get_user(Users, username)\n if user:\n if user.check_password_hash(password.strip()):\n log(\n f'User <code>{user.name}</code> just authenticated a {request.method} API call with credentials!',\n )\n return user\n return None", "def basic_auth(user, password):\n return AuthToken(\"basic\", user, password)", "def auth(self):\r\n basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))\r\n if basic: return basic\r\n ruser = self.environ.get('REMOTE_USER')\r\n if ruser: return (ruser, None)\r\n return None", "def get(self):\n\n\t\trequest = user_auth_parser.parse_args(strict=True)\n\n\t\tresult = Authenticator.authenticate(\n\t\t\trequest[\"username\"],\n\t\t\trequest[\"password\"]\n\t\t)\n\n\t\treturn result", "def authenticate():\n return Response(\n '', 401, {'WWW-Authenticate': 'Basic realm=\"Login Required\"'}\n )", "def authenticate():\n return Response('Not Authorized', 401, {'WWW-Authenticate': 'Basic realm=\"api\"'})", "def authenticate():\n return Response(\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate_user():\n if request.headers['content-type'] == 'application/json':\n print(request)\n data = request.get_json()\n if data:\n username = data['username']\n password = data['password']\n else:\n return Response(status=400) # no JSON to parse\n\n if username is None or password is None:\n return Response(status=400) # missing arguments\n\n if not verify_password(username, password):\n return Response(status=403) # User not authenticated\n\n return jsonify({'username': username, 'success': True}), 201\n else:\n print(\"invalid request type, no json\")\n return Response(status=400) # invalid request type", 
"def authenticate(self, environ):\n try:\n hd = parse_auth_header(environ['HTTP_AUTHORIZATION'])\n except:\n return False\n\n return self.authfn(hd['user'], hd['password'], self.get_realm(environ), environ)", "def requires_http_basic_auth(self, f: Callable):\n\n @functools.wraps(f)\n def decorated(*args, **kwargs):\n # Try to authenticate user from HTTP basic auth headers (failure will raise appropriate exception).\n self.authenticate_basic(request)\n # TODO: optionally pass access_token and user_id from authentication result?\n return f(*args, **kwargs)\n\n return decorated", "def basic_auth(user=\"user\", passwd=\"passwd\"):\n\n if not check_basic_auth(user, passwd):\n return status_code(401)\n\n return jsonify(authenticated=True, user=user)", "def Authenticate(self, req, username, password):\n ctx = self._GetRequestContext(req)\n\n user = self._user_fn(username)\n if not (user and\n self.VerifyBasicAuthPassword(req, username, password,\n user.password)):\n # Unknown user or password wrong\n return False\n\n if (not ctx.handler_access or\n set(user.options).intersection(ctx.handler_access)):\n # Allow access\n return True\n\n # Access forbidden\n raise http.HttpForbidden()", "def authenticate(self):\n try:\n auth_header = self.basic_token\n username, password = decode(auth_header)\n\n user_principal = None\n allowlisted_users = Environment().get_allowlisted_users()\n if allowlisted_users is not None:\n password_from_allowlist = allowlisted_users.get(username)\n if password_from_allowlist is None or password_from_allowlist != password:\n logger.log_error(\"Invalid user credentials provided\")\n raise AuthenticationError(\"Invalid user credential\")\n else:\n raise AuthenticationError(\"No whitelisted users found to authenticate against\")\n\n if Environment().is_kerberos_enabled():\n user_principal = self.get_user_principal(username)\n key_tab_path = Environment().get_hdfs_keytab_file_path()\n logger.log_info(\"Minting a kerberos ticket for principal {} using keytab {}\".format(user_principal, key_tab_path))\n if key_tab_path is None or user_principal is None:\n raise AuthenticationError(\"Keytab file or kerberos principal missing\")\n returncode = KerberosUtil.renew_kinit(key_tab_path, user_principal)\n logger.log_info('kinit return code:' + str(returncode))\n\n return username, user_principal\n except Exception as e:\n logger.log_exception(\"Failed while authenticating user\", exc_info=True)\n raise AuthenticationError(str(e))", "def authenticate(self, request):\n\n # Get the underlying HttpRequest object\n request = request._request\n user = getattr(request, 'user', None)\n\n # Unauthenticated, CSRF validation not required\n if not user or not user.is_active:\n return None\n\n #self.enforce_csrf(request)\n\n # CSRF passed with authenticated user\n return (user, None)", "def credentials(self) -> HTTPBasicAuth:\n if self.user is None or self.password is None:\n return None\n else:\n return HTTPBasicAuth(self.user, self.password)", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL, need token.\\n', 403,\n {'WWW-Authenticate': 'Basic realm=\"token Required\"'})", "def load_user_from_request(request):\n if app.auth != \"none\":\n user = basic_login_from_request(request, app)\n _check_session(user, request, True)\n return user", "def authenticate(self, request):\n\n # HTTP_AUTHORIZATION 请求头中对应的值应该为:Token QWxhZGRpbjpvcGVuIHNlc2FtZQ==\n # Token QWxhZGRpbjpvcGVuIHNlc2FtZQ==\n # auth = get_authorization_header(request).split()\n # if not auth or 
auth[0].lower() != self.keyword.lower().encode():\n # # 未获取到授权请求头\n # return None\n #\n # # 授权请求头值太短\n # if len(auth) == 1:\n # msg = _('Invalid token header. No credentials provided.')\n # raise exceptions.AuthenticationFailed(msg)\n #\n # # 授权请求头值太长\n # elif len(auth) > 2:\n # msg = _('Invalid token header. Token string should not contain spaces.')\n # raise exceptions.AuthenticationFailed(msg)\n #\n # try:\n # token = auth[1].decode()\n # except UnicodeError:\n # # 授权请求头值格式错误\n # msg = _('Invalid token header. Token string should not contain invalid characters.')\n # raise exceptions.AuthenticationFailed(msg)\n from rest_framework.request import Request\n token = request.query_params.get('token')\n if not token:\n raise exceptions.AuthenticationFailed('验证失败')\n\n return self.authenticate_credentials(token)", "async def authenticate(self, request: Request):\n\n pass", "async def authentication_handler(request: Request, call_next): # type: ignore\n username = \"anonymous\"\n password = \"anonymous\"\n if \"authorization\" in request.headers:\n header = request.headers[\"authorization\"]\n if header.startswith(\"Basic \"):\n b64encoded = header.replace(\"Basic \", \"\")\n try:\n decoded = b64decode(b64encoded).decode()\n username, password = decoded.split(\":\")\n except Exception as exc: # pylint: disable=broad-except\n logger.error(exc)\n\n if not _authenticate_user(username, password):\n response = ResponsePayload(\n success=False,\n message=\"Authentication failed.\",\n )\n return JSONResponse(status_code=401, content=response.dict())\n\n request.state.user_info = {\n \"username\": username,\n \"password\": password,\n }\n\n response = await call_next(request)\n return response", "def authenticate(self, request=None, **kwargs):\n if request is None:\n return None\n\n access_token = jwt_utils.get_access_token_by_request(request)\n\n if access_token is None:\n return None\n\n try:\n payload = jwt_utils.jwt_decode(access_token)\n\n except DecodeError:\n raise PermissionDenied()\n\n user = User.objects.get_user_or_none(pk=payload.get('sub'))\n\n if not user:\n return None\n\n access_token_is_active = user.refresh_tokens.access_token_is_active(jti=payload['jti'])\n return user if access_token_is_active else None", "def _authenticate_for(self, resp):\n # Get the auth. 
info from the headers\n scheme, params = resp.headers['Www-Authenticate'].split(None, 1)\n assert (scheme == 'Bearer')\n info = {k: v.strip('\"') for k, v in (i.split('=')\n for i in params.split(','))}\n\n # Request a token from the auth server\n params = {k: v for k, v in info.items() if k in ('service', 'scope')}\n auth = HTTPBasicAuth(self.username, self.password)\n r2 = requests.get(info['realm'], params=params,\n auth=auth, verify=self.verify_ssl)\n\n if r2.status_code == 401:\n raise RuntimeError(\"Authentication Error\")\n r2.raise_for_status()\n\n self.auth = BearerAuth(r2.json()['token'])", "def authenticate(self, request):\n\n return self._validate_token(request)", "def authenticate(self, request, username=None, password=None, **kwargs):\n user_model = get_user_model()\n try:\n user = user_model.objects.get(email=username)\n except user_model.DoesNotExist:\n return None\n else:\n if user.check_password(password):\n return user\n return None", "def check_auth(username, password):\n return basic_login(username, password)", "def authenticate():\n return flask.Response('Login required.', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your credentials for that url', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def login():\n if not request.is_json:\n return jsonify({\"msg\": \"Missing JSON in request\"}), 400\n\n username = request.json.get('username', None)\n password = request.json.get('password', None)\n\n user = get_user_by_username(username)\n\n if not user:\n return make_response(CONST_LOGIN_MSG, 401, {\n 'WWW-Authenticate': f'Basic realm=\"{CONST_REALM_MSG}\"'})\n\n if user.check_password(password):\n if user.is_admin:\n claims = {'is_admin': True}\n else:\n claims = {'is_admin': False}\n\n user.last_seen = dt.utcnow()\n db.session.add(user)\n db.session.commit()\n\n now = datetime.datetime.now(datetime.timezone.utc)\n access_expires = (now + jwt_config.access_expires).timestamp()\n refresh_expires = (now + jwt_config.refresh_expires).timestamp()\n\n result = dict(\n access_token=create_access_token(identity=username,\n user_claims=claims),\n access_expires=access_expires,\n refresh_expires=refresh_expires,\n refresh_token=create_refresh_token(identity=username),\n user=get_user_details(user)\n )\n\n return jsonify(dict(result)), 200\n\n return make_response(\n CONST_LOGIN_MSG,\n 401,\n {'WWW-Authenticate': f'Basic realm=\"{CONST_REALM_MSG}\"'})" ]
[ "0.7748496", "0.7482971", "0.7319348", "0.7214823", "0.69572836", "0.69473755", "0.6900045", "0.68219066", "0.6816961", "0.68142855", "0.6797234", "0.6780275", "0.6769279", "0.67666936", "0.6727957", "0.6711529", "0.66963404", "0.66865516", "0.6681249", "0.6672814", "0.66725487", "0.6661452", "0.66549754", "0.6653689", "0.6649376", "0.6647904", "0.6608701", "0.65955675", "0.65894324", "0.65755427" ]
0.791987
0
Do not enforce CSRF.
def enforce_csrf(self, request):
    return  # To not perform the csrf check previously happening
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_csrf_validation(event):\n if event.request.method == 'POST':\n token = event.request.POST.get('_csrf')\n if token is None or token != event.request.session.get_csrf_token():\n headers = forget(event.request) # force a log out\n raise HTTPForbidden('CSRF token is missing or invalid',\n headers=headers)", "def enforce_csrf(request):\n check = CSRFCheck()\n check.process_request(request)\n reason = check.process_view(request, None, (), {})\n if reason:\n # CSRF failed, bail with explicit error message\n raise NotAuthenticated(\"CSRF validation failed: %s\" % reason)", "def test_csrf():\n\n # The authenticate method must not be altered for this test to be valid.\n assert (\n SessionAuthentication.authenticate\n is CsrfExemptSessionAuthentication.authenticate\n )\n\n # The `enforce_csrf` method should just pass with any request.\n assert CsrfExemptSessionAuthentication().enforce_csrf(\"foo\") is None", "def check_csrf(self):\n if (self.HTTP_X_CSRF_TOKEN in os.environ and\n self.is_csrf_token(os.environ[self.HTTP_X_CSRF_TOKEN])):\n pass\n else:\n common.render_error('Invalid CSRF token.')", "def test_csrf_required_if_normal_view(self):\n\n client = Client(enforce_csrf_checks=True)\n response = client.post(reverse(\"test_view\"))\n self.assertEqual(response.status_code, 403)\n\n response = client.post(reverse(\"test_view\"), HTTP_X_APPENGINE_TASKNAME=\"test\")\n self.assertEqual(response.status_code, 200)", "def validate_csrf_token(event):\n request = event.request\n if request.is_xhr or request.method.upper() in ('POST', 'PUT', 'DELETE'):\n pyramid.session.check_csrf_token(request, token='XSRF_TOKEN',\n header='X-XSRF-TOKEN', raises=True)", "def inbound(request):\n\n try:\n csrf_token = request.headers.cookie.get('csrf_token')\n csrf_token = '' if csrf_token is None else csrf_token.value\n csrf_token = _sanitize_token(csrf_token)\n # Use same token next time\n request.context['csrf_token'] = csrf_token\n except KeyError:\n csrf_token = None\n # Generate token and store it in the request, so it's\n # available to the view.\n request.context['csrf_token'] = _get_new_csrf_key()\n\n # Assume that anything not defined as 'safe' by RC2616 needs protection\n if request.line.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):\n\n if _is_secure(request):\n # Suppose user visits http://example.com/\n # An active network attacker (man-in-the-middle, MITM) sends a\n # POST form that targets https://example.com/detonate-bomb/ and\n # submits it via JavaScript.\n #\n # The attacker will need to provide a CSRF cookie and token, but\n # that's no problem for a MITM and the session-independent\n # nonce we're using. So the MITM can circumvent the CSRF\n # protection. This is true for any HTTP connection, but anyone\n # using HTTPS expects better! For this reason, for\n # https://example.com/ we need additional protection that treats\n # http://example.com/ as completely untrusted. Under HTTPS,\n # Barth et al. found that the Referer header is missing for\n # same-domain requests in only about 0.2% of cases or less, so\n # we can use strict Referer checking.\n referer = request.headers.get('Referer')\n if referer is None:\n raise Response(403, REASON_NO_REFERER)\n\n # Note that get_host() includes the port.\n good_referer = 'https://%s/' % _get_host(request)\n if not same_origin(referer, good_referer):\n reason = REASON_BAD_REFERER % (referer, good_referer)\n raise Response(403, reason)\n\n if csrf_token is None:\n # No CSRF cookie. 
For POST requests, we insist on a CSRF cookie,\n # and in this way we can avoid all CSRF attacks, including login\n # CSRF.\n raise Response(403, REASON_NO_CSRF_COOKIE)\n\n # Check non-cookie token for match.\n request_csrf_token = \"\"\n if request.line.method == \"POST\":\n request_csrf_token = request.body.get('csrf_token', '')\n\n if request_csrf_token == \"\":\n # Fall back to X-CSRF-TOKEN, to make things easier for AJAX,\n # and possible for PUT/DELETE.\n request_csrf_token = request.headers.get('X-CSRF-TOKEN', '')\n\n if not constant_time_compare(request_csrf_token, csrf_token):\n raise Response(403, REASON_BAD_TOKEN)", "def csrf_protect():\n if request.method == \"POST\" and request.path[0:5] != \"/api/\":\n token = login_session.pop('_csrf_token', None)\n request_token = request.form.get('_csrf_token')\n print(\"Comparing server token [\" + token + \"]\")\n print(\"with client token [\" + request_token + \"]\")\n if not token or token != request_token:\n print(\"Tokens do not match! Aborting..\")\n abort(403)\n print(\"Tokens match - accepted\")", "def _request_csrf_token(self, params):\n if params.get(\"action\") == \"query\":\n if params.get(\"meta\"):\n if \"tokens\" not in params[\"meta\"].split(\"|\"):\n params[\"meta\"] += \"|tokens\"\n else:\n params[\"meta\"] = \"tokens\"\n if params.get(\"type\"):\n if \"csrf\" not in params[\"type\"].split(\"|\"):\n params[\"type\"] += \"|csrf\"", "def csrf_view_exempt(view_func):\r\n warnings.warn(\"csrf_view_exempt is deprecated. Use csrf_exempt instead.\",\r\n PendingDeprecationWarning)\r\n return csrf_exempt(view_func)", "def check_csrf(f):\n\n @wraps(f)\n @require_login\n def wrapper(*args, **kwds):\n if \"token\" not in session:\n raise PicoException(\n \"Internal server error\",\n data={\"debug\": \"CSRF token not found in session\"},\n )\n submitted_token = request.headers.get(\"X-CSRF-Token\", None)\n if submitted_token is None:\n raise PicoException(\"CSRF token not included in request\", 403)\n if session[\"token\"] != submitted_token:\n raise PicoException(\"CSRF token is not correct\", 403)\n return f(*args, **kwds)\n\n return wrapper", "def xsrf_protected(f):\n @functools.wraps(f)\n def wrapper(self, *args, **kwargs):\n non_xsrf_protected_verbs = ['options', 'head', 'get']\n if (self.request.method.lower() in non_xsrf_protected_verbs or\n self._RequestContainsValidXsrfToken()):\n return f(self, *args, **kwargs)\n else:\n try:\n self.XsrfFail()\n except Exception, e:\n self.handle_exception(e, self.app.debug)\n finally:\n self.session_store.save_sessions(self.response)\n return wrapper", "def csrf_protection(fn):\n def protected(*args):\n if 'X-Requested-With' in request.headers:\n return fn(*args)\n else:\n return \"X-Requested-With header missing\", HTTPStatus.FORBIDDEN\n return protected", "def check_csrf_token(func):\n def new_fn(self, req):\n if 'csrf_token' not in req.params:\n return exc.HTTPForbidden(\"You must provide a CSRF token\")\n\n csrf_token = req.params['csrf_token']\n if not security.valid_csrf_token(csrf_secret, csrf_token):\n return exc.HTTPForbidden(\"Invalid CSRF token\")\n\n return func(self, req)\n\n new_fn.exposed = True\n return new_fn", "def _get_csrf(self):\n\n csrf_token_header_name = \"X-CsrfToken\"\n if csrf_token_header_name not in self.headers:\n home_head_response = requests.head(self.BASE_URL)\n self.cookies.update(home_head_response.cookies)\n csrf_token = self.cookies[\"csrftoken\"]\n csrf_header = {csrf_token_header_name: csrf_token}\n self.headers.update(csrf_header)", "def 
retain_csrf_token(req):\n session = req.environ.get('rex.session', {})\n csrf_token = session.get('_csrf_token')\n if not csrf_token:\n csrf_token = session['_csrf_token'] = b2a(os.urandom(16))\n return csrf_token", "def test_csrf(self):\n response = self.client.get(self.url)\n self.assertContains(response, 'csrfmiddlewaretoken')", "def checkcsrf(func):\n @functools.wraps(func)\n @sessiondecorator\n def wrapper(*args, session = None, **kw):\n if \"X-CSRF-TOKEN\" not in session.cookies:\n getcsrf(session)\n return func(*args,session = session, **kw)\n return wrapper", "def post(self, request, *args, **kwargs):\n verify_secure(request)\n return super().post(request, args, kwargs)", "def post(self, request, *args, **kwargs):\n verify_secure(request)\n return super().post(request, args, kwargs)", "def csrf(request):\n return django_csrf(request)['csrf_token']", "def test_csrf(self):\n self.assertContains(self.response, 'csrfmiddlewaretoken')", "def test_csrf(self):\n self.assertContains(self.response, 'csrfmiddlewaretoken')", "def pre_process_request(self, req, handler):\n\n if self.match_request(req):\n # We disable CSRF protection here and force ourselves as a handler\n req.form_token = None\n return self\n \n return handler", "def csrf_response_exempt(view_func):\r\n warnings.warn(\"csrf_response_exempt is deprecated. It no longer performs a \"\r\n \"function, and calls to it can be removed.\",\r\n PendingDeprecationWarning)\r\n return view_func", "def getcsrf(session):\n session.get(\"http://anichart.net\")", "def test_csrf_no_inject(self, mock_csrf):\n mw = CSRFHeaderInject()\n request = MagicMock()\n response = MagicMock()\n mw.process_response(request, response)\n response.set_cookie.assert_not_called()", "def test_csrf(self):\n self.assertContains(self.resp, 'csrfmiddlewaretoken')", "def xhr_forbidden_view(request):\n return HTTPForbidden()", "def on_GET_request_setup_csrf_cookie(ev) -> None:\n request = ev.request\n if request.method != \"GET\":\n # Skip if not GET. If could detect static requests, would skip too\n return\n token = request.session.get_csrf_token()\n # print(request.session.session_id, token)\n if request.cookies.get(\"XSRF-TOKEN\") != token:\n # Set the Secure flag on the cookie only when serving on https.\n secure: bool = request.registry.settings.get(\n \"scheme_domain_port\", \"\"\n ).startswith(\"https\")\n ev.response.set_cookie(\n COOKIE_NAME,\n token,\n overwrite=True,\n secure=secure,\n httponly=False, # The client reads the cookie to send header\n samesite=\"strict\",\n )" ]
[ "0.77730787", "0.71921813", "0.6985222", "0.68456197", "0.6827762", "0.68134195", "0.6702927", "0.66973877", "0.66799164", "0.66241884", "0.6571412", "0.6558275", "0.65252817", "0.646873", "0.645416", "0.63729364", "0.63329005", "0.6296769", "0.6245508", "0.6245508", "0.6245343", "0.62352467", "0.62352467", "0.62117875", "0.61964583", "0.61832285", "0.6181697", "0.6151094", "0.6122066", "0.6099998" ]
0.85253215
0
Lists all files below the given folder that match the pattern.
def _list_files(folder, pattern):
    for root, folders, files in os.walk(folder):
        for filename in files:
            if fnmatch.fnmatch(filename, pattern):
                yield os.path.join(root, filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_and_filter(self, pattern, root_path):\n for path, dirs, files in os.walk(os.path.abspath(root_path)):\n for filename in fnmatch.filter(files, pattern):\n yield os.path.join(path, filename)", "def find(pattern):\n files = config.index.files(path_glob=\"*%s*\" % pattern)\n print_files(files)", "def listfiles(pattern):\n pattern = os.path.normpath(pattern)\n first_wildcard = re.search(\"{[^{]\", pattern)\n if first_wildcard:\n dirname = os.path.dirname(pattern[:first_wildcard.start()])\n if not dirname:\n dirname = \".\"\n else:\n dirname = os.path.dirname(pattern)\n pattern = re.compile(snakemake.io.regex(pattern))\n for dirpath, dirnames, filenames in os.walk(dirname, followlinks=True):\n for f in itertools.chain(filenames, dirnames):\n if dirpath != \".\":\n f = os.path.normpath(os.path.join(dirpath, f))\n match = re.match(pattern, f)\n if match:\n wildcards = snakemake.io.Namedlist(fromdict=match.groupdict())\n yield f, wildcards", "def filesInDir(self, path=None, pattern=None):\n if path is None:\n path = self.myDir\n if os.path.isfile(path):\n fileList = [path]\n else:\n fileList = os.listdir(path)\n if pattern is None:\n return fileList\n results = []\n for fileName in fileList:\n if pattern in fileName:\n results.append(fileName)\n return results", "def find_files(directory, pattern):\n file_list = []\n for root, _, files in os.walk(directory):\n for basename in files:\n if fnmatch.fnmatch(basename, pattern):\n filename = os.path.join(root, basename)\n file_list.append(filename)\n \n return file_list", "def list_files(root_path, filename_pattern):\n\tfor root, dirs, files in os.walk(root_path):\n\t\tfor basename in files:\n\t\t\tif fnmatch.fnmatch(basename, filename_pattern):\n\t\t\t\tfilename = os.path.join(root, basename)\n\t\t\t\tyield filename", "def LocateFiles(pattern, root=os.curdir):\n for path, _, files in os.walk(os.path.abspath(root)):\n for filename in fnmatch.filter(files, pattern):\n yield os.path.join(path, filename)", "def list_files(base_dir, file_pattern):\n\n return sorted(glob(os.path.join(base_dir) + file_pattern))", "def list_files_in_subfolders(fpath, pattern=r\"*\"):\n return tuple(pathlib.Path(fpath).rglob(pattern))", "def allfiles(dir, pattern=\"*\"):\n\tdir = uniformpath(dir)\n\tif not os.path.isdir(dir): # must be file\n\t\treturn [dir]\n\tmatching_files = []\n\tfor root, subFolders, files in os.walk(dir):\n\t\tmatching = fnmatch.filter(files, pattern)\n\t\tmatching_files.extend(os.path.join(root, f) for f in matching)\n\treturn matching_files", "def find_files(directory, pattern='**/*.wav'):\n return glob(os.path.join(directory, pattern), recursive=True)", "def find(directory, slash='/', pattern=r'.+\\.out'):\n for directory, subdirectories, files in os.walk(directory):\n for file in files:\n if re.findall(pattern, str(file)):\n yield str(directory + slash + file)", "def glob(glob_pattern: str, directoryname: str) -> List[str]:\n matches = []\n for root, dirnames, filenames in os.walk(directoryname):\n for filename in fnmatch.filter(filenames, glob_pattern):\n absolute_filepath = os.path.join(root, filename)\n matches.append(absolute_filepath)\n return matches", "def locate(pattern, root=os.curdir):\n for path, dirs, files in os.walk(os.path.abspath(root)):\n for filename in fnmatch.filter(files, pattern):\n yield os.path.join(path, filename)", "def _files_in_subdir(self, subdir, pattern, regex):\n all_files = glob(join(subdir, (pattern or '**')), recursive=True)\n all_files = [fp for fp in all_files if isfile(fp)]\n\n if pattern and regex:\n 
raise ValueError(\"Specify pattern OR regex, not both!\")\n elif pattern:\n files = [fn for fn in glob(join(subdir, pattern), recursive=True)]\n elif regex:\n files = [fn for fn in all_files if re.search(regex, fn)]\n else:\n files = all_files\n\n return sorted(files)", "def _recursive_file_search(self, path, pattern):\n matches = []\n for root, dirnames, filenames in os.walk(path):\n for filename in fnmatch.filter(filenames, pattern):\n matches.append(os.path.join(root, filename))\n\n return matches", "def scan_folder(folder):\n LOGGER.debug(\"Scanning folder: %s\", folder)\n for file in os.listdir(folder):\n if file.endswith(\".csv\"):\n yield os.path.join(folder, file)", "def find_files(path, pattern):\n\n for root, dirnames, filenames in os.walk(path):\n for filename in fnmatch.filter(filenames, pattern):\n yield os.path.join(root, filename)", "def find_files(directory, pattern):\n try:\n for root, dirs, files in os.walk(directory, followlinks=True):\n for basename in files:\n if fnmatch.fnmatch(basename, pattern):\n filename = os.path.join(root, basename)\n yield filename\n except Exception as e:\n sys.stderr.write(e.message)\n exit(1)", "def file_parser(folder, pattern = '.json'):\n path = os.getcwd()\n path_to_folder = os.path.join(path, folder)\n files = []\n jsons = []\n # r=root, d=directories, f = files\n for r, d, f in os.walk(path_to_folder):\n for file in f:\n if pattern in file:\n files.append(os.path.join(r, file))\n jsons.append(file)\n\n return((files,jsons))", "def getFiles(folderToProcess,filter):\n\n print(f\"Parsing {folderToProcess} for {filter} files\")\n\n if debug:\n for path in Path(folderToProcess).rglob(filter):\n print(f\"Found {path}\")\n\n all_files = [str(x) for x in Path(folderToProcess).rglob(filter)] \n\n return all_files", "def find(cls, searched_dir, pattern):\n Log.debug('find {0} with pattern: {1}'.format(searched_dir, pattern))\n matched_files = []\n for root_dir, dir_names, file_names in os.walk(searched_dir,\n followlinks=False):\n for file_name in file_names:\n if fnmatch.fnmatch(file_name, pattern):\n file_path = os.path.join(root_dir, file_name)\n if not os.path.islink(file_path):\n matched_files.append(file_path)\n matched_files.sort()\n return matched_files", "def _listdir(folder):\n\tfilePattern = r\"^\\d{4}\\-(0?[1-9]|1[012])\\-(0?[1-9]|[12][0-9]|3[01])\\-clipping\\-[\\d]*\\.json$\"\n\tfilenames = [f for f in os.listdir(folder) if re.match(filePattern, f)]\n\treturn filenames", "def find_files(path: str, filename_pattern: str, sort: bool = True) -> list:\n files = list()\n for root, _, filenames in os.walk(path):\n for filename in fnmatch.filter(filenames, filename_pattern):\n files.append(os.path.join(root, filename))\n if sort:\n files.sort()\n return files", "def glob1(self, dirname, pattern):\n names = self.listdir(dirname)\n if pattern[0] != '.':\n names = filter(lambda x: x[0] != '.',names)\n return fnmatch.filter(names, pattern)", "def iglob_recursive(directory, file_pattern):\n for root, dir_names, file_names in os.walk(directory, followlinks=True):\n files = fnmatch.filter(file_names, file_pattern)\n for filename in files:\n yield os.path.join(root, filename)", "def recursive_glob(path, pattern):\n for root, dirnames, filenames in os.walk(path, followlinks=True):\n for filename in fnmatch.filter(filenames, pattern):\n yield os.path.join(root, filename)", "def _find_files(directory: str, pattern: str) -> Iterator[str]:\n for root, dirs, files in os.walk(directory, topdown=True):\n dirs[:] = [d for d in dirs if _is_file_valid(d)]\n 
for basename in sorted(files):\n if _is_file_valid(basename) and fnmatch.fnmatch(basename, pattern):\n filename = os.path.join(root, basename)\n yield filename", "def listDir(dirPath, regexPattern):\n dirList = os.listdir(dirPath)\n return [d for d in dirList if re.search(regexPattern, d)]", "def _get_files(self, path, pattern):\n files = [\n os.path.join(path, file)\n for file in glob.glob(os.path.join(path, pattern))\n ]\n return files" ]
[ "0.77849257", "0.7500726", "0.74775934", "0.7411119", "0.74076384", "0.7355792", "0.7233464", "0.71732324", "0.71499306", "0.71378464", "0.71049553", "0.70946896", "0.6977931", "0.69686073", "0.6965376", "0.6960672", "0.69308126", "0.69025075", "0.68708056", "0.6868468", "0.67896545", "0.6786529", "0.67556304", "0.6754441", "0.6731101", "0.6715682", "0.67130303", "0.6711247", "0.67006963", "0.6662974" ]
0.81099135
1
Returns a list of files changed for this pull request / push. If running on a public CI like Travis or Circle, this is used to only run tests/lint for changed files.
def _get_changed_files():
    if not ci_diff_helper:
        return None

    try:
        config = ci_diff_helper.get_config()
    except OSError:  # Not on CI.
        return None

    changed_files = ci_diff_helper.get_changed_files('HEAD', config.base)
    changed_files = set([
        './{}'.format(filename) for filename in changed_files])

    return changed_files
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_changed_files():\n upstream = \"origin/master\"\n local_commit = subprocess.check_output(\n \"git rev-list HEAD ^{} -- 2>/dev/null | tail -1\".format(upstream),\n shell=True).strip().decode()\n diff_base = subprocess.check_output(\n ['git', 'rev-parse', local_commit +\n '^']).strip().decode() if local_commit else \"HEAD\"\n files = subprocess.check_output(['git', 'diff', '--name-only',\n diff_base]).strip().decode().split('\\n')\n\n repo = subprocess.check_output(['git', 'rev-parse',\n '--show-toplevel']).strip().decode()\n # add prefixes so that all and targets can be specified relative to FUCHSIA_DIR\n if repo.endswith('topaz'):\n files = [os.path.join('topaz', p) for p in files]\n elif repo.endswith('third_party/go'):\n files = [os.path.join('third_party/go', p) for p in files]\n\n return files", "def detect_changed_files(self) -> list[Path]:\n repos = [(self.open_repo(), self.git_directory)]\n # Check server and api dirs too\n # Normally these are ignored but we need to check these\n if (server_repo_path := Path(self.git_directory, \"Paper-Server\")).exists():\n repos.append((pygit2.Repository(str(server_repo_path)), server_repo_path))\n if (api_repo_path := Path(self.git_directory, \"Paper-API\")).exists():\n repos.append((pygit2.Repository(str(api_repo_path)), api_repo_path))\n changed = []\n for repo, repo_path in repos:\n changed.extend(p.relative_to(self.git_directory) for p in detect_changed_files(repo, repo_path))\n changed.sort()\n return changed", "def get_files_changed():\n files_list = []\n test = os.popen('git show --name-only')\n repo_location = os.popen('git rev-parse --show-toplevel')\n repo_location = repo_location.readlines()\n repo_location = repo_location[0]\n repo_location = repo_location.replace('\\n', '')\n if \"Not a git repository\" in repo_location:\n files_list.append(\"Not a git repository\")\n return files_list\n files_list.append(repo_location.split('/')[-1])\n output = test.readlines()\n for a in range(6, len(output)):\n files_list.append(output[a].replace('\\n', ''))\n return files_list", "def all_changed_files(self):\n return [path_to_file_type(os.path.join(self.path, p)) for p in self.changed_paths() if p]", "def get_modified_files(repo, args):\n commit = repo.commit(args.commit)\n return commit.stats.files", "def ListChangedFiles(self, request, global_params=None):\n config = self.GetMethodConfig('ListChangedFiles')\n return self._RunMethod(\n config, request, global_params=global_params)", "def files_changed(revish: Text,\n ignore_rules: Optional[Sequence[Text]] = None,\n include_uncommitted: bool = False,\n include_new: bool = False\n ) -> Tuple[List[Text], List[Text]]:\n files = repo_files_changed(revish,\n include_uncommitted=include_uncommitted,\n include_new=include_new)\n if not files:\n return [], []\n\n return exclude_ignored(files, ignore_rules)", "def get_changed_files(path_to_repository, ignore_subrepositories):\n diff = _get_diff_to_last_commit(path_to_repository, ignore_subrepositories)\n return [item.b_path for item in diff if item.change_type in _CHANGE_TYPES_CONSIDERED_FOR_PRECOMMIT]", "def _findChangedFiles(self):\n changedFiles = []\n # calculate and update checksums always for ALL files\n for observedFile in self.observedFiles:\n if os.path.isfile(observedFile.filePath):\n currentChecksum = checksumFile(observedFile.filePath)\n else:\n currentChecksum = None\n # different values with None value checking\n if ((observedFile.lastChecksum is None\n and currentChecksum is not None)\n or observedFile.lastChecksum != 
currentChecksum):\n changedFiles.append(observedFile) # notify change\n observedFile.lastChecksum = currentChecksum # update checksum\n\n return changedFiles", "def get_affected_files(allow_limited=True):\n diff_base = None\n if in_travis():\n # In the case of a pull request into a branch, we want to\n # diff against HEAD in that branch.\n if in_travis_pr():\n diff_base = travis_branch()\n else:\n diff_base = local_diff_branch()\n\n if diff_base is not None and allow_limited:\n result = subprocess.check_output(['git', 'diff', '--name-only',\n diff_base])\n print('Using files changed relative to %s:' % (diff_base,))\n print('-' * 60)\n print(result.rstrip('\\n')) # Don't print trailing newlines.\n print('-' * 60)\n else:\n print('Diff base not specified, listing all files in repository.')\n result = subprocess.check_output(['git', 'ls-files'])\n\n # Only return filenames that exist. For example, 'git diff --name-only'\n # could spit out deleted / renamed files. Another alternative could\n # be to use 'git diff --name-status' and filter out files with a\n # status of 'D'.\n filenames = [filename\n for filename in result.rstrip('\\n').split('\\n')\n if os.path.exists(filename)]\n return filenames, diff_base", "def files(self):\n return self._changeset.get('files', [])", "def file_changes(self):\n new = []\n changed = []\n deleted = []\n parent = self.parent_tag\n # Loop through the files and find the ones that have changed\n for relative_path, file_dict in self.checksum[\"files\"].items():\n if relative_path not in parent[\"files\"]:\n new.append(relative_path)\n elif file_dict[\"checksum\"] != parent[\"files\"][relative_path][\"checksum\"]:\n changed.append(relative_path)\n # Loop through the parent files and see which files have been deleted\n for relative_path in parent[\"files\"].keys():\n if relative_path not in self.checksum[\"files\"]:\n deleted.append(relative_path)\n return {\"new\": new, \"changed\": changed, \"deleted\": deleted}", "def _get_file_changes(\n self, pull_request_number: int\n ) -> Union[Tuple[List[Tuple[str, int, int, int]], int], None]:\n files = get_pull_request_files(\n self._repo_name, pull_request_number, self._auth)\n if not files:\n return None\n files_changes = []\n num_line_changes = 0\n for file in files:\n file_name = file['filename']\n num_additions = file['additions']\n num_deletions = file['deletions']\n num_changes = file['changes']\n num_line_changes += num_changes\n files_changes.append((file_name, num_additions, num_deletions,\n num_changes))\n return files_changes, num_line_changes", "def get_list_of_comitted_files():\n files = []\n output = []\n try:\n output = subprocess.check_output(['git','diff-index', '--name-status', '--cached','HEAD']\n ).decode(\"utf-8\")\n except subprocess.CalledProcessError:\n print(\"Error diff files get: trace %s\" % subprocess.CalledProcessError)\n return files\n\n for result in output.split(\"\\n\"):\n logging.info(result)\n if result != '':\n match = modified.match(result)\n if match:\n files.append(match.group('name'))\n\n return files", "def untracked_files():\n res = run(\n \"cd %s ; git status\" % (SOURCE_ABSOLUTE),\n stdout=PIPE, stderr=PIPE,\n universal_newlines=True,\n shell=True\n )\n result = [line.strip() for line in res.stdout.split(\"\\n\")]\n\n files = [file\n for file in result if (file.endswith(\".txt\")\n and not (file.startswith(\"new file\") or\n file.startswith(\"deleted\") or file.startswith(\"modified\")))]\n\n return files", "def _on_watch_changes(self, *changes):\n self.dirty = 
self._git.is_dirty()\n if self._watcher:\n for change in self._watcher.changes:\n for tracker in self._trackers:\n tracked_path = Path(self._git.working_dir) / change[\"path\"]\n if tracker.path.resolve() == tracked_path.resolve():\n tracker._on_file_change(None)\n return [\n dict(a_path=diff.a_path, b_path=diff.b_path, change_type=diff.change_type)\n for diff in self._git.index.diff(None)\n ] + [\n dict(a_path=None, b_path=ut, change_type=\"U\")\n for ut in self._git.untracked_files\n ]", "def _git_diff_files(ref=\"master\"):\n result = []\n command = [\"git\", \"diff\", \"--name-status\", \"%s\" % (ref)]\n exit_code, output = _execute(command)\n if exit_code != 0:\n print(\"Failed to diff files.\")\n sys.exit(1)\n\n for line in output.decode(\"utf-8\").splitlines():\n parts = line.split(\"\\t\")\n action = parts[0]\n name = parts[-1]\n action = action.lower()\n result.append((action, name))\n\n return result", "def change_files(self):\n change_files = []\n change_files_url = self._url + '/files' + OAUTH_TOKEN\n change_files_data = json.load(urllib2.urlopen(change_files_url))\n for item in change_files_data:\n change_files.append(item['filename'])\n return change_files", "def retrieve_modified_files(self):\n result = [(diff_obj.a_path, diff_obj.b_path)\n for diff_obj in self.repo.index.diff(None)]\n\n return result", "def get_changed_paths(*args, globs=None):\n if globs:\n args = list(args) + [\"--\", *globs]\n diff_output = git(\"diff\", \"--name-only\", *args)\n\n return set([line.strip() for line in diff_output.splitlines()])", "def changed_files(self, base=None, remote=None, single_commit=None):\n if single_commit:\n cmd = ['git', 'diff', '{}^!'.format(single_commit), '--name-only']\n elif base and remote:\n if base == 'WORKING':\n cmd = ['git', 'diff', remote, '--name-only']\n elif base == 'INDEX':\n cmd = ['git', 'diff', '--staged', remote, '--name-only']\n else:\n cmd = ['git', 'diff', base, remote, '--name-only']\n else:\n raise HTTPError(400, 'Either single_commit or (base and remote) must be provided')\n\n \n response = {}\n try:\n stdout = subprocess.check_output(\n cmd, \n cwd=self.root_dir,\n stderr=subprocess.STDOUT\n )\n response['files'] = stdout.decode('utf-8').strip().split('\\n')\n response['code'] = 0\n except CalledProcessError as e:\n response['message'] = e.output.decode('utf-8')\n response['code'] = e.returncode\n\n return response", "def get_relevant_files(self):\n relevant_files = []\n\n if self.tree_cache is None:\n return relevant_files\n\n tree = json.loads(self.tree_cache)\n if \"commit\" in tree:\n commit_data = tree[\"commit\"]\n master_commit_files = commit_data[\"files\"]\n\n for patched_files in master_commit_files:\n relevant_file_path = \"./\" + patched_files[\"path\"]\n relevant_files.append(relevant_file_path)\n\n return relevant_files", "def get_changed_files_from(old_commit_sha, new_commit_sha):\n return check_output(\n \"git diff-tree --no-commit-id --name-only -r {0}..{1}\".format(\n old_commit_sha,\n new_commit_sha\n ).split(\" \")\n ).decode('utf-8').strip()", "def modified_workload_files():\n try:\n # Returns the names of files in src/workloads/ that have been added/modified/renamed since the common ancestor of HEAD and origin/master\n out = subprocess.check_output(\n 'git diff --name-only --diff-filter=AMR $(git merge-base HEAD origin/master) -- src/workloads/', shell=True)\n except subprocess.CalledProcessError as e:\n print(e.output, file=sys.stderr)\n raise e\n\n if out.decode() == '':\n return []\n\n # Make paths relative to workloads/ 
e.g. src/workloads/scale/NewTask.yml --> scale/NewTask.yml\n short_filenames = [f.split('workloads/', 1)[1] for f in out.decode().strip().split('\\n')]\n short_filenames = list(filter(lambda x: x.endswith('.yml'), short_filenames))\n return short_filenames", "def get_changed(self):\n ret = []\n def list_callback(status, path):\n ret.append( (status, path) )\n self._walk_tree(ChangedEditor, pass_root=1, callback=list_callback)\n return ret", "def git_ls_files():\n\tproc = subprocess.Popen(\n\t\t['git', 'ls-files'],\n\t\tstdin=subprocess.DEVNULL,\n\t\tstdout=subprocess.PIPE,\n\t\tstderr=None\n\t)\n\t(stdout, stderr) = proc.communicate()\n\tif proc.returncode != 0:\n\t\traise OSError(\"Cannot list version-controlled files\")\n\tfilenames = stdout.decode().split()\n\treturn list(filter(is_regular_file, filenames))", "def getChangeSources():", "def get_items_changed(self, base_ref='HEAD'):\n command = ['diff-index', '--name-only',\n '--cached', base_ref]\n res = self.run(command)\n items = res.split('\\n') if res else []\n return items", "def filter_changed_files(changed_files, path_to_repository, file_encoding):\n filtered_files = []\n for changed_file in changed_files:\n file_is_valid = True\n if (os.path.isdir(os.path.join(path_to_repository, changed_file))):\n #ignore directories (e.g., git submodule folders)\n continue\n if os.path.getsize(os.path.join(path_to_repository, changed_file)) > 1 * 1024 * 1024:\n print('File too large for precommit analysis. Ignoring: %s' % changed_file)\n file_is_valid = False\n\n try:\n with open(os.path.join(path_to_repository, changed_file), encoding=file_encoding) as file:\n file.read()\n except UnicodeDecodeError:\n encoding_string = file_encoding\n if encoding_string is None:\n encoding_string = locale.getpreferredencoding() + ' (system encoding)'\n\n print(\n 'File at %s is not encoded in %s. Try using the --file-encoding option.' % (\n changed_file, encoding_string))\n file_is_valid = False\n\n if file_is_valid:\n filtered_files.append(changed_file)\n\n return filtered_files", "def changed_lines(self):\n return self._depot_tools_affected_file.ChangedContents()" ]
[ "0.76511127", "0.7632489", "0.73521656", "0.71647716", "0.70889413", "0.70419586", "0.7040667", "0.6961542", "0.69610536", "0.67889297", "0.66707456", "0.66461307", "0.6640469", "0.6635299", "0.651519", "0.6512538", "0.65028757", "0.64318836", "0.6417526", "0.6387403", "0.63820535", "0.637769", "0.6337656", "0.6268486", "0.6263876", "0.62579423", "0.62254936", "0.6137891", "0.6073341", "0.60500497" ]
0.78147644
0
Filters the list of sample directories to only include directories that contain files in the list of changed files.
def _filter_samples(sample_dirs, changed_files):
    result = []
    for sample_dir in sample_dirs:
        for changed_file in changed_files:
            if changed_file.startswith(sample_dir):
                result.append(sample_dir)

    return list(set(result))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_filter_files(self):\n expected = [\n (\"/subdir1/fichier1\", False),\n (\"/subdir1/fichier4\", False),\n (\"/subdir1/subsubdir1\", False),\n ]\n files = [\n (\"/subdir1/fichier1\", False),\n (\"/subdir2/fichier2\", False),\n (\"/subdir2/fichier3\", False),\n (\"/subdir1/fichier4\", False),\n (\"/subdir1/subsubdir1/fichier1\", False),\n (\"/subdir1/subsubdir1/\", False),\n ]\n self.assertEqual(\n list(self.path_translator.filter_files(files, \"/subdir1\")),\n expected)", "def get_list_of_copied_sample_files(self, repo, changeset_revision, dir):\n deleted_sample_files = []\n sample_files = []\n for changeset in hg_util.reversed_upper_bounded_changelog(repo, changeset_revision):\n changeset_ctx = repo.changectx(changeset)\n for ctx_file in changeset_ctx.files():\n ctx_file_name = basic_util.strip_path(ctx_file)\n # If we decide in the future that files deleted later in the changelog should\n # not be used, we can use the following if statement. if ctx_file_name.endswith( '.sample' )\n # and ctx_file_name not in sample_files and ctx_file_name not in deleted_sample_files:\n if ctx_file_name.endswith('.sample') and ctx_file_name not in sample_files:\n fctx = hg_util.get_file_context_from_ctx(changeset_ctx, ctx_file)\n if fctx in ['DELETED']:\n # Since the possibly future used if statement above is commented out, the\n # same file that was initially added will be discovered in an earlier changeset\n # in the change log and fall through to the else block below. In other words,\n # if a file named blast2go.loc.sample was added in change set 0 and then deleted\n # in changeset 3, the deleted file in changeset 3 will be handled here, but the\n # later discovered file in changeset 0 will be handled in the else block below.\n # In this way, the file contents will always be found for future tools even though\n # the file was deleted.\n if ctx_file_name not in deleted_sample_files:\n deleted_sample_files.append(ctx_file_name)\n else:\n sample_files.append(ctx_file_name)\n tmp_ctx_file_name = os.path.join(dir, ctx_file_name.replace('.sample', ''))\n fh = open(tmp_ctx_file_name, 'wb')\n fh.write(fctx.data())\n fh.close()\n return sample_files, deleted_sample_files", "def _collect_dirs(\n start_dir,\n blacklist=set(['conftest.py', 'noxfile.py', 'lib', 'third_party']),\n suffix='_test.py',\n recurse_further=False):\n # Collect all the directories that have tests in them.\n for parent, subdirs, files in os.walk(start_dir):\n if './.' in parent:\n continue # Skip top-level dotfiles\n elif any(\n f for f in files if f.endswith(suffix) and f not in blacklist\n ):\n # Don't recurse further for tests, since py.test will do that.\n if not recurse_further:\n del subdirs[:]\n # This dir has desired files in it. 
yield it.\n yield parent\n else:\n # Filter out dirs we don't want to recurse into\n subdirs[:] = [\n s for s in subdirs\n if s[0].isalpha() and\n s not in blacklist]", "def files_in( d ):\n return [ join(d,f) for f in os.listdir(d) if isfile(join(d,f)) ]", "def setupSampleDirectories(self):\n self.allSamplesDir = \"%s/%s_all-samples\" % (os.getcwd(),\n self.project2Id)\n if not os.path.exists(self.allSamplesDir):\n os.mkdir(self.allSamplesDir)\n for sampleId,iSample in self.dSamples.items():\n dirSample = \"%s/%s\" % (self.allSamplesDir, sampleId)\n iSample.dir = dirSample\n if not os.path.exists(dirSample):\n os.mkdir(dirSample)\n if self.verbose > 0:\n msg = \"sample directories: %s\" % self.allSamplesDir\n print(msg); sys.stdout.flush()", "def get_files(self, include=[], exclude=[]):\r\n for (basepath, dpaths, fpaths) in os.walk(self.path, topdown=True):\r\n for subpath in dpaths + fpaths:\r\n path = os.path.join(self.chroot_path(basepath), subpath)\r\n if filter_path(path, include, exclude):\r\n yield path", "def include_dirs(self):", "def collect_files(path, audio_files):\n\n for entry in os.scandir(path):\n if entry.is_dir():\n collect_files(entry.path, audio_files)\n if entry.is_file() and (entry.path.endswith(\".flac\") or entry.path.endswith(\".wav\")):\n audio_files.append(entry.path)", "def get_dirs_prefix(wdir, prefix, excludes=None, Lshow=True, Ldir=True):\n matched_dirs=[]\n for fname in os.listdir(wdir):\n # re.match finds only prefix\n if os.path.isdir(fname) and re.match(prefix, fname):\n if excludes:\n tag=False\n for ex in excludes:\n if re.search(ex, fname):\n tag=True\n break\n if not tag :\n matched_dirs.append(fname)\n print (fname)\n else:\n matched_dirs.append(fname)\n print (fname)\n return matched_dirs", "def __get_list_of_interm_dirs(self, dirs):\n for name in os.listdir(self.path):\n if _DIRECTORY_REGEXP.match(name):\n dirs.append(name)", "def cmp_directories(self, dir_1='./', dir_2='./'):\n dirs_cmp = filecmp.dircmp(dir_1, dir_2)\n list_dirs_json = dict()\n path_in = self.make_path_in(dir_1, dir_2)\n\n equal_files_json = self.equal_files_to_json(\n dirs_cmp.same_files,\n dir_1,\n dir_2\n )\n\n diff_files_json = self.diff_files_to_json(\n dirs_cmp.diff_files,\n dir_1,\n dir_2\n )\n only_in_one_json = self.only_in_one_to_json(\n dir_1,\n dirs_cmp.left_only,\n dir_2,\n dirs_cmp.right_only\n )\n common_dirs_json = self.common_dirs_to_json(\n dirs_cmp.common_dirs,\n dir_1,\n dir_2\n )\n\n all_lists_json = json.loads(\n json.dumps(\n list(\n equal_files_json +\n diff_files_json +\n only_in_one_json +\n common_dirs_json\n ),\n sort_keys=True))\n if dirs_cmp.common_dirs:\n list_dirs_json = self.internal_directories_json(\n dir_1,\n dir_2,\n dirs_cmp.common_dirs\n )\n list_dirs_json.update(\n dict({path_in: self.directory_to_json(path_in, all_lists_json)})\n )\n\n return list_dirs_json", "def filter_files(self, path):\n excludes = r'|'.join([fnmatch.translate(x) for x in self.project.EXCLUDES]) or r'$.'\n for root, dirs, files in os.walk(path, topdown=True):\n dirs[:] = [d for d in dirs if not re.match(excludes, d)]\n dirs[:] = [os.path.join(root, d) for d in dirs]\n rel_path = os.path.relpath(root, path)\n\n paths = []\n for f in files:\n if rel_path == '.':\n file_path = f\n else:\n file_path = os.path.join(rel_path, f)\n if not re.match(excludes, file_path):\n paths.append(f)\n\n files[:] = paths\n yield root, dirs, files", "def touch_files_dependent_on_changes(kymera_path, dirs, suffixes, changes):\n for dir in dirs:\n if dir[0] != '/':\n # This is a relative path 
to kymera root\n dir = kymera_path + dir\n if not os.path.exists(dir):\n print \"Directory %s included in ALL_SRCDIRS, ALL_INCDIRS or CFG_LIBS doesn't exist, continuing...\" % dir\n else:\n for file_name in os.listdir(dir):\n full_file_path= os.path.join(dir, file_name)\n # Filter a list of filenames down to those with one of the given suffixes\"\n if matching_file(suffixes, full_file_path):\n # Find all the files from a set with one of a list of suffices\n # containing one of the changed definitions\n if grep_words(changes, full_file_path):\n print \"Mark file for rebuild:\", full_file_path\n touch_file(full_file_path)", "def test_GetFilesInDirectory_exclude_subdir(tempdir: pathlib.Path):\n # Create files: [ a, foo, sub/foo ]\n (tempdir / \"a\").touch()\n (tempdir / \"foo\").touch()\n (tempdir / \"sub\").mkdir()\n (tempdir / \"sub\" / \"foo\").touch()\n (tempdir / \"sub\" / \"sub\").mkdir()\n (tempdir / \"sub\" / \"sub\" / \"foo\").touch()\n assert set(dpack.GetFilesInDirectory(tempdir, [\"sub/foo\"])) == {\n pathlib.Path(\"a\"),\n pathlib.Path(\"foo\"),\n pathlib.Path(\"sub/sub/foo\"),\n }\n assert set(dpack.GetFilesInDirectory(tempdir, [\"*/foo\"])) == {\n pathlib.Path(\"a\"),\n pathlib.Path(\"foo\"),\n }\n assert set(dpack.GetFilesInDirectory(tempdir, [\"*/foo*\"])) == {\n pathlib.Path(\"a\"),\n pathlib.Path(\"foo\"),\n }", "def get_update_file_list(directory):\n update_files_list = set(UPDATE_FILES_STATIC)\n update_files_exclude = set(UPDATE_FILES_EXCLUDE)\n\n for root, dirs, files in os.walk(path.join(PATH_ROOT, directory)):\n for filen in files:\n if UPDATE_FILES_RE.match(filen):\n filep = path.join(root, filen)\n update_files_list.add(path.relpath(filep, PATH_ROOT))\n \n return update_files_list - update_files_exclude", "def copy_disk_sample_files_to_dir(self, repo_files_dir, dest_path):\n sample_files = []\n for root, dirs, files in os.walk(repo_files_dir):\n if root.find('.hg') < 0:\n for name in files:\n if name.endswith('.sample'):\n relative_path = os.path.join(root, name)\n tool_util.copy_sample_file(self.app, relative_path, dest_path=dest_path)\n sample_files.append(name)\n return sample_files", "def set_in_files():\r\n\tindatadir = '/nobackup/ejblom/reddit'\r\n\tcom_dir = '/comments'\r\n\tsubm_dir = '/submissions'\r\n\tglob_end = '/filtered*'\r\n\tcom_glob_str = indatadir + com_dir + glob_end\r\n\tsubm_glob_str = indatadir + subm_dir + glob_end\r\n\tinfilenames = sorted(glob.glob(com_glob_str)) + sorted(glob.glob(subm_glob_str))\r\n\treturn infilenames", "def update_dirs(dirs):\n index = len(dirs) - 1\n for i, d in enumerate(reversed(dirs)):\n if d in dir_ignore:\n del dirs[index - i]", "def walk_files():\n\n # TODO: not check twice the same dir or file\n for path in config.targets:\n abs_path = os.path.join(cwd, path)\n\n if not os.path.islink(abs_path) and os.path.isfile(abs_path):\n walked.append(abs_path)\n yield abs_path\n #process_file(abs_path)\n\n if os.path.isdir(abs_path):\n walked.append(abs_path)\n for root, dirs, files in os.walk(abs_path):\n for fname in files:\n if isbackup(fname):\n continue\n abs_path = os.path.join(root, fname)\n walked.append(abs_path)\n if not os.path.islink(abs_path) and\\\n os.path.isfile(abs_path):\n base, name = os.path.split(abs_path)\n XXX, ext = os.path.splitext(name)\n\n ignored = False\n for pattern in IGNORE_FILES:\n if pattern.search(fname):\n ignored = True\n break\n\n # maybe should be merged with IGNORE_FILES?\n for regexp in config.exclude_list:\n if regexp.search(fname):\n ignored = True\n break\n\n if not ignored:\n for 
test_ext in config.disallow_exts:\n if test_ext == ext:\n ignored = True\n break\n\n if not ignored:\n if config.allow_exts:\n ignored = True\n for test_ext in config.allow_exts:\n if test_ext == ext:\n ignored = False\n break\n\n if not ignored:\n yield abs_path\n #process_file(abs_path)\n\n for dir in dirs[:]:\n if dir in IGNORE_DIRS:\n dirs.remove(dir)\n if dir in dirs:\n dirs.remove(dir)\n # mayb be should be merged with IGNORE_DIRS?\n else:\n for regexp in config.exclude_list:\n if regexp.search(dir):\n # This check is required\n # because several different patterns\n # could match one file name\n if dir in dirs:\n dirs.remove(dir)\n\n for dir in dirs:\n abs_path = os.path.join(root, dir)\n walked.append(abs_path)", "def test_GetFilesInDirectory_leaf_files(tempdir: pathlib.Path):\n # Start with one file.\n (tempdir / \"a\").touch()\n assert set(dpack.GetFilesInDirectory(tempdir, [])) == {pathlib.Path(\"a\")}\n # Add a second file.\n (tempdir / \"b\").touch()\n assert set(dpack.GetFilesInDirectory(tempdir, [])) == {\n pathlib.Path(\"a\"),\n pathlib.Path(\"b\"),\n }\n # Add a third file.\n (tempdir / \"c\").touch()\n assert set(dpack.GetFilesInDirectory(tempdir, [])) == {\n pathlib.Path(\"a\"),\n pathlib.Path(\"b\"),\n pathlib.Path(\"c\"),\n }", "def test_GetFilesInDirectory_exclude_by_name(tempdir: pathlib.Path):\n # Create files: [ a, foo, sub/foo ]\n (tempdir / \"a\").touch()\n (tempdir / \"foo\").touch()\n (tempdir / \"sub\").mkdir()\n (tempdir / \"sub\" / \"foo\").touch()\n # Exclude pattern 'foo' does not exclude subdir 'foo'.\n assert set(dpack.GetFilesInDirectory(tempdir, [\"foo\"])) == {\n pathlib.Path(\"a\"),\n pathlib.Path(\"sub/foo\"),\n }", "def util_build_file_list(dirname, IGNORE_CREGEX):\n outlist = []\n logging.info('Scanning directory: %s', dirname)\n try:\n with os.scandir(dirname) as filelist:\n filelist_filt = [a for a in filelist if a.is_file() and not any(list(map(lambda rg: True if rg.match(a.name) else False, IGNORE_CREGEX)))]\n outlist = [ {'dir': dirname, 'filename': a.name, 'ctime': a.stat().st_ctime, 'mtime': a.stat().st_mtime} for a in filelist_filt ]\n dirlist = [ a for a in filelist if a.is_dir() ]\n if len(dirlist) > 0:\n outlist.append(list(map(util_build_file_list, dirlist)))\n except FileNotFoundError:\n logging.error('Directory not found: %s' % dirname)\n pass\n except Exception as e:\n logging.error('Error due to %s' % e) \n logging.debug('Filelist generated for %s as %s' % (dirname, outlist))\n return outlist", "def test_only_files(self):\n dummy_folder = TestOspaListDir.get_dummy_folder()\n need_result = ['meme1.jpg',\n 'meme2.png',\n 'meme4.jpg',\n 'meme4.png',\n 'meme monty python',\n ]\n need_result_new = [os.path.join(dummy_folder, 'memes', x) for x in need_result[:-1]]\n result = listdir(os.path.join(dummy_folder, 'memes'), only_files=True)\n self.assertEqual(sorted(result), sorted(need_result_new))\n\n need_result_new = [os.path.join(dummy_folder, 'memes', x) for x in need_result]\n result = listdir(os.path.join(dummy_folder, 'memes'), only_files=False)\n self.assertEqual(sorted(result), sorted(need_result_new))", "def filter(self):\n self._printer('Standard Walk')\n count = Counter(length=3)\n for directory in self.directory:\n self._printer('Searching ' + directory)\n for root, directories, files in os.walk(directory, topdown=self.topdown):\n root = root[len(str(directory)) + 1:]\n self._printer(str(count.up) + \": Explored path - \" + str(root), stream=True)\n if self.filters.validate(root):\n # Check that non-empty folders flag is on and 
we're at the max directory level\n if self.filters.non_empty_folders and self.filters.get_level(root) == self.filters.max_level:\n # Check that the path is not an empty folder\n if os.path.isdir(directory + os.sep + root):\n # Get paths in folder without walking directory\n paths = os.listdir(directory + os.sep + root)\n\n # Check that any of the paths are files and not just directories\n if paths and any(os.path.isfile(os.path.join(directory, p)) for p in paths):\n self.add_path(directory, root)\n\n else:\n for filename in files:\n fullname = os.path.join(root, filename)\n if self.filters.validate(fullname):\n # Join the two strings in order to form the full filepath.\n self.add_path(directory, fullname)", "def tidyFileNames(folderToCheck):\n\n filters = list(map(lambda x: \"*.\" + x, expectedExts))\n\n for filter in filters:\n\n for f in getFiles(folderToCheck,filter):\n\n clean = f\n for search in searches:\n clean = replace(clean,search)\n\n if renameFile(f,clean):\n results = list(map(os.path.basename,[f,clean]))\n if results[0] != results[1]:\n print(f\"Renamed: {results[0]} -> {results[1]}\")", "def collect_files(files: types.FilesCollection) -> List[str]:\n paths = [conf.proj_path(p) for p in files.paths]\n\n if context.RunContext().get('verbose', 0) >= 3:\n log.info(\"<35>Files:\")\n log.info(\"only_staged: <33>{}\".format(files.only_staged))\n log.info(\"untracked: <33>{}\".format(files.untracked))\n log.info(\"whitelist: <33>\\n{}\".format('\\n'.join(files.whitelist())))\n log.info(\"blacklist: <33>\\n{}\".format('\\n'.join(files.blacklist())))\n\n if files.only_staged and files.include and not files.whitelist():\n # include will be empty if none of the staged files match include\n # and thus the final fs walk will pick up everything. We want\n # to preserve the include patterns defined in `pelconf.yaml`\n # so nothing is picked if none of the staged files match.\n return []\n\n return list(itertools.chain.from_iterable(\n filtered_walk(path, files.whitelist(), files.blacklist())\n for path in paths\n ))", "def test_list_directory(self):\n import os\n stat_f = lambda x: FakeStat(33188, 16398844, 65024L, 1, 1049, 1049, 0,\n 1409046988, 1409046988, 1409046988)\n os.stat = stat_f\n os.lstat = stat_f\n expected = [\"subdir1\", \"subdir2\"]\n result = [r[0] for r in self.path_translator.list_directory(\n \"/{0}/search1/tmp/study\".format(self.search.instance))]\n self.assertEqual(result, expected)", "def affected_testfiles(files_changed: Iterable[Text],\n skip_dirs: Optional[Set[Text]] = None,\n manifest_path: Optional[Text] = None,\n manifest_update: bool = True\n ) -> Tuple[Set[Text], Set[Text]]:\n if skip_dirs is None:\n skip_dirs = {\"conformance-checkers\", \"docs\", \"tools\"}\n affected_testfiles = set()\n # Exclude files that are in the repo root, because\n # they are not part of any test.\n files_changed = [f for f in files_changed if not _in_repo_root(f)]\n nontests_changed = set(files_changed)\n wpt_manifest = load_manifest(manifest_path, manifest_update)\n\n test_types = [\"crashtest\", \"print-reftest\", \"reftest\", \"testharness\", \"wdspec\"]\n support_files = {os.path.join(wpt_root, path)\n for _, path, _ in wpt_manifest.itertypes(\"support\")}\n wdspec_test_files = {os.path.join(wpt_root, path)\n for _, path, _ in wpt_manifest.itertypes(\"wdspec\")}\n test_files = {os.path.join(wpt_root, path)\n for _, path, _ in wpt_manifest.itertypes(*test_types)}\n\n interface_dir = os.path.join(wpt_root, 'interfaces')\n interfaces_files = {os.path.join(wpt_root, 'interfaces', 
filename)\n for filename in os.listdir(interface_dir)}\n\n interfaces_changed = interfaces_files.intersection(nontests_changed)\n nontests_changed = nontests_changed.intersection(support_files)\n\n tests_changed = {item for item in files_changed if item in test_files}\n\n nontest_changed_paths = set()\n rewrites: Dict[Text, Text] = {\"/resources/webidl2/lib/webidl2.js\": \"/resources/WebIDLParser.js\"}\n for full_path in nontests_changed:\n rel_path = os.path.relpath(full_path, wpt_root)\n path_components = rel_path.split(os.sep)\n top_level_subdir = path_components[0]\n if top_level_subdir in skip_dirs:\n continue\n repo_path = \"/\" + os.path.relpath(full_path, wpt_root).replace(os.path.sep, \"/\")\n if repo_path in rewrites:\n repo_path = rewrites[repo_path]\n full_path = os.path.join(wpt_root, repo_path[1:].replace(\"/\", os.path.sep))\n nontest_changed_paths.add((full_path, repo_path))\n\n interfaces_changed_names = [os.path.splitext(os.path.basename(interface))[0]\n for interface in interfaces_changed]\n\n def affected_by_wdspec(test: Text) -> bool:\n affected = False\n if test in wdspec_test_files:\n for support_full_path, _ in nontest_changed_paths:\n # parent of support file or of \"support\" directory\n parent = os.path.dirname(support_full_path)\n if os.path.basename(parent) == \"support\":\n parent = os.path.dirname(parent)\n relpath = os.path.relpath(test, parent)\n if not relpath.startswith(os.pardir):\n # testfile is in subtree of support file\n affected = True\n break\n return affected\n\n def affected_by_interfaces(file_contents: Text) -> bool:\n if len(interfaces_changed_names) > 0:\n if 'idlharness.js' in file_contents:\n for interface in interfaces_changed_names:\n regex = '[\\'\"]' + interface + '(\\\\.idl)?[\\'\"]'\n if re.search(regex, file_contents):\n return True\n return False\n\n for root, dirs, fnames in os.walk(wpt_root):\n # Walk top_level_subdir looking for test files containing either the\n # relative filepath or absolute filepath to the changed files.\n if root == wpt_root:\n for dir_name in skip_dirs:\n dirs.remove(dir_name)\n for fname in fnames:\n test_full_path = os.path.join(root, fname)\n # Skip any file that's not a test file.\n if test_full_path not in test_files:\n continue\n if affected_by_wdspec(test_full_path):\n affected_testfiles.add(test_full_path)\n continue\n\n with open(test_full_path, \"rb\") as fh:\n raw_file_contents: bytes = fh.read()\n if raw_file_contents.startswith(b\"\\xfe\\xff\"):\n file_contents: Text = raw_file_contents.decode(\"utf-16be\", \"replace\")\n elif raw_file_contents.startswith(b\"\\xff\\xfe\"):\n file_contents = raw_file_contents.decode(\"utf-16le\", \"replace\")\n else:\n file_contents = raw_file_contents.decode(\"utf8\", \"replace\")\n for full_path, repo_path in nontest_changed_paths:\n rel_path = os.path.relpath(full_path, root).replace(os.path.sep, \"/\")\n if rel_path in file_contents or repo_path in file_contents or affected_by_interfaces(file_contents):\n affected_testfiles.add(test_full_path)\n continue\n\n return tests_changed, affected_testfiles", "def files_in_folder(self):\n non_til = set()\n filesInFolder = []\n for f in self.find_all_files():\n newstr = f.replace(\"~\", \"\") \n if newstr in self.find_all_files():\n non_til.add(newstr)\n for fs in non_til:\n filesInFolder.append(fs)\n return filesInFolder", "def _walk_dir(self, rootpath):\n assert os.path.isabs(rootpath)\n assert rootpath not in self._dirs\n relpath = self._get_rel_path(rootpath)\n self._dirs[relpath] = Directory(rootpath, relpath, 
None)\n for dirpath, dirnames, filenames in os.walk(rootpath):\n if 'refdata' in dirnames:\n dirnames.remove('refdata')\n currentdir = self._dirs[self._get_rel_path(dirpath)]\n # Loop through a copy so that we can modify dirnames.\n for dirname in list(dirnames):\n fullpath = os.path.join(dirpath, dirname)\n if fullpath == self._build_root:\n dirnames.remove(dirname)\n continue\n relpath = self._get_rel_path(fullpath)\n self._dirs[relpath] = Directory(fullpath, relpath, currentdir)\n extensions = ('.h', '.cuh', '.hpp', '.c', '.cc', '.cpp', '.cu', '.bm')\n for filename in filenames:\n basename, extension = os.path.splitext(filename)\n if extension in extensions:\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n self._files[relpath] = File(fullpath, relpath, currentdir)\n elif extension == '.cmakein':\n extension = os.path.splitext(basename)[1]\n if extension in extensions:\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n sourcefile = GeneratorSourceFile(fullpath, relpath, currentdir)\n self._files[relpath] = sourcefile\n fullpath = os.path.join(dirpath, basename)\n relpath = self._get_rel_path(fullpath)\n fullpath = os.path.join(self._build_root, relpath)\n generatedfile = GeneratedFile(fullpath, relpath, currentdir)\n self._files[relpath] = generatedfile\n generatedfile.set_generator_source(sourcefile)\n elif extension in ('.l', '.y', '.pre'):\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n self._files[relpath] = GeneratorSourceFile(fullpath, relpath, currentdir)" ]
[ "0.638365", "0.6379942", "0.6214398", "0.61015475", "0.6089934", "0.6049182", "0.5995398", "0.5965479", "0.5943096", "0.5874663", "0.58680815", "0.5832992", "0.5808962", "0.5801182", "0.57878786", "0.57864714", "0.5781348", "0.5780594", "0.57367", "0.57249534", "0.5714945", "0.5705886", "0.5697288", "0.5670727", "0.56646407", "0.5655098", "0.5653663", "0.56384605", "0.562038", "0.55971706" ]
0.80027604
0
Determines all import names that should be considered "local". This is used when running the linter to insure that import order is properly checked.
def _determine_local_import_names(start_dir): file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] return [ basename for basename, extension in file_ext_pairs if extension == '.py' or os.path.isdir( os.path.join(start_dir, basename)) and basename not in ('__pycache__')]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_import_local_methods(self):\n package_foo = determine_package(LocalClass().foo_method)\n package_bar = determine_package(LocalClass().bar_method)\n assert package_foo == package_bar", "def is_local(self) -> bool:\n if not self.source:\n return False\n\n if self.source.master_name.startswith(MODULE_NAME):\n return True\n\n if self.is_type_defs():\n return True\n\n return False", "def imports():\n for name, val in globals().items():\n if isinstance(val, getattr(types, \"ModuleType\")):\n yield val.__name__", "def redirectLocalImports(name, globals=None, *a, **kw):\n if globals is not None:\n mf = globals.get('__exocet_context__', None)\n if mf is not None:\n trace(\"isolated __import__ of\", name, \"called in exocet module\", mf, mf.mapper)\n return _isolateImports(mf, _originalImport, name, globals, *a, **kw)\n else:\n return _originalImport(name, globals, *a, **kw)\n else:\n return _originalImport(name, globals, *a, **kw)", "def detect_import(self):\n if self.contains_match(CONTAINS_IMPORT): self.es6import = True\n elif self.contains_match(CONTAINS_REQUIRE): self.es6import = False\n else: self.es6import = self.get_project_pref('detect_prefer_imports')", "def import_whitelist(str):\n pat = re.compile('[^\\s]*((from|import)\\s+)+(%s)+' % '|'.join(settings.IMPORT_WHITELIST))\n if pat.search(str):\n return True\n else:\n return False", "def import_packages_global():\n return \"\"", "def is_imported():\n return len(inspect.stack()) > 3", "def test_import_local_class(self):\n import_function(determine_package(LocalClass))\n assert f() == \"My name is f.\"", "def _check_imports():\n\n optlist = ['ALPSO', 'CONMIN', 'FSQP', 'IPOPT', 'NLPQLP',\n 'NSGA2', 'PSQP', 'SLSQP', 'SNOPT', 'NLPY_AUGLAG', 'NOMAD']\n\n for optimizer in optlist[:]:\n try:\n __import__('pyoptsparse', globals(), locals(), [optimizer], 0)\n except ImportError:\n optlist.remove(optimizer)\n\n return optlist", "def _test_import_local_class(self): # TODO\n module = determine_package(LocalClass)\n name = f.__name__\n\n function = {\"module\": module, \"name\": name}\n\n import_function(function)\n assert f() == \"My name is f.\"", "def importedNamespaces (self):\n return frozenset(self.__importedNamespaces)", "def getImportList():\n\timports = []\n\tfor line in vim.current.buffer:\n\t\twords = string.split(line)\n\t\tif (len(words)>0 and (words[0]=='import' or words[0]=='from')):\n\t\t\tif words[1] not in imports:\n\t\t\t\timports.append(words[1])\n\treturn imports", "def get_import_global_types(imports: Iterable[TExtern]) -> Tuple[GlobalType, ...]:\n return tuple(item for item in imports if isinstance(item, GlobalType))", "def is_import_completion(self):\n current_line = self.get_current_line()\n\n # Seperate cases! More difficult than I thought\n match = re.match(r\"(import)|(from)\", current_line)\n if match:\n word_before = self.get_word_before()\n if word_before == \"from\" or word_before == \"import\":\n # Need to check for multiple imports! 
(TODO)\n return True\n\n return False", "def _get_remote_import_ids(self, pkg_dir):\n sources = glob(os.path.join(pkg_dir, '*.go'))\n out = self.go_dist.create_go_cmd('list', args=['-json'] + sources).check_output()\n imports = json.loads(out).get('Imports', [])\n return [imp for imp in imports if imp not in self.go_stdlib]", "def is_import():\n return sync_mode in (SyncMode.IMPORT_LOCAL, SyncMode.IMPORT_REMOTE)", "def make_local_modules_constant():\n import inspect\n from types import FunctionType,ModuleType\n\n frame = inspect.currentframe(1)\n local_functions = []\n local_modules = {}\n for sym,value in frame.f_globals.iteritems():\n if isinstance(value,FunctionType) and value.func_globals is frame.f_globals:\n local_functions.append(value)\n elif isinstance(value,ModuleType):\n local_modules[sym] = value\n\n __mass_replace__(local_functions,local_modules)\n return", "def freeze_includes() -> List[str]:\n import _pytest\n\n result = list(_iter_all_modules(_pytest))\n return result", "def get_for_name(self, name: str, settings) -> ImportStrategy:\n if _is_dotted_path(name):\n module, type_name = name.rsplit(\".\", maxsplit=1)\n if type_name in self.local_types:\n local_module = self.local_types[type_name]\n if module == local_module:\n # `module == local_module` means an exact match in imports\n # i.e. from <package match> import <name match>\n return ImportStrategy.USE_EXISTING\n elif local_module is None:\n # `local_module is None` means local ClassDef\n # if there is a local ClassDef and type has dotted path then\n # maybe it was intended to disambiguate from the local cls?\n if settings.IMPORT_COLLISION_POLICY is ImportCollisionPolicy.IMPORT:\n # the name was maybe already in scope but it's safe\n # to add a specific import as well\n return ImportStrategy.ADD_DOTTED\n else:\n # TODO in theory we could probably calculate the absolute\n # import from filename + relative path, but it's awkward\n raise NameMatchesLocalClassError(module, type_name)\n elif local_module.startswith(\".\"):\n # Relative import: \"can't tell\"\n # we have a full path so we could add an import\n # but it may be duplicating something already imported\n if settings.IMPORT_COLLISION_POLICY is ImportCollisionPolicy.IMPORT:\n # the name was maybe already in scope but it's safe\n # to add a specific import as well\n return ImportStrategy.ADD_DOTTED\n else:\n # TODO in theory we could probably calculate the absolute\n # import from filename + relative path, but it's awkward\n raise NameMatchesRelativeImportError(module, type_name)\n else:\n # \"looks like different path\"\n return ImportStrategy.ADD_DOTTED\n else:\n # handle * imports? we could assume `name` is imported\n # if `from module import *` is present... BUT:\n # if `type_name.startswith(\"_\")` it would be exempt\n # and `__all__` could break both of these assumptions\n # So... 
we treat any matching * import as AMBIGUOUS\n if module in self.local_types.star_imports:\n if settings.IMPORT_COLLISION_POLICY is ImportCollisionPolicy.IMPORT:\n # the name was maybe already in scope but it's safe\n # to add a specific import as well\n return ImportStrategy.ADD_FROM\n else:\n raise ModuleHasStarImportError(module, type_name)\n elif module in self.local_types.type_defs:\n if settings.IMPORT_COLLISION_POLICY is ImportCollisionPolicy.IMPORT:\n # the name was maybe already in scope but it's safe\n # to add a specific import as well\n return ImportStrategy.ADD_FROM\n else:\n raise NameMatchesLocalClassError(module, name)\n elif module in self.local_types.package_imports:\n return ImportStrategy.USE_EXISTING_DOTTED\n elif module in self.local_types.names_to_packages:\n return ImportStrategy.USE_EXISTING_DOTTED\n else:\n return ImportStrategy.ADD_FROM\n else:\n if name == Types.ELLIPSIS:\n return ImportStrategy.USE_EXISTING\n elif name in self.local_types:\n return ImportStrategy.USE_EXISTING\n elif _is_builtin_type(name):\n return ImportStrategy.USE_EXISTING\n elif _is_typing_type(name):\n return ImportStrategy.ADD_FROM\n else:\n # there's no possibility to add an import, so no AUTO option\n raise NotFoundNoPathError(None, name)", "def ImportsTest(recipe, allowed_modules):\n\n for _, val in sorted(recipe.global_symbols.iteritems()):\n if isinstance(val, types.ModuleType):\n module_name = val.__name__\n for pattern in allowed_modules:\n if pattern.match(val.__name__):\n break\n else:\n yield ('In %s:\\n'\n ' Non-whitelisted import of %s' % (recipe.path, module_name))", "def test_imports_on_global_namespace_without_path(Script):\n completions = Script(\"import operator\").completions()\n assert [c.name for c in completions] == ['operator']\n completions = Script(\"import operator\", path='example.py').completions()\n assert [c.name for c in completions] == ['operator']\n\n # the first one has a path the second doesn't\n completions = Script(\"import keyword\", path='example.py').completions()\n assert [c.name for c in completions] == ['keyword']\n completions = Script(\"import keyword\").completions()\n assert [c.name for c in completions] == ['keyword']", "def _find_local_submodules(pkgpath):\r\n # Find all the children modules in this package (non recursive)\r\n pkgname = static.modpath_to_modname(pkgpath, check=False)\r\n if pkgname is None:\r\n raise Exception('cannot import {!r}'.format(pkgpath))\r\n # TODO:\r\n # DOES THIS NEED A REWRITE TO HANDLE THE CASE WHEN __init__ does not exist?\r\n\r\n try:\r\n # Hack to grab the root package\r\n a, b = static.split_modpath(pkgpath, check=False)\r\n root_pkgpath = join(a, b.replace('\\\\', '/').split('/')[0])\r\n except ValueError:\r\n # Assume that the path is the root package if split_modpath fails\r\n root_pkgpath = pkgpath\r\n\r\n for sub_modpath in static.package_modpaths(pkgpath, with_pkg=True,\r\n recursive=False, check=False):\r\n sub_modname = static.modpath_to_modname(sub_modpath, check=False,\r\n relativeto=root_pkgpath)\r\n rel_modname = sub_modname[len(pkgname) + 1:]\r\n if not rel_modname or rel_modname.startswith('_'):\r\n # Skip private modules\r\n pass\r\n else:\r\n yield rel_modname, sub_modpath", "def get_external_imports(tree: dict,\n only_top_level: bool = True) -> set:\n external_imports = set()\n modules = find_tree(tree, lambda x: x[\"type\"] == \"module\", how=\"all\")\n for module in modules:\n for import_item in module[\"imports\"].values():\n if import_item[\"lookup\"] is None:\n if import_item[\"type\"] 
== \"import\":\n external_imports.add(import_item[\"name\"])\n elif import_item[\"type\"] == \"from-import\":\n if import_item[\"module\"] is not None:\n external_imports.add(import_item[\"module\"])\n if only_top_level:\n external_imports = {i.partition(\".\")[0] for i in external_imports}\n return external_imports", "def is_third_party(self) -> bool:\n for third_party_import_string in self.third_party_import_strings:\n if self.source.startswith(third_party_import_string):\n return True\n\n return False", "def _find_all_importables(pkg: ModuleType) -> List[str]:\n return sorted(\n set(\n chain.from_iterable(\n _discover_path_importables(Path(p), pkg.__name__)\n # FIXME: Unignore after upgrading to `mypy > 0.910`. The fix\n # FIXME: is in the `master` branch of upstream since Aug 4,\n # FIXME: 2021 but has not yet been included in any releases.\n # Refs:\n # * https://github.com/python/mypy/issues/1422\n # * https://github.com/python/mypy/pull/9454\n for p in pkg.__path__ # type: ignore[attr-defined]\n ),\n ),\n )", "def get_imported_types(ast: ast_pb2.AST,\n include_paths: List[str]) -> Set[str]:\n result = set()\n includes = set(ast.usertype_includes)\n for include in includes:\n if include.endswith('_clif.h'):\n clif_uses = _get_clif_uses(include, include_paths)\n for clif_use in clif_uses:\n result.add(clif_use.cpp_name)\n return result", "def checkImport(self):\r\n for imp in self.cap_file.Import.packages:\r\n if a2s(imp.aid) not in export_refs:\r\n return False\r\n return True", "def is_local(baz):\n if 0.001 * baz[0] / 111.11 < 10.0:\n if 0.001 * baz[0] / 111.11 < 3.0:\n is_local = 'close'\n else:\n is_local = 'local'\n else:\n is_local = 'non-local'\n\n return is_local", "def imports(self):\n line = self.line.strip()\n if line.startswith('im'):\n if line.startswith('import') is False:\n return True\n elif line == '':\n return True" ]
[ "0.5883926", "0.58062863", "0.57493323", "0.57031", "0.56781274", "0.56347144", "0.5598635", "0.5551292", "0.5530518", "0.5508783", "0.55060405", "0.5462658", "0.5400261", "0.5390425", "0.53739977", "0.5363772", "0.53556126", "0.53410673", "0.5327128", "0.5322422", "0.53129756", "0.5312709", "0.5307345", "0.53003335", "0.52804154", "0.5275956", "0.52632916", "0.5261247", "0.52584803", "0.5246131" ]
0.7432707
0
Installs the App Engine SDK, if needed.
def _setup_appengine_sdk(session): session.env['GAE_SDK_PATH'] = os.path.join(_GAE_ROOT, 'google_appengine') session.run('gcp-devrel-py-tools', 'download-appengine-sdk', _GAE_ROOT)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_env():\r\n\r\n # Try to import the appengine code from the system path.\r\n try:\r\n from google.appengine.api import apiproxy_stub_map\r\n except ImportError:\r\n for k in [k for k in sys.modules if k.startswith('google')]:\r\n del sys.modules[k]\r\n\r\n # Not on the system path. Build a list of alternative paths\r\n # where it may be. First look within the project for a local\r\n # copy, then look for where the Mac OS SDK installs it.\r\n paths = [os.path.join(PROJECT_DIR, 'google_appengine'),\r\n os.environ.get('APP_ENGINE_SDK'),\r\n '/usr/local/google_appengine',\r\n '/usr/local/opt/google-app-engine/share/google-app-engine',\r\n '/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine']\r\n for path in os.environ.get('PATH', '').split(os.pathsep):\r\n path = path.rstrip(os.sep)\r\n if path.endswith('google_appengine'):\r\n paths.append(path)\r\n if os.name in ('nt', 'dos'):\r\n path = r'%(PROGRAMFILES)s\\Google\\google_appengine' % os.environ\r\n paths.append(path)\r\n\r\n # Loop through all possible paths and look for the SDK dir.\r\n sdk_path = None\r\n for path in paths:\r\n if not path:\r\n continue\r\n path = os.path.expanduser(path)\r\n path = os.path.realpath(path)\r\n if os.path.exists(path):\r\n sdk_path = path\r\n break\r\n\r\n # The SDK could not be found in any known location.\r\n if sdk_path is None:\r\n sys.stderr.write(\"The Google App Engine SDK could not be found!\\n\"\r\n \"Make sure it's accessible via your PATH \"\r\n \"environment and called google_appengine.\\n\")\r\n sys.exit(1)\r\n\r\n # First add the found SDK to the path\r\n sys.path = [ sdk_path ] + sys.path\r\n\r\n # Then call fix_sys_path from the SDK\r\n try:\r\n from dev_appserver import fix_sys_path\r\n except ImportError:\r\n from old_dev_appserver import fix_sys_path\r\n fix_sys_path()\r\n\r\n setup_project()\r\n from .utils import have_appserver\r\n if have_appserver:\r\n # App Engine's threading.local is broken.\r\n setup_threading()\r\n elif not os.path.exists(DATA_ROOT):\r\n os.mkdir(DATA_ROOT)\r\n setup_logging()\r\n\r\n if not have_appserver:\r\n # Patch Django to support loading management commands from zip\r\n # files.\r\n from django.core import management\r\n management.find_commands = find_commands", "def setup_gae_env():\n sdk_path = find_gae_sdk()\n if not sdk_path:\n raise BadEnvironmentError('Couldn\\'t find GAE SDK.')\n setup_gae_sdk(sdk_path)", "def setup_gae_sdk(sdk_path):\n global _GAE_SDK_PATH\n if _GAE_SDK_PATH:\n raise ValueError('setup_gae_sdk was already called.')\n _GAE_SDK_PATH = sdk_path\n\n sys.path.insert(0, sdk_path)\n # Sadly, coverage may inject google.protobuf in the path. 
Forcibly expulse it.\n if 'google' in sys.modules:\n del sys.modules['google']\n\n import dev_appserver\n dev_appserver.fix_sys_path()\n for i in sys.path[:]:\n if 'jinja2-2.6' in i:\n sys.path.remove(i)\n\n # Make 'yaml' variable (defined on top of this module) point to loaded module.\n global yaml\n import yaml as yaml_module\n yaml = yaml_module", "def install_vk_api_for_python():\r\n\r\n print(\"Установка необходимых библиотек...\")\r\n os.startfile('install_libs.bat', 'runas')", "def gae(session, sample):\n\n # Create a lib directory if needed, otherwise the App Engine vendor library\n # will complain.\n if not os.path.isdir(os.path.join(sample, 'lib')):\n os.mkdir(os.path.join(sample, 'lib'))\n\n _session_tests(session, sample, _setup_appengine_sdk)", "def _ensure_sdk(self, sdk_dir, sdk_version):\n with self.m.context(infra_steps=True):\n pkgs = self.m.cipd.EnsureFile()\n pkgs.add_package('chrome_internal/third_party/sdk/windows', sdk_version)\n self.m.cipd.ensure(sdk_dir, pkgs)\n return sdk_dir", "def install():\n deploy()\n configure()", "def main():\n Log.info('Installing...')\n app = Application()\n app.run()\n Log.info(\"Done successfully.\")", "def setup():\n require('hosts', provided_by=[prod])\n require('code_root')\n sudo('apt-get update')\n sudo('apt-get install -y python-setuptools')\n sudo('easy_install pip')\n sudo('pip install virtualenv')\n sudo('aptitude install -y apache2')\n sudo('aptitude install -y libapache2-mod-wsgi')\n sudo('apt-get install -y nginx')\n update_webserver_config()\n sudo('mkdir -p %s; cd %s; virtualenv .;' % (env.code_root, env.code_root))\n sudo('cd %s;mkdir releases; mkdir shared; mkdir packages; mkdir shared/media; mkdir shared/media/file;' % (env.code_root))\n deploy()", "def bootstrap():\n _require_environment()\n\n adduser()\n install_python()\n install_git()\n install_apache()\n install_mysql()\n setup_project()", "def _install(self):\n\n pass", "def install():\n execute(generate)\n execute(upload)", "def set_up(dev=False):\n _install_dependencies()", "def downloadAndInstallSDK():\n\tif sys.platform=='win32':\n\t\turl = \"https://dl.google.com/android/android-sdk_r24.4.1-windows.zip\"\n\telse:\n\t\turl = \"https://dl.google.com/android/android-sdk_r24.4.1-macosx.zip\"\n\tzippedFile = wget.download(url)\n\n\tprint \"Download finished. Unzipping files now...\"\n\tbasedir = unzip(zippedFile)\n\tprint \"Unzipped complete. 
Folder %s created\" % basedir\n\tinstallADB(basedir)\n\tprint \"Finished ADB installation...\"\n\tprint \"Setting up environmental variables...\"\n\tsetUpEnvironmentVariables(basedir)", "def appcfg_login(app):\n if not _GAE_SDK_PATH:\n raise ValueError('Call setup_gae_sdk first')\n if os.path.exists(_appcfg_oauth2_tokens()):\n os.remove(_appcfg_oauth2_tokens())\n # HACK: Call a command with no side effect to launch the flow.\n subprocess.call([\n sys.executable,\n os.path.join(_GAE_SDK_PATH, 'appcfg.py'),\n '--application', app.app_id,\n '--noauth_local_webserver',\n 'list_versions',\n ], cwd=app.app_dir)", "def Install (self):\n if self in sys.meta_path:\n return\n sys.meta_path.insert (0, self)", "def install():\n PackCommandExecutor().pack()\n InstallCommandExecutor().install()", "def downloadAndInstallJavaSDK():\n\tprint \"Downloading Java SDK...\"\n\tif sys.platform=='win32':\n\t\turl = \"http://download.oracle.com/otn-pub/java/jdk/8u102-b14/jdk-8u102-windows-i586.exe\"\n\t\texeFile = wget.download(url)\n\t\tprint \"Download complete.\"\n\t\tfolder = extractWindowsFolder(exeFile)\n\t\tprint \"Setting up temp variables...\"\n\t\tsetUpTempVariables(folder)\n\telse:\n\t\tdownloadAndInstallMacJavaSDK()", "def install_backend_deps():\n with lcd(BACKENDDIR):\n cmd = '%(pip)s install -r %(requirements_file)s' % {\n 'pip': get_pip(),\n 'requirements_file': requirements_file\n }\n local(cmd)\n # Install Pandoc\n local(\"sudo apt-get install pandoc\")\n # Install Pyandoc\n with lcd(HOMEDIR):\n if not os.path.isdir(os.path.join(HOMEDIR, 'pyandoc')):\n local(\"git clone [email protected]:kennethreitz/pyandoc.git\")\n with lcd(\"pyandoc\"):\n if not env.local:\n\t with prefix('. /home/ubuntu/virtualenvs/venv-system/bin/activate'):\n local(\"python setup.py install\")\n else:\n local(\"python setup.py install\")", "def DevAppserver(paths, args):\n try:\n import dev_appserver # pylint: disable=unused-variable\n except ImportError:\n # TODO(qyearsley): Put the App Engine SDK in the path with the\n # binary dependency manager.\n # See https://github.com/catapult-project/catapult/issues/2135\n print 'This script requires the App Engine SDK to be in PYTHONPATH.'\n sys.exit(1)\n with temp_deployment_dir.TempDeploymentDir(paths) as temp_dir:\n print 'Running dev server on \"%s\".' 
% temp_dir\n subprocess.call(\n [module_finder.FindModule('dev_appserver')] + args + [temp_dir]\n )", "def install(where='local'):\n config = get_config(where)\n print 'using configuration: %s' % config\n with settings(host_string=config['host_string']):\n if not files.exists(config['installation_dir']):\n run('git clone %(git_repo)s %(installation_dir)s' % config)\n with cd(config['installation_dir']):\n run('git submodule init')\n run('git submodule update --init')\n\n with settings(host_string=config['host_string']), cd(config['installation_dir']):\n run('python2.7 bootstrap.py -c %(cfg)s' % config)\n deploy(where)\n secs = 4\n sleep(secs)\n init_db(where)", "def setup():\n\n debs = (\"python-setuptools\", \"apache2\", \"libapache2-mod-wsgi\")\n\n require(\"hosts\", provided_by=[production, staging])\n sudo(\"apt-get install %s\" % \" \".join(debs))\n sudo(\"easy_install virtualenv pip\")\n sudo(\"mkdir -p %(path)s\" % env)\n with cd(\"%(path)s\" % env):\n sudo(\"mkdir -p releases; mkdir -p packages\")\n sudo(\"virtualenv --no-site-packages .\")\n sudo(\"mkdir -p /var/log/twit-demo; chown www-data:www-data /var/log/twit-demo\")", "def git_install(projects_yaml):\n if git_install_requested():\n git_pre_install()\n projects_yaml = git_default_repos(projects_yaml)\n git_clone_and_install(projects_yaml, core_project='keystone')\n git_post_install(projects_yaml)", "def add_sdk_options(parser, default_app_dir):\n parser.add_option(\n '-s', '--sdk-path',\n help='Path to AppEngine SDK. Will try to find by itself.')\n parser.add_option(\n '-p', '--app-dir',\n default=default_app_dir,\n help='Path to application directory with app.yaml.')\n parser.add_option('-A', '--app-id', help='Defaults to name in app.yaml.')\n parser.add_option('-v', '--verbose', action='store_true')", "def install_system_packages():\n print(\"Installiere notwendige Pakete...\")\n _run('sudo apt update')\n _run(\n \"sudo apt install \"\n \"apache2 apache2-dev python3-dev python3-venv python3-pip postgresql-contrib libpq-dev\"\n )\n print(\"Fertig!\", end=\"\\n\\n\")", "def install(self):\n log_info(\"No install needed for mono .NET\")\n pass", "def pre_installation(self):\n pass", "def install_django_project(self):\n\n from django.conf import settings as django_settings\n\n with cd(\"{0}\".format(self.app_remote_dir)):\n\n pip(\"install -r requirements.txt\")\n\n with cd(\"{0}\".format(self.app_package)):\n self.setup_settings_local()\n\n self.syncdb(django_settings)\n self.setup_gunicorn_supervisor()", "def find_gae_sdk_gcloud():\n try:\n gcloud = find_gcloud()\n except BadEnvironmentError:\n return None\n # 'gcloud' is <sdk_root>/bin/gcloud.\n sdk_root = os.path.dirname(os.path.dirname(gcloud))\n return os.path.join(sdk_root, 'platform', 'google_appengine')", "def setup(ctx):\r\n ctx.run('pip3 install -r requirements.txt')" ]
[ "0.7231505", "0.68327713", "0.6396035", "0.61375856", "0.5877419", "0.58420604", "0.5788967", "0.57099634", "0.5672816", "0.56718606", "0.55840725", "0.55813754", "0.5555057", "0.55090976", "0.5480833", "0.54060125", "0.53963965", "0.53802866", "0.5351276", "0.531847", "0.5295998", "0.5265977", "0.52415305", "0.5229917", "0.5228512", "0.5221129", "0.52147806", "0.52030116", "0.5193011", "0.518208" ]
0.7749325
0
Lists all sample directories that do not have tests.
def missing_tests(session): print('The following samples do not have tests:') for sample in set(ALL_SAMPLE_DIRECTORIES) - set(ALL_TESTED_SAMPLES): print('* {}'.format(sample))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def file_list_emptydirs(load):\n # TODO - implement this\n _init()\n\n return []", "def test_matlab_install_dir_absent(self):\n directories = (\"/\", \"/tmp\")\n for dirname in directories:\n with self.subTest(dirname=dirname):\n self.assertNotIn(\"matlab-install\", self.host.file(dirname).listdir())", "def test_GetFilesInDirectory_empty_dir(tempdir: pathlib.Path):\n assert not dpack.GetFilesInDirectory(tempdir, [])", "def listdir_nohidden(path):\n\treturn glob.glob(os.path.join(path, '*'))", "def test_list_directory(self):\n import os\n stat_f = lambda x: FakeStat(33188, 16398844, 65024L, 1, 1049, 1049, 0,\n 1409046988, 1409046988, 1409046988)\n os.stat = stat_f\n os.lstat = stat_f\n expected = [\"subdir1\", \"subdir2\"]\n result = [r[0] for r in self.path_translator.list_directory(\n \"/{0}/search1/tmp/study\".format(self.search.instance))]\n self.assertEqual(result, expected)", "def setupSampleDirectories(self):\n self.allSamplesDir = \"%s/%s_all-samples\" % (os.getcwd(),\n self.project2Id)\n if not os.path.exists(self.allSamplesDir):\n os.mkdir(self.allSamplesDir)\n for sampleId,iSample in self.dSamples.items():\n dirSample = \"%s/%s\" % (self.allSamplesDir, sampleId)\n iSample.dir = dirSample\n if not os.path.exists(dirSample):\n os.mkdir(dirSample)\n if self.verbose > 0:\n msg = \"sample directories: %s\" % self.allSamplesDir\n print(msg); sys.stdout.flush()", "def list_dir_no_hidden(path):\n\n return glob(os.path.join(path, \"*\"))", "def test_examples():\n tests = [d for d in listdir(ex) if path.isdir(path.join(ex, d))]\n for d in tests:\n yield check_examples, d", "def test_get_result_directories(self):\n pass", "def test_walk_not_full(self):\n dummy_folder = TestOspaListDir.get_dummy_folder()\n need_result = []\n for i in range(1, 4):\n need_result.append('meme{}.jpg'.format(i))\n need_result.extend(['meme1.jpg',\n 'meme2.png',\n 'meme4.jpg',\n 'meme4.png',\n '1.txt',\n '2.txt',\n '3.txt',\n 'not_txt.not_txt',\n 'antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ])\n\n result = listdir(dummy_folder, full_path=False, only_files=True, walk=True)\n self.assertEqual(sorted(result), sorted(need_result))", "def get_test_files(dirname):\n if not os.path.isdir(dirname):\n return []\n path = dirname + \"/{}\"\n return list(map(path.format, sorted(os.listdir(dirname))))", "def test_empty_directory(self):\n\n # create indexer object\n indexer = indexing_module.IndexModule()\n\n # index the location (storage/data/test/empty_directory)\n indexer.index(_path_prefix + 'empty_directory')\n\n # search for few words and check that the result is empty\n result = indexer.search(\"\")\n print(result)\n self.assertTrue(result == {})\n\n result = indexer.search(\"hello\")\n self.assertTrue(result == {})\n\n result = indexer.search(\"world\")\n self.assertTrue(result == {})", "def test_list_root(self):\n expected = [\"search1\", \"search2\"]\n result = [r[0] for r in self.path_translator.list_directory(\n \"/{0}\".format(self.search.instance))]\n self.assertEqual(result, expected)", "def test_list_dir_returns_dirs_only(self):\n with self.settings(MIDDLEWARE_CLASSES=self.fix_middleware(), KML_FILE_DIR=self.kml_file_dir):\n user = StaffUserFactory()\n ldv = self.initiate_view(user)\n base_path = settings.KML_FILE_DIR\n print base_path\n ldv.cache_dir_content(base_path)\n dirs = ldv.list_dirs()\n print dirs\n self.assertGreaterEqual(len(dirs), 1)\n for dir_name in dirs:\n dir_path = os.path.join(base_path, dir_name)\n self.assertTrue(os.path.isdir(dir_path))", "def 
test_ls_no_shareddir():\n\n with bad_fixture() as root:\n assert next(pipeline.ls(root=root), None) is None", "def dirs_with_test_yaml(dirs):\n for root in dirs or ['tests/']:\n for dir, subdirs, files in os.walk(root):\n if 'test.yaml' in files:\n yield dir", "def test_full(self):\n dummy_folder = TestOspaListDir.get_dummy_folder()\n need_result = ['meme1.jpg',\n 'meme2.png',\n 'meme4.jpg',\n 'meme4.png',\n 'meme monty python',\n ]\n result = listdir(os.path.join(dummy_folder, 'memes'), full_path=False)\n self.assertEqual(sorted(result), sorted(need_result))\n\n need_result_new = [os.path.join(dummy_folder, 'memes', x) for x in need_result]\n result = listdir(os.path.join(dummy_folder, 'memes'), full_path=True)\n self.assertEqual(sorted(result), sorted(need_result_new))", "def test_main(self):\n dummy_folder = TestOspaListDir.get_dummy_folder()\n result = listdir(dummy_folder,\n full_path=True,\n only_files=False,\n )\n need_result = ['memes',\n 'txt_files',\n 'antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ]\n need_result = [os.path.join(dummy_folder, x) for x in need_result]\n self.assertEqual(sorted(need_result), sorted(result))\n\n result = listdir(dummy_folder,\n full_path=False,\n only_files=False,\n )\n need_result = ['memes',\n 'txt_files',\n 'antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ]\n\n self.assertEqual(sorted(need_result), sorted(result))\n\n result = listdir(dummy_folder,\n full_path=True,\n only_files=True,\n )\n need_result = ['antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ]\n need_result = [os.path.join(dummy_folder, x) for x in need_result]\n self.assertEqual(sorted(need_result), sorted(result))\n self.assertEqual(sorted(os.listdir('.')), sorted(listdir(path='.', full_path=False)))", "def get_tests():\n\tret = []\n\tfor walk_tuple in os.walk(webnotes.defs.modules_path):\n\t\tfor test_file in filter(lambda x: x.startswith('test') and x.endswith('.py'), walk_tuple[2]):\n\t\t\tdir_path = os.path.relpath(walk_tuple[0], webnotes.defs.modules_path)\n\t\t\tif dir_path=='.':\n\t\t\t\tret.append(test_file[:-3])\n\t\t\telse:\n\t\t\t\tret.append(dir_path.replace('/', '.') + '.' 
+ test_file[:-3])\t\t\t\n\treturn ret", "def test_everything(self):\n\n qs = FBO(\n path=TEST_FILES_ROOT,\n ).exclude(\n name__glob='*~',\n ).exclude(\n name__glob='*.meta',\n )\n\n self.assertEqual(\n 7,\n qs.count(),\n )\n self.assertEqual(\n {\n 'index.md',\n 'subdir/index.md',\n 'test1.md',\n 'test2.md',\n 'test1.rst',\n 'test2.rst',\n 'test3.rst',\n },\n {o.name for o in qs},\n )", "def test_GetFilesInDirectory_exclude_by_name(tempdir: pathlib.Path):\n # Create files: [ a, foo, sub/foo ]\n (tempdir / \"a\").touch()\n (tempdir / \"foo\").touch()\n (tempdir / \"sub\").mkdir()\n (tempdir / \"sub\" / \"foo\").touch()\n # Exclude pattern 'foo' does not exclude subdir 'foo'.\n assert set(dpack.GetFilesInDirectory(tempdir, [\"foo\"])) == {\n pathlib.Path(\"a\"),\n pathlib.Path(\"sub/foo\"),\n }", "def find_e2e_tests(directory):\n result = []\n\n for dirpath, dirnames, filenames in os.walk(directory):\n # Skip folders containing a sconstest.skip file\n if 'sconstest.skip' in filenames:\n continue\n try:\n with open(os.path.join(dirpath, \".exclude_tests\")) as f:\n excludes = [e.split(\"#\", 1)[0].strip() for e in f.readlines()]\n except EnvironmentError:\n excludes = []\n for fname in filenames:\n if fname.endswith(\".py\") and fname not in excludes:\n result.append(os.path.join(dirpath, fname))\n return sorted(result)", "def top_level_directories(self):\n return [d for d in self.directories if len([x for x in self.directories if x in d]) == 1]", "def _tested_notebooks():\n\n all_notebooks = _list_all_notebooks()\n skipped_notebooks = functools.reduce(\n lambda a, b: a.union(b),\n list(set(glob.glob(g, recursive=True)) for g in SKIP_NOTEBOOKS),\n )\n\n return sorted(\n os.path.abspath(n) for n in all_notebooks.difference(skipped_notebooks)\n )", "def test_os_listdir(self):\n need_result = os.listdir('.')\n result = listdir(path='.', full_path=False)\n self.assertEqual(sorted(result), sorted(need_result))", "def _collect_dirs(\n start_dir,\n blacklist=set(['conftest.py', 'noxfile.py', 'lib', 'third_party']),\n suffix='_test.py',\n recurse_further=False):\n # Collect all the directories that have tests in them.\n for parent, subdirs, files in os.walk(start_dir):\n if './.' in parent:\n continue # Skip top-level dotfiles\n elif any(\n f for f in files if f.endswith(suffix) and f not in blacklist\n ):\n # Don't recurse further for tests, since py.test will do that.\n if not recurse_further:\n del subdirs[:]\n # This dir has desired files in it. yield it.\n yield parent\n else:\n # Filter out dirs we don't want to recurse into\n subdirs[:] = [\n s for s in subdirs\n if s[0].isalpha() and\n s not in blacklist]", "def get_test_examples(self, data_dir):\n raise NotImplementedError()", "def get_test_examples(self, data_dir):\n raise NotImplementedError()", "def list_selfplay_dirs(base_dir):\n\n model_dirs = [os.path.join(base_dir, x)\n for x in tf.io.gfile.listdir(base_dir)]\n return sorted(model_dirs, reverse=True)", "def dir_tests():\n return abspath('tests')" ]
[ "0.66200477", "0.65644187", "0.6472452", "0.64620155", "0.63552594", "0.6348389", "0.6269922", "0.6265887", "0.6258353", "0.6112326", "0.6103435", "0.61029524", "0.610135", "0.60616046", "0.6050831", "0.595446", "0.5950891", "0.5949775", "0.594931", "0.5892177", "0.5882108", "0.58555037", "0.585159", "0.5844413", "0.5838513", "0.5813069", "0.5809234", "0.5809234", "0.57917917", "0.5785017" ]
0.79405457
0
(Re)generates the readme for a sample.
def readmegen(session, sample): session.install('jinja2', 'pyyaml') if os.path.exists(os.path.join(sample, 'requirements.txt')): session.install('-r', os.path.join(sample, 'requirements.txt')) in_file = os.path.join(sample, 'README.rst.in') session.run('python', 'scripts/readme-gen/readme_gen.py', in_file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readme_md(cls):\n\n template = Helpers.File(Settings.readme_me_template).read()\n\n template = Helpers.Regex(\n template, r\"%%version%%\", replace_with=Settings.version\n ).replace()\n template = Helpers.Regex(\n template, r\"%%lenHosts%%\", replace_with=format(len(Settings.domains), \",d\")\n ).replace()\n template = Helpers.Regex(\n template, r\"%%lenIPs%%\", replace_with=format(len(Settings.ips), \",d\")\n ).replace()\n template = Helpers.Regex(\n template,\n r\"%%lenHostsIPs%%\",\n replace_with=format(len(Settings.ips) + len(Settings.domains), \",d\"),\n ).replace()\n\n print(\"Generation of %s\" % Settings.readme_md_file, end=\" \")\n Helpers.File(Settings.readme_md_file).write(template, overwrite=True)\n print(Settings.done)", "def test_readme():\n readme = Path(README_PATH).read_text()\n Actions.read_from_md(readme)", "def make_readme(digest):\n o = 'SHA1 digest: %s\\n\\n'%digest[:10]\n print '...build readme file for GitHub' \n open('README.md','w').write(o + make_readme.__doc__)", "def _create_readme(self, name, summary, description):\n return \"\"\"\n %(header_bar)s\n %(header)s\n %(header_bar)s\n\n %(content)s\n \"\"\" % {\n 'header': name,\n 'header_bar': '=' * len(name),\n 'content': '\\n\\n'.join(\n content\n for content in (summary, description)\n if content\n ) or 'Describe your extension.',\n }", "def readme():\n with open(os.path.join(build_root, 'README.rst')) as f:\n return f.read()", "def hotkeys_readme():\n\n root = '/'.join(__file__.split('/')[:-4])\n fname = root + '/README.rst'\n with codecs.open(fname, 'r', 'utf-8') as f:\n rst = f.read()\n hotkeys = rst.split('.. hotkeys')[1]\n return docutils.examples.html_body(hotkeys)", "def create_readme(case_dict):\n # ---------------------------------------------------------------------\n logger.debug(\"create_readme\")\n os.chdir(case_dict[\"archive_temp_dir\"])\n\n fname = open(\"README.archive\", \"w\")\n fname.write(\"Archived metadata is available for this case at URL:\\n\")\n fname.write(case_dict[\"base_expdb_url\"])\n fname.close()", "def readme():\n with open('README.rst') as f:\n return f.read()", "def readme():\n\n with open('README.rst') as readme_file:\n return readme_file.read()", "def make_readme_txt(self, args):\n with open(self.readme_txt, 'w') as writer:\n log.info(\"args=%s\\n\", args)\n writer.write(\"# Created by pbtranscript-internal-validation.ValidationRunner.make_readme_txt()\\n\")\n writer.write(\"args=%s\\n\\n\" % args)\n\n files = self.common_files + self.collapse_human_files + self.reseq_human_files + self.sirv_files\n for desc, fn in files:\n if op.exists(fn):\n writer.write(\"%s=%s\\n\" % (desc, fn))", "def readme():\n with open('README.md') as readme_file:\n return readme_file.read()", "def readme():\n with open('README.md') as _file:\n return _file.read()", "def create_readme(self) -> None:\n # read the readme file and update the version and description\n with open(CONFIG.template_path / \"README.md\", \"r\") as f:\n TEMPLATE_README = f.read()\n\n # add a readme with the names of the stub-folders\n\n # read informations from firmware_stubs.json\n firmware_stubs = {}\n doc_stubs = {}\n core_stubs = {}\n try:\n with open(self.package_path / \"firmware_stubs.json\", \"r\") as f:\n firmware_stubs = json.load(f)\n with open(self.package_path / \"doc_stubs.json\", \"r\") as f:\n doc_stubs = json.load(f)\n with open(self.package_path / \"modules.json\", \"r\") as f:\n core_stubs = json.load(f)\n except FileNotFoundError:\n pass\n\n # Prettify this by merging with template text\n with 
open(self.package_path / \"README.md\", \"w\") as f:\n f.write(f\"# {self.package_name}\\n\\n\")\n f.write(TEMPLATE_README)\n f.write(f\"Included stubs:\\n\")\n for name, folder in self.stub_sources:\n f.write(f\"* {name} from `stubs/{Path(folder).as_posix()}`\\n\")\n\n f.write(f\"\\n\\n\")\n f.write(f\"origin | Family | Port | Board | Version\\n\")\n f.write(f\"-------|--------|------|-------|--------\\n\")\n try:\n f.write(\n f\"Firmware | {firmware_stubs['firmware']['family']} | {firmware_stubs['firmware']['port']} | {firmware_stubs['firmware']['machine']} | {clean_version(firmware_stubs['firmware']['version'])} \\n\"\n )\n except Exception:\n pass\n try:\n f.write(\n f\"Documentation | {doc_stubs['firmware']['family']} | {doc_stubs['firmware']['port']} | - | {clean_version(doc_stubs['firmware']['version'])} \\n\"\n )\n except Exception:\n pass\n try:\n f.write(\n f\"Core | {core_stubs['firmware']['family']} | {core_stubs['firmware']['port']} | - | {clean_version(core_stubs['firmware']['version'])} \\n\"\n )\n except Exception:\n pass", "def update_readme():\n\n temp = \"\"\"<head>\n <title>Unittest Results</title>\n <meta charset=\"utf-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n <link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css\" integrity=\"sha384-1q8mTJOASx8j1Au+a5WDVnPi2lkFfwwEAa8hDDdjZlpLegxhjVME1fgjWPGmkzs7\" crossorigin=\"anonymous\">\n</head>\"\"\"\n\n with open(\"README_proxy.md\", \"r\") as old_readme_file:\n old_readme_txt = old_readme_file.read()\n\n with open(\"reports/test_result.html\", \"r\") as html_file:\n html = html_file.read().splitlines()[0:-21]\n html = \"\\n\".join(html).replace(temp, \"\")\n\n with open(\"README.md\", \"w\") as new_readme_file:\n new_readme_file.write(old_readme_txt + \"\\n\\n\\n\" + html + \"</body></html>\")", "def readme(fname='README'):\n with open(os.path.join(os.path.dirname(__file__), fname)) as file:\n return file.read()", "def with_readme(request, new_package):\n\n new_module, pkg_root = new_package\n pkg_name = os.path.basename(pkg_root)\n\n with open(os.path.join(new_module, \"README\"), \"w\") as openreadme:\n openreadme.write(\"{n}\\n{d}\\n\\n{n}'s readme... 
with content!\".format(\n n=pkg_name,\n d=\"=\" * len(pkg_name),\n ))\n\n with open(os.path.join(new_module, META_NAME), \"w\") as openmeta:\n openmeta.write(\n '{\"packages\": [\"find_packages()\"], \"long_description\": \"README\"}'\n )\n\n request.addfinalizer(module_cleanup)\n return new_module, pkg_root", "def build_readme(comic: str, readme: str) -> str:\n img = f\"{START_COMMENT}\\n{comic}\\n{END_COMMENT}\"\n return re.sub(listReg, img, readme)", "def deploy(version):\n toolkit.readmegen(version)", "def readme(self, ref=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/readme'.format(self.parent.get_url())\r\n\r\n return http.Request('GET', url, params), parsers.parse_json", "def longdescription():\r\n print()\r\n here = path.abspath(path.dirname(__file__))\r\n with open(path.join(here, 'README.rst')) as f:\r\n long_description = f.read()\r\n\r\n print(long_description)", "def _readme(datatype, fname, overwrite=False):\n if os.path.isfile(fname) and not overwrite:\n with open(fname, \"r\", encoding=\"utf-8-sig\") as fid:\n orig_data = fid.read()\n mne_bids_ref = REFERENCES[\"mne-bids\"] in orig_data\n datatype_ref = REFERENCES[datatype] in orig_data\n if mne_bids_ref and datatype_ref:\n return\n text = \"{}References\\n----------\\n{}{}\".format(\n orig_data + \"\\n\\n\",\n \"\" if mne_bids_ref else REFERENCES[\"mne-bids\"] + \"\\n\\n\",\n \"\" if datatype_ref else REFERENCES[datatype] + \"\\n\",\n )\n else:\n text = \"References\\n----------\\n{}{}\".format(\n REFERENCES[\"mne-bids\"] + \"\\n\\n\", REFERENCES[datatype] + \"\\n\"\n )\n\n _write_text(fname, text, overwrite=True)", "def generate_documentation(self):\n self.generate_api_docs()\n build.main([\n self.SOURCE_DIR,\n self.BUILD_DIR,\n ])", "def write_calling_seq(d):\n dr= get_sample_dir(d['outdir'],d['obj'])\n fn=os.path.join(dr,'README.txt')\n if os.path.exists(fn):\n os.remove(fn)\n with open(fn,'w') as foo:\n for key in d.keys():\n foo.write('%s %s\\n' % (key,str(d[key])))\n print('Wrote %s' % fn)", "def readme_tests(readme=None):\n if readme is None:\n readme = Path('..', 'readme.md')\n with open(readme) as target:\n for line in target:\n if line.startswith(' ') and 'test_utils' not in line:\n print()\n print(line)\n input('Press Enter to execute')\n sb.run(line, shell=True, check=True)", "def OpenReadMe():\n location = os.path.join(os.path.dirname(__file__), \"README.txt\")\n os.startfile(location)", "def main_docstring():", "def get_fsleyes_readme():\n with open(op.join(basedir, 'README.rst'), 'rt', encoding='utf-8') as f:\n return f.read().strip()", "def docs():", "def generate():\n\n # Verify if directory exists\n if not os.path.isdir(config.techniques_markdown_path):\n os.mkdir(config.techniques_markdown_path)\n\n #Write the technique index.html page\n with open(os.path.join(config.techniques_markdown_path, \"overview.md\"), \"w\", encoding='utf8') as md_file:\n md_file.write(config.technique_overview_md)\n\n for domain in config.domains:\n generate_domain_markdown(domain)", "def OpenReadMe():\n location = os.path.join(os.path.dirname(__file__), \"README.txt\")\n os.startfile(location)\n return" ]
[ "0.7287029", "0.7018073", "0.6953501", "0.6933401", "0.6853328", "0.6802791", "0.6781446", "0.6738891", "0.6733868", "0.6717046", "0.66753453", "0.66342485", "0.6618257", "0.64765066", "0.644934", "0.6441836", "0.6415977", "0.63786113", "0.63530713", "0.63517934", "0.62726355", "0.6251161", "0.62501574", "0.623805", "0.6225982", "0.6225031", "0.6219922", "0.62031364", "0.62027067", "0.61896217" ]
0.80653775
0
Returns a paranoid_pb2.TestResultsEntry protobuf ready for the checks. The created paranoid_pb2.TestResultsEntry is appropriate to be used on tests and have the paranoid_pb2.TestResultsEntry.result filled by the Check function (i.e., set as weak or not).
def _CreateTestResult(self) -> paranoid_pb2.TestResultsEntry: if self.severity is None: raise KeyError("Please specify self.severity for %s." % self.check_name) return paranoid_pb2.TestResultsEntry( severity=self.severity, test_name=self.check_name, result=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_verifier_result(self):\n stat = self.get_verifier_result(self.verification_id)\n try:\n num_executed = stat['num_tests'] - stat['num_skipped']\n try:\n self.result = 100 * stat['num_success'] / num_executed\n except ZeroDivisionError:\n self.result = 0\n if stat['num_tests'] > 0:\n LOGGER.info(\"All tests have been skipped\")\n else:\n LOGGER.error(\"No test has been executed\")\n return\n\n with open(os.path.join(self.res_dir, \"rally.log\"),\n 'r', encoding='utf-8') as logfile:\n output = logfile.read()\n\n success_testcases = []\n for match in re.findall(r'.*\\{\\d{1,2}\\} (.*?) \\.{3} success ',\n output):\n success_testcases.append(match)\n failed_testcases = []\n for match in re.findall(r'.*\\{\\d{1,2}\\} (.*?) \\.{3} fail',\n output):\n failed_testcases.append(match)\n skipped_testcases = []\n for match in re.findall(r'.*\\{\\d{1,2}\\} (.*?) \\.{3} skip(?::| )',\n output):\n skipped_testcases.append(match)\n\n self.details = {\"tests_number\": stat['num_tests'],\n \"success_number\": stat['num_success'],\n \"skipped_number\": stat['num_skipped'],\n \"failures_number\": stat['num_failures'],\n \"success\": success_testcases,\n \"skipped\": skipped_testcases,\n \"failures\": failed_testcases}\n except Exception: # pylint: disable=broad-except\n self.result = 0\n\n LOGGER.info(\"Tempest %s success_rate is %s%%\",\n self.case_name, self.result)", "def get(self):\n subscription = db_config.get_value('predator_result_topic')\n if not subscription:\n logs.log('No Predator subscription configured. Aborting.')\n return\n\n client = pubsub.PubSubClient()\n messages = client.pull_from_subscription(subscription, acknowledge=True)\n for message in messages:\n message = json.loads(message.data)\n testcase_id = message['crash_identifiers']\n try:\n testcase = data_handler.get_testcase_by_id(testcase_id)\n except errors.InvalidTestcaseError:\n logs.log('Testcase %s no longer exists.' % str(testcase_id))\n continue\n\n testcase.set_metadata('predator_result', message, update_testcase=False)\n testcase.delete_metadata('blame_pending', update_testcase=False)\n testcase.put()\n logs.log('Set predator result for testcase %d.' % testcase.key.id())\n\n logs.log('Finished processing predator results. %d total.' 
% len(messages))", "def results(self, checkid):\r\n return results.Results(self, checkid)", "def _worker(self, results):\n keys = {\n \"test-certificate-verify\": {\n \"MD5 forced\": 2,\n \"TLSv1.1 signature in TLSv1.2 Certificate Verify\": 1,\n \"MITIGATION\": \"SLOTH\",\n },\n \"test-sig-algs\": {\"MD5 first\": 2, \"MITIGATION\": \"SLOTH\"},\n \"test-clienthello-md5\": {\n \"only-md5-rsa-signature_algorithm\": 1,\n \"unknown-signature_algorithm-numbers\": 1,\n \"MITIGATION\": \"SLOTH\",\n },\n \"test-tls13-pkcs-signature\": {\n \"rsa_pkcs1_md5 signature\": 1,\n \"MITIGATION\": \"SLOTH_MD5_SIGNATURE_TLS_1_3\",\n },\n }\n return self._obtain_results(results, keys)", "def get_result(self):\n config = self.bisect_config\n results_confidence = 0\n if self.culprit:\n results_confidence = self.api.m.math_utils.confidence_score(\n self.lkgr.values, self.fkbr.values)\n\n if self.failed:\n status = 'failed'\n elif self.bisect_over:\n status = 'completed'\n else:\n status = 'started'\n\n aborted_reason = None\n if self.failed_initial_confidence:\n aborted_reason = _FAILED_INITIAL_CONFIDENCE_ABORT_REASON\n elif self.failed_direction:\n aborted_reason = _DIRECTION_OF_IMPROVEMENT_ABORT_REASON\n return {\n 'try_job_id': config.get('try_job_id'),\n 'bug_id': config.get('bug_id'),\n 'status': status,\n 'buildbot_log_url': self._get_build_url(),\n 'bisect_bot': self.get_perf_tester_name(),\n 'command': config['command'],\n 'test_type': config['test_type'],\n 'metric': config['metric'],\n 'change': self.relative_change,\n 'score': results_confidence,\n 'good_revision': self.good_rev.commit_hash,\n 'bad_revision': self.bad_rev.commit_hash,\n 'warnings': self.warnings,\n 'aborted_reason': aborted_reason,\n 'culprit_data': self._culprit_data(),\n 'revision_data': self._revision_data()\n }", "def _AddResult(self):\n if not self._results:\n result = analyzer_result.AnalyzerResult()\n result.attribute_name = 'test_result'\n result.attribute_value = 'is_vegetable'\n self._results.append(result)", "def process_ResultCheck(self):\n try:\n cmd = self.ExecutionTask.get_param().split(',')\n logging.debug(\"%s-%s-%s-%s-%s\" % ( TestScriptSymbolTable.get_value_from_sym_tab(cmd[0], TestScriptSymbolTable.test_script_sym_tab),cmd[0], cmd[1], cmd[2], cmd[3]))\n\n checkval = cmd[0].split('!') \n \n cval = TestScriptSymbolTable.get_value_from_sym_tab(checkval[1], TestScriptSymbolTable.capi_cmd_ret_sym_tab)\n\n if int(cval) >= int(cmd[1]):\n result = cmd[2]\n else:\n result = cmd[3]\n\n logging.info(\"\\nRESULT CHECK---> %15s\", result) \n self.setTestResult(result)\n \n #if result == 'FAIL':\n if 'FAIL' in result:\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n except OSError:\n logging.info(\"\\nException - ResultCheck\")", "def results_data(self, as_dict=False) -> PFResult | dict:\n data = PFResult(\n tolerance_mm=self.tolerance,\n action_tolerance_mm=self.action_tolerance,\n percent_leaves_passing=self.percent_passing,\n number_of_pickets=self.num_pickets,\n absolute_median_error_mm=self.abs_median_error,\n max_error_mm=self.max_error,\n max_error_picket=self.max_error_picket,\n max_error_leaf=self.max_error_leaf,\n mean_picket_spacing_mm=self.mean_picket_spacing,\n offsets_from_cax_mm=[pk.dist2cax for pk in self.pickets],\n passed=self.passed,\n failed_leaves=self.failed_leaves(),\n mlc_skew=self.mlc_skew(),\n )\n if as_dict:\n return dataclasses.asdict(data)\n return data", "def _create_failure_entry(self):\r\n # view task entry for task failure\r\n progress = {'message': TEST_FAILURE_MESSAGE,\r\n 'exception': 
TEST_FAILURE_EXCEPTION,\r\n }\r\n return self._create_entry(task_state=FAILURE, task_output=progress)", "def get_results(self):\n error_dict = {'error_code_test': self.error_code_test,\n 'error_text_test': self.error_text_test}\n\n return self.testresults, error_dict, self.checkstats", "def report_results(results: dict):\n # Loop thru our results, compare to our upload and return the verdict\n for result in results:\n for item in Analyzer.files:\n if result[\"sha256\"] == item[2]:\n if \"no specific threat\" in result[\"verdict\"]:\n # File is clean\n logger.info(\"Verdict for %s: %s\", item[1], result[\"verdict\"])\n else:\n # Mitigation would trigger from here\n logger.warning(\"Verdict for %s: %s\", item[1], result[\"verdict\"])", "def all_results(self):\n res = [(True, result) for result in self.successes]\n res.extend([(False, result) for result in self.failures])\n return res", "def extract_results_test(self):\n assert len(self.results.keys()) != 0\n TESTS = [\n {\n \"input\": {\"molecules\": [\"DDSPDLPK\"], \"score_threshold\": 0.95},\n \"output\": {\n \"formula\": \"C(37)H(59)N(9)O(16)\",\n \"file_name\": \"BSA1.mzML\",\n \"scaling_factor\": 100,\n \"spec_id\": 1337,\n },\n }\n ]\n for test_dict in TESTS:\n for key, n, entry in self.results.extract_results(**test_dict[\"input\"]):\n print(key, entry)\n assert key.formula == test_dict[\"output\"][\"formula\"]\n assert key.file_name == test_dict[\"output\"][\"file_name\"]\n assert entry.scaling_factor == test_dict[\"output\"][\"scaling_factor\"]\n assert entry.spec_id == test_dict[\"output\"][\"spec_id\"]\n # print(self.results)\n # print(self.results.lookup)\n assert n == 0", "def _partitionPingResults(results):\n good, bad = [], []\n for result in results:\n if result.isUp:\n good.append(result.address)\n else:\n bad.append(result.address)\n return (good, bad)", "def CleanUpTestResults(self):\n name_key = lambda v: v.name\n results_by_name = sorted(self.results, key=name_key)\n\n for name, res_iter in groupby(results_by_name, key=name_key):\n results = set(res_iter)\n\n # If DejaGnu was unable to compile a test it will create following result:\n failed = DejaGnuTestResult(name, '(test for excess errors)', 'FAIL',\n False)\n\n # If a test compilation failed, remove all results that are dependent.\n if failed in results:\n dependants = set(filter(lambda r: r.result != 'FAIL', results))\n\n self.results -= dependants\n\n for res in dependants:\n logging.info('Removed {%s} dependance.', res)\n\n # Remove all UNRESOLVED results that were also marked as UNSUPPORTED.\n unresolved = [res._replace(result='UNRESOLVED')\n for res in results if res.result == 'UNSUPPORTED']\n\n for res in unresolved:\n if res in self.results:\n self.results.remove(res)\n logging.info('Removed {%s} duplicate.', res)", "def load_mock_results():\n options = Options()\n cp_dir = os.path.dirname(inspect.getfile(test_files))\n options.checkpoint_filename = os.path.join(cp_dir, 'checkpoint.json')\n\n cp = Checkpoint(options)\n results, _, _ = cp.load()\n\n return [r for r in results['Fake_Test_Data'] if r.problem_tag == 'prob_0']", "def _get_results(self, res):\n self.async_res = res\n self.full_res = res.wait() # pragma: no cover\n self.trained = True # pragma: no cover\n self.mod_id = self.full_res['model_id'] # pragma: no cover\n self.data_id = self.full_res['data_id'] # pragma: no cover\n self.params_dump = self.full_res['params_dump'] # pragma: no cover\n if self.verbose > 0: # pragma: no cover\n print(\"Result {} | {} ready\".format(\n self.mod_id, 
self.data_id)) # pragma: no cover", "def make_results(self):\n statistic_value, p_value = self.stats\n accept_hypothesis = self.accept_hypothesis(statistic_value)\n\n return FrequentistTestResults(\n control=self.comparison.d2,\n variation=self.comparison.d1,\n delta=self.comparison.delta,\n delta_relative=self.comparison.delta_relative,\n effect_size=self.comparison.effect_size,\n alpha=self.comparison.alpha,\n power=self.comparison.power,\n confidence_interval=self.ci,\n test_statistic=self.test_statistic,\n statistic_value=statistic_value,\n p_value=p_value,\n df=None,\n hypothesis=self.hypothesis_text,\n accept_hypothesis=accept_hypothesis,\n inference_procedure=self,\n warnings=self.comparison.warnings\n )", "def _validate_results(self, task, result):\n assert isinstance(result, dict), \\\n f\"{task} returned a {type(result)} rather than a dict\"\n for k in result:\n assert k in self.provides, \\\n f\"{task} provided unwanted output {k}\"\n for k in self.provides:\n assert k in result, \\\n f\"{task} failed to provide needed output {k}\"", "def _makeResult(self):\n\n result = super(CustomTextTestRunner, self)._makeResult()\n result.test_case_count = self.test_case_count\n return result", "def prepare_results(self) -> dict:\n if not hasattr(self, \"results\"):\n raise AttributeError(\n \"Results have not been finalized. Please call \"\n \"finalize_results() before saving output.\"\n )\n\n output = {\n \"armory_version\": armory.__version__,\n \"config\": self.config,\n \"results\": self.results,\n \"timestamp\": int(self.time_stamp),\n }\n return output", "def get_ticket_results(mgr, ticket_id, update_count=1):\r\n result = mgr.get_ticket(ticket_id)\r\n result = NestedDict(result)\r\n\r\n table = KeyValueTable(['Name', 'Value'])\r\n table.align['Name'] = 'r'\r\n table.align['Value'] = 'l'\r\n\r\n table.add_row(['id', result['id']])\r\n table.add_row(['title', result['title']])\r\n if result['assignedUser']:\r\n table.add_row(['assignedUser',\r\n \"%s %s\" % (result['assignedUser']['firstName'],\r\n result['assignedUser']['lastName'])])\r\n table.add_row(['createDate', result['createDate']])\r\n table.add_row(['lastEditDate', result['lastEditDate']])\r\n\r\n total_update_count = result['updateCount']\r\n count = min(total_update_count, update_count)\r\n for i, update in enumerate(result['updates'][:count]):\r\n update = wrap_string(update['entry'])\r\n table.add_row(['Update %s' % (i + 1,), update])\r\n\r\n return table", "def __processFollowerResults(self):\r\n\r\n resultCacheFileName = os.path.join(core.FW_conf['test_result_dir'], os.path.split(core.FW_conf['test_result_dir'])[1] + \".cache\" )\r\n if os.path.isfile(resultCacheFileName):\r\n resultDict = cPickle.load(open(resultCacheFileName, 'rb'))\r\n debug.brf('Loading existing results from cache...')\r\n else:\r\n resultDict = {}\r\n\r\n # read which iteration this was\r\n if not resultDict.has_key(self.tcId):\r\n resultDict[self.tcId] = {}\r\n iteration = len(resultDict[self.tcId]) + 1\r\n resultDict[self.tcId][iteration] = {} # new iteration for the result dictionary\r\n\r\n leaderResult = Dictionary[str, str]()\r\n res = self.result.getResult()\r\n\r\n try:\r\n if str(res) == 'Failed':\r\n errornode = self.result.getError()\r\n if errornode != None and errornode.getAttribute('reason') == 'X-Files generated during testcase execution' or \\\r\n errornode.getAttribute('reason') == 'Phone reset detected':\r\n leaderResult.Add('Result', 'X-files')\r\n elif errornode != None:\r\n leaderResult.Add('Result', 
str(errornode.getAttribute('reason')))\r\n except:\r\n debug.err('error reading result node')\r\n leaderResult.Add('Result', str(res))\r\n\r\n if not leaderResult.ContainsKey('Result'): # if result was not added\r\n leaderResult.Add('Result', str(res))\r\n\r\n folder = os.path.split(core.FW_conf['test_result_dir'])[1]\r\n\r\n leaderResult.Add('ResultDir', folder)\r\n leaderResult.Add('Duration', str( int( time.time() - self.leader_start_time )) )\r\n #leaderResult.Add('EndTime', strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))\r\n leaderResult.Add('EndTime', DateTime.Now.ToString(\"yyyy-MM-dd HH:mm:ss\"))\r\n for i in range(1, len(self._capturedStillImages) + 1 ):\r\n debug.out(self._capturedStillImages[i-1])\r\n relativeFileName = os.path.join('videos',self._capturedStillImages[i-1])\r\n leaderResult.Add('Capture%s' % str(i), relativeFileName)\r\n\r\n try:\r\n relativeFileName = os.path.join('videos',os.path.splitext(core.FW_conf['test_result_name'])[0] + '.avi')\r\n videoFile = os.path.join(core.FW_conf['test_result_dir'], relativeFileName)\r\n if os.path.isfile(videoFile): # change video address if video file is found\r\n leaderResult.Add('Video', str(relativeFileName))\r\n #debug.brf('Added leader Video: %s' % str(videoFile))\r\n except Exception as e:\r\n debug.brf('Could not add Leader video to result: %s' % e.message)\r\n\r\n leaderResult.Add('Machine', str(Dns.GetHostName())) # add Leader machine name\r\n\r\n followerResults = core.FW_conf['leader'].GetResults()\r\n followerResults.Add('Leader', leaderResult) # Add leader result to dictionary\r\n\r\n for followerRes in followerResults:\r\n clientId = followerRes.Key # string client id\r\n result = followerRes.Value # dictionary of result, link\r\n\r\n if not resultDict[self.tcId][iteration].has_key(clientId):\r\n resultDict[self.tcId][iteration][clientId] = {}\r\n else:\r\n debug.err('\\n**** %s has already result for iteration %s on test case %s. Will not override.\\n ***' % (clientId, iteration, self.tcId))\r\n continue\r\n\r\n # loop through result\r\n debug.brf('Results for client %s' % clientId)\r\n for res in result:\r\n debug.brf('Adding %s %s' % (str(res.Key), str(res.Value)) )\r\n resultDict[self.tcId][iteration][clientId][str(res.Key)] = str(res.Value)\r\n\r\n if resultDict:\r\n # dump resultDict to cache\r\n try:\r\n cacheFile = open(resultCacheFileName, 'wb')\r\n cPickle.dump(resultDict, cacheFile, 0)\r\n cacheFile.close()\r\n except:\r\n for i in range(10):\r\n debug.err('*' * 40 + ' FAILED TO FLUSH RESULTS TO DISK! 
' + '*' * 40)", "def ProcessResultForPublishing(self, result, key): # pragma: no cover.\n # TODO(katesonia) Add feedback page link information to result after\n # feedback page of Cracas is added.\n return result", "def results(update: Update, context: CallbackContext):\n #update.effective_message.reply_text(text=\"here are all new results\")\n if update is not None:\n context.bot.send_chat_action(\n chat_id=update.effective_chat.id, action=ChatAction.TYPING)\n get_latest_result(update, context)\n if update.effective_chat.id != 321641669:\n context.bot.send_message(chat_id=-1001414706781,\n text=str(update.effective_chat.id) + ' looked for <b>result</b>')", "def assertResults(self, expected, result, deduped=False):\n self.assertEqual([u'shards'], result.keys())\n self.assertEqual(1, len(result[u'shards']))\n self.assertTrue(result[u'shards'][0], result)\n result = result[u'shards'][0].copy()\n self.assertFalse(result.get(u'abandoned_ts'))\n bot_version = result.pop(u'bot_version')\n self.assertTrue(bot_version)\n if result.get(u'costs_usd') is not None:\n expected.pop(u'costs_usd', None)\n self.assertLess(0, result.pop(u'costs_usd'))\n if result.get(u'cost_saved_usd') is not None:\n expected.pop(u'cost_saved_usd', None)\n self.assertLess(0, result.pop(u'cost_saved_usd'))\n self.assertTrue(result.pop(u'created_ts'))\n self.assertTrue(result.pop(u'completed_ts'))\n self.assertLess(0, result.pop(u'duration'))\n task_id = result.pop(u'task_id')\n run_id = result.pop(u'run_id')\n self.assertTrue(task_id)\n self.assertTrue(task_id.endswith('0'), task_id)\n if not deduped:\n self.assertEqual(task_id[:-1] + '1', run_id)\n self.assertTrue(result.pop(u'bot_idle_since_ts'))\n self.assertTrue(result.pop(u'modified_ts'))\n self.assertTrue(result.pop(u'started_ts'))\n\n if getattr(expected.get(u'output'), 'match', None):\n expected_output = expected.pop(u'output')\n output = result.pop('output')\n self.assertTrue(\n expected_output.match(output),\n '%s does not match %s' % (output, expected_output.pattern))\n\n # Bot python version may be different.\n result[u'bot_dimensions'] = sorted(\n [d for d in result[u'bot_dimensions'] if not d['key'] == 'python'])\n\n self.assertEqual(expected, result)\n return bot_version", "def test_call_result_as_dict(self):\r\n exp_assignments = rdp_test1_expected_dict\r\n min_confidence = self.default_app.Params['Confidence']\r\n\r\n # Since there is some variation in the assignments, run\r\n # 10 trials and make sure we get the expected result at least once\r\n num_trials = 10\r\n unverified_seq_ids = set(exp_assignments.keys())\r\n for i in range(num_trials):\r\n obs_assignments = self.default_app(self.tmp_seq_filepath)\r\n for seq_id in list(unverified_seq_ids):\r\n obs_assignment, obs_confidence = obs_assignments[seq_id]\r\n exp_assignment, exp_confidence = exp_assignments[seq_id]\r\n self.assertTrue(obs_confidence >= min_confidence)\r\n if obs_assignment == exp_assignment:\r\n unverified_seq_ids.remove(seq_id)\r\n if not unverified_seq_ids:\r\n break\r\n\r\n messages = []\r\n for seq_id in unverified_seq_ids:\r\n messages.append(\r\n \"Unable to verify %s in %s trials\" % (seq_id, num_trials))\r\n messages.append(\" Expected: %s\" % exp_assignments[seq_id][0])\r\n messages.append(\" Observed: %s\" % obs_assignments[seq_id][0])\r\n messages.append(\" Confidence: %s\" % obs_assignments[seq_id][1])\r\n\r\n # make sure all taxonomic results were correct at least once\r\n self.assertFalse(unverified_seq_ids, msg='\\n'.join(messages))", "def create_result(main_test):\n 
result = Result(outputs=[DBHandler.NAME], main_test=main_test)\n result.startTestRun()\n return result", "def result(self):\n return Result(self.messages[:])", "def wait_for_keypoint_detection_result(self):\n rospy.loginfo(\"waiting for KeypointDetection result\")\n self.keypoint_detection_client.wait_for_result()\n result = self.keypoint_detection_client.get_result()\n state = self.keypoint_detection_client.get_state()\n rospy.loginfo(\"received KeypointDetection result\")\n print \"result:\\n\", result\n\n self.keypoint_detection_result = result\n\n succeeded = (state == GoalStatus.SUCCEEDED)\n\n if not succeeded:\n rospy.loginfo(\"KeypointDetection failed\")\n\n result_dict = dict()\n result_dict['result'] = result\n result_dict['output_dir'] = result.output_dir\n result_dict['state'] = state\n result_dict['succeeded'] = succeeded\n result_dict['type'] = \"mankey\"\n self._cache[\"keypoint_detection_result\"] = result_dict\n\n self.state._cache[\"keypoint_detection_result\"] = result_dict\n\n return result_dict" ]
[ "0.5186231", "0.5027672", "0.48830792", "0.4770407", "0.46957067", "0.46849295", "0.46664384", "0.46516448", "0.46486202", "0.4532336", "0.4530844", "0.4512337", "0.44765168", "0.43914053", "0.43779433", "0.43736914", "0.43657622", "0.4348417", "0.43343168", "0.43205202", "0.43147615", "0.4301758", "0.4257929", "0.42158484", "0.42074835", "0.4202224", "0.41932645", "0.41882738", "0.4184342", "0.41798034" ]
0.7242101
0
Run the 'program'. 'program' The path to the program to run. 'arguments' A list of the arguments to the program. This list must contain a first argument corresponding to 'argv[0]'. 'stdin' Content of standard input for the program. 'context' A 'Context' giving runtime parameters to the test. 'result' A 'Result' object. The outcome will be 'Result.PASS' when this method is called. The 'result' may be modified by this method to indicate outcomes other than 'Result.PASS' or to add annotations.
def RunProgram(self, program, arguments, stdin, context, result): # Construct the environment. environment = self.MakeEnvironment(context) e_stdin = stdin c = {} for pair in context.items(): c[pair[0]] = pair[1] for substitution in c.keys(): pattern = "$("+substitution.upper()+")" replacement = context[substitution] e_stdin = e_stdin.replace(pattern, replacement) basename = os.path.split(arguments[0])[-1] qm_exec = qm.executable.Filter(e_stdin, -2) try: exit_status= qm_exec.Run(arguments, environment) stdout = qm_exec.stdout stderr = qm_exec.stderr causes = [] if sys.platform != "win32": if os.WIFEXITED(exit_status): if exit_status != self.exit_code: causes.append("exit_code") result["RunProgram.exit_code"] = str(exit_status) elif os.WIFSIGNALED(exit_status): self.__cause= "Process %s terminated by signal %d." % (basename, os.WTERMSIG(exit_status)) elif os.WIFSTOPPED(exit_status): self.__cause= "Process %s stopped by signal %d." % (basename, os.WSTOPSIG(exit_status)) else: self.__cause= "Process %s terminated abnormally." % basename # Check to see that the standard error matches. if stderr: causes.append("standard error") result["RunProgram.stderr"] = "'''" + stderr + "'''" # If anything went wrong, the test failed. if causes: result.Fail("Unexpected %s." % string.join(causes, ", ")) except: result.NoteException()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def RunProgram(self, program, arguments, context, result):\n\n # Construct the environment.\n environment = self.MakeEnvironment(context)\n e_stdin = self.stdin\n c = {}\n for pair in context.items():\n c[pair[0]] = pair[1]\n for substitution in c.keys():\n pattern = \"$(\"+substitution.upper()+\")\"\n replacement = context[substitution]\n e_stdin = e_stdin.replace(pattern, replacement)\n\n basename = os.path.split(arguments[0])[-1]\n qm_exec = qm.executable.Filter(e_stdin, -2)\n\n try:\n exit_status= qm_exec.Run(arguments, environment)\n stdout = qm_exec.stdout\n stderr = qm_exec.stderr\n causes = []\n\n if sys.platform != \"win32\":\n if os.WIFEXITED(exit_status):\n if exit_status != self.exit_code:\n causes.append(\"exit_code\")\n result[\"RunProgram.exit_code\"] = str(exit_status)\n elif os.WIFSIGNALED(exit_status):\n self.__cause= \"Process %s terminated by signal %d.\" % (basename, os.WTERMSIG(exit_status))\n\n elif os.WIFSTOPPED(exit_status):\n self.__cause= \"Process %s stopped by signal %d.\" % (basename, os.WSTOPSIG(exit_status))\n\n else:\n self.__cause= \"Process %s terminated abnormally.\" % basename\n\n # Check to see if the standard output matches.\n # First strip out ISQL junk\n stdout_stripped = re.sub(\"Database:.*\\n\",\"\",stdout)\n stdout_stripped = re.sub(\"SQL>\\s*\",\"\",stdout_stripped)\n stdout_stripped = re.sub(\"CON>\\s*\",\"\",stdout_stripped)\n stdout_stripped = re.sub(\"-->\\s*\",\"\",stdout_stripped)\n stdout_stripped = self.__PerformSubstitutions(stdout_stripped)\n stdout_stripped = re.compile(\"^\\s+\",re.I+re.M).sub(\"\",stdout_stripped)\n stdout_stripped = re.compile(\"\\s+$\",re.I+re.M).sub(\"\",stdout_stripped)\n\n self.stdout_stripped = re.sub(\"Database:.*\\n\",\"\",self.stdout)\n self.stdout_stripped = re.sub(\"SQL>\\s*\",\"\",self.stdout_stripped)\n self.stdout_stripped = re.sub(\"CON>\\s*\",\"\",self.stdout_stripped)\n self.stdout_stripped = re.sub(\"-->\\s*\",\"\",self.stdout_stripped)\n self.stdout_stripped = self.__PerformSubstitutions(self.stdout_stripped)\n self.stdout_stripped = re.compile(\"^\\s+\",re.I+re.M).sub(\"\",self.stdout_stripped)\n self.stdout_stripped = re.compile(\"\\s+$\",re.I+re.M).sub(\"\",self.stdout_stripped)\n\n if stdout_stripped != self.stdout_stripped:\n causes.append(\"standard output\")\n result[\"ExecTest.stdin\"] = \"<pre>\" + e_stdin + \"</pre>\"\n result[\"ExecTest.stdout_expected\"] = \"<pre>\" + self.stdout + \"</pre>\"\n result[\"ExecTest.stdout\"] = \"<pre>\" + stdout + \"</pre>\"\n result[\"ExecTest.stdout_stripped\"] = \"<pre>\" + stdout_stripped + \"</pre>\"\n result[\"ExecTest.stdout_stripped_expected\"] = \"<pre>\" + self.stdout_stripped + \"</pre>\"\n result[\"ExecTest.stripped_diff\"] = \"<pre>\"+'\\n'.join(difflib.ndiff(stdout_stripped.splitlines(0),self.stdout_stripped.splitlines(0)))+\"</pre>\"\n # Check to see that the standard error matches.\n stderr_stripped = re.sub(\"Use CONNECT or CREATE DATABASE to specify a database.*\\n\",\"\",stderr)\n if stderr_stripped != self.stderr:\n causes.append(\"standard error\")\n result[\"ExecTest.stdin\"] = \"<pre>\" + e_stdin + \"</pre>\"\n result[\"ExecTest.stderr\"] = \"<pre>\" + stderr + \"</pre>\"\n result[\"ExecTest.expected_stderr\"] = \"<pre>\" + self.stderr + \"</pre>\"\n # If anything went wrong, the test failed.\n if causes:\n result.Fail(\"Unexpected %s.\" % string.join(causes, \", \"))\n except:\n result.NoteException()", "def run_program_from_python_test(program, args, stdin=''):\n #environ = self.__MakeEnvironment()\n # PC: fix values so they 
are strings. Needed for Windows.\n #for key in environ.iterkeys():\n #environ[key] = str(environ[key])\n # provide full path for standard tools\n program = context.environment.get('%s_path' % program, program)\n basename = os.path.split(program)[-1]\n args.insert(0,program)\n\n if self.connection_character_set:\n args.extend(['-ch',self.connection_character_set])\n script = stdin.encode(DB_CHAR_SET_NAME_TO_PYTHON_ENCODING_MAP[self.connection_character_set])\n else:\n script = stdin.encode('ascii')\n script = substitute_macros(script)\n try:\n return_code, stdout, stderr = runProgram(args,[],stdin=script)\n sys.stdout.writelines(stdout)\n sys.stderr.writelines(stderr)\n except:\n result.note_exception(cause=\"Python test: Exception raised while running external program from Python test.\")\n result[\"failing_program\"] = program\n #cleanup()", "def execute (self, program, cache=False):\n ast = None\n if cache:\n requests_cache.install_cache('demo_cache',\n allowable_methods=('GET', 'POST', ))\n else:\n requests_cache.disabled()\n\n if isinstance(program, str):\n ast = self.parse (program)\n if not ast:\n raise ValueError (f\"Unhandled type: {type(program)}\")\n for statement in ast.statements:\n logger.debug (f\"execute: {statement} type={type(statement).__name__}\")\n statement.execute (interpreter=self)\n return self.context", "def run(name, program, arguments, cache_enabled, filename):\n output = None\n if cache_enabled:\n output = get_output_from_cache(name, filename)\n\n if output is None:\n call_arguments = [program] + arguments + [filename]\n try:\n output = subprocess.check_output(\n call_arguments, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as error:\n output = error.output\n except OSError:\n return {\n filename: {\n 'error': [('Could not execute \"%s\".%sMake sure all ' +\n 'required programs are installed') %\n (' '.join(call_arguments), os.linesep)]\n }\n }\n output = output.decode('utf-8')\n if cache_enabled:\n save_output_in_cache(name, filename, output)\n return output", "def run_program(\n environment='emulator',\n block_device=None,\n dump_file=None,\n dump_base=None,\n dump_length=None,\n timeout=60,\n flush_l2=False,\n trace=False,\n executable=None):\n if not executable:\n executable = HEX_FILE\n\n if environment == 'emulator':\n args = [BIN_DIR + 'emulator']\n args += [ '-a' ] # Enable thread scheduling randomization by default\n if block_device:\n args += ['-b', block_device]\n\n if dump_file:\n args += ['-d', dump_file + ',' +\n hex(dump_base) + ',' + hex(dump_length)]\n\n args += [executable]\n return _run_test_with_timeout(args, timeout)\n elif environment == 'verilator':\n args = [BIN_DIR + 'verilator_model']\n if block_device:\n args += ['+block=' + block_device]\n\n if dump_file:\n args += ['+memdumpfile=' + dump_file,\n '+memdumpbase=' + hex(dump_base)[2:],\n '+memdumplen=' + hex(dump_length)[2:]]\n\n if flush_l2:\n args += ['+autoflushl2=1']\n\n if trace:\n args += ['+trace']\n\n args += ['+bin=' + executable]\n output = _run_test_with_timeout(args, timeout)\n if '***HALTED***' not in output:\n raise TestException(output + '\\nProgram did not halt normally')\n\n return output\n else:\n raise TestException('Unknown execution environment')", "def execute_file (self, program):\n with open (program, \"r\") as stream:\n self.execute (stream.read ())\n return self.context", "def run_program(program, args=None, **subprocess_kwargs):\n if 'shell' in subprocess_kwargs and subprocess_kwargs['shell']:\n raise ProgramError(\n \"This function is only 
for non-shell programs, \"\n \"use run_shell_command() instead.\")\n fullcmd = find_program(program)\n if not fullcmd:\n raise ProgramError(\"Program %s was not found\" % program)\n # As per subprocess, we make a complete list of prog+args\n fullcmd = [fullcmd] + (args or [])\n for stream in ['stdin', 'stdout', 'stderr']:\n subprocess_kwargs.setdefault(stream, subprocess.PIPE)\n subprocess_kwargs = alter_subprocess_kwargs_by_platform(\n **subprocess_kwargs)\n return subprocess.Popen(fullcmd, **subprocess_kwargs)", "def main(args: List[Union[str, bytes]] = sys.argv,):\n\tprogram_name, *args = args\n\targs = decode_raw_args(args, str)\n\n\tgen = Generator(*args)\n\tgen.generate_data()\n\tgen.print_return_list()", "def execute_program(args: List[str], mode: TestMode) -> None:\n args_str = ' '.join(args)\n\n def check_output(result: CompletedProcess):\n stdout = strip_ascii_codes(result.stdout.decode())\n error_string = f'Failure:\\nMode: {mode.value}\\nCommand: {args_str}\\nExit code: {result.returncode:X}\\nOutput:\\n{stdout}\\n'\n if mode == TestMode.STUB:\n if result.returncode not in (1, 2) or 'Test FAILED' not in stdout:\n raise RuntimeError(error_string)\n elif mode == TestMode.SOLUTION:\n if (result.returncode != 0 or\n \"*** You've passed ALL tests. Congratulations! ***\" not in stdout):\n raise RuntimeError(error_string)\n else:\n print('Invalid mode: {}'.format(mode))\n sys.exit(1)\n\n try:\n result = subprocess.run(args, stdout=subprocess.PIPE, timeout=300)\n check_output(result)\n except RuntimeError as e:\n print(e)\n sys.exit(1)\n except TimeoutExpired:\n print(f\"{mode} > {args_str}: TIMEOUT\")\n sys.exit(1)", "def exec(self, program, *args, cwd=os.getcwd(), **kwargs):\n if len(args) > 0:\n raise RuntimeError(\"Program arguments are not supported for real hardware devices\")\n\n assert self.platform is not None, \"TVM targets need a platform to execute programs\"\n\n if self.timeout_sec > 0:\n raise NotImplementedError\n\n ret = self.platform.run(program, self)\n return ret", "def main(cli_args=None):\n # build an arg parser\n parser = get_arg_parser()\n\n # run the parser on cli args\n args = parser.parse_args(cli_args)\n\n print(f\"Running script with arguments: {args}\")\n test_input(args.raw_training_data)\n test_input(args.raw_testing_data)\n test_output(args.train_output)\n test_output(args.test_output)", "def Run(self, cli, args):\n metrics.Loaded()\n\n tool_context = {}\n if self._parent_group:\n self._parent_group.RunGroupFilter(tool_context, args)\n\n command_instance = self._common_type(cli=cli, context=tool_context)\n\n log.debug('Running %s with %s.', self.dotted_name, args)\n resources = command_instance.Run(args)\n resources = display.Displayer(command_instance, args, resources,\n display_info=self.ai.display_info).Display()\n metrics.Ran()\n\n if command_instance.exit_code != 0:\n raise exceptions.ExitCodeNoError(exit_code=command_instance.exit_code)\n\n return resources", "def main(*arguments):\n\n args = parse_args(arguments)\n\n if args.test_suite is not None:\n test_suite = report_manager.load_test_suite_conf(args.test_suite)\n for i, test in enumerate(test_suite):\n args = parse_args(test)\n process_args_and_run(args, test_suite_iter=i)\n else:\n process_args_and_run(args)", "def main(self, argv=None):\n\n p = self.build_parser()\n\n args = p.parse_args(argv)\n\n try:\n return self.run(args) or 0\n except CommandFailed as e:\n print(str(e))\n return 1", "def run(args, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):\n\n parser = 
argparse.ArgumentParser(description = 'Validate a HXL dataset.')\n parser.add_argument(\n 'infile',\n help='HXL file to read (if omitted, use standard input).',\n nargs='?',\n type=argparse.FileType('r'),\n default=stdin\n )\n parser.add_argument(\n 'outfile',\n help='HXL file to write (if omitted, use standard output).',\n nargs='?',\n type=argparse.FileType('w'),\n default=stdout\n )\n parser.add_argument(\n '-s',\n '--schema',\n help='Schema file for validating the HXL dataset (if omitted, use the default core schema).',\n metavar='schema',\n type=argparse.FileType('r'),\n default=None\n )\n parser.add_argument(\n '-a',\n '--all',\n help='Include all rows in the output, including those without errors',\n action='store_const',\n const=True,\n default=False\n )\n args = parser.parse_args(args)\n\n source = HXLReader(args.infile)\n if args.schema:\n schema = readHXLSchema(HXLReader(args.schema), baseDir=os.path.dirname(args.schema.name))\n else:\n schema = readHXLSchema()\n filter = HXLValidateFilter(source, schema, args.all)\n writeHXL(args.outfile, filter)", "def runTool(self, filename, expected_out, args):\n\n input_path = os.path.join(self.inputs_dir, filename)\n return_value, actual_output = create_subprocess(self.executable_binary, args + [input_path] + ['--'])\n actual_output = actual_output.decode('utf-8')\n\n self.assertEqual(return_value, 0)\n self.evaluate(expected_out, actual_output, command=f'{[self.executable_binary] + args} {filename}')", "def run(self, argv):\n global console\n\n console = init_console()\n\n command, options = self.parse_options(argv)\n\n # We call out to things like the test runner, which expect to operate\n # off of sys.argv. We want to simulate that now that we've parsed\n # options. We'll restore sys.argv after the command finishes.\n old_argv = sys.argv\n sys.argv = argv\n\n try:\n return command.run(options)\n except Exception as e:\n logger.exception('Unexpected exception when running command '\n '\"%s\": %s',\n command.name, e)\n return 1\n finally:\n sys.argv = old_argv", "def run_program(program):\n halt = False\n instruction_pointer = 0\n\n while not halt:\n halt = process_instruction(instruction_pointer, program)\n instruction_pointer += STEP_SIZE\n\n return program", "def test_with_command_line_arguments(self, arguments):\n fixed_arguments = self.get_argument_string(arguments)\n result = self.run(\n arguments=fixed_arguments,\n timeout=self.full_timeout,\n use_fresh_profile=True)\n return self._handle_test_result(result)", "def run(argv=None):\n program_conf = get_program_conf()\n\n # Test usage of utils... 
do something with ddl\n print(get_ddl_list('dev')[2])\n\n # Parse args\n program_args = program_conf[PROGRAM_ARGS_CONF]\n pipeline_args = program_conf[PIPELINE_ARGS_CONF]\n pipeline_options = PipelineOptions(pipeline_args)\n pipeline_options.view_as(SetupOptions).save_main_session = True\n\n # Get schema path key for validation\n gcs_client = storage.Client()\n etl_config_dict = get_gcs_json_as_dict(program_args[ETL_CONFIG_PATH_CONF], gcs_client)\n schema_dict = get_gcs_json_as_dict(program_args[SCHEMA_PATH_KEY], gcs_client)\n input_file_path = program_args[INPUT_CONF]\n\n with beam.Pipeline(options=pipeline_options) as pipeline:\n validated_records = (\n pipeline \n | 'read' >> ReadFromText(input_file_path)\n | 'validate' >> beam.ParDo(ValidateRecord(schema_dict, input_file_path, etl_config_dict))\n | 'filter_data' >> beam.Filter(lambda x: x[1] and x[2] and x[3])\n | 'recover_data' >> beam.Map(lambda x: x[0])\n # | beam.Map(print)\n )\n if IS_VALID_FILE:\n validated_records | 'write_success' >> WriteToText(program_args[OUTPUT_CONF]) \n else:\n validated_records | 'write_reject' >> WriteToText(program_args[REJECT_CONF])", "def run():\n params = parseParams()\n args = []\n kwargs = {}\n if params.source != None:\n kwargs['source'] = os.path.abspath(params.source)\n else:\n kwargs['source'] = sys.stdin\n kwargs['output'] = os.path.abspath(params.output)\n kwargs['fmt'] = params.fmt\n kwargs['syntax'] = params.syntax\n return parse.parse(*args, **kwargs) # pylint: disable=W0142", "def main(argv=sys.argv):\n log = _setup_logging()\n log.info(\"Starting {f} version {v} dataset manipulator\".format(\n f=__file__, v=__VERSION__))\n parser = get_parser()\n args = parser.parse_args()\n if args.debug:\n log.setLevel(logging.DEBUG)\n return args.func(args)\n #return main_runner_default(argv[1:], get_parser(), log)", "def Run(self, context, result):\n\n # Was the program not specified?\n\n self.program = context[\"isql_path\"]\n\n if context.has_key(\"database_path\"):\n database = context[\"database_path\"]\n else:\n database = \"\"\n self.RunProgram(self.program,\n\t\t\t[ self.program , database ,\n \"-user\", context[\"user_name\"], \"-password\", context[\"user_password\"] ],\n context, result)", "def test_run(prog, correct, inp=None):\n print(f\"Testing [{prog2str(prog)}] (inp={inp})...\", end=\"\")\n out = list()\n run_prog(prog, inp, out)\n if (out and out == correct) or (not out and prog == correct):\n print(\" OK!\")\n else:\n fail_str = prog2str(out if out else prog)\n print(f\" Failed: [{fail_str}] != [{prog2str(correct)}]!\")", "def main():\n program_name = os.path.basename(sys.argv[0])\n\n _initialize_debugging(program_name)\n _handle_signals()\n _process_environment_variables()\n arguments = _process_command_line()\n\n exit_status = 1 # no match\n if arguments:\n for filename in arguments:\n if os.path.isfile(filename):\n if not parameters[\"No formatting\"]:\n print(filename + \":\")\n for _, printable_string in strings.strings(filename):\n if what_in_string(printable_string):\n exit_status = 0 # match found\n if parameters[\"First match only\"]:\n break\n else:\n logging.error('\"%s\": No such file or directory', filename)\n elif parameters[\"Stdin unused\"]:\n logging.critical(\"At least one filename expected\")\n else:\n for _, printable_string in strings.strings():\n if what_in_string(printable_string):\n exit_status = 0 # match found\n if parameters[\"First match only\"]:\n break\n\n sys.exit(exit_status)", "def main():\n\n parser = get_populated_argparser()\n cli_dict = 
generate_cli_dictionary()\n\n # Parse arguments.\n if len(sys.argv) > 1:\n args = parser.parse_args(sys.argv[1:])\n\n # Validate arguments.\n args = cli_dict[args.tool].validate_args(args)\n\n # Run the tool.\n cli_dict[args.tool].run(args)\n\n else:\n\n parser.print_help()", "def run(self, inp=\"\"):\n result = self.run_process(inp)\n if result.returncode == 0:\n outcome = self.PASS\n elif result.returncode < 0:\n outcome = self.FAIL\n else:\n outcome = self.UNRESOLVED\n return (result, outcome)", "def run(ctx, problem,\n hparam_set: str = None,\n progress: bool = False,\n cuda: bool = False,\n device: str = None,\n log_dir: str = None,\n checkpoint_prefix: str = 'checkpoint',\n **kwargs):\n\n # Read custom transformation arbitrary CLI key value pairs.\n extra_hparams = ctx.obj.get('extra_hparams')\n kwargs.pop('extra_hparams')\n\n do_run(problem,\n hparam_set=hparam_set,\n extra_hparams=extra_hparams,\n progress=progress,\n cuda=cuda,\n device=device,\n log_dir=log_dir,\n checkpoint_prefix=checkpoint_prefix,\n **kwargs)", "def main(cls, *args, **kwargs):\n assert not (bool(args) and bool(kwargs))\n if args:\n return cls._run_args(args)\n elif kwargs:\n return cls._run_kwargs(kwargs)\n else:\n return cls._run_args(None)", "def runScript(self, script):\n data = FilePath(__file__).parent().child('data')\n sample_file = data.child('1.input.ofx')\n\n args = (script, [sample_file.path])\n log.msg('executing %r' % (args,))\n out, err, rc = yield utils.getProcessOutputAndValue(*args, env=None)\n log.msg('rc: %r' % (rc,))\n log.msg('out: %r' % (out,))\n log.msg('err: %r' % (err,))\n if rc != 0:\n self.fail(\"Failed: %s\\n\\n%s\" % (out, err))" ]
[ "0.69363123", "0.63061756", "0.6056023", "0.5911862", "0.5623276", "0.55638814", "0.5554909", "0.5461146", "0.5455719", "0.545486", "0.5423107", "0.54222107", "0.5385501", "0.53396916", "0.5325689", "0.5288438", "0.52590066", "0.52466375", "0.5239515", "0.52368325", "0.5236189", "0.51934296", "0.5183509", "0.5182613", "0.51675504", "0.5167169", "0.5130491", "0.51245993", "0.51092076", "0.50862086" ]
0.7962142
0
Restore a database from a backup file. 'database' A database specification. 'backupfile' A backup file name. 'arguments' A list of the arguments to the GBAK without backup file name and database location. 'result' A 'Result' object. The outcome will be 'Result.PASS' when this method is called. The 'result' may be modified by this method to indicate outcomes other than 'Result.PASS' or to add annotations.
def RestoreDatabase(self, database, backupfile, arguments, result): self.RunProgram("\""+self.__context["gbak_path"]+"\"", [ self.__context["gbak_path"] ] + [ "-C ", backupfile ] + arguments + [ database ], "", self.__context, result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def restore_backup(self):\n print \"Restoring backup for database: %s\" % self.database['NAME']\n # Fetch the latest backup if filepath not specified\n if not self.filepath:\n print \" Finding latest backup\"\n filepaths = self.storage.list_directory()\n filepaths = self.dbcommands.filter_filepaths(filepaths, self.servername)\n if not filepaths:\n raise CommandError(\"No backup files found in: %s\" % self.storage.backup_dir())\n self.filepath = filepaths[-1]\n # Restore the specified filepath backup\n print \" Restoring: %s\" % self.filepath\n backupfile = self.storage.read_file(self.filepath)\n print \" Restore tempfile created: %s\" % utils.handle_size(backupfile)\n self.dbcommands.run_restore_commands(backupfile)", "def restore(args):\n password = os.environ.get('CH_PASSWORD', '')\n client = Client(host=args.host, port=args.port, secure=args.secure, user=args.user, password=password, database=args.db)\n\n if not os.path.exists(args.dir):\n print(f'Backup dir \"{args.dir}\" does not exist')\n sys.exit(-1)\n\n measurements = args.measurements\n if measurements:\n measurements = measurements.split(',')\n\n ignore_measurements = args.ignore_measurements\n if ignore_measurements:\n ignore_measurements = ignore_measurements.split(',')\n\n if not measurements:\n if args.gzip:\n files = [f[:-3] for f in os.listdir(args.dir) if os.path.isfile(args.dir+'/'+f) and f.endswith('.gz')]\n else:\n files = [f for f in os.listdir(args.dir) if os.path.isfile(args.dir+'/'+f) and not f.endswith('.gz')]\n\n files.sort()\n measurements = filter_measurements(files, args.from_measurement, ignore_measurements)\n\n if not measurements:\n print('Nothing to restore. If backup is gzipped, use --gzip option.')\n sys.exit(-1)\n\n print('Files:')\n print(measurements)\n print()\n\n if not args.force and input(f'> Confirm restore into \"{args.db}\" db? [yes/no] ') != 'yes':\n sys.exit(0)\n\n print()\n # Get list of columns for each table.\n data = client.execute(f\"SELECT table, name, type FROM system.columns WHERE database='{args.db}'\")\n columns = {}\n for x in data:\n if x[0] not in columns:\n columns[x[0]] = {}\n\n columns[x[0]][x[1]] = x[2]\n\n # Iterate over files.\n for m in measurements:\n print(f'Loading {m}... 
')\n if args.mixed_files:\n print()\n\n if not args.mixed_files and m not in columns:\n print('Skipping because the corresponding table does not exist.')\n continue\n\n lines = []\n line_count = 0\n if args.gzip:\n f = gzip.open(f'{args.dir}/{m}.gz', 'rt')\n else:\n f = open(f'{args.dir}/{m}', 'r')\n\n for i in f:\n if len(lines) == args.insert_size:\n write_records(client, columns, lines, args)\n lines = []\n line_count += args.insert_size\n\n lines.append(i)\n\n if lines:\n write_records(client, columns, lines, args)\n\n print('- Total:', line_count+len(lines))\n f.close()", "def restore_backup(self, backup, name, flavor, volume):\n return self._manager.restore_backup(backup, name, flavor, volume)", "def restore_backup(self, backup, name, flavor, volume):\n flavor_ref = self.api._get_flavor_ref(flavor)\n body = {\"instance\": {\n \"name\": name,\n \"flavorRef\": flavor_ref,\n \"volume\": {\"size\": volume},\n \"restorePoint\": {\"backupRef\": utils.get_id(backup)},\n }}\n uri = \"/%s\" % self.uri_base\n resp, resp_body = self.api.method_post(uri, body=body)\n return CloudDatabaseInstance(self, resp_body.get(\"instance\", {}))", "def restore_backup(self, backup_id, volume_id=None):\n aname = \"cinder_v%s.restore_backup\" % self.version\n with atomic.ActionTimer(self, aname):\n restore = self._get_client().restores.restore(backup_id, volume_id)\n restored_volume = self._get_client().volumes.get(restore.volume_id)\n return self._wait_available_volume(restored_volume)", "def restoreBackup(self, filename, warnMissing = True):\n if (os.path.isfile(filename + '.bak')):\n with open(filename, 'wb') as oldf:\n with open(filename + '.bak', 'rb') as bakf:\n oldf.write(bakf.read())\n print(filename + \" reverted\")\n else:\n if ((not self.useDCX) and (not \"FRPG_SfxBnd\" in filename) and warnMissing):\n print(\"Failed to restore \" + filename + \", \" + filename + \".bak not found.\")", "def restore_db(server, user, databases, old=False, port=None, repair=False):\n if user is None or databases[0] == '':\n print(\"!! Invalid syntax\\n\")\n print(\"Usage:\")\n print(\"\\tdkey [--backupold] <-D|--restoredb> SERVER USER DB[,DB2,DB3,...]\")\n print(\"Examples:\")\n print(\"\\tdkey --restoredb res140 userna5 userna5_wp1\")\n print(\"\\tdkey -D biz200 userna5 userna5_db1,userna5_db2,userna5_db3\\n\")\n sys.exit(181)\n\n dblist = databases\n\n # determine backup node\n btype = None\n if re.match(r'^vps', server, re.I):\n try:\n veid = re.match(r'vps([0-9]+)', server, re.I).group(1)\n except:\n print(\"!! Unable to parse VEID from server name\")\n return\n vpnode = find_vps(veid, retval=True)\n banode = find_backup_node(vpnode, retval=True, veid=veid)\n btype = 'vps'\n elif re.match(r'^(e|w)hub([0-9]+)', server):\n banode = find_backup_hub(server, retval=True)\n btype = 'shared'\n else:\n banode = find_backup(server, retval=True)\n btype = 'shared'\n\n try:\n baid = str(1800 + int(re.search(r'([0-9]{1,2})$', banode).group(1)))\n except:\n print(\"!! Unable to parse ID from backup node\")\n return\n\n # establish connection to backup node\n print(\">> Connecting to %s...\" % (banode))\n msh = MoonShell(banode, username=udata.userauth['user'])\n\n # switch to container\n print(\">> Entering container %s...\" % (baid))\n msh.run(\"vzctl enter %s\" % (baid))\n\n # get path to server backups\n if btype == 'vps':\n vzbase = msh.run(\"echo /mnt/m*/%s*/%s\" % (vpnode, veid)).split()[0]\n if vzbase.find('*') >= 0:\n print(\"!! 
Backups do not exist on backup node for this VPS :(\")\n return\n backup_tstamp = msh.run(\"stat --format=%%y %s/backed-up-*\" % (vzbase))\n print(\"-- VPS Backup Timestamp: %s\" % (backup_tstamp))\n # determine base (HA and non-HA have different paths)\n if re.search(r'cannot stat', msh.run(\"stat %s/fs/root\" % (vzbase)), re.I|re.M):\n # HA path\n bakbase = vzbase\n else:\n # non-HA path (fs/root)\n bakbase = vzbase + '/fs/root'\n datadir = bakbase + \"/var/lib/mysql\"\n else:\n bakbase = msh.run(\"echo /mnt/m*/%s*\" % (server)).split()[0]\n if old:\n datadir = bakbase + \"/var/lib/mysql_old\"\n else:\n datadir = bakbase + \"/var/lib/mysql\"\n cnfpath = bakbase + \"/root/.my.cnf\"\n print(\"-- Backup basedir: %s\" % (bakbase))\n\n # check if backups are stored on SSD mount\n ssdbase = msh.run(\"stat /mnt/mysql/%s/mysql\" % (server))\n if not old and re.search(r'Device', ssdbase, re.I|re.M):\n datadir = \"/mnt/mysql/%s/mysql\" % (server)\n\n print(\"-- Datadir : %s\" % (datadir))\n print(\"-- my.cnf path : %s\" % (cnfpath))\n\n msh.run(\"cd %s\" % (datadir))\n msh.run(\"\\\\mv -f ib_logfile0{,.old}\")\n msh.run(\"\\\\mv -f ib_logfile1{,.old}\")\n msh.run(\"\\\\cp -f %s /root/\" % (cnfpath))\n msh.run(\"cd /root/\")\n\n # generate random socket & port\n mysqlsock = '/mnt/mysql-%s.sock' % (''.join(random.choice(string.hexdigits) for i in range(8)))\n if port is None:\n mysqlport = str(random.randrange(MYSQLPORT_MIN, MYSQLPORT_MAX))\n else:\n mysqlport = str(mysqlport)\n\n # get list of mySQL releases available, and choose the latest version\n myrels = msh.run(\"echo mysql-*\")\n myver = sorted(filter(lambda x: re.match(r'^mysql\\-[0-9]{1,2}\\.[0-9]{1,2}\\.[0-9]{1,2}\\-.+[0-9]$', x), myrels.split()), reverse=True)[0]\n mysqldir = \"/root/\"+myver\n print(\"-- mySQL release : %s\" % (mysqldir))\n print(\"-- mySQL port : %s\" % (mysqlport))\n print(\"-- mySQL socket : %s\" % (mysqlsock))\n\n # open a screen\n msh.run(\"cd %s\" % (mysqldir))\n myscr = msh.screen_open()\n msh.send(\"%s/bin/mysqld --user=root --port=%s --datadir=%s --lc-messages-dir=%s/share/english --socket=%s\\n\" % (mysqldir, mysqlport, datadir, mysqldir, mysqlsock))\n\n # wait for mySQL to start...\n sys.stdout.write(\"Waiting for mySQL to start on backup node...\")\n sys.stdout.flush()\n startok = False\n fullmsg = ''\n while True:\n if msh.ready():\n resp = msh.recv()\n fullmsg += resp\n if re.search(r'ready for connection', resp, re.I|re.M):\n sys.stdout.write(' OK\\n')\n startok = True\n break\n elif re.search(r'(shutdown complete|killed|exiting)', resp, re.I|re.M):\n sys.stdout.write(' FAILED\\n')\n break\n else:\n sys.stdout.write('.')\n sys.stdout.flush()\n time.sleep(2.0)\n\n if not startok:\n msh.screen_terminate()\n msh.exit()\n msh.exit()\n parse_mysql_errors(fullmsg)\n return\n\n # detach screen\n msh.screen_detach()\n\n # check if we should run a repair before dumping\n if repair:\n mchk_extra = '-r'\n else:\n mchk_extra = ''\n\n # increase timeout for dumping DBs\n msh._channel.settimeout(300)\n\n for tdb in dblist:\n print(\">> Checking database [%s]...\" % (tdb))\n chkres = msh.run(\"%s/bin/mysqlcheck %s -v %s --port=%s --socket=%s\" % (mysqldir, mchk_extra, tdb, mysqlport, mysqlsock))\n print('\\n'.join(chkres.splitlines()[1:]))\n if len(chkres) > 200:\n print(\">> Dumping database [%s]...\" % (tdb))\n zout = msh.run(\"%s/bin/mysqldump -v %s --port=%s --socket=%s --result-file=%s/home/%s/%s.sql\" % (mysqldir, tdb, mysqlport, mysqlsock, bakbase, user, tdb))\n print('\\n'.join(zout.splitlines()[1:]))\n if btype 
== 'vps':\n print(\"** Dumped to %s/home/%s/%s.sql\" % (bakbase, user, tdb))\n else:\n print(\"** Dumped to /bkmnt/home/%s/%s.sql\" % (user, tdb))\n else:\n print(\"!! Skipping, database has no tables :(\")\n\n # reattach to screen, send ^\\, then wait for shutdown, terminate screen\n msh.screen_attach(myscr)\n msh.send('\\x1c')\n msh.recv_to_prompt()\n msh.screen_terminate()\n\n # exit from container, exit from node\n msh.exit()\n msh.exit()\n\n print(\"** Disconnected from remote host\")\n return", "def restore(self, backup_id):\n request = Request(\n method='post',\n endpoint='/_admin/backup/restore',\n data={'id': backup_id}\n )\n\n def response_handler(resp):\n if resp.is_success:\n return format_backup_restore(resp.body['result'])\n raise BackupRestoreError(resp, request)\n\n return self._execute(request, response_handler)", "def restore(\n context, backup, user=get_local_user(), remote=False, instance=None, stack=None,\n):\n command = f\"exec postgres pkill -f {PROJECT}\"\n run_command(context, user, remote, instance, stack, command)\n\n command = f\"run --rm postgres restore {backup}\"\n run_command(context, user, remote, instance, stack, command)", "async def restore_certificate_backup(self, backup: bytes, **kwargs) -> KeyVaultCertificate:\n bundle = await self._client.restore_certificate(\n vault_base_url=self.vault_url,\n parameters=self._models.CertificateRestoreParameters(certificate_bundle_backup=backup),\n **kwargs\n )\n return KeyVaultCertificate._from_certificate_bundle(certificate_bundle=bundle)", "def restore_project(filename):\n _require_environment()\n\n # Confirms action\n if not console.confirm('ATTENTION! This will destroy current database! Confirm?', default=False):\n return\n\n # Unless explicitly provided, uses local Django settings to\n # extract username/password to access remote database\n database = env.project.get('database', None)\n if not database:\n django.settings_module(env.project['settings'])\n database = django_settings.DATABASES['default']\n\n # Remote side\n with prefix(_django_prefix()):\n with cd(_django_project_dir()):\n # Uploads tar file\n tarfile = os.path.basename(filename)\n basename = tarfile[:tarfile.index('.tar.gz')]\n if console.confirm('Upload backup?'):\n put(filename, '../backup/%s' % tarfile)\n\n # Drop and recreate current database\n _drop_database_mysql()\n _setup_project_mysql()\n\n # Restore MySQL\n # To avoid silly mistakes, instead of using project's user & password, uses root's\n with cd('../'):\n run('tar -xzvf backup/%s' % tarfile)\n run('mysql -u root -p %s < backup/%s/%s.sql' % (\n #database['USER'],\n #database['PASSWORD'],\n database['NAME'],\n basename,\n env.project['project'],\n ))\n\n # Restore extra files\n extra_backup_files = env.project.get('extra_backup_files', [])\n for file in extra_backup_files:\n run('cp -R ../backup/%s/%s ./%s' % (basename, os.path.basename(file), os.path.dirname(file)))\n\n # Removes uncompressed files, but leaves .tar.gz\n run('rm -rf ../backup/%s' % basename)", "def restore_db(dump_file):\n drop_db()\n _init_db()\n c = ppc.app().config['PUBLICPRIZE']['DATABASE']\n subprocess.check_call([\n 'pg_restore',\n '--dbname=' + c['name'],\n '--user=' + c['user'],\n dump_file,\n ])", "def restore(ctx, destination, filesystem, backup_time):\n config_path = ctx.obj['config_path']\n\n config = Config(config_path)\n job = config.jobs.get(filesystem)\n\n if job is None:\n print('Filesystem does not exist.')\n sys.exit(1)\n\n job.restore(backup_time, destination)\n\n print('Restore successful.')", "def 
RestoreFromBackup(self, request, global_params=None):\n config = self.GetMethodConfig('RestoreFromBackup')\n return self._RunMethod(\n config, request, global_params=global_params)", "def task_restore(self, localfile, restoreDb=True, withAttachments=True):\n restoreDb = str(restoreDb).lower() in ('true', '1', 'yes', 'ok', 'y')\n\n if restoreDb:\n msg = (\n 'All existing files present in the backup will be overwritten and\\n'\n 'the database dropped and recreated.'\n )\n else:\n msg = (\n 'All existing files present in the backup will be overwritten\\n'\n '(the database will not be touched).'\n )\n\n print('')\n if confirm(msg):\n # TODO: Ask for confirmation here\n if restoreDb:\n postgres.dropDb('trac')\n postgres.createDb('trac', 'trac')\n\n with settings(user=self.serviceUser):\n with utils.tempfile() as temp:\n files = {\n 'db.dump': temp,\n }\n\n if withAttachments is True:\n #files['attachments'] = 'attachments'\n files['trac-attachments'] = 'config/trac-env/files/attachments'\n\n archive.restore(files, localfile)\n if restoreDb:\n postgres.restoreFromPath('trac', temp)", "def restore(self, dest: str, remove_existing: bool = False):\n if os.path.isdir(dest):\n dest = os.path.join(dest, \"lightningd.sqlite3\")\n if os.path.exists(dest):\n if not remove_existing:\n raise ValueError(\n \"Destination for backup restore exists: {dest}\".format(\n dest=dest\n )\n )\n os.unlink(dest)\n\n self.db = self._db_open(dest)\n for c in tqdm(self.stream_changes(), total=self.version_count):\n if c.snapshot is not None:\n self._restore_snapshot(c.snapshot, dest)\n if c.transaction is not None:\n self._restore_transaction(c.transaction)\n self.db.commit()", "def command(database, filename):\n\n click.secho(\n \"Backing up the database '{database}' on host '{host}' to file '{filename}'...\".format(\n database=settings.DATABASES[database]['NAME'],\n host=settings.DATABASES[database]['HOST'],\n filename=filename,\n )\n )\n # Make sure the backup path exists\n backup_path = get_backup_path()\n if not os.path.exists(backup_path):\n os.makedirs(backup_path)\n\n os.environ[\"PGPASSWORD\"] = settings.DATABASES[database]['PASSWORD']\n os.system(\n 'pg_dump -Fc -c -x -h {host} -U {username} --file={filename} {database}'.format(\n host=settings.DATABASES[database]['HOST'],\n username=settings.DATABASES[database]['USER'],\n database=settings.DATABASES[database]['NAME'],\n filename=filename,\n )\n )\n os.environ[\"PGPASSWORD\"] = ''", "def backup_database():\n logger.info(\"start database_backup\")\n management.call_command('dbbackup', compress=True)\n logger.info(\"end database_backup\")", "def restore_vm_backup(self, sVmUuid, sBackupUuid, sTargetHost, nTargetPort, sTargetSessionId, sTargetVmHomePath = '', sTargetVmName = '', restore_flags = consts.PVMSL_LOW_SECURITY, reserved_flags = 0, force_operation = True):\n\t\treturn Job(SDK.PrlSrv_RestoreVmBackup(self.handle, sVmUuid, sBackupUuid, sTargetHost, nTargetPort, sTargetSessionId, sTargetVmHomePath, sTargetVmName, restore_flags, reserved_flags, force_operation)[0])", "def test_backup_cluster_restore_negative_args(self):\n remote_client = RemoteMachineShellConnection(self.backupset.backup_host)\n self.backup_create()\n cmd_to_test = self.input.param(\"command\", \"backup\")\n if cmd_to_test == \"restore\":\n cmd = cmd_to_test + \" --archive {0} --repo {1} --host http://{2}:{3} --username {4} \\\n --password {5}\".format(self.backupset.directory,\n self.backupset.name,\n self.backupset.cluster_host.ip,\n self.backupset.cluster_host.port,\n 
self.backupset.cluster_host_username,\n self.backupset.cluster_host_password)\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n if \"7.0.1\" in self.cb_version:\n self.assertIn(\"Error restoring cluster: Backup backup doesn't contain any backups\", output[-1])\n else:\n self.assertIn(\"Error restoring cluster: Repository 'backup' doesn't contain any backups\", output[-1])\n self.backup_cluster()\n cmd = cmd_to_test\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n cmd_test = cmd_to_test\n if cmd_to_test.startswith('\"') and cmd_to_test.endswith('\"'):\n cmd_test = cmd_to_test[1:-1]\n self.assertEqual(output[0], \"cbbackupmgr {} [<args>]\".format(cmd_test))\n cmd = cmd_to_test + \" --archive\"\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertEqual(output[0], \"Expected argument for option: --archive\", \"Expected error message not thrown\")\n cmd = cmd_to_test + \" --archive xyz -c http://localhost:8091 -u Administrator -p password -r aa\"\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertTrue(self._check_output(\"archive '{0}xyz' does not exist\".format(self.root_path), output))\n cmd = cmd_to_test + \" --archive {0} -c http://localhost:8091 -u Administrator -p password\".format(\n self.backupset.directory)\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertEqual(output[0], \"Flag required, but not specified: -r/--repo\", \"Expected error message not thrown\")\n cmd = cmd_to_test + \" --archive {0} --repo\".format(self.backupset.directory)\n command = \"{0}/cbbackupmgr {1} -c http://localhost:8091 -u Administrator -p password -r\".format(\n self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertEqual(output[0], \"Expected argument for option: --repo\", \"Expected error message not thrown\")\n cmd = cmd_to_test + \" --archive {0} --repo {1} -u Administrator -p password\".format(self.backupset.directory,\n self.backupset.name)\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertEqual(output[0], \"Flag required, but not specified: -c/--cluster\",\n \"Expected error message not thrown\")\n cmd = cmd_to_test + \" --archive {0} --repo {1} -c -u Administrator -p password -r repo\".format(\n self.backupset.directory, self.backupset.name)\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertEqual(output[0], \"Expected argument for option: -c\", \"Expected error message not thrown\")\n cmd = cmd_to_test + \" --archive {0} --repo {1} -c http://{2}:{3}\".format(self.backupset.directory,\n self.backupset.name,\n 
self.backupset.cluster_host.ip,\n self.backupset.cluster_host.port)\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertIn(\"cluster credentials required, expected --username/--password or --client-cert/--client-key\", output[0],\n \"Expected error message not thrown\")\n cmd = cmd_to_test + \" --archive {0} --repo {1} --cluster http://{2}:{3} \\\n --username\".format(self.backupset.directory,\n self.backupset.name,\n self.backupset.cluster_host.ip,\n self.backupset.cluster_host.port)\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertEqual(output[0], \"Expected argument for option: --username\", \"Expected error message not thrown\")\n cmd = cmd_to_test + \" --archive {0} --repo {1} --cluster http://{2}:{3} \\\n --username {4}\".format(self.backupset.directory,\n self.backupset.name,\n self.backupset.cluster_host.ip,\n self.backupset.cluster_host.port,\n self.backupset.cluster_host_username)\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertIn(\"the --username/--password flags must be supplied together\", output[0],\n \"Expected error message not thrown\")\n cmd = cmd_to_test + \" --archive {0} --repo abc --cluster http://{1}:{2} --username {3} \\\n --password {4}\".format(self.backupset.directory,\n self.backupset.cluster_host.ip,\n self.backupset.cluster_host.port,\n self.backupset.cluster_host_username,\n self.backupset.cluster_host_password)\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n part_message = \"backing up\"\n if cmd_to_test.startswith('\"') and cmd_to_test.endswith('\"'):\n cmd_test = cmd_to_test[1:-1]\n if cmd_test == \"restore\":\n part_message = 'restoring'\n self.assertTrue(\"Error {0} cluster: Backup Repository `abc` not found\"\\\n .format(part_message) in output[-1],\n \"Expected error message not thrown. 
Actual output %s \" % output[-1])\n cmd = cmd_to_test + \" --archive {0} --repo {1} --cluster abc --username {2} \\\n --password {3}\".format(self.backupset.directory,\n self.backupset.name,\n self.backupset.cluster_host_username,\n self.backupset.cluster_host_password)\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertIn(f\"Error {part_message} cluster: failed to bootstrap client: failed to connect to any host(s) from the connection string\", output[-1])\n cmd = cmd_to_test + \" --archive {0} --repo {1} --cluster http://{2}:{3} --username abc \\\n --password {4}\".format(self.backupset.directory,\n self.backupset.name,\n self.backupset.cluster_host.ip,\n self.backupset.cluster_host.port,\n self.backupset.cluster_host_password)\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertTrue(\"check username and password\" in output[-1], \"Expected error message not thrown\")\n cmd = cmd_to_test + \" --archive {0} --repo {1} --cluster http://{2}:{3} --username {4} \\\n --password abc\".format(self.backupset.directory,\n self.backupset.name,\n self.backupset.cluster_host.ip,\n self.backupset.cluster_host.port,\n self.backupset.cluster_host_username)\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n remote_client.disconnect()\n self.assertTrue(\"check username and password\" in output[-1], \"Expected error message not thrown\")", "def restore_cluster(ctx, zone, db_instance, from_zone=None, from_db_instance=None, backup_folder=None, target_time=None):\n\n if from_zone == None:\n from_zone = zone\n if from_db_instance == None:\n from_db_instance = db_instance\n if backup_folder == None:\n get_env('AWS_SECRET_ACCESS_KEY', 'to list the backup buckets at AWS S3.')\n get_env('AWS_ACCESS_KEY_ID', 'to list the backup buckets at AWS S3.')\n get_env('AWS_REGION', 'to list the backup buckets at AWS S3.')\n print(\"Available values for --backup-folder :\\n\")\n res = ctx.run(\"aws s3 ls \" + backup_bucket_name(from_zone, from_db_instance), pty=True, hide=\"stdout\")\n for line in res.stdout.splitlines():\n print(re.search(\"PRE ([^ /]+)\", line).group(1))\n else:\n recover_from = \"{}/{}\".format(backup_bucket_name(from_zone, from_db_instance), backup_folder)\n print(\"\"\"\n Starting recovery\n \"\"\")\n more_vars = {'recover_from': recover_from}\n if target_time:\n more_vars['recovery_target_time'] = '\"{}\"'.format(target_time) # need quoting due to space char\n\n ctx.run(init_pg_servers_play_run(zone, db_instance, more_vars=more_vars), pty=True, echo=True)", "def restore(self, dbname, filename, node=None):\n if not node:\n node = self\n\n path = os.path.join(node.base_dir, filename)\n self.psql(dbname, filename=path)", "def test_restore_backup():", "def backup_database():\n db_path = os.path.join(config.cum_dir, 'cum.db')\n backup_path = os.path.join(config.cum_dir, 'cum.db.bak')\n copyfile(db_path, backup_path)", "def Run(self, args):\n sql = self.context['sql']\n instance_id = util.GetInstanceIdWithoutProject(args.instance)\n project_id = util.GetProjectId(args.instance)\n # TODO(user): as we deprecate P:I args, simplify the call to .Parse().\n instance_ref = resources.Parse(\n instance_id, 
collection='sql.instances',\n params={'project': project_id})\n due_time = args.due_time\n instance = self.command.ParentGroup().ParentGroup().instances.get(\n instance=instance_ref.instance)\n # At this point we support only one backup-config. So, we just use that id.\n backup_config = instance['settings']['backupConfiguration'][0]['id']\n request = sql.instances().restoreBackup(\n project=instance_ref.project, instance=instance_ref.instance,\n backupConfiguration=backup_config, dueTime=due_time)\n try:\n result = request.execute()\n operations = self.command.ParentGroup().ParentGroup().operations()\n operation = operations.get(instance=str(instance_ref),\n operation=result['operation'])\n return operation\n except errors.HttpError as error:\n raise exceptions.HttpException(util.GetError(error))\n except errors.Error as error:\n raise exceptions.ToolException(error)", "def backup_database(db_host=None, db_name=None, cfg='project'):\n data = __salt__['mc_project.get_configuration'](cfg)\n db = data['data']['django_settings']['DATABASES']['default']\n if not db_host:\n db_host = db['HOST']\n if not db_name:\n db_name = db['NAME']\n dump_filename = '/tmp/{0}-{1}.dump'.format(\n db_name,\n datetime.now().strftime('%Y-%m-%d-%H-%M'))\n script = BACKUP.format(**locals())\n script += \"exit $?\\n\"\n ret = run(host=db_host, script=script)\n if ret['retcode']:\n pprint(ret)\n raise Exception('dump failed')\n return dump_filename", "def __restoreBackup(self):\n pass #FIXME!!!", "def test_backup_restore_with_optional_flags(self):\n self.log.info(\"Load 1st batch docs\")\n create_gen1 = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n self._load_all_buckets(self.master, create_gen1, \"create\", 0)\n self.backup_create()\n verify_data = True\n output, error = self.backup_cluster()\n if self.backupset.secure_conn:\n if self.backupset.bk_no_cert:\n if self._check_output(\"Backup completed successfully\", output):\n self.fail(\"Taking cluster backup failed.\")\n elif self._check_output(\"Error\", output):\n verify_data = False\n else:\n if not self._check_output(\"Backup completed successfully\", output):\n self.fail(\"Taking cluster backup failed.\")\n\n if verify_data:\n self.validate_backup_data(self.backupset.backup_host,\n self.servers[:self.nodes_init],\n \"ent-backup\", False, False, \"memory\",\n self.num_items, None)\n if self.do_restore:\n self.log.info(\"Restore with secure connection\")\n self.backup_restore()", "def backup(backupName, full, verify, verifyIncrementally = False, doTheBackup = True):\n testRestoreDir = localenv.backups.testRestoreDir\n backupDetails = localenv.backups.backups[backupName]\n backupMap = getBackupMap(backupName)\n BackupOperations.doBackup (backupDetails.source, backupMap, testRestoreDir, full = full, \n verify = verify, verifyIncrementally = verifyIncrementally, \n doTheBackup = doTheBackup, \n recordTrigger = localenv.backups.recordTrigger)", "def test_backup_restore_with_alerts(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n rest = RestConnection(self.backupset.cluster_host)\n rest.set_alerts_settings('couchbase@localhost', 'root@localhost', 'user', 'pwd')\n self.backup_create()\n self.backup_cluster_validate()\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")" ]
[ "0.64738756", "0.6441385", "0.6043816", "0.59941", "0.59136015", "0.5861948", "0.5844177", "0.5674559", "0.5635281", "0.5470779", "0.54675585", "0.52948487", "0.5271796", "0.52364796", "0.51926756", "0.5166014", "0.5152477", "0.5145427", "0.508932", "0.5002277", "0.49899507", "0.4923746", "0.491434", "0.49017602", "0.49003208", "0.48266587", "0.4771187", "0.47453952", "0.47377405", "0.47357678" ]
0.8807153
0
Run an ISQL script. 'database' A database specification. 'script' An ISQL script. 'arguments' A list of the arguments to the ISQL without database location. 'result' A 'Result' object. The outcome will be 'Result.PASS' when this method is called. The 'result' may be modified by this method to indicate outcomes other than 'Result.PASS' or to add annotations.
def RunScript(self, database, script, arguments, result): self.RunProgram("\""+self.__context["isql_path"]+"\"", [ self.__context["isql_path"] ] + [ database ] + arguments, script, self.__context, result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Run(self, context, result):\n\n # Was the program not specified?\n\n self.program = context[\"isql_path\"]\n\n if context.has_key(\"database_path\"):\n database = context[\"database_path\"]\n else:\n database = \"\"\n self.RunProgram(self.program,\n\t\t\t[ self.program , database ,\n \"-user\", context[\"user_name\"], \"-password\", context[\"user_password\"] ],\n context, result)", "def DBExecuteScript( DB: sqlite3.Connection, sql:str, *args ):\n assert isinstance( DB, sqlite3.Connection )\n DB.executescript( sql )\n DB.commit()", "def execute_script(self, script, nolog=True, close=True):\n self._check_connection()\n if not nolog: # pragma: no cover\n lines = script.split(\"\\n\")\n if len(lines) > 20:\n self.LOG(\"SQL start + \",\n \"\\n\".join([repr(x) for x in lines[:20]]))\n else:\n self.LOG(\"SQL start + \",\n \"\\n\".join([repr(x) for x in lines]))\n cur = self._connection.cursor()\n res = cur.executescript(script)\n if close:\n cur.close()\n if not nolog:\n self.LOG(\"SQL end\") # pragma: no cover\n else:\n return res", "def executescript(self, script: SQLQuery) -> \"Cursor\":\n return self.execute(script)", "def run(self, sql, *args):\n return self.database.execute(sql, args)", "def run_athena_query(self, **kwargs):\n LOGGER.debug('Executing query: %s', kwargs['query'])\n query_execution_resp = self.athena_client.start_query_execution(\n QueryString=kwargs['query'], QueryExecutionContext={\n 'Database': kwargs.get(\n 'database', self.DATABASE_DEFAULT)}, ResultConfiguration={\n 'OutputLocation': '{}/{}'.format(\n self.athena_results_bucket, self.athena_results_key)})\n\n query_execution_result = self.check_query_status(\n query_execution_resp['QueryExecutionId'])\n\n if query_execution_result != 'SUCCEEDED':\n LOGGER.error(\n 'The query %s returned %s, exiting!',\n kwargs['query'],\n query_execution_result)\n return False, {}\n\n query_results_resp = self.athena_client.get_query_results(\n QueryExecutionId=query_execution_resp['QueryExecutionId'],\n )\n\n # The idea here is to leave the processing logic to the calling functions.\n # No data being returned isn't always an indication that something is wrong.\n # When handling the query result data, iterate over each element in the Row,\n # and parse the Data key.\n # Reference: https://bit.ly/2tWOQ2N\n if not query_results_resp['ResultSet']['Rows']:\n LOGGER.debug('The query %s returned empty rows of data', kwargs['query'])\n\n return True, query_results_resp", "def run_script (script, *l) :\n if not os.path.exists (script) :\n raise PQHException (\"file %s not found\" % script)\n py = get_interpreter_path ()\n cmd = \"%s %s\" % (py, script)\n if len (l) > 0 :\n cmd += \" \" + \" \".join ( [str (x) for x in l])\n out,err = run_cmd (cmd)\n return out,err", "def run(self, script, *args, **kwargs):\n return self._run('run', script, *args, **kwargs)", "def execute(self, untrusted_user, script, arguments, logfile, cwd=None):\n return self.execute_helper(self.containers, script, arguments, logfile)", "def run_sql(self, sql):\n def mk_run_sql_q(sql):\n return {\n 'type' : 'run_sql',\n 'args': {\n 'sql' : sql\n }\n }\n return self.v1q(mk_run_sql_q(sql))", "def run_query(database, query, parameters=()) -> list:\n conn = None\n query_result = None\n try:\n conn = sqlite3.connect(database)\n cursor = conn.cursor()\n query_result = cursor.execute(query, parameters).fetchall()\n except sqlite3.Error as e:\n print(\"Error in run_query: {}\".format(e.args[0]))\n finally:\n if conn:\n conn.close()\n\n return query_result", "def 
sql_scripts_execute(self, sql_scripts, params={}):\n ps = self.parameter_handler(params)\n log.debug('Got parameters: %s', ps)\n cursor = self._get_cursor()\n for q in sql_scripts:\n with open(q, 'r') as s:\n sql_string_formatted = s.read().format(**ps)\n cursor.execute(sql.SQL(sql_string_formatted), ps)\n self.connection.commit()\n self.connection.close()", "def run_query(self, sql_query='', *parameters):\n if not self.cursor:\n raise BaseException(\"Database not selected\")\n\n return self.cursor.execute(sql_query, parameters)", "def RunProgram(self, program, arguments, context, result):\n\n # Construct the environment.\n environment = self.MakeEnvironment(context)\n e_stdin = self.stdin\n c = {}\n for pair in context.items():\n c[pair[0]] = pair[1]\n for substitution in c.keys():\n pattern = \"$(\"+substitution.upper()+\")\"\n replacement = context[substitution]\n e_stdin = e_stdin.replace(pattern, replacement)\n\n basename = os.path.split(arguments[0])[-1]\n qm_exec = qm.executable.Filter(e_stdin, -2)\n\n try:\n exit_status= qm_exec.Run(arguments, environment)\n stdout = qm_exec.stdout\n stderr = qm_exec.stderr\n causes = []\n\n if sys.platform != \"win32\":\n if os.WIFEXITED(exit_status):\n if exit_status != self.exit_code:\n causes.append(\"exit_code\")\n result[\"RunProgram.exit_code\"] = str(exit_status)\n elif os.WIFSIGNALED(exit_status):\n self.__cause= \"Process %s terminated by signal %d.\" % (basename, os.WTERMSIG(exit_status))\n\n elif os.WIFSTOPPED(exit_status):\n self.__cause= \"Process %s stopped by signal %d.\" % (basename, os.WSTOPSIG(exit_status))\n\n else:\n self.__cause= \"Process %s terminated abnormally.\" % basename\n\n # Check to see if the standard output matches.\n # First strip out ISQL junk\n stdout_stripped = re.sub(\"Database:.*\\n\",\"\",stdout)\n stdout_stripped = re.sub(\"SQL>\\s*\",\"\",stdout_stripped)\n stdout_stripped = re.sub(\"CON>\\s*\",\"\",stdout_stripped)\n stdout_stripped = re.sub(\"-->\\s*\",\"\",stdout_stripped)\n stdout_stripped = self.__PerformSubstitutions(stdout_stripped)\n stdout_stripped = re.compile(\"^\\s+\",re.I+re.M).sub(\"\",stdout_stripped)\n stdout_stripped = re.compile(\"\\s+$\",re.I+re.M).sub(\"\",stdout_stripped)\n\n self.stdout_stripped = re.sub(\"Database:.*\\n\",\"\",self.stdout)\n self.stdout_stripped = re.sub(\"SQL>\\s*\",\"\",self.stdout_stripped)\n self.stdout_stripped = re.sub(\"CON>\\s*\",\"\",self.stdout_stripped)\n self.stdout_stripped = re.sub(\"-->\\s*\",\"\",self.stdout_stripped)\n self.stdout_stripped = self.__PerformSubstitutions(self.stdout_stripped)\n self.stdout_stripped = re.compile(\"^\\s+\",re.I+re.M).sub(\"\",self.stdout_stripped)\n self.stdout_stripped = re.compile(\"\\s+$\",re.I+re.M).sub(\"\",self.stdout_stripped)\n\n if stdout_stripped != self.stdout_stripped:\n causes.append(\"standard output\")\n result[\"ExecTest.stdin\"] = \"<pre>\" + e_stdin + \"</pre>\"\n result[\"ExecTest.stdout_expected\"] = \"<pre>\" + self.stdout + \"</pre>\"\n result[\"ExecTest.stdout\"] = \"<pre>\" + stdout + \"</pre>\"\n result[\"ExecTest.stdout_stripped\"] = \"<pre>\" + stdout_stripped + \"</pre>\"\n result[\"ExecTest.stdout_stripped_expected\"] = \"<pre>\" + self.stdout_stripped + \"</pre>\"\n result[\"ExecTest.stripped_diff\"] = \"<pre>\"+'\\n'.join(difflib.ndiff(stdout_stripped.splitlines(0),self.stdout_stripped.splitlines(0)))+\"</pre>\"\n # Check to see that the standard error matches.\n stderr_stripped = re.sub(\"Use CONNECT or CREATE DATABASE to specify a database.*\\n\",\"\",stderr)\n if stderr_stripped != 
self.stderr:\n causes.append(\"standard error\")\n result[\"ExecTest.stdin\"] = \"<pre>\" + e_stdin + \"</pre>\"\n result[\"ExecTest.stderr\"] = \"<pre>\" + stderr + \"</pre>\"\n result[\"ExecTest.expected_stderr\"] = \"<pre>\" + self.stderr + \"</pre>\"\n # If anything went wrong, the test failed.\n if causes:\n result.Fail(\"Unexpected %s.\" % string.join(causes, \", \"))\n except:\n result.NoteException()", "def execute(self):\n if self.sql is None:\n self.sql = self.construct_query()\n # Only SQL strings can be split, not (e.g.) SQLAlchemy statements.\n if self.multiple_statements and isinstance(self.sql, str):\n statements = self._split_sql()\n else:\n statements = [self.sql]\n single_statement = True if len(statements) == 1 and self.filename else False\n try:\n for statement in statements:\n result_proxy = self.cm.conn.execute(statement)\n log_string = self.filename if single_statement else str(statement)[:25]\n self.logger.info(\"Executed {} against {}\".format(log_string, self.cm.db))\n if result_proxy.cursor:\n return self.fetch_results(result_proxy)\n except Exception as e:\n self.logger.exception(e)\n raise", "def execute_script(self):\n\n # render script variables\n script = self.replable.render_script_from_flo(self.flo, **self.template_engine_kwargs)\n\n # run over script lines\n for cmd in script.split(\"\\n\"):\n\n # no empty lines\n if cmd:\n\n self.brief_logger.info(cmd)\n if self.verbose_logger and self.log_file_echo_command:\n self.verbose_logger.info(\"$> '%s'\", cmd)\n\n # execute command\n cmd = cmd + \"\\n\"\n self.sock.send(cmd.encode())\n\n res = self.wait_for_command_execution(timeout=self.timeout)\n # read all data which is not covered by the regex used for stream searching\n # TODO: use loop here?!\n res += read_remaining_data(self.sock, SOCKET_READ_BUF_SIZE)\n\n # apply the custom check function\n if self.return_value_checker is not None:\n try:\n self.return_value_checker(cmd, res)\n except Exception as e:\n raise REPLUnexpectedResult(\n \"The following output is unexpected to the method `return_value_checker`:\\n%s\" % res,\n caused_by=e)\n\n yield res", "def db_execute(self, database_name, statement, params):\n with self.db_create_cursor(database_name) as cursor:\n if self.debug:\n self.logger.debug(\"Running statement: \" + statement)\n return cursor.execute(statement, params)", "def execute_sql_script(conn, script_filename):\n file_contents = open_sql_script(script_filename)\n cursor = conn.cursor()\n cursor.execute(file_contents)\n conn.commit()", "def executeQuery(args):\n\n if 'dbname' not in args and 'query' not in args and 'type' not in args:\n print \"invalid executeQuery options\"\n else:\n querytype = args['type']\n connection = connect({ 'dbname' : args['dbname'] })\n if connection:\n cursor = connection.cursor()\n query = args['query']\n\n # FIND QUERY\n if(querytype == 'find'):\n if 'values' not in args:\n cursor.execute(query)\n else:\n cursor.execute(query,args['values'])\n if cursor.rowcount > 0:\n result = cursor.fetchall()\n else:\n result = 0\n\n # DELETE QUERY\n if(querytype == 'delete'):\n cursor.execute(query)\n result = cursor.rowcount\n\n # INSERT QUERY\n if(querytype == 'insert'):\n if 'values' not in args:\n print \"no values to insert\"\n else:\n cursor.execute(query,args['values'])\n result = cursor.rowcount\n\n connection.commit()\n cursor.close()\n connection.close()\n return result", "def main(passed_arguments):\n\n # use real data as default\n scripts_path = os.path.abspath(os.path.join(PYTHON_PATH, 'scripts'))\n meta_path = 
os.path.abspath(os.path.join(scripts_path, 'meta.json'))\n manifest_path = os.path.abspath(os.path.join(scripts_path, 'manifest.csv'))\n\n # Locally, we can optionally have sample data\n if passed_arguments.sample and passed_arguments.database != 'remote':\n meta_path = os.path.abspath(os.path.join(scripts_path,\n 'meta_sample.json'))\n manifest_path = os.path.abspath(\n os.path.join(scripts_path, 'manifest_sample.csv'))\n\n # for case of more than one database choice default to the option with\n # the lowest risk if database is updated\n if passed_arguments.database == 'docker':\n database_choice = 'docker_database'\n drop_tables = True\n\n elif passed_arguments.database == 'docker_local':\n database_choice = 'docker_with_local_python'\n drop_tables = True\n\n elif passed_arguments.database == 'remote':\n database_choice = 'remote_database'\n drop_tables = False #TODO this is a hacky way to avoid dropping tables because it's not working with RDS...\n\n # Only users with additional admin privileges can rebuild the\n # remote database\n if not passed_arguments.update_only:\n database_choice = 'remote_database_master'\n\n # TODO: do we want to default to local or docker?\n elif passed_arguments.database == 'local':\n database_choice = 'local_database'\n drop_tables = True\n\n # universal defaults\n keep_temp_files = True\n\n # Instantiate and run the loader\n loader = LoadData(database_choice=database_choice, meta_path=meta_path,\n manifest_path=manifest_path,\n keep_temp_files=keep_temp_files,\n drop_tables=drop_tables)\n\n if passed_arguments.update_only:\n loader.update_database(passed_arguments.update_only)\n else:\n loader.rebuild()\n\n\n\n #TODO add in failures report here e.g. _failed_table_count", "def runScript(self, script):\n data = FilePath(__file__).parent().child('data')\n sample_file = data.child('1.input.ofx')\n\n args = (script, [sample_file.path])\n log.msg('executing %r' % (args,))\n out, err, rc = yield utils.getProcessOutputAndValue(*args, env=None)\n log.msg('rc: %r' % (rc,))\n log.msg('out: %r' % (out,))\n log.msg('err: %r' % (err,))\n if rc != 0:\n self.fail(\"Failed: %s\\n\\n%s\" % (out, err))", "def __ExecISQLCommands(self):\n\n try:\n stdout, stderr= self.__RunProgram(self.source_code,[self.__context[\"isql_path\"],\n self.__dsn,\n \"-user\", self.user_name,\n \"-password\", self.user_password])\n\n except:\n self.__result.Fail(cause= self.__cause)\n exc_info = sys.exc_info()\n self.__result[Result.EXCEPTION]= \"%s: %s\" % exc_info[:2]\n\n else:\n\n stdout_stripped= self.__StringStrip(stdout) # strip whole stdout\n stdout_e_stripped= self.__StringStrip(self.result_string) # strip whole expected stdout\n stderr_stripped= self.__StringStrip(stderr) # strip whole stderr\n stderr_e_stripped= self.__StringStrip(self.expected_stderr) # strip whole expected stderr\n\n if stderr_stripped != stderr_e_stripped: # if error outputs do not match\n self.__AnnotateErrorDiff(\"ISQL\",\n self.expected_stderr,\n stderr,\n stderr_e_stripped,\n stderr_stripped)\n elif stdout_stripped == stdout_e_stripped: # if they match\n return True # ok\n else:\n\n self.__AnnotateDiff(\"ISQL\",\n self.result_string,\n stdout,\n stdout_e_stripped,\n stdout_stripped)", "def run(self, script_args):\n run_url = '{0}/{1}/run'.format(self.url, self.script_name)\n headers = {'Content-Type': 'text/plain'}\n payload = json.dumps(script_args)\n\n resp = False\n if self.get():\n log.debug('Running script: {0}'.format(self.script_name))\n req = requests.post(run_url, auth=(self.username, self.password), 
headers=headers, data=payload)\n if req.status_code == 204 or 200:\n resp = req.json()\n return resp\n log.error('Failed running script: {0}\" Reason: {1} {2}'.format(self.script_name, req.status_code, req.json()))\n\n return resp", "def RunGsec(self, script, arguments, result):\n try:\n self.RunProgram(\"\\\"\"+self.__context[\"gsec_path\"]+\"\\\"\",\n [ self.__context[\"gsec_path\"], \"-database\", self.__context[\"server_location\"]+ self.__context[\"isc4_path\"], \"-user\", \"SYSDBA\", \"-password\", \"masterkey\" ]+arguments,\n script, self.__context, result)\n except:\n result.NoteException()", "def sql(self, db, sql, args=()):\n assert db in ('source', 'target'), u\"First arg of sql() should be 'source' or 'target'\"\n connection = self.target_connection if db == 'target' else self.source_connection\n with connection.cursor() as cursor:\n cursor.execute(sql, args)\n return cursor.fetchall() if 'select ' in sql.lower() else ()", "def execute(self, sql, sql_args=None):\n # Check that sql arguments have the correct type\n self._check_sql_args(sql_args)\n # Execute the query\n try:\n pgcursor = self.get_postgres_cursor()\n pgcursor.execute(sql, sql_args)\n self._connection.commit()\n except PostgresError, e:\n self._connection.rollback()\n raise RuntimeError(\"Error running SQL query: %s\", str(e))\n finally:\n pgcursor.close()", "def execute(self, *sql):\n # assemble the command and pass it on to the connection\n return self.postgres.execute(self.connection, \"\\n\".join(sql))", "def run(self, script, **kwargs):\r\n # don't return a value from a script\r\n kwargs['nout'] = 0\r\n return self.call(script, **kwargs)", "def run_query(db, query, args=None):\n # You don't have to do anything for this function! It's already written for\n # you. It's meant as a helper function to reduce the amount of copy-pasting\n # you'd have to do.\n con = sqlite3.connect(db)\n cur = con.cursor()\n if args is None:\n cur.execute(query)\n else:\n cur.execute(query, args)\n # Note that we're using cur.fetchall() here instead of a for loop because\n # we WANT a list of the values we've SELECTed.\n data = cur.fetchall()\n cur.close()\n con.close()\n return data", "def execute(self, parseString, *args):\r\n debug.write('[SourceRPG] Executing SQL String: %s' % parseString, 1, True)\r\n self.cursor.execute(parseString, args)\r\n debug.write('[SourceRPG] SQL String executed successfully', 2, True)" ]
[ "0.63101566", "0.6261756", "0.59878993", "0.57192934", "0.56583416", "0.5588258", "0.5548203", "0.554179", "0.54586357", "0.5448663", "0.5403661", "0.5383299", "0.534008", "0.53358716", "0.52607375", "0.52274305", "0.5176673", "0.51720655", "0.5166656", "0.51541513", "0.5115914", "0.5115445", "0.5069928", "0.50657517", "0.5059611", "0.5044877", "0.50393355", "0.50233173", "0.49954236", "0.49834338" ]
0.8646154
0
Run an ISQL script. 'script' An (optional) GSEC script. 'arguments' A list of the arguments to the GSEC without ISC4 database location and sysdba username and password. 'result' A 'Result' object. The outcome will be 'Result.PASS' when this method is called. The 'result' may be modified by this method to indicate outcomes other than 'Result.PASS' or to add annotations.
def RunGsec(self, script, arguments, result): try: self.RunProgram("\""+self.__context["gsec_path"]+"\"", [ self.__context["gsec_path"], "-database", self.__context["server_location"]+ self.__context["isc4_path"], "-user", "SYSDBA", "-password", "masterkey" ]+arguments, script, self.__context, result) except: result.NoteException()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def RunScript(self, database, script, arguments, result):\n\n self.RunProgram(\"\\\"\"+self.__context[\"isql_path\"]+\"\\\"\",\n [ self.__context[\"isql_path\"] ] + [ database ] + arguments,\n script, self.__context, result)", "def execute_script(self, script, nolog=True, close=True):\n self._check_connection()\n if not nolog: # pragma: no cover\n lines = script.split(\"\\n\")\n if len(lines) > 20:\n self.LOG(\"SQL start + \",\n \"\\n\".join([repr(x) for x in lines[:20]]))\n else:\n self.LOG(\"SQL start + \",\n \"\\n\".join([repr(x) for x in lines]))\n cur = self._connection.cursor()\n res = cur.executescript(script)\n if close:\n cur.close()\n if not nolog:\n self.LOG(\"SQL end\") # pragma: no cover\n else:\n return res", "def run_script (script, *l) :\n if not os.path.exists (script) :\n raise PQHException (\"file %s not found\" % script)\n py = get_interpreter_path ()\n cmd = \"%s %s\" % (py, script)\n if len (l) > 0 :\n cmd += \" \" + \" \".join ( [str (x) for x in l])\n out,err = run_cmd (cmd)\n return out,err", "def Run(self, context, result):\n\n # Was the program not specified?\n\n self.program = context[\"isql_path\"]\n\n if context.has_key(\"database_path\"):\n database = context[\"database_path\"]\n else:\n database = \"\"\n self.RunProgram(self.program,\n\t\t\t[ self.program , database ,\n \"-user\", context[\"user_name\"], \"-password\", context[\"user_password\"] ],\n context, result)", "def DBExecuteScript( DB: sqlite3.Connection, sql:str, *args ):\n assert isinstance( DB, sqlite3.Connection )\n DB.executescript( sql )\n DB.commit()", "def executescript(self, script: SQLQuery) -> \"Cursor\":\n return self.execute(script)", "def run(self, script, *args, **kwargs):\n return self._run('run', script, *args, **kwargs)", "def execute_script(self):\n\n # render script variables\n script = self.replable.render_script_from_flo(self.flo, **self.template_engine_kwargs)\n\n # run over script lines\n for cmd in script.split(\"\\n\"):\n\n # no empty lines\n if cmd:\n\n self.brief_logger.info(cmd)\n if self.verbose_logger and self.log_file_echo_command:\n self.verbose_logger.info(\"$> '%s'\", cmd)\n\n # execute command\n cmd = cmd + \"\\n\"\n self.sock.send(cmd.encode())\n\n res = self.wait_for_command_execution(timeout=self.timeout)\n # read all data which is not covered by the regex used for stream searching\n # TODO: use loop here?!\n res += read_remaining_data(self.sock, SOCKET_READ_BUF_SIZE)\n\n # apply the custom check function\n if self.return_value_checker is not None:\n try:\n self.return_value_checker(cmd, res)\n except Exception as e:\n raise REPLUnexpectedResult(\n \"The following output is unexpected to the method `return_value_checker`:\\n%s\" % res,\n caused_by=e)\n\n yield res", "def execute(self, untrusted_user, script, arguments, logfile, cwd=None):\n return self.execute_helper(self.containers, script, arguments, logfile)", "def execute_script(self, script, enterpreter='/bin/sh'):\n destination = '/tmp/' + ''.join(\n random.choice(string.lowercase) for i in range(16))\n\n self.upload(script, destination)\n self.execute('%s %s' % (enterpreter, destination))\n self.execute('rm %s' % destination)", "def script(self, object_id, script, args=None, timeout=None):\n # Resolve object data\n data = yield self.service.get_executor(\"db\").submit(self.get_object_data, object_id)\n # Find pool name\n pool = self.service.get_pool_name(data[\"pool_id\"])\n if not pool:\n metrics[\"error\", (\"type\", \"pool_not_found\")] += 1\n raise APIError(\"Pool not found\")\n # Check script is 
exists\n script_name = \"%s.%s\" % (data[\"profile\"], script)\n if not loader.has_script(script_name):\n metrics[\"error\", (\"type\", \"invalid_scripts_request\")] += 1\n raise APIError(\"Invalid script\")\n #\n url = yield self.get_activator_url(pool)\n if not url:\n raise APIError(\"No active activators for pool '%s'\" % pool)\n self.redirect(\n url,\n \"script\",\n [\n script_name,\n data[\"credentials\"],\n data[\"capabilities\"],\n data[\"version\"],\n args,\n timeout,\n ],\n )", "def run_script(self, script, env=None, return_output=False):\n command = [\"/bin/sh\", \"-e\"]\n command.append(script)\n\n return self.run(command, env, return_output)", "def runScript(self, script):\n data = FilePath(__file__).parent().child('data')\n sample_file = data.child('1.input.ofx')\n\n args = (script, [sample_file.path])\n log.msg('executing %r' % (args,))\n out, err, rc = yield utils.getProcessOutputAndValue(*args, env=None)\n log.msg('rc: %r' % (rc,))\n log.msg('out: %r' % (out,))\n log.msg('err: %r' % (err,))\n if rc != 0:\n self.fail(\"Failed: %s\\n\\n%s\" % (out, err))", "def run(self, script, **kwargs):\r\n # don't return a value from a script\r\n kwargs['nout'] = 0\r\n return self.call(script, **kwargs)", "def run_script(self, script):\n script = dedent(script)\n\n def _raise_for_result(result):\n raise self.PowerShellScriptError(\n \"Script returned {}!: {}\"\n .format(result.status_code, result.std_err)\n )\n\n # Add retries for error id 1600\n num_tries = 6\n sleep_time = 10\n for attempt in range(1, num_tries + 1):\n self.logger.debug(' Running PowerShell script:\\n%s\\n', script)\n result = self.api.run_ps(\"{}\\n\\n{}\".format(self.pre_script, script))\n if result.status_code == 0:\n break\n elif hasattr(result, 'std_err') and 'Error ID: 1600' in result.std_err:\n if attempt == num_tries:\n self.logger.error(\"Retried %d times, giving up\", num_tries)\n _raise_for_result(result)\n\n self.logger.warning(\n \"Hit scvmm error 1600 running script, waiting %d sec... (%d/%d)\",\n sleep_time, attempt, num_tries\n )\n time.sleep(sleep_time)\n else:\n _raise_for_result(result)\n\n return result.std_out.strip()", "def execute_sql_script(conn, script_filename):\n file_contents = open_sql_script(script_filename)\n cursor = conn.cursor()\n cursor.execute(file_contents)\n conn.commit()", "def run(self, script_args):\n run_url = '{0}/{1}/run'.format(self.url, self.script_name)\n headers = {'Content-Type': 'text/plain'}\n payload = json.dumps(script_args)\n\n resp = False\n if self.get():\n log.debug('Running script: {0}'.format(self.script_name))\n req = requests.post(run_url, auth=(self.username, self.password), headers=headers, data=payload)\n if req.status_code == 204 or 200:\n resp = req.json()\n return resp\n log.error('Failed running script: {0}\" Reason: {1} {2}'.format(self.script_name, req.status_code, req.json()))\n\n return resp", "def execute(cls, operation, kind, *args, **kwargs):\n\n if isinstance(operation, tuple) and operation[0] == 'CATNIP':\n\n if operation[1] not in cls.scripts:\n raise RuntimeError('Invalid script requested: \"%s\".' 
% operation[1])\n\n # it's a script - check to see if it's loaded\n try:\n return cls.execute(*tuple([\n cls.Operations.EVALUATE_STORED, # operation\n '__meta__', # kind\n cls.scripts[operation[1]][0], # script hash\n len(kwargs.get('keys', []))] + ( # of key arguments\n kwargs.get('keys', [])) + ( # key arguments\n [i for i in args]))) # positional arguments\n\n except wire_errors.NoScriptError:\n\n # load it and try again\n r_hash = cls.execute(*(\n cls.Operations.SCRIPT_LOAD,\n '__meta__', cls.scripts[operation[1]][1]))\n\n assert r_hash == cls.scripts[operation[1]][0], (\n \"script hashes must stay consistent (for db script '%s')\" % operation[1])\n\n return cls.execute(operation, kind, *args, **kwargs)\n return super(RedisWarehouse, cls).execute(operation, kind, *args, **kwargs)", "def __ExecISQLCommands(self):\n\n try:\n stdout, stderr= self.__RunProgram(self.source_code,[self.__context[\"isql_path\"],\n self.__dsn,\n \"-user\", self.user_name,\n \"-password\", self.user_password])\n\n except:\n self.__result.Fail(cause= self.__cause)\n exc_info = sys.exc_info()\n self.__result[Result.EXCEPTION]= \"%s: %s\" % exc_info[:2]\n\n else:\n\n stdout_stripped= self.__StringStrip(stdout) # strip whole stdout\n stdout_e_stripped= self.__StringStrip(self.result_string) # strip whole expected stdout\n stderr_stripped= self.__StringStrip(stderr) # strip whole stderr\n stderr_e_stripped= self.__StringStrip(self.expected_stderr) # strip whole expected stderr\n\n if stderr_stripped != stderr_e_stripped: # if error outputs do not match\n self.__AnnotateErrorDiff(\"ISQL\",\n self.expected_stderr,\n stderr,\n stderr_e_stripped,\n stderr_stripped)\n elif stdout_stripped == stdout_e_stripped: # if they match\n return True # ok\n else:\n\n self.__AnnotateDiff(\"ISQL\",\n self.result_string,\n stdout,\n stdout_e_stripped,\n stdout_stripped)", "def run_script(self, script):\n self._update_container()\n self._run_session_checks()\n self._send_script(script)\n return self._get_logs()", "def execute(self, task, script, **kwargs):\n locals().update(kwargs)\n exec(script)", "def execute_script(self, process_manager, script, username, password):\n\n auth = vim.vm.guest.NamePasswordAuthentication()\n auth.username = username\n auth.password = password\n try:\n copy_content = (\n \"'\"\n + open(script).read().replace(\"'\", '\"\\'\"')\n + \"' >> \"\n + os.path.basename(script)\n )\n program_spec = vim.vm.guest.ProcessManager.ProgramSpec()\n program_spec.programPath = \"/bin/echo\"\n program_spec.arguments = copy_content\n pid = process_manager.StartProgramInGuest(self.vm_obj, auth, program_spec)\n assert pid > 0\n program_spec.programPath = \"/bin/sh\"\n log_file = \"/var/log/vhpc_toolkit.log\"\n execute_content = os.path.basename(script) + \" 2>&1 | tee \" + log_file\n program_spec.arguments = execute_content\n pid = process_manager.StartProgramInGuest(self.vm_obj, auth, program_spec)\n assert pid > 0\n self.logger.info(\n \"Script {0} is being executed in VM {1} guest OS \"\n \"and PID is {2}\".format(os.path.basename(script), self.vm_obj.name, pid)\n )\n except IOError:\n self.logger.error(\"Can not open script {0}\".format(script))\n raise SystemExit\n except AssertionError:\n self.logger.error(\"Script is not launched successfully.\")\n raise SystemExit\n except vim.fault.InvalidGuestLogin as e:\n self.logger.error(e.msg)\n raise SystemExit\n else:\n return pid, auth, self.vm_obj", "def RunProgram(self, program, arguments, context, result):\n\n # Construct the environment.\n environment = 
self.MakeEnvironment(context)\n e_stdin = self.stdin\n c = {}\n for pair in context.items():\n c[pair[0]] = pair[1]\n for substitution in c.keys():\n pattern = \"$(\"+substitution.upper()+\")\"\n replacement = context[substitution]\n e_stdin = e_stdin.replace(pattern, replacement)\n\n basename = os.path.split(arguments[0])[-1]\n qm_exec = qm.executable.Filter(e_stdin, -2)\n\n try:\n exit_status= qm_exec.Run(arguments, environment)\n stdout = qm_exec.stdout\n stderr = qm_exec.stderr\n causes = []\n\n if sys.platform != \"win32\":\n if os.WIFEXITED(exit_status):\n if exit_status != self.exit_code:\n causes.append(\"exit_code\")\n result[\"RunProgram.exit_code\"] = str(exit_status)\n elif os.WIFSIGNALED(exit_status):\n self.__cause= \"Process %s terminated by signal %d.\" % (basename, os.WTERMSIG(exit_status))\n\n elif os.WIFSTOPPED(exit_status):\n self.__cause= \"Process %s stopped by signal %d.\" % (basename, os.WSTOPSIG(exit_status))\n\n else:\n self.__cause= \"Process %s terminated abnormally.\" % basename\n\n # Check to see if the standard output matches.\n # First strip out ISQL junk\n stdout_stripped = re.sub(\"Database:.*\\n\",\"\",stdout)\n stdout_stripped = re.sub(\"SQL>\\s*\",\"\",stdout_stripped)\n stdout_stripped = re.sub(\"CON>\\s*\",\"\",stdout_stripped)\n stdout_stripped = re.sub(\"-->\\s*\",\"\",stdout_stripped)\n stdout_stripped = self.__PerformSubstitutions(stdout_stripped)\n stdout_stripped = re.compile(\"^\\s+\",re.I+re.M).sub(\"\",stdout_stripped)\n stdout_stripped = re.compile(\"\\s+$\",re.I+re.M).sub(\"\",stdout_stripped)\n\n self.stdout_stripped = re.sub(\"Database:.*\\n\",\"\",self.stdout)\n self.stdout_stripped = re.sub(\"SQL>\\s*\",\"\",self.stdout_stripped)\n self.stdout_stripped = re.sub(\"CON>\\s*\",\"\",self.stdout_stripped)\n self.stdout_stripped = re.sub(\"-->\\s*\",\"\",self.stdout_stripped)\n self.stdout_stripped = self.__PerformSubstitutions(self.stdout_stripped)\n self.stdout_stripped = re.compile(\"^\\s+\",re.I+re.M).sub(\"\",self.stdout_stripped)\n self.stdout_stripped = re.compile(\"\\s+$\",re.I+re.M).sub(\"\",self.stdout_stripped)\n\n if stdout_stripped != self.stdout_stripped:\n causes.append(\"standard output\")\n result[\"ExecTest.stdin\"] = \"<pre>\" + e_stdin + \"</pre>\"\n result[\"ExecTest.stdout_expected\"] = \"<pre>\" + self.stdout + \"</pre>\"\n result[\"ExecTest.stdout\"] = \"<pre>\" + stdout + \"</pre>\"\n result[\"ExecTest.stdout_stripped\"] = \"<pre>\" + stdout_stripped + \"</pre>\"\n result[\"ExecTest.stdout_stripped_expected\"] = \"<pre>\" + self.stdout_stripped + \"</pre>\"\n result[\"ExecTest.stripped_diff\"] = \"<pre>\"+'\\n'.join(difflib.ndiff(stdout_stripped.splitlines(0),self.stdout_stripped.splitlines(0)))+\"</pre>\"\n # Check to see that the standard error matches.\n stderr_stripped = re.sub(\"Use CONNECT or CREATE DATABASE to specify a database.*\\n\",\"\",stderr)\n if stderr_stripped != self.stderr:\n causes.append(\"standard error\")\n result[\"ExecTest.stdin\"] = \"<pre>\" + e_stdin + \"</pre>\"\n result[\"ExecTest.stderr\"] = \"<pre>\" + stderr + \"</pre>\"\n result[\"ExecTest.expected_stderr\"] = \"<pre>\" + self.stderr + \"</pre>\"\n # If anything went wrong, the test failed.\n if causes:\n result.Fail(\"Unexpected %s.\" % string.join(causes, \", \"))\n except:\n result.NoteException()", "def runScript(self, script):\n c = self\n game = self.game\n app = self.game.app\n shell = self.shell\n sprite = self.sprite\n s = shell\n self = self.env\n exec(open(\"script/\" + script).read())", "def execute_script(script, 
variables):\n code = compile(script, 'fake-filename', 'exec')\n output = io.StringIO()\n with contextlib.redirect_stdout(output):\n exec(code, variables)\n output = output.getvalue()\n return output", "def sql_scripts_execute(self, sql_scripts, params={}):\n ps = self.parameter_handler(params)\n log.debug('Got parameters: %s', ps)\n cursor = self._get_cursor()\n for q in sql_scripts:\n with open(q, 'r') as s:\n sql_string_formatted = s.read().format(**ps)\n cursor.execute(sql.SQL(sql_string_formatted), ps)\n self.connection.commit()\n self.connection.close()", "def runscript(host, script, list_scripts, multi_host, hosts_filter):\n if list_scripts:\n pprint(menu_generator(cs.get_scripts()))\n if host:\n session = cs.init_session(host)\n response = cs.execute_active_responder_command(\"runscript\", f\"-CloudFile={script}\", session)\n pprint(response)\n if multi_host:\n batch_id = cs.new_batch_job(hosts_string=multi_host)\n response = cs.execute_batch_job(\"runscript\", batch_id, f\"-CloudFile={script}\")\n pprint(response)\n if hosts_filter:\n query_filter = hosts_filter.split(\":\")\n batch_id = cs.new_batch_job(filter_parameter=query_filter[0], filter_value=query_filter[1])\n response = cs.execute_batch_job(\"runscript\", batch_id, f\"-CloudFile={script}\")\n pprint(response)", "def run_script_across_bridge(script_file, python=\"python\", argstring=\"\"):\n\n # spawn a jfx_bridge_ida server - use server port 0 to pick a random port\n server = bridge.BridgeServer(\n server_host=\"127.0.0.1\",\n server_port=0,\n loglevel=logging.INFO,\n local_call_hook=hook_local_call,\n local_eval_hook=hook_local_eval,\n )\n # start it running in a background thread\n server.start()\n\n try:\n # work out where we're running the server\n server_host, server_port = server.get_server_info()\n\n print(\"Running \" + script_file)\n\n # spawn an external python process to run against it\n try:\n output = subprocess.check_output(\n \"{python} {script} --connect_to_host={host} --connect_to_port={port} {argstring}\".format(\n python=python,\n script=script_file,\n host=server_host,\n port=server_port,\n argstring=argstring,\n ),\n stderr=subprocess.STDOUT,\n shell=True,\n )\n print(output)\n except subprocess.CalledProcessError as exc:\n print(\"Failed ({}):{}\".format(exc.returncode, exc.output))\n\n print(script_file + \" completed\")\n\n finally:\n # when we're done with the script, shut down the server\n server.shutdown()", "def execute_script(self, script, asynchronous=False):\n pass", "def run_script():\n # pylint: disable=unsupported-assignment-operation\n script_source.data['script'] = [inp_script.value]" ]
[ "0.7854742", "0.61550087", "0.59776443", "0.5792648", "0.5619949", "0.55894285", "0.55582255", "0.5536295", "0.54101485", "0.536589", "0.5349416", "0.53441995", "0.5326733", "0.53031874", "0.5293181", "0.529026", "0.52394783", "0.5232753", "0.5228306", "0.521149", "0.52065", "0.5166577", "0.5133957", "0.5074078", "0.50738883", "0.5069975", "0.50101393", "0.5008663", "0.4966692", "0.49546844" ]
0.68645024
1
Run the 'program'. 'program' The path to the program to run. 'arguments' A list of the arguments to the program. This list must contain a first argument corresponding to 'argv[0]'. 'context' A 'Context' giving runtime parameters to the test. 'result' A 'Result' object. The outcome will be 'Result.PASS' when this method is called. The 'result' may be modified by this method to indicate outcomes other than 'Result.PASS' or to add annotations.
def RunProgram(self, program, arguments, context, result): # Construct the environment. environment = self.MakeEnvironment(context) e_stdin = self.stdin c = {} for pair in context.items(): c[pair[0]] = pair[1] for substitution in c.keys(): pattern = "$("+substitution.upper()+")" replacement = context[substitution] e_stdin = e_stdin.replace(pattern, replacement) basename = os.path.split(arguments[0])[-1] qm_exec = qm.executable.Filter(e_stdin, -2) try: exit_status= qm_exec.Run(arguments, environment) stdout = qm_exec.stdout stderr = qm_exec.stderr causes = [] if sys.platform != "win32": if os.WIFEXITED(exit_status): if exit_status != self.exit_code: causes.append("exit_code") result["RunProgram.exit_code"] = str(exit_status) elif os.WIFSIGNALED(exit_status): self.__cause= "Process %s terminated by signal %d." % (basename, os.WTERMSIG(exit_status)) elif os.WIFSTOPPED(exit_status): self.__cause= "Process %s stopped by signal %d." % (basename, os.WSTOPSIG(exit_status)) else: self.__cause= "Process %s terminated abnormally." % basename # Check to see if the standard output matches. # First strip out ISQL junk stdout_stripped = re.sub("Database:.*\n","",stdout) stdout_stripped = re.sub("SQL>\s*","",stdout_stripped) stdout_stripped = re.sub("CON>\s*","",stdout_stripped) stdout_stripped = re.sub("-->\s*","",stdout_stripped) stdout_stripped = self.__PerformSubstitutions(stdout_stripped) stdout_stripped = re.compile("^\s+",re.I+re.M).sub("",stdout_stripped) stdout_stripped = re.compile("\s+$",re.I+re.M).sub("",stdout_stripped) self.stdout_stripped = re.sub("Database:.*\n","",self.stdout) self.stdout_stripped = re.sub("SQL>\s*","",self.stdout_stripped) self.stdout_stripped = re.sub("CON>\s*","",self.stdout_stripped) self.stdout_stripped = re.sub("-->\s*","",self.stdout_stripped) self.stdout_stripped = self.__PerformSubstitutions(self.stdout_stripped) self.stdout_stripped = re.compile("^\s+",re.I+re.M).sub("",self.stdout_stripped) self.stdout_stripped = re.compile("\s+$",re.I+re.M).sub("",self.stdout_stripped) if stdout_stripped != self.stdout_stripped: causes.append("standard output") result["ExecTest.stdin"] = "<pre>" + e_stdin + "</pre>" result["ExecTest.stdout_expected"] = "<pre>" + self.stdout + "</pre>" result["ExecTest.stdout"] = "<pre>" + stdout + "</pre>" result["ExecTest.stdout_stripped"] = "<pre>" + stdout_stripped + "</pre>" result["ExecTest.stdout_stripped_expected"] = "<pre>" + self.stdout_stripped + "</pre>" result["ExecTest.stripped_diff"] = "<pre>"+'\n'.join(difflib.ndiff(stdout_stripped.splitlines(0),self.stdout_stripped.splitlines(0)))+"</pre>" # Check to see that the standard error matches. stderr_stripped = re.sub("Use CONNECT or CREATE DATABASE to specify a database.*\n","",stderr) if stderr_stripped != self.stderr: causes.append("standard error") result["ExecTest.stdin"] = "<pre>" + e_stdin + "</pre>" result["ExecTest.stderr"] = "<pre>" + stderr + "</pre>" result["ExecTest.expected_stderr"] = "<pre>" + self.stderr + "</pre>" # If anything went wrong, the test failed. if causes: result.Fail("Unexpected %s." % string.join(causes, ", ")) except: result.NoteException()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def RunProgram(self, program, arguments, stdin, context, result):\n\n # Construct the environment.\n environment = self.MakeEnvironment(context)\n e_stdin = stdin\n c = {}\n for pair in context.items():\n c[pair[0]] = pair[1]\n for substitution in c.keys():\n pattern = \"$(\"+substitution.upper()+\")\"\n replacement = context[substitution]\n e_stdin = e_stdin.replace(pattern, replacement)\n basename = os.path.split(arguments[0])[-1]\n qm_exec = qm.executable.Filter(e_stdin, -2)\n\n try:\n exit_status= qm_exec.Run(arguments, environment)\n stdout = qm_exec.stdout\n stderr = qm_exec.stderr\n causes = []\n\n if sys.platform != \"win32\":\n if os.WIFEXITED(exit_status):\n if exit_status != self.exit_code:\n causes.append(\"exit_code\")\n result[\"RunProgram.exit_code\"] = str(exit_status)\n elif os.WIFSIGNALED(exit_status):\n self.__cause= \"Process %s terminated by signal %d.\" % (basename, os.WTERMSIG(exit_status))\n\n elif os.WIFSTOPPED(exit_status):\n self.__cause= \"Process %s stopped by signal %d.\" % (basename, os.WSTOPSIG(exit_status))\n\n else:\n self.__cause= \"Process %s terminated abnormally.\" % basename\n\n # Check to see that the standard error matches.\n if stderr:\n causes.append(\"standard error\")\n result[\"RunProgram.stderr\"] = \"'''\" + stderr + \"'''\"\n # If anything went wrong, the test failed.\n if causes:\n result.Fail(\"Unexpected %s.\" % string.join(causes, \", \"))\n except:\n result.NoteException()", "def execute (self, program, cache=False):\n ast = None\n if cache:\n requests_cache.install_cache('demo_cache',\n allowable_methods=('GET', 'POST', ))\n else:\n requests_cache.disabled()\n\n if isinstance(program, str):\n ast = self.parse (program)\n if not ast:\n raise ValueError (f\"Unhandled type: {type(program)}\")\n for statement in ast.statements:\n logger.debug (f\"execute: {statement} type={type(statement).__name__}\")\n statement.execute (interpreter=self)\n return self.context", "def run(name, program, arguments, cache_enabled, filename):\n output = None\n if cache_enabled:\n output = get_output_from_cache(name, filename)\n\n if output is None:\n call_arguments = [program] + arguments + [filename]\n try:\n output = subprocess.check_output(\n call_arguments, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as error:\n output = error.output\n except OSError:\n return {\n filename: {\n 'error': [('Could not execute \"%s\".%sMake sure all ' +\n 'required programs are installed') %\n (' '.join(call_arguments), os.linesep)]\n }\n }\n output = output.decode('utf-8')\n if cache_enabled:\n save_output_in_cache(name, filename, output)\n return output", "def run_program_from_python_test(program, args, stdin=''):\n #environ = self.__MakeEnvironment()\n # PC: fix values so they are strings. 
Needed for Windows.\n #for key in environ.iterkeys():\n #environ[key] = str(environ[key])\n # provide full path for standard tools\n program = context.environment.get('%s_path' % program, program)\n basename = os.path.split(program)[-1]\n args.insert(0,program)\n\n if self.connection_character_set:\n args.extend(['-ch',self.connection_character_set])\n script = stdin.encode(DB_CHAR_SET_NAME_TO_PYTHON_ENCODING_MAP[self.connection_character_set])\n else:\n script = stdin.encode('ascii')\n script = substitute_macros(script)\n try:\n return_code, stdout, stderr = runProgram(args,[],stdin=script)\n sys.stdout.writelines(stdout)\n sys.stderr.writelines(stderr)\n except:\n result.note_exception(cause=\"Python test: Exception raised while running external program from Python test.\")\n result[\"failing_program\"] = program\n #cleanup()", "def execute_program(args: List[str], mode: TestMode) -> None:\n args_str = ' '.join(args)\n\n def check_output(result: CompletedProcess):\n stdout = strip_ascii_codes(result.stdout.decode())\n error_string = f'Failure:\\nMode: {mode.value}\\nCommand: {args_str}\\nExit code: {result.returncode:X}\\nOutput:\\n{stdout}\\n'\n if mode == TestMode.STUB:\n if result.returncode not in (1, 2) or 'Test FAILED' not in stdout:\n raise RuntimeError(error_string)\n elif mode == TestMode.SOLUTION:\n if (result.returncode != 0 or\n \"*** You've passed ALL tests. Congratulations! ***\" not in stdout):\n raise RuntimeError(error_string)\n else:\n print('Invalid mode: {}'.format(mode))\n sys.exit(1)\n\n try:\n result = subprocess.run(args, stdout=subprocess.PIPE, timeout=300)\n check_output(result)\n except RuntimeError as e:\n print(e)\n sys.exit(1)\n except TimeoutExpired:\n print(f\"{mode} > {args_str}: TIMEOUT\")\n sys.exit(1)", "def run_program(\n environment='emulator',\n block_device=None,\n dump_file=None,\n dump_base=None,\n dump_length=None,\n timeout=60,\n flush_l2=False,\n trace=False,\n executable=None):\n if not executable:\n executable = HEX_FILE\n\n if environment == 'emulator':\n args = [BIN_DIR + 'emulator']\n args += [ '-a' ] # Enable thread scheduling randomization by default\n if block_device:\n args += ['-b', block_device]\n\n if dump_file:\n args += ['-d', dump_file + ',' +\n hex(dump_base) + ',' + hex(dump_length)]\n\n args += [executable]\n return _run_test_with_timeout(args, timeout)\n elif environment == 'verilator':\n args = [BIN_DIR + 'verilator_model']\n if block_device:\n args += ['+block=' + block_device]\n\n if dump_file:\n args += ['+memdumpfile=' + dump_file,\n '+memdumpbase=' + hex(dump_base)[2:],\n '+memdumplen=' + hex(dump_length)[2:]]\n\n if flush_l2:\n args += ['+autoflushl2=1']\n\n if trace:\n args += ['+trace']\n\n args += ['+bin=' + executable]\n output = _run_test_with_timeout(args, timeout)\n if '***HALTED***' not in output:\n raise TestException(output + '\\nProgram did not halt normally')\n\n return output\n else:\n raise TestException('Unknown execution environment')", "def main(*arguments):\n\n args = parse_args(arguments)\n\n if args.test_suite is not None:\n test_suite = report_manager.load_test_suite_conf(args.test_suite)\n for i, test in enumerate(test_suite):\n args = parse_args(test)\n process_args_and_run(args, test_suite_iter=i)\n else:\n process_args_and_run(args)", "def execute_file (self, program):\n with open (program, \"r\") as stream:\n self.execute (stream.read ())\n return self.context", "def exec(self, program, *args, cwd=os.getcwd(), **kwargs):\n if len(args) > 0:\n raise RuntimeError(\"Program arguments are not 
supported for real hardware devices\")\n\n assert self.platform is not None, \"TVM targets need a platform to execute programs\"\n\n if self.timeout_sec > 0:\n raise NotImplementedError\n\n ret = self.platform.run(program, self)\n return ret", "def Run(self, cli, args):\n metrics.Loaded()\n\n tool_context = {}\n if self._parent_group:\n self._parent_group.RunGroupFilter(tool_context, args)\n\n command_instance = self._common_type(cli=cli, context=tool_context)\n\n log.debug('Running %s with %s.', self.dotted_name, args)\n resources = command_instance.Run(args)\n resources = display.Displayer(command_instance, args, resources,\n display_info=self.ai.display_info).Display()\n metrics.Ran()\n\n if command_instance.exit_code != 0:\n raise exceptions.ExitCodeNoError(exit_code=command_instance.exit_code)\n\n return resources", "def test_with_command_line_arguments(self, arguments):\n fixed_arguments = self.get_argument_string(arguments)\n result = self.run(\n arguments=fixed_arguments,\n timeout=self.full_timeout,\n use_fresh_profile=True)\n return self._handle_test_result(result)", "def Run(self, context, result):\n\n # Was the program not specified?\n\n self.program = context[\"isql_path\"]\n\n if context.has_key(\"database_path\"):\n database = context[\"database_path\"]\n else:\n database = \"\"\n self.RunProgram(self.program,\n\t\t\t[ self.program , database ,\n \"-user\", context[\"user_name\"], \"-password\", context[\"user_password\"] ],\n context, result)", "def runTool(self, filename, expected_out, args):\n\n input_path = os.path.join(self.inputs_dir, filename)\n return_value, actual_output = create_subprocess(self.executable_binary, args + [input_path] + ['--'])\n actual_output = actual_output.decode('utf-8')\n\n self.assertEqual(return_value, 0)\n self.evaluate(expected_out, actual_output, command=f'{[self.executable_binary] + args} {filename}')", "def main(self, argv=None):\n\n p = self.build_parser()\n\n args = p.parse_args(argv)\n\n try:\n return self.run(args) or 0\n except CommandFailed as e:\n print(str(e))\n return 1", "def main(args: List[Union[str, bytes]] = sys.argv,):\n\tprogram_name, *args = args\n\targs = decode_raw_args(args, str)\n\n\tgen = Generator(*args)\n\tgen.generate_data()\n\tgen.print_return_list()", "def run_program(program, args=None, **subprocess_kwargs):\n if 'shell' in subprocess_kwargs and subprocess_kwargs['shell']:\n raise ProgramError(\n \"This function is only for non-shell programs, \"\n \"use run_shell_command() instead.\")\n fullcmd = find_program(program)\n if not fullcmd:\n raise ProgramError(\"Program %s was not found\" % program)\n # As per subprocess, we make a complete list of prog+args\n fullcmd = [fullcmd] + (args or [])\n for stream in ['stdin', 'stdout', 'stderr']:\n subprocess_kwargs.setdefault(stream, subprocess.PIPE)\n subprocess_kwargs = alter_subprocess_kwargs_by_platform(\n **subprocess_kwargs)\n return subprocess.Popen(fullcmd, **subprocess_kwargs)", "def _test_program(self, result_success=True, verbose=False):\n\n # set testing parameters\n if self.sim_type is InstructionListJointsFlags.Position:\n self.program.simulation_type = self.sim_type\n self.program.max_mm_step = self.sim_step_mm\n self.program.max_deg_step = self.sim_step_deg\n elif self.sim_type is InstructionListJointsFlags.TimeBased:\n self.program.simulation_type = self.sim_type\n self.program.max_time_step = self.sim_step_time\n elif self.sim_type is None:\n raise ValueError(\"No 'sim_type' provided\")\n\n self.program.load_to_robodk()\n self.program.simulate()\n\n 
if verbose:\n self.program.print()\n self.program.simulation_result.add_to_robodk()\n\n if not result_success:\n # other checks don't make sense\n return\n\n # perform checks on simulation result\n self._test_if_result_message_is_success()\n self._test_for_playback_frame_errors()\n self._test_for_missing_move_ids()\n self._test_for_valid_move_ids()\n self._test_max_simulation_step()\n self._test_if_stop_points_reached()", "def run(self, argv):\n global console\n\n console = init_console()\n\n command, options = self.parse_options(argv)\n\n # We call out to things like the test runner, which expect to operate\n # off of sys.argv. We want to simulate that now that we've parsed\n # options. We'll restore sys.argv after the command finishes.\n old_argv = sys.argv\n sys.argv = argv\n\n try:\n return command.run(options)\n except Exception as e:\n logger.exception('Unexpected exception when running command '\n '\"%s\": %s',\n command.name, e)\n return 1\n finally:\n sys.argv = old_argv", "def main():\n args = get_args()\n prg = args.program\n\n if not os.path.isfile(prg):\n die('Missing expected program \"{}\"'.format(prg))\n\n for name in args.name:\n cmd = '{} \"{}\"'.format(prg, name)\n rv, out = getstatusoutput(cmd)\n if rv != 0:\n warn('Failed to run: {}\\nError: {}'.format(cmd, out))\n else:\n print('Success: \"{}\"'.format(out))\n\n print('Done.')", "def run_program(program):\n halt = False\n instruction_pointer = 0\n\n while not halt:\n halt = process_instruction(instruction_pointer, program)\n instruction_pointer += STEP_SIZE\n\n return program", "def test_run(prog, correct, inp=None):\n print(f\"Testing [{prog2str(prog)}] (inp={inp})...\", end=\"\")\n out = list()\n run_prog(prog, inp, out)\n if (out and out == correct) or (not out and prog == correct):\n print(\" OK!\")\n else:\n fail_str = prog2str(out if out else prog)\n print(f\" Failed: [{fail_str}] != [{prog2str(correct)}]!\")", "def testCreateProgram(self):\n try:\n contextID, retErr = PyOpenCLInterface.CreateContext(self.testResources.listDevicesIDs, self.testResources.dictProperties)\n self.assertEqual(retErr, 0)\n # create mem program\n programID, retErr = PyOpenCLInterface.CreateProgram(contextID, self.testResources.programCodeStrings)\n self.assertEqual(retErr, 0)\n listPrograms = PyOpenCLInterface.ListPrograms()\n self.assertEqual(listPrograms, [programID])\n programProperty, retErr = PyOpenCLInterface.GetProgramProperties(programID)\n self.assertEqual(programProperty['id'], programID)\n self.assertEqual(programProperty['Devices'], self.testResources.listDevicesIDs)\n self.assertEqual(programProperty['Context'], contextID)\n retErr = PyOpenCLInterface.ReleaseProgram(programID)\n self.assertEqual(retErr, 0)\n listPrograms = PyOpenCLInterface.ListPrograms()\n self.assertEqual(listPrograms, [])\n retErr = PyOpenCLInterface.ReleaseContext(contextID)\n self.assertEqual(retErr, 0)\n except:\n print \"Exception caught:\", sys.exc_info()[0]", "def run(context, path=\"\"):\n common.success(f\"Tests {path} running \")\n return start.run_python(\n context,\n f\"-m pytest {path}\"\n )", "def main(cls, *args, **kwargs):\n assert not (bool(args) and bool(kwargs))\n if args:\n return cls._run_args(args)\n elif kwargs:\n return cls._run_kwargs(kwargs)\n else:\n return cls._run_args(None)", "def main():\n\n parser = argparse.ArgumentParser(prog=\"run_test.py\",\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('id', help=\"Id of a test\")\n args = parser.parse_args()\n\n configure_logger()\n\n test_info = 
TESTS.get(args.id, None)\n if not test_info:\n test_info.log.error(f'{args.id} does not exist')\n exit(ErrorCode.CRITICAL)\n os.environ['DISPLAY'] = \":0.0\"\n\n test = Test(args.id, test_info)\n result = test.run()\n\n test.log.info('#' * 80)\n if not result:\n test.log.error('TEST FAILED')\n else:\n test.log.info('TEST PASSED')\n test.log.info('#' * 80)\n exit(not result)", "def main(argv=sys.argv):\n log = _setup_logging()\n log.info(\"Starting {f} version {v} dataset manipulator\".format(\n f=__file__, v=__VERSION__))\n parser = get_parser()\n args = parser.parse_args()\n if args.debug:\n log.setLevel(logging.DEBUG)\n return args.func(args)\n #return main_runner_default(argv[1:], get_parser(), log)", "def main(self):\n try:\n self.parse_args()\n self.run()\n return 0\n except AnalysisBackendError as e:\n L.error(e)\n return 1", "def execute_tool(description, *args):\n command_line = list(args) + files_and_directories\n click.echo(f\"{description}: {' '.join(command_line)}\")\n rv = call(command_line)\n if rv != 0:\n exit(rv)", "def main():\n args, subparser = parse_args()\n if not args:\n sys.exit(1)\n\n # configure logging\n Log.config(args[0].log_level, args[0].log_file)\n\n # display GeneFlow version\n Log.some().info('GeneFlow %s', __version__)\n\n # call the appropriate command\n if not args[0].func(\n args=args[0],\n other_args=args[1],\n subparser=subparser\n ):\n sys.exit(1)\n\n sys.exit(0)", "def main(cli_args=None):\n # build an arg parser\n parser = get_arg_parser()\n\n # run the parser on cli args\n args = parser.parse_args(cli_args)\n\n print(f\"Running script with arguments: {args}\")\n test_input(args.raw_training_data)\n test_input(args.raw_testing_data)\n test_output(args.train_output)\n test_output(args.test_output)" ]
[ "0.77603376", "0.6088054", "0.60775167", "0.58894455", "0.5795883", "0.57625437", "0.5644391", "0.5612917", "0.5577483", "0.54712975", "0.54391956", "0.5397551", "0.5342152", "0.53416663", "0.53210723", "0.5283056", "0.5247545", "0.5224499", "0.51938117", "0.51841336", "0.5168212", "0.5164869", "0.51189053", "0.50694346", "0.5053899", "0.50491637", "0.50486887", "0.5039772", "0.5035316", "0.5018021" ]
0.69340014
1
Perform substitutions on a body of text. returns The string 'text', processed with the substitutions configured for this test instance.
def __PerformSubstitutions(self, text): for substitution in self.substitutions: pattern, replacement = self.SplitValue(substitution) text = re.compile(pattern,re.M).sub(replacement, text) return text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def postprocess(self, text):\r\n return text", "def preprocess(self, text):\r\n return text", "def post_process_text(self, text):\n\t\treturn text", "def substitution(plainText, key):\n return plainText", "def apply(self, text):", "def applyRegularExpressions(strText, substitutionPatternList, languageId, debug=False):\n # print substitutionPatternList\n if debug:\n RegularExpressionFormula.logger.info(\n \"Applying regular expressions to transcript ...\")\n\n # For successive regular expressions\n strText = RegularExpressionFormula.normalizeSpaces(strText, True)\n\n if debug:\n RegularExpressionFormula.logger.info(\n \"Initial transcript: \" + strText)\n\n # For each known regular expression\n for regex, alternate, regexType, regexLanguageId in substitutionPatternList:\n regexLanguageId = int(regexLanguageId)\n\n # Does it match the text language\n if regexLanguageId != languageId and \\\n regexLanguageId != 0:\n continue\n\n # Convert from type\n regexListForType = \\\n RegexType.typeToRegularExpressions(\n regex, alternate, int(regexType))\n\n # Get regular expressions for the given type\n for regexForType in regexListForType:\n regexPattern = regexForType[0] # What to match\n regexSubstitution = regexForType[1] # What to substitute\n\n strLineOriginal = strText\n\n # Is it some python code\n if alternate.startswith(\"lambda\"):\n # Use alternate version\n strText = re.sub(regexPattern, eval(\n alternate), strText, flags=re.UNICODE | re.MULTILINE)\n else:\n # print regexPattern, regexSubstitution\n # No ignore case available\n # print regexPattern, \" --> \", strText\n strText = re.sub(regexPattern, regexSubstitution,\n strText, flags=re.UNICODE | re.MULTILINE)\n\n if debug:\n if strText.encode('utf-8') != strLineOriginal.encode('utf-8'):\n sys.stdout.write(\n \" --> Original string: >\" + strLineOriginal.encode('utf-8') + \"<\\n\")\n sys.stdout.write(\" Match pattern: >\" + regexPattern.encode('utf-8') + \"<\"\n \"\\n Substitution: >\" + regexSubstitution.encode('utf-8') + \"<\")\n sys.stdout.write(\n \"\\n >\" + strText.encode('utf-8') + \"<\\n\")\n\n strText = RegularExpressionFormula.normalizeSpaces(strText)\n\n if debug:\n sys.stdout.flush()\n RegularExpressionFormula.logger.info(\n \"Final transcript: \" + strText + \"\\n\")\n\n return strText", "def translate_text(target, text):\n return text", "def process_text(self, text, language):", "def run(self, text):\r\n for i in range(self.markdown.htmlStash.html_counter):\r\n html, safe = self.markdown.htmlStash.rawHtmlBlocks[i]\r\n if self.markdown.safeMode and not safe:\r\n if str(self.markdown.safeMode).lower() == 'escape':\r\n html = self.escape(html)\r\n elif str(self.markdown.safeMode).lower() == 'remove':\r\n html = ''\r\n else:\r\n html = self.markdown.html_replacement_text\r\n if self.isblocklevel(html) and (safe or not self.markdown.safeMode):\r\n text = text.replace(\"<p>%s</p>\" % \r\n (self.markdown.htmlStash.get_placeholder(i)),\r\n html + \"\\n\")\r\n text = text.replace(self.markdown.htmlStash.get_placeholder(i), \r\n html)\r\n return text", "def evaluateText(compiled_expression):", "def post_get_convert(self, site, getText):\n return getText", "def apply(\n self,\n text: str,\n *,\n limit: int = 2000,\n context_id: Any = None,\n ) -> str:\n\n context = self.get_context(text=text, context_id=context_id)\n\n for replacement in self._replacements:\n text = replacement.apply(\n text,\n severity=self.severity,\n limit=limit,\n context=context,\n )\n\n return text", "def contextualize_text(text, context): # private\r\n if 
not text:\r\n return text\r\n for key in sorted(context, lambda x, y: cmp(len(y), len(x))):\r\n # TODO (vshnayder): This whole replacement thing is a big hack\r\n # right now--context contains not just the vars defined in the\r\n # program, but also e.g. a reference to the numpy module.\r\n # Should be a separate dict of variables that should be\r\n # replaced.\r\n if '$' + key in text:\r\n try:\r\n s = str(context[key])\r\n except UnicodeEncodeError:\r\n s = context[key].encode('utf8', errors='ignore')\r\n text = text.replace('$' + key, s)\r\n return text", "def refined_text(text):\n import re\n text = text.replace('<e1>','')\n text = text.replace('</e1>','')\n text = text.replace('<e2>','')\n text = text.replace('</e2>','')\n\n text = text[1:-1] # trim quotes\n # text = text.replace('\"','')\n # text = text.replace(',','')\n # text = text.replace('.','')\n # text = text.replace(';','')\n # text = text.replace('`','')\n # text = text.replace('\\'','')\n # text = text.replace('(','')\n # text = text.replace(')','')\n # text = text.replace('/','')\n\n return text", "def plain_text_body(self, val: str):\n self._plain_text_body = val", "def _text(self, text):\r\n URL_REGEX.sub(self._parse_urls, text)\r\n USERNAME_REGEX.sub(self._parse_users, text)\r\n LIST_REGEX.sub(self._parse_lists, text)\r\n HASHTAG_REGEX.sub(self._parse_tags, text)\r\n return None", "def recept(self, text, *args, **kwargs):\n return text", "def materialize(template, substitutions):\n\n script_str = template\n for param, value in substitutions.items():\n script_str = re.sub(param, str(value), script_str)\n\n return script_str", "def adjustText(cls, text):\n\t\t\n\t\t\"\"\"Adjust dates so to transform strings such as '21 August' to 'August\n\t\t 21' and have them recognized by the SCNLP tools\"\"\"\n\t\tmonths = (u'January|February|March|April|May|June|July'\n\t\t\t\t\t'August|September|October|November|December')\n\t\tdates = re.compile('(?P<day>\\d{1,2})\\s+(?P<month>%s)(\\s+(?P<year>(\\d{2,4})))?' % months)\n\t\ttext = dates.sub(cls.normalizeDate, text)\n\t\t# Strip any remaining HTML (WikiExtractor is not perfect)\n\t\thtmlTags = re.compile('<[^>]+>')\n\t\t\n\t\ttext = htmlTags.sub(\"\", text)\n\t\t\n\t\treturn text", "def run(self, content):\n parts = []\n offset = 0\n for match in self.regexp.finditer(content):\n parts.append(content[offset:match.start(0)])\n parts.append(self.replace(match))\n offset = match.end(0)\n parts.append(content[offset:])\n return ''.join(parts)", "def _prepare_text(body):\n text = body.lower()\n text = text.replace('\\n', ' ')\n regex = re.compile('[^a-z ]')\n return regex.sub('', text)", "def textfrombodies(self) -> str:\n type_priority = [\"plain\", \"html\", \"other\"] # TODO: Make configurable\n\n for texttype in type_priority:\n if texttype == \"plain\" and texttype in self.textbodies:\n \"\"\"Text is plain, so it can be used verbatim\"\"\"\n return self.textbodies[texttype]\n if texttype == \"html\" and texttype in self.textbodies:\n \"\"\"HTML text. 
Convert to markup with html2text and remove extra spaces\"\"\"\n text = html2text.html2text(self.textbodies[texttype])\n # Remove every second newline which is added to distinguish between paragraphs in Markdown, but makes\n # the jira ticket hard to read.\n return re.sub(\"(\\n.*?)\\n\", \"\\g<1>\", text)\n if texttype == \"other\" and len(self.textbodies):\n # If no other text is found, return the first available body if any.\n return self.textbodies[list(self.textbodies.keys())[0]]\n return \"The email contained no text bodies.\"", "def process_text(txt):\n\n # Make text all lowercase, remove line breaks and tabs\n txt = txt.lower()\n txt = sub(\"\\n\", \" \", txt)\n txt = sub(\"\\t\", \" \", txt)\n txt = sub(\"/\", \" \", txt)\n txt = sub(\"’\", \"\", txt)\n\n # Convert numbers, urls, email addresses, and dollar signs\n txt = sub(\"[0-9]+\", \"number\", txt)\n txt = sub(\"(http|https)://[^\\s]*\", \"httpaddr\", txt)\n txt = sub(\"[^\\s]+@[^\\s]+\", \"emailaddr\", txt)\n txt = sub(\"[$]+\", \"dollar\", txt)\n\n # Remove additional punctuation\n table = str.maketrans({key: None for key in punctuation})\n txt = txt.translate(table)\n\n return txt", "def tr(self, text, gender=None):\n _LOG.debug(\"text = %r\", text)\n if self._tr:\n variants = [text]\n if gender:\n variants = [text + \"#\" + gender] + variants\n _LOG.debug(\"variants = %r\", variants)\n for txt in variants:\n _LOG.debug(\"variant = %r\", txt)\n if hasattr(self._tr, \"ugettext\"):\n tr_text = self._tr.ugettext(txt)\n else:\n tr_text = self._tr.gettext(txt)\n _LOG.debug(\"translation = %r\", tr_text)\n if tr_text:\n return tr_text\n _LOG.debug(\"return original = %r\", text)\n return text", "def apply(self, strText, languageId, debug=False):\n if len(self.substitutionPatternList) == 0:\n if self.rulesFile == None:\n return strText\n else:\n self.logger.info(\"Loading regexes from %s\" %\n str(self.rulesFile))\n self.substitutionPatternList = \\\n RegexList.loadFromFile(self.rulesFile)\n\n return RegularExpressionFormula.applyRegularExpressions(strText,\n self.substitutionPatternList, languageId, debug)", "def _html(self, text):\r\n html = URL_REGEX.sub(self._parse_urls, text)\r\n html = USERNAME_REGEX.sub(self._parse_users, html)\r\n html = LIST_REGEX.sub(self._parse_lists, html)\r\n return HASHTAG_REGEX.sub(self._parse_tags, html)", "def convert(self, text):\r\n # Main function. The order in which other subs are called here is\r\n # essential. Link and image substitutions need to happen before\r\n # _EscapeSpecialChars(), so that any *'s or _'s in the <a>\r\n # and <img> tags get encoded.\r\n\r\n # Clear the global hashes. If we don't clear these, you get conflicts\r\n # from other articles when generating a page which contains more than\r\n # one article (e.g. 
an index page that shows the N most recent\r\n # articles):\r\n self.reset()\r\n\r\n if not isinstance(text, unicode):\r\n #TODO: perhaps shouldn't presume UTF-8 for string input?\r\n text = unicode(text, 'utf-8')\r\n\r\n if self.use_file_vars:\r\n # Look for emacs-style file variable hints.\r\n emacs_vars = self._get_emacs_vars(text)\r\n if \"markdown-extras\" in emacs_vars:\r\n splitter = re.compile(\"[ ,]+\")\r\n for e in splitter.split(emacs_vars[\"markdown-extras\"]):\r\n if '=' in e:\r\n ename, earg = e.split('=', 1)\r\n try:\r\n earg = int(earg)\r\n except ValueError:\r\n pass\r\n else:\r\n ename, earg = e, None\r\n self.extras[ename] = earg\r\n\r\n # Standardize line endings:\r\n text = re.sub(\"\\r\\n|\\r\", \"\\n\", text)\r\n\r\n # Make sure $text ends with a couple of newlines:\r\n text += \"\\n\\n\"\r\n\r\n # Convert all tabs to spaces.\r\n text = self._detab(text)\r\n\r\n # Strip any lines consisting only of spaces and tabs.\r\n # This makes subsequent regexen easier to write, because we can\r\n # match consecutive blank lines with /\\n+/ instead of something\r\n # contorted like /[ \\t]*\\n+/ .\r\n text = self._ws_only_line_re.sub(\"\", text)\r\n\r\n # strip metadata from head and extract\r\n if \"metadata\" in self.extras:\r\n text = self._extract_metadata(text)\r\n\r\n text = self.preprocess(text)\r\n\r\n if self.safe_mode:\r\n text = self._hash_html_spans(text)\r\n\r\n # Turn block-level HTML blocks into hash entries\r\n text = self._hash_html_blocks(text, raw=True)\r\n\r\n # Strip link definitions, store in hashes.\r\n if \"footnotes\" in self.extras:\r\n # Must do footnotes first because an unlucky footnote defn\r\n # looks like a link defn:\r\n # [^4]: this \"looks like a link defn\"\r\n text = self._strip_footnote_definitions(text)\r\n text = self._strip_link_definitions(text)\r\n\r\n text = self._run_block_gamut(text)\r\n\r\n if \"footnotes\" in self.extras:\r\n text = self._add_footnotes(text)\r\n\r\n text = self.postprocess(text)\r\n\r\n text = self._unescape_special_chars(text)\r\n\r\n if self.safe_mode:\r\n text = self._unhash_html_spans(text)\r\n\r\n text += \"\\n\"\r\n\r\n rv = UnicodeWithAttrs(text)\r\n if \"toc\" in self.extras:\r\n rv._toc = self._toc\r\n if \"metadata\" in self.extras:\r\n rv.metadata = self.metadata\r\n return rv", "def inner_text(self, text) -> None:\n logging.info(f\"inner text. {self.desc}\")\n js = f\"\"\"var elm = document.querySelectorAll(\"{self.css}\")[{self.index}];\n elm.innerText=\"{text}\";\"\"\"\n self._execute_javascript(js)", "def parse_text(self, text):\n self._text_paragraph = text.split(\"\\n\")\n self._render()", "def process_text(document):\n return preprocess_string(document,\n filters=[strip_tags, strip_punctuation,\n strip_multiple_whitespaces,\n strip_numeric, remove_stopwords,\n strip_short]\n )" ]
[ "0.65611976", "0.6435312", "0.6424799", "0.6276254", "0.6202044", "0.61083364", "0.6037628", "0.6034791", "0.59750575", "0.57668275", "0.5727748", "0.57236975", "0.5721389", "0.5685642", "0.56766623", "0.5662096", "0.5638508", "0.56205267", "0.5587311", "0.55449075", "0.5541788", "0.55329967", "0.55227274", "0.5517054", "0.5494322", "0.5493339", "0.54426795", "0.5434009", "0.5430204", "0.5426383" ]
0.7525169
0
Get articles for a given news source
def for_source(source, articles=None): if not articles: articles = load_articles(nl.read_data()) source_arts = [a for a in articles if a.source == source] for art in source_arts: yield art
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def listsources():\n\tmain_url = \" https://newsapi.org/v2/sources?apiKey=5f81b593f35d42a8980313250c03d7e7\"\n\n\t# fetching data in json format \n\topen_source = requests.get(main_url).json() \n\n\t# getting all articles in a string sources\n\tsource = open_source[\"sources\"] \n\n\t# empty list which will \n\t# contain all trending newssources \n\tresults = [] \n\t\n\tfor k in source: \n results.append(k[\"id\"])\n \n \t\n\tfor w in results[0:4]:\n print(w)", "def get_news(category):\n get_news_url = base_url.format(category,api_key)\n\n with urllib.request.urlopen(get_news_url) as url:\n get_news_data = url.read()\n get_news_response= json.loads(get_news_data)\n\n news_results = None\n\n if get_news_response['sources']:\n news_results_list = get_news_response['sources']\n news_results = process_results(news_results_list)\n\n return news_results", "def news()->str:#return array[news desc,news link]\n event_log(\"retrieve news data....\",\"\")\n c = 0\n location = read_json(\"news_api\")[0]\n main_url = \"https://newsapi.org/v2/top-headlines?country=\"+location+\"&apiKey=\"+read_json(\"news_api\")[1]+\"\"#add a country selection optin via json\n page = requests.get(main_url).json()\n article = page[\"articles\"]\n news_result = []\n for data in article:\n news_result.append([data[\"title\"],str(data[\"url\"]).replace('\"',\" \")])#exctracts the wanted data from api\n if c == 5:#add this to json file so scalibility\n break\n c+=1\n return news_result", "def get_articles(city):\n article_list = []\n\n url = 'https://news.search.yahoo.com/search;?p=' + city\n source = requests.get(url, timeout=5)\n plain_text = source.text\n soup = BeautifulSoup(plain_text, \"html5lib\")\n\n articles = soup.findAll('div', {'class': 'NewsArticle'})\n\n i = 0\n for item in articles:\n if i < 3:\n title = item.find('a', attrs={'class':'thmb'})['title']\n source = item.find('span', attrs={'class':'mr-5'}).text\n header4 = item.find('h4', attrs={'class':'fz-16'})\n link = header4.find('a')['href']\n\n article = format_article_list(title, source, \"| \" + link)\n article_list.append(article)\n\n i += 1\n else:\n break\n\n return article_list", "def news(self):\n\n # Get articles with search term, if available, from each News API source\n news_api_articles = pd.DataFrame()\n\n q = urllib.parse.quote(\" OR \".join(self.search_terms), safe='')\n\n response = requests.get(\"https://newsapi.org/v2/everything?q=\" + q + \"&from=\" + datetime.now().strftime(\n \"%Y-%m-%d\") + \"&sortBy=popularity&pageSize=100&apiKey=\" + self.__news_api_key)\n\n if response.status_code == 200:\n data = json.loads(response.text)\n\n source_articles = []\n\n for article in data['articles']:\n source_articles.append([article['title'],\n article['description'],\n article['url'],\n article['publishedAt']])\n\n source_articles = pd.DataFrame(source_articles, columns=['title', 'description', 'url', 'publishedAt'])\n news_api_articles = pd.concat([news_api_articles, source_articles])\n\n news_api_articles = news_api_articles.reset_index(drop='True')\n\n news_api_articles['publishedAt'] = news_api_articles['publishedAt'].apply(pd.to_datetime)\n\n news_api_articles = news_api_articles.fillna(' ')\n\n term_in_title = news_api_articles['title'].apply(self.any_term)\n\n news_api_articles = news_api_articles[term_in_title]\n\n if (len(news_api_articles) > 10):\n news_api_articles = news_api_articles[0:10]\n\n else:\n print(\"News API failed to return any items\")\n\n # Create shortened links using bitly if access token is provided\n if 
self.__bitly_access_token != '':\n\n bitly_urls = []\n\n for index, article in news_api_articles.iterrows():\n url = article['url']\n bitly_response = requests.get(\"https://api-ssl.bitly.com/v3/shorten\",\n params={'longUrl': url, 'access_token': self.__bitly_access_token})\n\n if bitly_response.status_code == 200:\n data = json.loads(bitly_response.text)\n bitly_urls.append(data['data']['url'])\n\n news_api_articles['url'] = bitly_urls\n\n # Store final list to TwitterBot object\n self.list = news_api_articles\n\n return", "def news_fetch(region,news_key):\n #Allows for customizable API key and weather location.\n url = (f\"http://newsapi.org/v2/top-headlines?country={region}&apiKey={news_key}\")\n #Gets API with requests and convert to .json\n news_api = requests.get(url)\n news_json = news_api.json()\n return news_json", "def get_news_articles():\n filename = 'inshorts_news_articles.csv'\n\n # checks if file exists if not make a new request and creates\n if os.path.exists(filename):\n return pd.read_csv(filename)\n else:\n return make_new_request()", "def get_articles_from_topic(url):\n headers = {'User-Agent': 'Codeup Data Science'}\n response = get(url, headers=headers)\n soup = BeautifulSoup(response.content, 'html.parser')\n\n output = []\n\n articles = soup.select(\".news-card\")\n\n for article in articles: \n title = article.select(\"[itemprop='headline']\")[0].get_text()\n body = article.select(\"[itemprop='articleBody']\")[0].get_text()\n author = article.select(\".author\")[0].get_text()\n published_date = article.select(\".time\")[0][\"content\"]\n category = response.url.split(\"/\")[-1]\n\n article_data = {\n 'title': title,\n 'body': body,\n 'category': category,\n 'author': author,\n 'published_date': published_date,\n }\n output.append(article_data)\n\n\n return output", "def get_news(url):\r\n \r\n # parse RSS feed into list of dictionaries\r\n feed = feedparser.parse(url)\r\n\r\n # no RSS feed articles for url\r\n if len(feed['entries']) == 0:\r\n return []\r\n \r\n # get first ten articles from the RSS feed\r\n news = []\r\n i = 0\r\n while True:\r\n if i == len(feed['entries']) or i > 30:\r\n break\r\n \r\n try:\r\n # get link to article\r\n link = feed[\"entries\"][i][\"link\"]\r\n\r\n # get title of article\r\n title = feed[\"entries\"][i][\"title\"]\r\n \r\n try:\r\n # get raw summary of article\r\n summary_raw = feed[\"entries\"][i][\"summary\"]\r\n \r\n # format summary\r\n summary = \"\"\r\n for c in summary_raw:\r\n if c == \"<\":\r\n summary += \"...\"\r\n break\r\n summary += c\r\n except KeyError as e:\r\n logging.error(\"no summary for RSS feed article: {}\".format(link))\r\n summary = \"read more here...\"\r\n \r\n # get raw date \r\n date_raw = feed[\"entries\"][i][\"published_parsed\"]\r\n \r\n if date_raw is None:\r\n date = feed[\"entries\"][i][\"published\"]\r\n \r\n else:\r\n # format date\r\n year = str(date_raw.tm_year)\r\n months = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\"]\r\n month = months[date_raw.tm_mon - 1]\r\n day = str(date_raw.tm_mday)\r\n weekdays = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]\r\n wday = weekdays[date_raw.tm_wday]\r\n hour = str(date_raw.tm_hour)\r\n hour = \"{:2}\".format(hour).format(' ','0')\r\n min = str(date_raw.tm_min)\r\n min = \"{:2}\".format(min).replace(' ','0')\r\n date = hour + \":\" + min + \" - \" + wday + \" \" + month + \" \" + day + \", \" + year\r\n 
\r\n # compile entry and append to news list\r\n entry = {\"link\":link, \"title\":title, \"date\":date, \"summary\":summary}\r\n \r\n # sanitize entry\r\n for key in entry:\r\n # apostrophe\r\n entry[key] = entry[key].replace(\"&#39;\", \"'\")\r\n # right single quotation mark\r\n entry[key] = entry[key].replace(\"’\", \"&#8217;\")\r\n # left single quotation mark\r\n entry[key] = entry[key].replace('\"', \"&#8216;\")\r\n # right double quotation mark\r\n entry[key] = entry[key].replace(\"'\", \"&#8221;\")\r\n # left double quotation mark\r\n entry[key] = entry[key].replace(\"'\", \"&#8220;\")\r\n # Weird ampersand formatting\r\n entry[key] = entry[key].replace(\"&amp;\", \"&\")\r\n \r\n # prepare entry for sqlite queries\r\n entry[key] = surround(entry[key])\r\n \r\n # add entry to news list\r\n news.append(entry)\r\n \r\n # max 10 entries\r\n if len(news) == 10:\r\n break\r\n i += 1\r\n \r\n except Exception as e:\r\n logging.error(e)\r\n i += 1\r\n pass\r\n \r\n # success\r\n return news", "def sourcenews(source):\n\turlnews=urlsource\n\turl=urlnews+source\n\turlapi=url+'&'+'apiKey='\n\turlsour=urlapi+apikey\n\tresponse=requests.get(urlsour)\n\tdata=response.json()\n\treturn data", "def fetch(api_key, query='', page=1, from_date=False, to_date=False):\n fetch_articles(api_key, query, page, from_date, to_date)", "def get_headlines(newssource):\n \n \n newssource_dict = {}\n url = 'https://newsapi.org/v1/articles?source=' + newssource + '&sortBy=top&apiKey=' + api\n request = http.request('GET',url,timeout=4.0)\n\n headline = json.loads(request.data)\n \n if not headline['articles']:\n return \"NewsAPI can not receive information from\" + newsource + \"right now\"\n \n newssource_dict['url'] = headline['articles'][0]['url']\n newssource_dict['title']= headline['articles'][0]['title']\n newssource_dict['description'] = headline['articles'][0]['description']\n \n \n return newssource_dict", "def articles():\n\n # Store the 'geo' part of the URL as a string called 'geo'. Check 'geo' loaded, and produce runtime error if not.\n # e.g. '12589'\n geo = request.args.get(\"geo\")\n if not geo:\n raise RuntimeError(\"missing geo\")\n\n # Run 'geo' through 'lookup()' function, store resulting list of objects in 'rows'.\n # e.g. [{'link':'www.website1.com','title':'article_title1'},{'link':'www.website2.com','title':'article_title2'}]\n rows = lookup(geo)\n\n # Run 'rows' through 'jsonify()'' function, and return resulting dictionary w/ up to 5 objects. The 'jsonify()' function modifies the input to JSON.\n # e.g. 
[{'link':'www.website1.com','title':'article_title1'},{'link':'www.website2.com','title':'article_title2'}]\n if len(rows) > 5:\n return jsonify(rows[0], rows[1], rows[2], rows[3], rows[4])\n else:\n return jsonify(rows)", "def fetch_news(n):\n\n # This is the list we will use the pass back the news information.\n data = []\n\n # Get news stories from the MEN RSS feed.\n response = feedparser.parse('https://www.manchestereveningnews.co.uk/?service=rss')\n\n # Loop through the news items, and the pull out the data we need.\n for news in response.entries[:n]:\n data.append({\n 'headline': news.title,\n 'content': news.description,\n })\n\n return data", "async def get_news(q: str = None):\n\treturn aggregate_news(q)", "def dashboard_article_sources():\n sources = dict()\n past_30 = offset_time_past(30, str=True)\n articles = mongo.db[app.config['ARTICLES_COLLECTION']]\n results = articles.find({'collected': {'$gt': past_30}}, {'_id': 0})\n for result in results:\n sources[result['source']] = sources.get(result['source'], 0) + 1\n sources = sorted(sources.items(), key=operator.itemgetter(1), reverse=True)\n data = sources[:10]\n return jsonify(data)", "def show_sources_all():\n response = requests.get(SOURCE_URL)\n json = response.json()\n for source in json['sources']:\n print(u\"{0}: <{1}> {2}\".format(\"News Code\", source['id'], source['name']))", "def articles(news_id):\n #get news based on source id\n articles = get_articles(news_id)\n print(articles)\n title = f'{news_id}'\n return render_template(\"articles.html\", articles = articles, title = title)", "async def news(self):\n url = f\"https://newsapi.org/v2/top-headlines?country=nz&apiKey={self.bot.news_api_key}\"\n async with ClientSession() as session:\n async with session.get(url) as response:\n r = await response.json()\n firstArticle = r[\"articles\"][0]\n nSource = firstArticle[\"source\"][\"name\"]\n nTitle = firstArticle[\"title\"]\n nTimestamp = firstArticle[\"publishedAt\"]\n embed = discord.Embed(\n title=f\"News Title: {nTitle}\", description=f\"News Source: {nSource}\"\n )\n embed.add_field(name=\"News Content\", value=firstArticle[\"description\"])\n embed.set_image(url=firstArticle[\"urlToImage\"])\n embed.set_footer(text=f\"News Timestamp: {nTimestamp}\")\n\n channel = self.bot.get_channel(self.bot.main_channel_id)\n await channel.send(embed=embed)", "def pull_articles(self, *args, **kwargs):\n tasks.pull_articles()\n return Response({})", "def articles ():\n\n offset = int (arg ('offset', '0', re_integer_arg))\n limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)\n\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT no\n FROM article\n ORDER BY no\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'offset' : offset, 'limit' : limit })\n\n return make_articles_response (res, limit)", "def searchnews(self,\n keywords=None,\n dateStart=None,\n dateEnd=None,\n sortBy=None,\n domains=None,\n page=20,\n sources='abc-news',\n language=None,\n apiKey=None,\n version=None):\n\n # detect version\n version = self.version\n request_params=None\n\n # set up for version 1 articles\n if self.version != 2:\n if sortBy not in ['top','latest','popular']:\n sortBy='top'\n warnings.warn('Version 1 sorts by top, latest, or popular. 
'\n 'Defaulted to top for this search.')\n\n request_params = {\n \"sortBy\":sortBy,\n \"source\": sources,\n \"apiKey\": self._api_key,\n }\n\n if not keywords and self.version !=1:\n raise ValueError('You must enter a keywords to use the search '\n 'service.')\n\n # retrive the api key if set; otherwise, error\n if not self._api_key:\n raise ValueError(\n 'You must use use an API key; to get a key visit https://news'\n 'api.org/. If you have an API key, set it using the '\n 'Api.SetCredentials method.')\n\n # if api key is there, set the params\n else:\n if not request_params:\n if not sortBy:\n if sortBy not in ['publishedAt', 'relevancy', 'popularity']:\n sortBy='publishedAt'\n warnings.warn('Version 2 can only sort by publishedAt, '\n 'relevancy, or popularity. Defaulted to '\n 'publishedAt for this search.')\n request_params = {\n \"q\": keywords,\n \"dateStart\":dateStart,\n \"dateEnd\":dateEnd,\n \"sortBy\":sortBy,\n \"domains\":domains,\n \"page\":page,\n \"sources\":sources,\n \"language\":language,\n \"apiKey\": self._api_key,\n }\n\n\n # build the url\n url = self.base_url + self.__endpoints['search']\n\n # make the request\n r = requests.get(url,params=request_params,timeout=self._timeout)\n\n print(r.url)\n # return the json\n return r.json()", "def get_news(company_name: str) -> list[dict]:\n news_params = {\n \"q\": company_name,\n \"apiKey\": config.NEWS_API_KEY\n }\n response = requests.get(\"https://newsapi.org/v2/everything\", params=news_params)\n response.raise_for_status()\n news_data = response.json()\n return news_data[\"articles\"][:3]", "def get_remote_news_items(self):\n items = []\n params = {\n \"base_url\": self.osha_json_url,\n \"lang\": api.portal.get_tool(\"portal_languages\").getPreferredLanguage(),\n \"query_tags\": self.remote_news_query_tags,\n }\n qurl = \"{base_url}/{lang}/services/hw/news/{query_tags}\".format(**params)\n result = urlopen(qurl)\n if result.code == 200:\n json = load(result)\n for node in json.get(\"nodes\"):\n item = node.get(\"node\")\n pd = item.get('publication_date', '')\n items.append({\n 'remote_item': True,\n 'Title': item['title'],\n 'Date': (\n pd and DateTime(pd, datefmt=\"international\").strftime(\n \"%Y/%m/%d %H:%M\") or \"\"),\n 'getURL': item.get('path'),\n 'path': item.get('path'),\n 'Description': item.get('summary', '') or item.get('body', ''),\n 'text': item.get('summary', '') and item.get('body', '') or '',\n 'remote_image': item.get('image', ''),\n 'node_id': item.get('nid'),\n })\n return items", "def get_from_db(date, source, connection, logger):\n check_date(date, logger)\n result = execute_news(date, connection, source, logger)\n if len(result) == 0:\n raise SystemExit(f\"Sorry, there are no articles for {date}!\")\n else:\n return result", "def articles(self, page=None, per_page=None, sort=None):\r\n params = base.get_params(None, locals())\r\n request = http.Request('GET', self.get_url(), params)\r\n\r\n return request, parsers.parse_json", "def scrapeTopic(self, topic, num_articles, sources=list(site.all_sites)): \n pass", "def get_news(self, keyword, since=None, to=None, page=None):\n payload = {}\n url = \"https://newsapi.org/v2/everything\"\n payload['q'] = keyword\n if since is not None:\n try:\n start_dt = dateutil.parser.parse(since)\n if to is not None:\n to_dt = dateutil.parser.parse(to)\n else:\n to_dt = datetime.datetime.now()\n except ValueError:\n raise IOError('since parameter can not be converted to datetime')\n payload['from'] = start_dt.isoformat()\n payload['to'] = to_dt.isoformat()\n 
payload['language'] = 'en'\n payload['pageSize'] = 20\n payload['sortBy'] = 'popularity'\n payload['excludeDomains'] = 'startribune.com'\n if page is not None and type(page) == int and page > 0:\n payload['page'] = page\n r = requests.get(url, auth=self.auth, params=payload)\n return r.content", "def articleList():\n articles = get_news(\n 5, since=news.YESTERDAY.strftime(\"%yyyy-%mm-%dd\"), query=\"covid\"\n )\n title_list = []\n desc_list = []\n url_list = []\n image_list = []\n source_list = []\n for art in articles:\n image_list.append(art.image)\n title_list.append(art.title)\n source_list.append(art.source)\n desc_list.append(art.description)\n url_list.append(art.url)\n socketio.emit(\n ARTICLE,\n {\n \"title\": title_list,\n \"desc\": desc_list,\n \"url\": url_list,\n \"img\": image_list,\n \"sources\": source_list,\n },\n )\n return True", "def get_news(keywords, news='all'):\n if news is 'all':\n return news_client.get_everything(q=keywords)\n elif news is 'top':\n return news_client.get_top_headlines(q=keywords)\n else:\n raise AttributeError(\"Optional argument news expected 'top' or 'all'\")" ]
[ "0.7267898", "0.69831485", "0.6910983", "0.6892911", "0.68473136", "0.67912644", "0.6780562", "0.6722617", "0.6709383", "0.6705438", "0.66801316", "0.663876", "0.6628921", "0.6584156", "0.6563533", "0.65245885", "0.6516218", "0.65005183", "0.6477813", "0.64497817", "0.6429842", "0.6426314", "0.64171225", "0.6414864", "0.64013606", "0.6385523", "0.63827646", "0.63666064", "0.6359781", "0.6352771" ]
0.710921
1
Huber function. An analytic function that is quadratic around its minimum and linear in its tails. Its minimum is at offset. Quadratic between offset - delta and offset + delta and linear outside.
def huber(x, offset, delta): i = np.abs(x - offset) < delta return (x-offset)**2/2 * i + (1 - i)*delta*(np.abs(x-offset) - delta/2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def beeston_barlow_root1(a, p, U, d):\n return ((-U*p - U + a*p + d*p -\n np.sqrt(U**2*p**2 + 2*U**2*p + U**2 + 2*U*a*p**2 + 2*U*a*p -\n 2*U*d*p**2 - 2*U*d*p + a**2*p**2 + 2*a*d*p**2 + d**2*p**2))/(2*p*(p + 1)))", "def trapezium_rule(f, m, x, a, b, n):\n h = (b-a)/float(n)\n s = 0.5*(f(m, x, a) + f(m, x, b))\n for i in range(n):\n s = s + f(m, x, a + i*h)\n return h*s", "def Bernstein(i, n, t):\n return special.binom(n, i) * t ** i * (1 - t) ** (n - i)", "def f4():\n n = 4\n v = np.arange(n)**0.75 * 0.2\n e = (np.arange(n)+1)**0.7 * 1e-1\n\n n = 12\n v = np.arange(n)\n e = np.array([0.1]*n) * 10e-0\n\n print(Sumb(v,e))\n\n f = plt.figure()\n a = f.add_subplot(111)\n\n dx = 0.0001\n x = np.arange(-1,v[-1]+1,dx)\n y = x.copy()\n y[:] = 0.\n for i in range(n):\n yx = lg(x,v[i],e[i])\n a.plot(x,np.exp(yx),label='{:d}'.format(i))\n y += yx\n y = np.exp((y - np.max(y))/n**2)\n y /= np.sum(y) * dx \n a.plot(x,y,label='sum')\n s = np.argsort(y)[::-1]\n ys = np.cumsum(y[s]) * dx\n yi = np.argwhere(ys > 0.682689492137)[0][0]\n print('mean = {:2f}'.format(x[s[0]]))\n print('sigma = {:2f}'.format(yi*dx/2))\n xy = np.ndarray((yi+2,2))\n i0,i1 = min(s[:yi]), max(s[:yi])\n xy[:yi,0] = x[i0:i1+1]\n xy[:yi,1] = y[i0:i1+1]\n xy[yi:,1] = 0\n xy[yi:,0] = x[[i1,i0]]\n a.add_patch(Polygon(xy,fill=True,color='green',ec='none',alpha=0.25))\n \n leg = plt.legend()\n plt.draw()", "def huber(t=1.345):\n return sm.robust.norms.HuberT(t)", "def beeston_barlow_root2(a, p, U, d):\n return ((-U*p - U + a*p + d*p +\n np.sqrt(U**2*p**2 + 2*U**2*p + U**2 + 2*U*a*p**2 + 2*U*a*p -\n 2*U*d*p**2 - 2*U*d*p + a**2*p**2 + 2*a*d*p**2 + d**2*p**2))/(2*p*(p + 1)))", "def heptagonal(n: int) -> int:\n return int(n * (5 * n - 3) / 2)", "def g_sebal_func(ts, albedo_sur, ndvi):\n g = np.copy(ndvi).astype(np.float64)\n np.power(g, 4, out=g)\n g *= -0.98\n g += 1\n g *= ts\n g *= (albedo_sur * 0.0074 + 0.0038)\n return g", "def basis_fns(n=0):\n return lambda x: np.sum(x ** (n+1), axis=1)", "def b_n(n):\n if n <= 0.36: # MCH03\n ei = np.array([0, 1, 2, 3, 4])\n ai = np.array([0.01945, -0.8902, 10.95, -19.67, 13.43])\n else: # CB99\n ei = np.array([1, 0, -1, -2])\n ai = np.array([2, -1./3, 4./405, 46./25515])\n return np.sum(ai * np.power(float(n), ei))", "def nac_w_optimal_r(fan_in, fan_out):\n fan = max(fan_in + fan_out, 5)\n r = scipy.optimize.bisect(lambda r: fan * nac_w_variance(r) - 2, 0, 10)\n return r", "def gauss_sum_to(n):\n the_sum = n * (n + 1) / 2\n return the_sum", "def gauss_quad_1d(q, n=150):\n z, w = leggauss(n)\n theta = (THETA_HIGH-THETA_LOW)*(z + 1)/2 + THETA_LOW\n sin_theta = abs(sin(theta))\n Zq = kernel_1d(q=q, theta=theta)\n return np.sum(Zq*w*sin_theta)*(THETA_HIGH-THETA_LOW)/2", "def skew_js_fgan_lower_bound(f):\n n = tf.cast(f.shape[0], tf.float32)\n alpha = 1/n\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.softplus(f_diag))) / (n * (n - 1.))\n return alpha*first_term - (1-alpha)*second_term", "def nu0(self, Td):\n f = lambda nu: self.bCIB + self.cCIB + self.dLnBlackdLnNu(nu, Td)\n nuMin = 1.e1 * 1.e9 # in Hz\n nuMax = 1.e5 * 1.e9 # in Hz\n '''\n Nu = np.linspace(nuMin, nuMax, 201)\n F = np.array(map(f, Nu))\n plt.semilogx(Nu, F)\n plt.semilogx(Nu, 0.*Nu)\n plt.show()\n '''\n result = optimize.brentq(f , nuMin, nuMax)\n return result", "def greedy_binning(t, C, n_bins, maxit= 1000):\n b= n_bins\n n_u= generate_n_u(t)\n d= len(n_u)\n cum_n_u= np.hstack([[0], np.cumsum(n_u)])\n tau= 
np.unique(t)\n tau= np.hstack([tau, [np.max(tau) + 0.1]])\n \n splits= sorted(np.random.randint(1, d, b-1))\n while len(np.unique(splits)) < b-1:\n splits= sorted(np.random.randint(1, d, b-1)) \n bins= np.array([0] + splits + [d])\n \n sums= np.repeat(0.0, n_bins)\n\n for i in range(n_bins):\n sums[i]= block_sum(i, bins, C, n_u)\n \n ns= np.repeat(0.0, n_bins)\n for i in range(n_bins):\n ns[i]= cum_n_u[bins[i+1]] - cum_n_u[bins[i]]\n \n objective= 0.0\n \n for i in range(n_bins):\n objective+= sums[i]/ns[i]\n\n cum_n_u= np.hstack([[0], np.cumsum(n_u)])\n \n it= 0\n while True and it < maxit:\n it+= 1\n \n change_obj, change_idx, step_, new_sum_i, new_sum_im1, new_ns_i, new_ns_im1= 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0\n \n for i in range(1, n_bins):\n for step in [-1, 0]:\n if ns[i + step] > n_u[bins[i] + step]:\n change, sum_i, sum_im1, ns_i, ns_im1 = changes(i, step*2 + 1, bins, C, n_u, ns, sums)\n if change > change_obj:\n change_obj, change_idx, step_, new_sum_i, new_sum_im1, new_ns_i, new_ns_im1= change, i, step*2 + 1, sum_i, sum_im1, ns_i, ns_im1\n \n if change_obj > 0.0:\n objective= objective + change_obj\n bins[change_idx]+= step_\n sums[change_idx]= new_sum_i\n sums[change_idx-1]= new_sum_im1\n ns[change_idx]= new_ns_i\n ns[change_idx-1]= new_ns_im1\n else:\n break\n \n t_binning= []\n for i in range(len(t)):\n for j in range(len(bins)):\n if t[i] >= tau[bins[j]] and t[i] < tau[bins[j+1]]:\n t_binning.append(j)\n \n return np.array(t_binning)", "def make_roundflat(\n lower_bound=0.1,\n upper_bound=0.6,\n lower_steepness=2.0,\n upper_steepness=8.0,\n integration_bounds=(0.0, 10.0),\n):\n\n def roundflat(x):\n return -2 * (\n (x / lower_bound) ** (-2 * lower_steepness)\n + (x / upper_bound) ** (2 * upper_steepness)\n )\n\n value = quad(\n lambda x: np.exp(roundflat(x)), integration_bounds[0], integration_bounds[1]\n )[0]\n\n def prior(x):\n return roundflat(x) - np.log(value)\n\n return prior", "def thinning_sampler(rng, lamb, xmin=0, lamb_min=1e-10):\n while lamb(xmin) > lamb_min:\n dx = -np.log(rng.rand()) / lamb(xmin)\n x = xmin + dx\n accept_rate = lamb(x) / lamb(xmin)\n\n if rng.rand() < accept_rate:\n return x\n xmin = x\n raise ValueError(\n f\"require lamb({xmin})>{lamb_min} to guarantee cdf(infty)=1\"\n )", "def hurst(data):\n\tn = 6\n\tdata = pd.Series(data).pct_change()[1:]\n\tars = list()\n\tlag = list()\n\tfor i in range(n):\n\t\tm = 2 ** i\n\t\tsize = np.size(data) // m\n\t\tlag.append(size)\n\t\tpanel = {}\n\t\tfor j in range(m):\n\t\t\tpanel[str(j)] = data[j * size:(j + 1) * size].values\n\n\t\tpanel = pd.DataFrame(panel)\n\t\tmean = panel.mean()\n\t\tdeviation = (panel - mean).cumsum()\n\t\tmaxi = deviation.max()\n\t\tmini = deviation.min()\n\t\tsigma = panel.std()\n\t\trs = maxi - mini\n\t\trs = rs / sigma\n\t\tars.append(rs.mean())\n\n\tlag = np.log10(lag)\n\tars = np.log10(ars)\n\thurst_exponent = np.polyfit(lag, ars, 1)\n\tresult = hurst_exponent[0]\n\treturn result", "def hurst(p,n):\n # Create the range of lag values\n lags = range(2, n)\n tau = []; lagvec = []\n for lag in lags: \n # produce price difference with lag \n pp = subtract(p[lag:],p[:-lag]) \n # Write the different lags into a vector \n lagvec.append(lag) \n # Calculate the variance of the differnce vector \n tau.append(sqrt(std(pp))) \n\n # Use a linear fit to estimate the Hurst Exponent\n poly = polyfit(log(lags), log(tau), 1)\n\n # Return the Hurst exponent from the polyfit output\n return poly[0]*2.0", "def js_fgan_lower_bound(f):\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = 
tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.softplus(f_diag))) / (n * (n - 1.))\n return first_term - second_term", "def __guassian_kernel(x, sigma=200):\n return (1 / (sqrt(2.*pi) * sigma)) * exp(-x ** 2 / (2.*sigma**2))", "def stirling(n):\n return n**n*isqrt(2*math.pi*n)/math.e**n", "def uniform(a: float, b: float) -> float:\n ...", "def _random_shoot_bernstein_ ( fun ) :\n xmn = fun.xmin ()\n xmx = fun.xmax ()\n ymx = max ( fun.bernstein().pars() )\n i = 0 \n while True : \n x = _uniform_ ( xmn , xmx ) \n y = _uniform_ ( 0 , ymx )\n v = fun ( x )\n if v >= y : return x", "def gini(values):\n v = values.copy()\n v.sort()\n\n sum_iy = 0\n for i, y in enumerate(v):\n i += 1\n sum_iy += i*y\n\n sum_y = sum(v)\n n = len(v)\n\n return 1 - (((2*sum_iy)/(n*sum_y)) - ((n+1)/n))", "def infs(n: int) -> np.ndarray:\n return np.ones(n) * np.inf", "def half_huber_relu(x, d=1):\n x = tf.where(x < 0, 0, x) # Apply flat part of ReLU\n x = tf.where(x > (1/(2*d)), x - 1/(4*d), x) # Apply linear part of ReLU\n x = tf.where((0 <= x) & (x <= (1/(2*d))), d*x**2, x) # Apply huber part of the transformation\n return x", "def zzX_mignotte_bound(f):\n a = zzX_max_norm(f)\n b = abs(zzX_zz_LC(f))\n n = sum(zzX_degree_all(f))\n\n return INT_TYPE(isqrt(n+1))*2**n*a*b", "def mc_integrate1d(f, a, b, N=10000):\n #use the approximation formula with a translate and scale of unit interval\n points = (b-a)*np.random.rand(N)+a\n y = f(points)\n return (b-a)*sum(y)/N" ]
[ "0.5800495", "0.57387626", "0.56889313", "0.56861275", "0.56719846", "0.5661993", "0.5647246", "0.56123304", "0.56039697", "0.55897033", "0.55779684", "0.556688", "0.5564874", "0.5559421", "0.55498475", "0.5544323", "0.55239946", "0.55232066", "0.5518701", "0.5500929", "0.5491718", "0.5489114", "0.5477032", "0.54716367", "0.5461163", "0.5457648", "0.5451791", "0.5440173", "0.5438299", "0.54196936" ]
0.71997106
0
Adds founder to the project model form, saves the project in the database, calls the generate_matches() function to find & save projectuser matches, and redirects to the newly created project.
def form_valid(self, form): form.instance.founder = self.request.user print('Project Create user:', self.request.user) form.save() tc_lib.generate_user_matches(form) return super(ProjectCreate, self).form_valid(form)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post_project():\n\n title = request.form.get('title')\n description = request.form.get('description')\n max_grade = request.form.get('max_grade')\n\n hackbright.make_new_project(title, description, max_grade)\n\n flash(\"Successfully added new project.\")\n\n return redirect(\"/project?title={}\".format(title))", "def form_valid(self, form):\n form.save()\n\n tc_lib.generate_user_matches(form)\n\n return super(ProjectUpdate, self).form_valid(form)", "def add_project():\n if request.method == \"POST\":\n result = add_project_to_db(\n request.form[\"title\"],\n request.form[\"link\"],\n request.form[\"description\"]\n )\n flash(result)\n return redirect(url_for(\"portfolio\"))\n else:\n return render_template(\"add_project.html\")", "def add_project(request):\n\n profile = get_object_or_404(Profile, user=request.user)\n\n if not profile.is_creator:\n messages.error(request, 'Sorry, only creators can do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n project_form = ProjectForm(request.POST, request.FILES)\n if project_form.is_valid():\n project = project_form.save(commit=False)\n project.owner = profile\n project.save()\n messages.success(request, 'Successfully created project!')\n return redirect(reverse('project_detail', args=[project.id]))\n else:\n messages.error(\n request,\n 'Failed to create project. Please ensure the form is valid'\n )\n\n project_form = ProjectForm()\n\n template = 'gameproject/add_project.html'\n context = {\n 'project_form': project_form,\n }\n\n return render(request, template, context)", "def newproject_view(request):\n\n # Use to tell to the template that the user want to creat a new project\n is_new = True\n\n # Get all the user. Everyone may be member of the project\n users = User.objects.all()\n\n # If the view received data, try to creat a project\n if request.method == \"POST\":\n form = ProjectForm(request.user, request.POST)\n if form.is_valid():\n # Save the new project in the database\n form.save(commit=True)\n\n # redirect to the project list display page\n return redirect(\"projects\")\n else:\n # creat an empty form for the template\n form = ProjectForm(request.user)\n\n return render(request, 'newProject.html', locals())", "def add_project():\n \n if 'username' in session: \n form=ProjectForm()\n \n if request.method == 'POST':\n if form.validate_on_submit():\n user = mongo.db.user.find_one({'username': session['username']})\n mongo.db.projects.insert_one({'username': user['username'],\n 'date': datetime.utcnow(),\n 'title': form.title.data,\n 'deadline': datetime.strptime(form.deadline.data, \"%d/%m/%Y\"),\n 'brief': form.brief.data,\n 'status': \"open\",\n 'note': form.note.data,\n 'user_id': user['_id']\n })\n \n flash('Your project has been created.', 'success')\n return redirect(url_for('projects'))\n \n return render_template('pages/addproject.html', title='New Project', form=form, legend=\"Add a project\")\n \n flash('You need to be logged in to post any content.', 'info')\n return redirect(url_for('login'))", "def openproject():\n\n # POST\n if request.method == \"POST\":\n\n # Validate form submission\n if not request.form.get(\"projectname\"):\n return apology(\"missing project name\")\n elif not request.form.get(\"link\"):\n return apology(\"missing project link\")\n\n\n # Record project in the database\n db.execute(\"\"\"INSERT INTO projects (projectname, link)\n VALUES(:projectname, :link)\"\"\", projectname=request.form.get(\"projectname\"), link=request.form.get(\"link\"))\n\n # Display that the 
project has been opened\n flash(\"Opened!\")\n return redirect(\"/\")\n\n # GET\n else:\n return render_template(\"openproject.html\")", "def test_form_after_the_project_has_been_approved(self):\n self.approve_project(self.project)\n\n form = ProjectUserMembershipCreationForm(\n initial={\n 'user': self.project_applicant,\n },\n data={\n 'project_code': self.project_code,\n },\n )\n self.assertTrue(form.is_valid())", "def submit():\n handlers = {\"canon\": canon, \"vivitek\": vivitek}\n projectors = request.form.getlist(\"projector\")\n command = request.form[\"command\"]\n args = {\"projectors\": \",\".join(projectors)}\n try:\n with ExitStack() as stack:\n for label in projectors:\n type, conn = app.config[\"PROJECTORS\"][label]\n print(f\"{label} ({type}:{conn}): {command}\")\n stack.enter_context(handlers.get(type, unknown)(label, conn, command))\n except Exception as e:\n args[\"error\"] = str(e)\n return redirect(url_for(\"display\", **args), 303)", "def post(self):\n # fetch parameter\n get_parser = reqparse.RequestParser(bundle_errors=True)\n get_parser.add_argument(\"user_id\", required=True, help=\"User ID required to ad to their favourite projects\")\n get_parser.add_argument(\"project_id\", required=True, help=\"Project ID required to add to the favourite projects\")\n args = get_parser.parse_args(strict=True)\n\n # get user_id and project_id\n user_id = args[\"user_id\"]\n project_id = args[\"project_id\"]\n\n # convert parameter ids into objectids\n try:\n user_id = ObjectId(user_id)\n project_id = ObjectId(project_id)\n except:\n return {\"message\": \"invalid user id or project id\"}, 400\n\n # add project to the user's favourites\n if ('user_id' or 'project_id') not in args.keys():\n return {\"message\": \"both user and project id are required\"}, 400\n else:\n # check if user is valid\n user = self.users.find_one({\"_id\": user_id})\n project = self.projects.find_one({\"_id\": project_id})\n if user is None:\n return {\"message\": \"user not found\"}, 404\n elif project is None:\n return {\"message\": \"project not found\"}, 404\n else:\n # add project to favourites\n user_favourites = self.favourites.find_one({\"user_id\": user_id})\n if user_favourites is None:\n # insert a new doc into favourites collection\n favourites_list = []\n favourites_list.append(deepcopy(project)) \n self.favourites.insert({\n \"user_id\": user_id,\n \"favourite_projects\": favourites_list\n })\n else:\n new_favourite_list = user_favourites[\"favourite_projects\"]\n\n # check if this project is already in the user's favourites\n for proj in new_favourite_list:\n if proj[\"_id\"] == project_id:\n return {\"message\": \"project is already in the favourites list\"}, 400\n\n new_favourite_list.append(deepcopy(project))\n updated_list = {\"favourite_projects\": new_favourite_list}\n\n self.favourites.update({\"user_id\": user_id}, {\"$set\": updated_list}, upsert=False)\n \n return {\"status\": \"project has been added to favourites successfully\"}, 200", "def create_project_form(request):\n \n # First we check to see the site has been set up, otherwise we throw the user to the config screen\n if not bool(os.path.isdir(Project.project_options.repository_directory)):\n request.user.message_set.create(message=\"The site has not been set up yet. 
Log in as your admin user and create your settings!\")\n return HttpResponseRedirect(reverse('site-config'))\n \n if request.is_ajax():\n template ='project/project_create_ajax.html'\n else:\n template = 'project/project_create.html'\n \n # Lets check if this form is being shown or processed\n if request.method == \"POST\":\n # We're processing the form, so lets create the instance\n form = NewProjectForm(request.POST, auto_id=False)\n # The form is correct, lets proceeed.\n if form.is_valid():\n # Lets check the user has conformed to a sites T&C's\n if form.cleaned_data['t_and_c'] == True:\n # Create the project instance\n project = Project(\n project_id = string.lower(form.cleaned_data['project_id']),\n project_name = form.cleaned_data['project_name'],\n short_description = form.cleaned_data['short_description'],\n full_description = form.cleaned_data['full_description'],\n project_manager = request.user,\n hgweb_style = form.cleaned_data.get('hgweb_style', ''),\n project_icon = form.cleaned_data['project_icon'],\n )\n # Ok, we're all good, so lets save.\n project.save()\n # We'll tell the user that there site has been saved\n request.user.message_set.create(message=_(\"The project \" + form.cleaned_data['project_name'] + \" has been created\"))\n if request.is_ajax():\n return HttpResponse(\n \"{'success': 'true', 'url': '\" + reverse('project-detail', kwargs={'slug':form.cleaned_data['project_id']}) + \"', 'project': \" + json_encode(project) + \"}\"\n , mimetype=\"application/json\")\n else:\n return HttpResponseRedirect(reverse('project-detail', kwargs={'slug': form.cleaned_data['project_id']}))\n else:\n return render_to_response(template,\n {\n 'form':form.as_table(),\n }, context_instance=RequestContext(request)\n )\n #return HttpResponseRedirect(reverse('project-detail', kwargs={'slug':form.cleaned_data['name_short']}))\n else:\n form = NewProjectForm()\n is_auth = request.user.is_authenticated()\n \n return render_to_response(template,\n {\n 'form':form.as_table(),\n 'is_auth': is_auth\n }, context_instance=RequestContext(request)\n )", "def follow_project(cls, user, project):\r\n pass", "def register(self, **form_data):\n g.security.require_access(self.neighborhood, 'register')\n shortname, reg_kwargs = self._parse_add_project_data(form_data)\n\n # install the project\n try:\n c.project = self.neighborhood.register_project(\n shortname, **reg_kwargs)\n except RegistrationError:\n redirect_to = self.neighborhood.url()\n ming.odm.odmsession.ThreadLocalODMSession.close_all()\n flash(\"You do not have permission to register\", \"error\")\n else:\n redirect_to = c.project.script_name + 'home/'\n ming.odm.odmsession.ThreadLocalODMSession.flush_all()\n flash('Welcome to your new project!')\n\n redirect(redirect_to)", "def upload_project(request):\n current_user = request.user\n current_user_name = current_user.username\n # project_ratings=Rating.objects.filter(id=project_id)\n if request.method == 'POST':\n form = ProjectForm(request.POST, request.FILES)\n if form.is_valid():\n project_post = form.save(commit=True) \n else:\n raise Http404 \n \n return redirect(view_projects)\n else: \n project_form=ProjectForm()\n \n return render(request, 'upload_project.html', {'project_form':project_form})", "def new(request):\n template = loader.get_template('team/new.html')\n\n if request.method == 'POST':\n form = TeamForm(request.user, request.POST)\n if form.is_valid():\n team = form.save(commit=False)\n team.year = datetime.datetime.now().year\n if 'logo_image' in request.FILES:\n team.logo 
= request.FILES['logo_image']\n if request.POST.get('team_info'):\n team.information = request.POST.get('team_info')\n team.save()\n\n # assign team to all members\n request.user.profile.team = team\n request.user.save()\n if form.cleaned_data['member2'] is not '':\n member2 = User.objects.get(pk=form.cleaned_data['member2'])\n member2.profile.team = team\n member2.save()\n if form.cleaned_data['member3'] is not '':\n member3 = User.objects.get(pk=form.cleaned_data['member3'])\n member3.profile.team = team\n member3.save()\n if form.cleaned_data['member4'] is not '':\n member4 = User.objects.get(pk=form.cleaned_data['member4'])\n member4.profile.team = team\n member4.save()\n\n messages.success(request, _('Your team has been created.'))\n\n else:\n if request.user.profile.team is not None:\n return redirect('/team/my-team')\n form = TeamForm(request.user)\n\n context = {'form': form}\n return CustomHttpResponse.send(template, context, request)", "def follow_project(cls, user, project):\n pass", "def save_proj(app, project, fy_model, casc_model):\n # casc_model = app.casc.query.filter_by(id=casc_model).first()\n # fy_model = app.FiscalYear.query.filter_by(id=fy_model).first()\n proj = app.db.session.query(app.Project).filter(\n app.Project.sb_id == project.ID).first()\n if proj is None: # The Fiscal Year was not found in the db\n print(\"---------SQL--------- [Project] Could not find \" +\n \"{} in database...\".format(project.name.encode('utf-8')))\n pi_list = get_pi_list(app, project.sb_json)\n proj = app.Project(sb_id=project.ID,\n url=project.URL,\n name=project.name,\n total_data=project.data_in_project,\n item_count=project.project_items\\\n [\"Project_Item_Count\"],\n file_count=project.project_files\\\n [\"Project_File_Count\"],\n start_date=get_sb_date(\"start\", project.sb_json),\n end_date=get_sb_date(\"end\", project.sb_json),\n summary=project.sb_json['summary'])\n # Many-to-many relationship definitions:\n proj.cascs.append(casc_model)\n proj.fiscal_years.append(fy_model)\n for pi_model in pi_list:\n proj.principal_investigators.append(pi_model)\n app.db.session.add(proj)\n else:\n try:\n print(\"---------SQL--------- [Project] Found {} in database...\"\n .format(project.name.encode('utf-8')))\n pi_list = get_pi_list(app, project.sb_json)\n if proj.sb_id != project.ID:\n proj.sb_id = project.ID\n if proj.name != project.name:\n proj.name = project.name\n if proj.url != project.URL:\n proj.url = project.URL\n if proj.total_data != project.data_in_project:\n proj.total_data = project.data_in_project\n if proj.item_count != project.project_items[\"Project_Item_Count\"]:\n proj.item_count = project.project_items[\"Project_Item_Count\"]\n if proj.file_count != project.project_files[\"Project_File_Count\"]:\n proj.file_count = project.project_files[\"Project_File_Count\"]\n proj.start_date = get_sb_date(\"start\", project.sb_json)\n proj.end_date = get_sb_date(\"end\", project.sb_json)\n if proj.summary != project.sb_json['summary']:\n proj.summary = project.sb_json['summary']\n\n # Many-to-many relationships (need db model):\n # Check if the casc is already related to the project by iterating\n # through proj.cascs and seeing\n # if the ids match. 
If not found, add it.\n if not (any(casc.id == casc_model.id for casc in proj.cascs)):\n proj.cascs.append(casc_model)\n if not (any(fy.id == fy_model.id for fy in proj.fiscal_years)):\n proj.fiscal_years.append(fy_model)\n for pi_model in pi_list:\n if not (any(pi.id == pi_model.id for pi in \\\n proj.principal_investigators)):\n proj.principal_investigators.append(pi_model)\n\n # Add new timestamp\n proj.timestamp = datetime.utcnow()\n except Exception as e:\n print('Other error: ' + str(e))\n\n try:\n app.db.session.commit()\n except Exception as e:\n print('Commit error: ' + str(e))\n\n print(\"---------SQL--------- \\\n [Project] Done with {}.\".format(proj.name.encode('utf-8')))\n return proj", "def create_and_join(request):\n c = {}\n c.update(csrf(request))\n if request.method == 'POST': # If the form has been submitted...\n form = TeamForm(request.POST) # A form bound to the POST data\n if form.is_valid(): # All validation rules pass\n team = form.save()\n member = request.user.member\n member.team = team\n member.save()\n messages.add_message(request, messages.SUCCESS, 'Team info created!')\n return HttpResponseRedirect(reverse('team_details', args=(team.id,)))\n else:\n form = TeamForm() # An unbound form\n\n return render_to_response(\"teams/create_and_join.html\", {'form': form, 'c':c},\n context_instance=RequestContext(request))", "def form_valid(self, form):\n auth_login(self.request, form.get_user())\n if self.request.session.get('payment'):\n Payment.objects.filter(id=self.request.session['payment']).update(\n user_id=self.request.user.revolvuserprofile, entrant_id=self.request.user.revolvuserprofile)\n payment = Payment.objects.get(id=self.request.session['payment'])\n Tip.objects.filter(id=payment.tip_id).update(user_id=self.request.user.revolvuserprofile)\n Project.objects.get(id=payment.project_id).donors.add(self.request.user.revolvuserprofile)\n AnonymousUserDonation.objects.filter(payment_id=self.request.session['payment']).delete()\n del self.request.session['payment']\n\n # messages.success(self.request, 'Logged in as ' + self.request.POST.get('username'))\n # return redirect(reverse('project:view', kwargs={'title':title})+'?amount='+amount+'&tip='+tip)\n messages.success(self.request, 'Logged in as ' + self.request.POST.get('username'))\n return redirect(self.next_url)", "def project(projectname,targetamount):\n if (validatename(projectname) and validatenum(targetamount)):\n targetamount=float(targetamount)\n con = lite.connect(databasefile)\n with con:\n cur = con.cursor() \n cur.execute(\"SELECT Id FROM projects where name=?\", (projectname,))\n exists = cur.fetchone()\n if exists:\n click.echo(\"Project name already exists!\")\n sys.exit()\n cur.execute(\"INSERT INTO projects (Name, Tamount) VALUES (?, ?)\", (projectname, targetamount))\n click.echo(\"Added %s project with target of $%-.2f\" % (projectname, targetamount))", "def create_project(request):\n if request.method == \"POST\":\n temp = json.loads(request.body)\n form = ProjectForm(temp)\n\n # check whether it's valid:\n if form.is_valid():\n prj_obj = form.save(commit=False)\n # prj_obj.description = bleach.clean(prj_obj.description, strip=True)\n # fint the user profile object based on the email in session\n user_profile = UserProfile.objects.get(email=request.session['email'])\n prj_obj.user = user_profile\n # Save the project object - project needs to exist before\n # manytomany field is accessed.\n prj_obj.save()\n # get the list of tag objects to add to project\n tag_objects_list = 
_get_tags(form.cleaned_data['tags_list'])\n article_object_list = _get_articles(form.cleaned_data['articles'])\n for tag_object in tag_objects_list:\n prj_obj.tags.add(tag_object)\n for article_object in article_object_list:\n prj_obj.articles.add(article_object)\n prj_obj.save()\n return HttpResponse(str(prj_obj.id))\n # return HttpResponseRedirect('/projects/' + str(prj_obj.id))\n else:\n print form.errors.as_data()\n else:\n # Remove when front end updated.\n form = ProjectForm()\n return render(request, 'projects/create_project.html', {'form': form})", "def member_onaccept(form):\n\n db = current.db\n s3db = current.s3db\n\n utable = current.auth.settings.table_user\n ptable = s3db.pr_person\n ltable = s3db.pr_person_user\n mtable = s3db.member_membership\n\n # Get the full record\n id = form.vars.id\n if id:\n query = (mtable.id == id)\n record = db(query).select(mtable.id,\n mtable.person_id,\n mtable.organisation_id,\n mtable.deleted,\n limitby=(0, 1)).first()\n else:\n return\n\n data = Storage()\n\n # Affiliation\n s3db.pr_update_affiliations(mtable, record)\n\n # Update the location ID from the Home Address\n atable = s3db.pr_address\n query = (atable.pe_id == ptable.pe_id) & \\\n (ptable.id == record.person_id) & \\\n (atable.type == 1) & \\\n (atable.deleted == False)\n address = db(query).select(atable.location_id,\n limitby=(0, 1)).first()\n if address:\n data.location_id = address.location_id\n\n # Add record owner (user)\n query = (ptable.id == record.person_id) & \\\n (ltable.pe_id == ptable.pe_id) & \\\n (utable.id == ltable.user_id)\n user = db(query).select(utable.id,\n utable.organisation_id,\n utable.site_id,\n limitby=(0, 1)).first()\n if user:\n user_id = user.id\n data.owned_by_user = user.id\n\n if not data:\n return\n record.update_record(**data)\n\n if data.location_id:\n # Populate the Lx fields\n current.response.s3.lx_update(mtable, record.id)", "def post(self, *args, **kwargs):\n name = self.get_argument('name', None)\n description = self.get_argument('description', None)\n url = self.get_argument('url', None)\n leader = self.get_argument('leader', None)\n members = self.get_argument('members', None)\n teams = self.get_argument('teams', None)\n repos = self.get_argument('repos', None)\n tags = self.get_argument('tags', None)\n if 'user' not in kwargs:\n self.raise401()\n\n try:\n # todo - better arguments handler\n url = url.strip()\n url = url if url else None\n members_list = []\n repos_list = []\n teams_list = []\n project_leader = kwargs['user']\n if leader:\n project_leader = User.objects(username=leader).first()\n\n if repos:\n for repo in parse_listed_strs(repos):\n r = Repo.objects(name=repo).first()\n if not r:\n continue\n repos_list.append(r)\n if members:\n for member in parse_listed_strs(members):\n u = User.objects(username=member).first()\n if not u or u == project_leader:\n continue\n members_list.append(u)\n if teams:\n for team in parse_listed_strs(teams):\n t = Team.objects(name=team).first()\n if not t:\n continue\n teams_list.append(t)\n members_list.append(project_leader)\n tags_list = parse_listed_strs(tags)\n project = Project(\n name=name, description=description,\n url=url, repos=repos_list,\n leader=project_leader, members=members_list,\n teams=teams_list, tags=tags_list)\n project.save()\n project_data = document_to_json(project, filter_set=_FILTER)\n self.set_status(201)\n self.write(project_data)\n except Exception as e:\n reason = e.message\n self.raise400(reason=reason)", "def add_project(request):\n if not 
request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n form = ProjectForm(request.POST, request.FILES)\n if form.is_valid():\n project = form.save()\n messages.success(request, 'Project added successfully!')\n return redirect(reverse('portfolio'))\n else:\n messages.error(request, 'Failed to add project.\\\n # Please ensure the form is valid')\n else:\n form = ProjectForm()\n\n form = ProjectForm()\n template = 'portfolio/add_project.html'\n context = {\n 'form': form,\n }\n\n return render(request, template, context)", "def _on_new_project(self):\n lang = self.ddnGuiLanguage.get()\n projectfile = filedialog.asksaveasfilename(\\\n filetypes=[('Paratext Biblical Terms', '.htm'), ], \\\n initialdir=self.BibTerm, \\\n initialfile='', \\\n title=LOCALIZED_TEXT[lang]['BibTerms2Dict project'], \\\n defaultextension='.prj')\n if os.path.exists(projectfile):\n messagebox.showwarning(LOCALIZED_TEXT[lang]['New Project'], \\\n LOCALIZED_TEXT[lang]['{} already exist choose another name.'].\\\n format(os.path.basename(projectfile)))\n return\n else:\n newfile = codecs.open(fileout, mode='w', encoding='utf-8')\n newfile.close()\n self.list_projects = [f.rstrip('.prj') \\\n for f in os.listdir(self.BibTerm) \\\n if f.endswith('.prj')]\n self.ddnCurProject['values'] = self.list_projects\n self.ddnCurProject.set(os.path.basename(projectfile)[:-4])\n self.update\n\n pass", "def add_user_process():\n\n # extract form data, add, commit, then redirect to /users\n first_name = request.form[\"first-name\"]\n last_name = request.form[\"last-name\"]\n image_url = request.form[\"image-url\"]\n\n msg = db_add_user(first_name, last_name, image_url)\n\n flash(msg[\"text\"], msg[\"severity\"])\n\n return redirect(\"/users\")", "def users_page(request):\n if request.method == 'POST':\n user = request.user\n form = CompetenceForm(request.POST)\n\n if form.is_valid():\n form.instance.person = request.user\n form.save()\n # return redirect('user-page')\n # competence = Competence.objects.create_competence(user, form.title_of_competence, form.level_of_competence)\n else:\n form = CompetenceForm()\n\n return render(request, 'core/user-page.html', {'form': form})", "def post(self, project_id):\n project_model = ProjectDBModel.query.get(project_id)\n if not project_model:\n ns.abort(404, status=PROJECT_NOT_FOUND_ERROR)\n try:\n data = request.get_json()\n users = FavoritesProjectDBModel.add_project_to_favorites_of_user_id(\n data['user_id'], project_id)\n response_object = {\n \"project_id\": project_id,\n \"users_id\": users,\n }\n return response_object, 201\n except KeyError:\n ns.abort(404, status=MISSING_VALUES_ERROR)", "def project_created_handler(event):\n obj = event.obj\n # submit Project after creation\n obj.workflow.start()", "def project_refresh(project_name):\n if not db_find_project(project_name):\n abort(404)\n analyser.add_repos(current_user.username, [project_name])\n return redirect(url_for('main.admin_manage'))" ]
[ "0.64061403", "0.6365428", "0.6048063", "0.602952", "0.5996432", "0.5994576", "0.5934493", "0.5910736", "0.5836684", "0.5801637", "0.5649979", "0.56395286", "0.56146103", "0.56023854", "0.55345714", "0.5534347", "0.5526762", "0.54719496", "0.5466361", "0.5405478", "0.5380776", "0.53220963", "0.5277754", "0.52610314", "0.52560115", "0.52392524", "0.52247185", "0.52170485", "0.520588", "0.51980466" ]
0.74070966
0
Create Glue Dev Endpoint
def create_dev_endpoint(self): self.dev_endpoint = self.glue_engine.create_dev_endpoint( EndpointName=self.dev_endpoint_name, RoleArn=self.dev_endpoint_role, PublicKey=self.dev_endpoint_pub_rsa, NumberOfNodes=2, ExtraPythonLibsS3Path=self.python_library, GlueVersion="1.0", Arguments={"GLUE_PYTHON_VERSION": "3"})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_endpoint(EndpointName=None, EndpointConfigName=None, Tags=None):\n pass", "def create_endpoint(path, workspace):\n client = Client()\n\n client.create_endpoint(path, workspace=workspace)", "def endpoint_create(self, endpoint_name=None, config=None):\n if config is None:\n raise Exception(\"Config required!\")\n if endpoint_name is None:\n self.request('/v1.1/endpoint', 'PUT', body=config)\n else:\n self.request('/v1.1/endpoints/%s' % endpoint_name, 'PUT', body=config)", "def create_endpoint_and_deploy_model(\n project: str,\n location: str,\n model_resource_name: str,\n display_name: str,\n regional_endpoint: str,\n endpoint_resource_name: dsl.OutputPath(str),\n create_endpoint_gcp_resources: dsl.OutputPath(str),\n deploy_model_gcp_resources: dsl.OutputPath(str),\n encryption_spec_key_name: str = '',\n service_account: str = '',\n deploy_model: bool = True,\n):\n import json\n import logging\n import os\n import sys\n from typing import Any, Dict\n\n try:\n from google_cloud_pipeline_components.container.v1.gcp_launcher import lro_remote_runner\n except ImportError:\n from google_cloud_pipeline_components.container.v1.gcp_launcher import lro_remote_runner\n\n def run_lro_remote_runner(\n url: str, payload: Dict[str, Any], gcp_resources: str\n ) -> Any:\n remote_runner = lro_remote_runner.LroRemoteRunner(location)\n lro = remote_runner.create_lro(url, json.dumps(payload), gcp_resources)\n return remote_runner.poll_lro(lro=lro)\n\n try:\n os.makedirs(os.path.dirname(endpoint_resource_name), exist_ok=True)\n\n if not deploy_model:\n with open(endpoint_resource_name, 'w') as fout:\n fout.write('')\n return\n\n regional_endpoint = regional_endpoint.rstrip('/')\n\n create_endpoint_payload = {\n 'displayName': display_name,\n }\n\n pipeline_labels_str = os.getenv('VERTEX_AI_PIPELINES_RUN_LABELS')\n if pipeline_labels_str:\n create_endpoint_payload['labels'] = json.loads(pipeline_labels_str)\n\n if encryption_spec_key_name:\n create_endpoint_payload['encryption_spec'] = {\n 'kms_key_name': encryption_spec_key_name\n }\n\n create_endpoint_lro = run_lro_remote_runner(\n url=(\n f'{regional_endpoint}/projects/{project}/locations/{location}'\n '/endpoints'\n ),\n payload=create_endpoint_payload,\n gcp_resources=create_endpoint_gcp_resources,\n )\n\n response_endpoint = create_endpoint_lro['response']['name']\n with open(endpoint_resource_name, 'w') as fout:\n fout.write(response_endpoint)\n\n logging.info(\n 'Endpoint created successfully. 
Deploying model %s to endpoint',\n model_resource_name,\n )\n\n deploy_model_payload = {\n 'deployedModel': {\n 'model': model_resource_name,\n 'displayName': display_name,\n 'automaticResources': {'minReplicaCount': 1, 'maxReplicaCount': 1},\n }\n }\n if service_account:\n deploy_model_payload['deployedModel']['service_account'] = service_account\n\n _ = run_lro_remote_runner(\n url=f'{regional_endpoint}/{response_endpoint}:deployModel',\n payload=deploy_model_payload,\n gcp_resources=deploy_model_gcp_resources,\n )\n\n logging.info('Model deployed successfully!')\n except Exception as e: # pylint: disable=broad-exception-caught\n if isinstance(e, ValueError):\n raise\n logging.exception(str(e))\n sys.exit(13)", "def custom_service_endpoint(self) -> global___Snippet.ClientInitialization.ServiceEndpoint:", "def create_endpoint_config(EndpointConfigName=None, ProductionVariants=None, Tags=None, KmsKeyId=None):\n pass", "def create_endpoint(coriolis, name, platform_type, connection_info,\n barbican=None, description=''):\n # check provider type is installed server-side:\n providers_dict = coriolis.providers.list().to_dict()\n if platform_type not in providers_dict:\n raise ValueError(\n 'platform_type must be one of %s' % providers_dict.keys())\n\n # if Barbican is available, store the connection info in it:\n if barbican:\n secret_ref = store_barbican_secret_for_coriolis(\n barbican, connection_info, name='Coriolis Endpoint %s' % name)\n connection_info = {'secret_ref': secret_ref}\n\n # create the endpoint:\n endpoint = coriolis.endpoints.create(\n name, platform_type, connection_info, description)\n\n return endpoint", "def create_endpoint(*args):\n endpoint = ''\n for arg in args:\n endpoint = endpoint + str(arg) + \"/\"\n endpoint = endpoint[:-1]\n endpoint = endpoint + \".json\"\n return endpoint", "def createNewGlobusLocalEndpoint(self,ep_data=None):\n\n\t\tif ep_data is None:\n\t\t\tep_data = {'DATA_TYPE':\"endpoint\",'display_name':socket.gethostname(),\n\t\t\t\t\t\t'is_globus_connect': True,\n\t\t\t\t\t\t'myproxy_server': 'myproxy.globusonline.org'}\n\t\tcreate_result = self.transfer_client.create_endpoint(ep_data)\n\t\tself.setup_key = create_result['globus_connect_setup_key']\n\t\tself.local_ep_id = create_result['canonical_name'].split('#')[1]\n\t\t\n\t\t_ = self.transfer_client.endpoint_autoactivate(self.local_ep_id)\n\t\treturn(self.local_ep_id,self.setup_key)", "def register_endpoints(api):\n api.add_resource(EventList, '/events')", "def create_endpoint(request):\n service_name = request.registry.settings['service_name']\n host = socket.gethostbyname(socket.gethostname())\n port = request.server_port\n\n # Convert ip address to network byte order\n ipv4 = struct.unpack('!i', socket.inet_aton(host))[0]\n port = int(port)\n # Zipkin passes unsigned values in signed types because Thrift has no\n # unsigned types, so we have to convert the value.\n port = struct.unpack('h', struct.pack('H', port))[0]\n\n return zipkin_core.Endpoint(\n ipv4=ipv4, port=port, service_name=service_name)", "def create_handler(event, context):\n return update_endpoint(event)", "def register():\n\n print(\"Request: \", request)\n print(\"foo: \", request.app.ep_mapping)\n print(json.load(request.body))\n endpoint_details = json.load(request.body)\n print(endpoint_details)\n\n # Here we want to start an executor client.\n # Make sure to not put anything into the client, until after an interchange has\n # connected to avoid clogging up the pipe. 
Submits will block if the client has\n # no endpoint connected.\n endpoint_id = str(uuid.uuid4())\n fw = spawn_forwarder(request.app.address, endpoint_id=endpoint_id)\n connection_info = fw.connection_info\n ret_package = {'endpoint_id': endpoint_id}\n ret_package.update(connection_info)\n print(\"Ret_package : \", ret_package)\n\n print(\"Ep_id: \", endpoint_id)\n request.app.ep_mapping[endpoint_id] = ret_package\n return ret_package", "def create_endpoint(self, endpoint_id, endpoint_ref):\n raise exception.NotImplemented() # pragma: no cover", "def create_api_service(endpoint):\n # FLOC-1162 should add an API version prefix and integration with\n # DatasetAPIUser.\n return StreamServerEndpointService(endpoint, Site(Resource()))", "def create_app(env_name):\n app = Flask(__name__)\n app.config.from_object(configs[env_name])\n api = Api(app)\n\n ga_client = (google.ads.google_ads.client.GoogleAdsClient\n .load_from_storage())\n\n handler.defaultConversion = ga_client.get_type(\n 'ClickConversion', version='v2')\n\n api.add_resource(handler.Conversion,\n '/v1/customers/<string:customer_id>/conversion')\n\n return api", "def api_endpoint():\n return 'localhost'", "def qglue():\n from glue.core import DataCollection\n from glue.app.qt import GlueApplication\n\n dc = DataCollection()\n\n # Suppress pesky Glue warnings.\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', GlueDeprecationWarning)\n ga = GlueApplication(data_collection=dc)\n\n return ga", "def __init__(__self__, *,\n extended_location: pulumi.Input['ExtendedLocationArgs'],\n resource_group_name: pulumi.Input[str],\n additional_egress_endpoints: Optional[pulumi.Input[Sequence[pulumi.Input['EgressEndpointArgs']]]] = None,\n cloud_services_network_name: Optional[pulumi.Input[str]] = None,\n enable_default_egress_endpoints: Optional[pulumi.Input[Union[str, 'CloudServicesNetworkEnableDefaultEgressEndpoints']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"extended_location\", extended_location)\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n if additional_egress_endpoints is not None:\n pulumi.set(__self__, \"additional_egress_endpoints\", additional_egress_endpoints)\n if cloud_services_network_name is not None:\n pulumi.set(__self__, \"cloud_services_network_name\", cloud_services_network_name)\n if enable_default_egress_endpoints is None:\n enable_default_egress_endpoints = 'True'\n if enable_default_egress_endpoints is not None:\n pulumi.set(__self__, \"enable_default_egress_endpoints\", enable_default_egress_endpoints)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "def _create_json_endpoint(self, endpoint, is_v1):\n json_endpoint = {}\n\n if endpoint.service_name:\n json_endpoint[\"serviceName\"] = endpoint.service_name\n elif is_v1:\n # serviceName is mandatory in v1\n json_endpoint[\"serviceName\"] = \"\"\n if endpoint.port and endpoint.port != 0:\n json_endpoint[\"port\"] = endpoint.port\n if endpoint.ipv4 is not None:\n json_endpoint[\"ipv4\"] = endpoint.ipv4\n if endpoint.ipv6 is not None:\n json_endpoint[\"ipv6\"] = endpoint.ipv6\n\n return json_endpoint", "def main():\n return execute_api(Freta(), [Endpoint], __version__)", "def describe_endpoint(EndpointName=None):\n pass", "def new_url(**kwargs):\n # To create the URL, we need to take the format string and return it with no params\n 
url_base = \"/axapi/v3/gslb/zone/{zone_name}/service/{service-port}+{service-name}\"\n f_dict = {}\n f_dict[\"service-port\"] = \"\"\n f_dict[\"service-name\"] = \"\"\n f_dict[\"zone_name\"] = kwargs[\"zone_name\"]\n\n return url_base.format(**f_dict)", "def _get_base_endpoint_name(self):", "def build_endpoint(self, **kwargs):\n\n raise NotImplementedError()", "def _create_deployment(self) -> aws.apigateway.Stage:\n deployment = aws.apigateway.Deployment(\n f\"{self.rest_api._name}-deployment\",\n rest_api=self.rest_api.id,\n # TODO: Still want to have a triggers function\n opts=pulumi.ResourceOptions(\n parent=self, depends_on=[p.lambda_integration for p in self.proxies]\n ),\n )\n\n stage = aws.apigateway.Stage(\n f\"{self.rest_api._name}-prod-stage\",\n deployment=deployment.id,\n rest_api=self.rest_api.id,\n stage_name=\"prod\",\n opts=pulumi.ResourceOptions(parent=self),\n )\n\n return stage", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n accelerator_id: Optional[pulumi.Input[str]] = None,\n basic_endpoint_group_name: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n endpoint_address: Optional[pulumi.Input[str]] = None,\n endpoint_group_region: Optional[pulumi.Input[str]] = None,\n endpoint_sub_address: Optional[pulumi.Input[str]] = None,\n endpoint_type: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def generate_endpoint(endpoint: Endpoint, schema_text: str, use_cache=True) -> str:\n\n sql_query = english2sql(endpoint.value, schema_text, use_cache=use_cache)\n func_name = english2summary_name(endpoint.value)\n inputs, outputs = parse_query(sql_query, schema_text)\n\n template = '''\n<<<RESPONSE_CLASS>>>\n\n@app.<<<METHOD>>>(\"<<<URL>>>\", response_model=<<<RESPONSE_MODEL>>>)\nasync def <<<FUNC_NAME>>>(<<<PARAMS>>>) -> <<<RESPONSE_MODEL>>>:\n \\'\\'\\'\n <<<ENGLISH_QUERY>>>\n \\'\\'\\'\n cur = con.cursor()\n cur.execute('<<<SQL_QUERY>>>', <<<BINDINGS>>>)\n res: List[Any] = []\n output_names: List[str] = <<<OUTPUT_NAME_LIST>>>\n for row in cur.fetchall():\n row_dict = dict()\n for k, v in zip(output_names, row):\n row_dict[k] = v\n res.append(row_dict)\n con.commit()\n <<<RETURN_STATEMENT>>>\n '''\n\n params = ', '.join(f'{c[\"name\"]}: {c[\"type\"]}' for c in inputs)\n if len(inputs) > 0:\n bindings = '(' + ', '.join(c[\"name\"] for c in inputs) + ',)'\n else:\n bindings = ''\n output_name_list = '[' + ', '.join([f'\\'{c[\"name\"]}\\'' for c in outputs]) + ']'\n\n if len(outputs) > 0:\n response_model = f'List[OutputType_{func_name}]'\n return_statement = 'return res'\n response_class = \\\n f'class OutputType_{func_name}(BaseModel):\\n' + \\\n ' ' + '\\n '.join(f'{c[\"name\"]}: {c[\"type\"]}' for c in outputs)\n else:\n response_model = 'None'\n return_statement = 'return None'\n response_class = ''\n\n template = template.replace('<<<FUNC_NAME>>>', func_name)\n template = template.replace('<<<URL>>>', endpoint.url)\n template = template.replace('<<<METHOD>>>', endpoint.method.lower())\n template = template.replace('<<<SQL_QUERY>>>', sql_query)\n template = template.replace('<<<PARAMS>>>', params)\n template = template.replace('<<<ENGLISH_QUERY>>>', endpoint.value)\n template = template.replace('<<<BINDINGS>>>', bindings)\n template = template.replace('<<<OUTPUT_NAME_LIST>>>', output_name_list)\n template = template.replace('<<<RESPONSE_MODEL>>>', response_model)\n template = template.replace('<<<RETURN_STATEMENT>>>', return_statement)\n template = 
template.replace('<<<RESPONSE_CLASS>>>', response_class)\n return template", "def __init__(__self__, *,\n accelerator_id: pulumi.Input[str],\n endpoint_group_region: pulumi.Input[str],\n basic_endpoint_group_name: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n endpoint_address: Optional[pulumi.Input[str]] = None,\n endpoint_sub_address: Optional[pulumi.Input[str]] = None,\n endpoint_type: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"accelerator_id\", accelerator_id)\n pulumi.set(__self__, \"endpoint_group_region\", endpoint_group_region)\n if basic_endpoint_group_name is not None:\n pulumi.set(__self__, \"basic_endpoint_group_name\", basic_endpoint_group_name)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if endpoint_address is not None:\n pulumi.set(__self__, \"endpoint_address\", endpoint_address)\n if endpoint_sub_address is not None:\n pulumi.set(__self__, \"endpoint_sub_address\", endpoint_sub_address)\n if endpoint_type is not None:\n pulumi.set(__self__, \"endpoint_type\", endpoint_type)", "def test_basic_add_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n time.sleep(2)\n\n config['config'].append(self.create_export_policy())\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))" ]
[ "0.633509", "0.6196592", "0.6004353", "0.593839", "0.5891074", "0.5852167", "0.58136016", "0.5763851", "0.57030064", "0.562547", "0.55829406", "0.55522805", "0.55486447", "0.55259037", "0.5472503", "0.54074615", "0.5405777", "0.5385364", "0.53611857", "0.5356907", "0.5353822", "0.5325224", "0.5301452", "0.52982336", "0.52866775", "0.5275998", "0.5245199", "0.52423173", "0.51949793", "0.5164391" ]
0.73661804
0
Delete Glue Dev Endpoint
def delete_dev_endpoint(self): self.glue_engine.delete_dev_endpoint(EndpointName=self.dev_endpoint_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_endpoint(EndpointName=None):\n pass", "def delete_handler(event, context):\n delete_endpoint_config(event)", "def delete_endpoint(self):\n logger.warning(f\"Deleting hosting endpoint '{self.endpoint_name}'...\")\n self._realtime_predictor.delete_endpoint()", "def delete_endpoint_config(EndpointConfigName=None):\n pass", "def delete_endpoint(self, endpoint_id):\n raise exception.NotImplemented() # pragma: no cover", "def cleanup(self):\n self.sagemaker.delete_endpoint(EndpointName=self.endpoint_name)\n self.sagemaker.delete_endpoint_config(EndpointConfigName=self.endpoint_name)", "def access_gemini_url_delete_method(context, endpoint):\n url = urljoin(context.gemini_api_url, endpoint)\n context.response = requests.delete(url)", "def delete_endpoint(self, endpoint):\n exists = self.get_endpoint(endpoint)\n if exists:\n self.endpoints.remove(exists)", "def delete_model_endpoint(\n project: str,\n endpoint_id: str,\n ):\n model_endpoint_store = get_model_endpoint_store(\n project=project,\n secret_provider=mlrun.api.crud.secrets.get_project_secret_provider(\n project=project\n ),\n )\n\n model_endpoint_store.delete_model_endpoint(endpoint_id=endpoint_id)\n\n logger.info(\"Model endpoint table cleared\", endpoint_id=endpoint_id)", "def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.add_endpoint(mac, ip, 'intersite-testsuite', 'app1', 'epg1')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app1-epg1'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app1', 'epg1')\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app1-epg1'))", "def delete(self, endpoint: str) -> HorizonResponse:\n return HorizonResponse(\n self._session.delete(urljoin(base=self._root_url, url=endpoint)),\n )", "def test_basic_remove_endpoint(self):\n mac, ip = self.setup_with_endpoint()\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg1')\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))", "def test_delete_deployment(self):\n pass", "def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n config['config'].append(self.create_export_policy())\n self.write_config_file(config, args)\n\n collector = execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))\n\n config = self.create_config_file()\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))", "def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 
'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))", "def delete_endpoint(self, endpoint_id):\n if EndpointService.delete_endpoint(endpoint_id) is None:\n abort(404)\n\n return {}", "def delete(openstack_resource):\n openstack_resource.delete()", "def test_delete_virtual_service(self):\n pass", "def test_aws_service_api_snapshot_delete(self):\n pass", "def test_dlr_dgw_uninstall(self):\n self._common_uninstall_delete(\n 'dlr_id', dlr_dgw.delete,\n {'gateway': {}},\n delete_args=['routingConfig'],\n delete_kwargs={\n 'uri_parameters': {'edgeId': 'dlr_id'}\n }\n )", "def DELETE(self, env, start_response):\n key_args = set(['cors','lifecycle','policy','tagging','website'])\n\n qs = env.get('QUERY_STRING', '')\n args = urlparse.parse_qs(qs, 1)\n\n if not key_args & set(args):\n # DELETE a Bucket\n version = args.get('versionId')\n if version:\n vid = version[0]\n if vid.lower() == 'lastest':\n pass\n else:\n env['PATH_INFO'] = '/v1/AUTH_%s/%s/%s' % (quote(self.account_name),\n quote(self.version_name(self.container_name)),\n vid)\n\n body_iter = self._app_call(env)\n status = self._get_status_int()\n\n if status != HTTP_NO_CONTENT:\n if status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n elif status == HTTP_NOT_FOUND:\n return self.get_err_response('NoSuchBucket')\n elif status == HTTP_CONFLICT:\n return self.get_err_response('BucketNotEmpty')\n else:\n return self.get_err_response('InvalidURI')\n\n resp = Response()\n resp.status = HTTP_NO_CONTENT\n return resp\n else:\n # DELETE specified data\n action = args.keys().pop()\n if action == 'cors':\n # delete cors\n env['HTTP_X_CONTAINER_META_ACCESS_CONTROL_ALLOW_ORIGIN'] = ''\n env['HTTP_X_CONTAINER_META_ACCESS_CONTROL_MAX_AGE'] = ''\n env['HTTP_X_CONTAINER_META_ACCESS_CONTROL_EXPOSE_HEADERS'] = ''\n env['HTTP_X_CONTAINER_META_ACCESS_CONTROL_ALLOW_METHOD'] = ''\n env['QUERY_STRING'] = ''\n env['REQUEST_METHOD'] = 'POST'\n\n body_iter = self._app_call(env)\n status = self._get_status_int()\n\n if is_success(status):\n resp = Response()\n resp.status = HTTP_NO_CONTENT\n return resp\n elif status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n else:\n return self.get_err_response('InvalidURI')\n elif action == 'lifecycle':\n # delete lifecycle\n env['HTTP_X_CONTAINER_META_TRANS_AT'] = ''\n env['HTTP_X_CONTAINER_META_TRANS_AFTER'] = ''\n env['HTTP_X_CONTAINER_META_TRANS_CLASS'] = ''\n\n env['HTTP_X_CONTAINER_META_EXPIRATION_AT'] = ''\n env['HTTP_X_CONTAINER_META_EXPIRATION_AFTER'] = ''\n env['HTTP_X_CONTAINER_META_EXPIRATION_PREFIX'] = ''\n env['HTTP_X_CONTAINER_META_EXPIRATION_STATUS'] = ''\n env['REQUEST_METHOD'] = 'POST'\n env['QUERY_STRING'] = ''\n body_iter = self._app_call(env)\n status = self._get_status_int()\n if is_success(status):\n resp = Response()\n resp.status = HTTP_NO_CONTENT\n return resp\n elif status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n else:\n return self.get_err_response('InvalidURI')\n elif action == 'policy':\n # delete policy\n env['REQUEST_METHOD'] = 'POST'\n env['QUERY_STRING'] = ''\n env['HTTP_X_CONTAINER_META_POLICY'] = ''\n body_iter = self._app_call(env)\n status = self._get_status_int()\n if is_success(status):\n resp = Response()\n resp.status = 
HTTP_NO_CONTENT\n return resp\n elif status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n else:\n return self.get_err_response('InvalidURI')\n elif action == 'tagging':\n # delete tagging\n env2 = copy(env)\n container_info = get_container_info(env2, self.app)\n meta_keys = container_info['meta'].keys()\n for key in meta_keys:\n env['HTTP_X_CONTAINER_META_' + key.replace('-', '_').upper()] = ''\n env['QUERY_STRING'] = ''\n env['REQUEST_METHOD'] = 'POST'\n\n body_iter = self._app_call(env)\n status = self._get_status_int()\n\n if is_success(status):\n resp = Response()\n resp.status = HTTP_NO_CONTENT\n return resp\n elif status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n else:\n return self.get_err_response('InvalidURI')\n elif action == 'website':\n # delete website\n body = env['wsgi.input'].read()\n env['REQUEST_METHOD'] = 'POST'\n env['QUERY_STRING'] = ''\n env['HTTP_X_CONTAINER_META_WEBSITE'] = quote(body)\n\n body_iter = self._app_call(env)\n status = self._get_status_int()\n\n if is_success(status):\n resp = Response()\n resp.status = HTTP_OK\n return resp\n elif status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n else:\n return self.get_err_response('InvalidURI')\n else:\n return self.get_err_response('InvalidURI')", "def test_admin_api_endpoints_removed(self) -> None:\n self.expect_unrecognized(\"GET\", \"/_synapse/admin/v1/registration_tokens\")\n self.expect_unrecognized(\"POST\", \"/_synapse/admin/v1/registration_tokens/new\")\n self.expect_unrecognized(\"GET\", \"/_synapse/admin/v1/registration_tokens/abcd\")\n self.expect_unrecognized(\"PUT\", \"/_synapse/admin/v1/registration_tokens/abcd\")\n self.expect_unrecognized(\n \"DELETE\", \"/_synapse/admin/v1/registration_tokens/abcd\"\n )\n self.expect_unrecognized(\"POST\", \"/_synapse/admin/v1/reset_password/foo\")\n self.expect_unrecognized(\"POST\", \"/_synapse/admin/v1/users/foo/login\")\n self.expect_unrecognized(\"GET\", \"/_synapse/admin/v1/register\")\n self.expect_unrecognized(\"POST\", \"/_synapse/admin/v1/register\")\n self.expect_unrecognized(\"GET\", \"/_synapse/admin/v1/users/foo/admin\")\n self.expect_unrecognized(\"PUT\", \"/_synapse/admin/v1/users/foo/admin\")\n self.expect_unrecognized(\"POST\", \"/_synapse/admin/v1/account_validity/validity\")", "def remove_endpoint_from_sipserver(self, endpoint: str) -> None:", "def delete_deployment(request, deployment, **_kwargs):\n pass", "def test_basic_remove_one_of_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app1', 'epg1')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app2', 'epg2')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app1-epg1'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app2-epg2'))\n\n self.remove_endpoint(mac1, ip1, 'intersite-testsuite', 'app1', 'epg1')\n self.assertFalse(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app1-epg1'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 
'l3out',\n 'intersite-testsuite-app2-epg2'))", "def test_esg_gateway_uninstall(self):\n self._common_uninstall_read_update(\n 'esg_id|dgw_ip', esg_gateway.delete,\n {'gateway': {}},\n # read\n read_args=['routingConfigStatic'],\n read_kwargs={'uri_parameters': {'edgeId': \"esg_id\"}},\n read_response={\n 'status': 204,\n 'body': test_nsx_base.EDG_STATIC_ROUTING_BEFORE\n },\n # update\n update_args=['routingConfigStatic'],\n update_kwargs={\n 'uri_parameters': {'edgeId': \"esg_id\"},\n 'request_body_dict': {\n 'staticRouting': {\n 'staticRoutes': {},\n 'defaultRoute': None\n }\n }\n }\n )", "async def deregister_endpoint(self, handle: str) -> None:\n await self.AD.http.deregister_endpoint(handle, self.name)", "def test_basic_remove_one_of_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry_with_provided_contract(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))\n self.assertTrue(self.verify_remote_site_has_entry_with_provided_contract(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))\n\n self.remove_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n self.assertFalse(self.verify_remote_site_has_entry_with_provided_contract(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))\n self.assertTrue(self.verify_remote_site_has_entry_with_provided_contract(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))", "def test_dashboards_v2_delete(self):\n pass", "def test_delete_deployment_run(self):\n pass" ]
[ "0.7355473", "0.7192038", "0.69872594", "0.6578153", "0.6334313", "0.6297704", "0.62821496", "0.61767966", "0.6053079", "0.6046499", "0.60339475", "0.6027786", "0.5983133", "0.59529054", "0.5951396", "0.5926549", "0.5921632", "0.57891357", "0.5771835", "0.576563", "0.573931", "0.57155466", "0.5698269", "0.56854355", "0.56627315", "0.56611013", "0.56340134", "0.5630221", "0.55996853", "0.5599063" ]
0.7549312
0
Connect to Glue Dev Endpoint
def connect_dev_endpoint(self): done = False while not done: endpoint = self.glue_engine.get_dev_endpoint(EndpointName=self.dev_endpoint_name) status = endpoint["DevEndpoint"]["Status"] done = status == "READY" if status == "PROVISIONING": print("Still provisionning...") time.sleep(30) elif status == "READY": print("Done") done = True else: print("There was an error") print(status) public_ip = endpoint["DevEndpoint"]["PublicAddress"] os.system( "ssh -i {} glue@{} -t gluepyspark".format(self.dev_endpoint_private_rsa, public_ip))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_dev_endpoint(self):\n\n self.dev_endpoint = self.glue_engine.create_dev_endpoint(\n EndpointName=self.dev_endpoint_name,\n RoleArn=self.dev_endpoint_role,\n PublicKey=self.dev_endpoint_pub_rsa,\n NumberOfNodes=2,\n ExtraPythonLibsS3Path=self.python_library,\n GlueVersion=\"1.0\",\n Arguments={\"GLUE_PYTHON_VERSION\": \"3\"})", "def guiding_connect():\r\n try:\r\n host = request.form[\"host\"]\r\n app.guider.connect(host)\r\n return jsonify({\"status\": True})\r\n except Exception as e:\r\n return jsonify(\r\n {\"status\": False, \"error\": \"Failed connecting to guider: %s\" % e}\r\n )", "def _Connect(self):\n return bq.connect(self.api_endpoint_,\n self.auth_policy_.GetToken(),\n self.transport_)", "def connect():\n return connection.Connection(username=api_user,\n api_key=api_key,\n region=api_region)", "def qglue():\n from glue.core import DataCollection\n from glue.app.qt import GlueApplication\n\n dc = DataCollection()\n\n # Suppress pesky Glue warnings.\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', GlueDeprecationWarning)\n ga = GlueApplication(data_collection=dc)\n\n return ga", "def build_connection():\n # Retrieve organisation API key and password from the database\n org_id = session.get('org_id')\n\n base_url = config.BASE_URL\n api_org_key = config.API_ORG_KEY\n api_org_pw = config.API_ORG_PASSWORD\n supplier_org_id = config.SUPPLIER_ORG_ID\n \n return SquizzGatewayService(base_url, \n org_id,\n api_org_key,\n api_org_pw,\n supplier_org_id)", "def connect(self):\n\n self.openstack = connection.Connection(auth_url=self.args.OS_AUTH_URL,\n project_name=self.args.OS_TENANT,\n username=self.args.OS_USER,\n password=self.args.OS_PASS)\n\n self.scaleio = SIOWrapper(self.args.SIO_GATEWAY,\n self.args.SIO_PORT,\n self.args.SIO_USER,\n self.args.SIO_PASS)", "def connect(update, context):\n url = os.environ['URL'] # getenv('URL') # Serve the endpoint to client node (https://purestake.com)\n algod_token = ALGODTOKEN # Your personal token (https://purestake.com)\n headers = {\"X-API-Key\": algod_token}\n try:\n return algod.AlgodClient(algod_token, url, headers)\n except Exception as e:\n return e", "def connect():\n return boto3.client('glacier',\n \"us-west-2\",\n aws_access_key_id=ACCESS_KEY_ID,\n aws_secret_access_key=SECRET_ACCESS_KEY)", "def connect(self):\n self.connector = connectWS(self)", "def connect(self):\n self.connector = connectWS(self)", "def do_use(self, region):\n if self._local_endpoint is not None:\n host, port = self._local_endpoint # pylint: disable=W0633\n self.engine.connect(\n region,\n session=self.session,\n host=host,\n port=port,\n is_secure=False,\n access_key=\"\",\n secret_key=\"\",\n )\n else:\n self.engine.connect(region, session=self.session)", "def connect_td_db(apikey, endpoint, dbname, default_engine='presto'):\n try:\n cli = pytd.Client(apikey=apikey,\n endpoint=endpoint,\n database=dbname,\n default_engine=default_engine)\n print('Connection Successful')\n return cli\n except Exception as e:\n print('Exception in connect_td_db(): ', str(e))\n cli.close()", "def connect(\n host=\"vogon.reports.mn\",\n port=9090,\n path=\"/api/spark/sql/\",\n scheme=\"https\",\n user=None,\n password=None,\n context=None,\n header=False,\n ssl_verify_cert=True,\n ssl_client_cert=None,\n proxies=None,\n): # noqa: E125\n context = context or {}\n\n return Connection(\n host,\n port,\n path,\n scheme,\n user,\n password,\n context,\n header,\n ssl_verify_cert,\n ssl_client_cert,\n proxies,\n )", "async def connect(self):\n await 
self.ws_connect()\n await self._set_schema(SCHEMA_VERSION)\n await self._set_products()", "def connect():", "def rhevConnect():\n rhev = rhev_settings.HOST_PORT\n conn = httplib.HTTPSConnection(rhev)\n return conn", "def connect(host=\"localhost\", port=1717, username=\"admin\", password=\"admin\", environment=\"\", **kwargs):\n return Concourse(host, port, username, password, environment, **kwargs)", "def ow_connect():\n ow = boto3.client('opsworks', region_name='us-east-1')\n return ow", "def api_endpoint():\n return 'localhost'", "def connect_syndicate( username=CONFIG.SYNDICATE_OPENCLOUD_USER, password=CONFIG.SYNDICATE_OPENCLOUD_PASSWORD, user_pkey_pem=CONFIG.SYNDICATE_OPENCLOUD_PKEY ):\n debug = True \n if hasattr(CONFIG, \"DEBUG\"):\n debug = CONFIG.DEBUG\n \n client = syntool.Client( username, CONFIG.SYNDICATE_SMI_URL,\n password=password,\n user_pkey_pem=user_pkey_pem,\n debug=debug )\n\n return client", "def connect():\r\n # print ('Testing connection..')\r\n get_qlik_sense.get_about()", "def main(ctx: click.core.Context, host: str, user: str, password: typing.Optional[str], schema: str) -> None:\n ctx.obj = {}\n creds = tribble.database.Creds(host, user, password, schema)\n engine = tribble.database.connect_db(creds)\n contract.Session.configure(bind=engine)\n ctx.obj['creds'] = creds\n ctx.obj['engine'] = engine", "def _connect(self):\n if self.cluster.get('encrypted_password'):\n self.cluster['password'] = aws_etl.utils.decrypt(\n self.cluster['encrypted_password'])\n\n self.connection = connect(\n host=self.cluster['host'],\n port=self.cluster['port'],\n sslmode='require',\n user=self.cluster['user'],\n password=self.cluster['password'],\n database=self.cluster['database'])\n return self.connection", "def connect(self):\n self.conn.connect()", "def _connect(self):\n cluster = Cluster('http://{}:{}'.format(self.host, self.port))\n authenticator = PasswordAuthenticator('Administrator', self.password)\n cluster.authenticate(authenticator)\n self.client = cluster.open_bucket(self.bucket)", "def connect():\n\n # Exchange authorization code for acceess token and create session\n session = auth_flow.get_session(request.url)\n client = UberRidesClient(session)\n\n # Fetch profile for driver\n profile = client.get_driver_profile().json\n\n # Fetch last 50 trips and payments for driver\n trips = client.get_driver_trips(0, 50).json\n payments = client.get_driver_payments(0, 50).json\n\n return render_template('driver_dashboard.html',\n profile=profile,\n trips=trips['trips'],\n payments=payments['payments']\n )", "def connect(self):\n if self.dbapi.__name__ == \"psycopg2\":\n self.connection[\"sslmode\"] = \"require\"\n elif self.dbapi.__name__ == \"pg8000\":\n self.connection[\"ssl_context\"] = True\n super(Redshift, self).connect()", "def __init__(self, hostname, port, username, password, tenant_id, connect=True):\n self.cmd_gw_ws_api = HawkularWebsocketClient(\n url=\"ws://{}:{}/hawkular/command-gateway/ui/ws\".format(hostname, port),\n headers={\"Hawkular-Tenant\": tenant_id, \"Accept\": \"application/json\"},\n username=username, password=password)\n self.tenant_id = tenant_id\n if connect:\n self.cmd_gw_ws_api.connect()", "def connectionDB():\n os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = CREDENTIAL\n client = bq.Client()\n return client" ]
[ "0.63102233", "0.6122564", "0.6047225", "0.60375047", "0.5978164", "0.58321375", "0.5771107", "0.573747", "0.5714928", "0.5713471", "0.5713471", "0.5709945", "0.57074934", "0.5687956", "0.5638669", "0.56293046", "0.5627169", "0.5550863", "0.5526117", "0.54849017", "0.54776275", "0.54638135", "0.54313004", "0.5423131", "0.54190904", "0.54176205", "0.5394743", "0.53869665", "0.53828055", "0.5357298" ]
0.6406663
0
This method supports configuring the advanced options of the ZD syslog feature
def _set_advance_syslog(zd, **kwargs): xlocs = LOCATOR_CFG_SYSTEM_NETWORKMGMT adv_opt = ['zd_facility_name', 'zd_priority_level', 'ap_facility_name', 'ap_priority_level'] adv_cfg = {'pause': 1} adv_cfg.update(kwargs) if zd.s.is_element_present(xlocs['syslog_advanced_setting_collapse']): zd.s.click_and_wait(xlocs['syslog_advanced_setting_click']) time.sleep(adv_cfg['pause']) for key in adv_opt: if adv_cfg.get(key) is not None: zd.s.select_value(xlocs[key], adv_cfg[key])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_enable_syslog(self) -> Union[bool, None]:\n # read the original value passed by the command\n enable_syslog = self.raw_param.get(\"enable_syslog\")\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return enable_syslog", "def enableCLangLogger(self):", "def syslog_config(self, syslog_config):\n\n self._syslog_config = syslog_config", "def configure_logging():\n logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')\n\n # Enable logging to syslog as well:\n # Normally this would not be necessary but logging assumes syslog listens on\n # localhost syslog/udp, which is disabled on 10.5 (rdar://5871746)\n syslog = logging.handlers.SysLogHandler('/var/run/syslog')\n syslog.setFormatter(logging.Formatter('%(name)s: %(message)s'))\n syslog.setLevel(logging.INFO)\n logging.getLogger().addHandler(syslog)", "def _modified_option_defaults(self) -> Dict[str, Any]:\n return {\n # Change 'debug.traceback' default to True if debug logging is enabled.\n 'debug.traceback': logging.getLogger('pyocd').isEnabledFor(logging.DEBUG),\n }", "def setupLogging(loglevel=logging.INFO):\n\n # The following configures two loggers, the root logger and a logger named \"phone_ctlr_log\". Messages sent to the\n # root logger will be sent to the system log using the syslog protocol, and messages to the \"phone_ctlr_log\" logger will\n # be written to the Phone_Agent.log file which will be rotated once the log reaches 1Mb.\n\n configdict = {\n 'version': 1, # Configuration schema in use; must be 1 for now\n #'disable_existing_loggers': True, # Disables all existing logging configurations\n\n 'formatters': {\n 'brief': {\n 'format' : '%(levelname)-8s %(asctime)s (%(created)s) %(message)s',\n 'datefmt': '%Y%m%dT%H%M%S.%Z' },\n 'standard': {\n 'format' : '%(levelname)-8s %(asctime)s %(name)-15s %(message)s',\n 'datefmt': '%Y%m%dT%H%M%S.%Z' },\n 'console': {\n 'format' : '%(levelname)-8s %(asctime)s -- %(message)s',\n 'datefmt': '%Y%m%dT%H%M%S.%Z' },\n 'custom': {\n 'format' : '%(asctime)s - %(message)s',\n 'datefmt': '%Y-%m-%dT%H:%M:%S.%Z' } ### Ex,: 2038-01-01T05:05:02\n },\n\n 'handlers': {'applog': {'class': 'logging.handlers.RotatingFileHandler',\n 'filename': '/opt/tools/phone_agent/Phone_Agent.log',\n #'filename': 'Phone_Agent.log',\n 'backupCount': 3,\n 'formatter': 'custom',\n 'level': 'INFO',\n 'maxBytes': 1024*1024},\n 'conlog': {'class': 'logging.StreamHandler',\n 'formatter': 'console',\n #'stream': 'console',\n 'level': 'DEBUG'},\n 'syslog': {'class': 'logging.handlers.SysLogHandler',\n 'formatter': 'standard',\n 'level': 'ERROR'}},\n\n # Specify all the subordinate loggers\n 'loggers': {\n 'phone_ctlr_log': {\n 'handlers': ['applog']\n },\n 'console_log': {\n 'handlers': ['conlog']\n }\n },\n # Specify properties of the root logger\n 'root': {\n 'handlers': ['syslog']\n },\n }\n\n # Set up configuration\n logging.config.dictConfig(configdict)", "def _configure_logging(self):\n pass", "def init_logging():\n \n loglevel=logging.DEBUG\n #logging.basicConfig(level=loglevel)\n logger = logging.getLogger()\n logger.setLevel(loglevel)\n slh=SysLogHandler(address = '/dev/log')\n slh.setFormatter(logging.Formatter(\"rbldnspy[%(process)d]: %(message)s\"))\n #log debug/error messages to syslog info level\n slh.priority_map[\"DEBUG\"]=\"info\"\n slh.priority_map[\"ERROR\"]=\"info\"\n \n slh.setLevel(loglevel)\n logger.addHandler(slh)\n return logger", "def setup(self, cfg):\n super().setup(cfg)\n\n \"\"\"\n TODO override the date format to 
ISOsomething standard...\n \"\"\"\n #general_fmt = r\"%(asctime)s [%(process)3d] [%(levelname)-7s] %(message)s\"\n #Gunicorn 'access' somehow has a very different requestion context. So the ip getting is left out, it is inserted by access below\n general_formatter = RequestFormatter(\n '[%(asctime)s] [%(base_hostname)s:%(hostname)s:%(process)3d] [%(levelname)-7s] %(message)s'\n )\n #print(self.cfg.access_log_format)\n #self.cfg.access_log_format = general_fmt\n\n # Override Gunicorn's `error_log` configuration.\n self._set_handler( self.error_log, cfg.errorlog, general_formatter )\n\n #Push the general format at our the access formatter, which will publish specialised messages\n self._set_handler( self.access_log, cfg.accesslog, general_formatter )", "def set_daemon_log():\n global toconsole\n toconsole = False", "def syslog_config_from_platform_setting(self, syslog_config_from_platform_setting):\n\n self._syslog_config_from_platform_setting = syslog_config_from_platform_setting", "def config_log_level_cb(data, option):\n change_log_level(\n G.CONFIG.network.debug_category, G.CONFIG.network.debug_level\n )\n return 1", "def setAppendLog(self,value):\n self.PDFreactorConfiguration.in1[\"appendLog\"] = value", "def enablePyLangLogger(self):", "def service_bus_cli():\n configure_logging()", "def setLogLevel(self,value):\n self.PDFreactorConfiguration.in1[\"logLevel\"] = value", "def settings_init(self):\n config_console = configparser.ConfigParser()\n config_console.read(CONFIG_FILE_NAME)\n self.logmode = config_console[\"LOG\"][\"log_mode\"]", "def config():\n sudo(\n r\"sed -i '/#password=/c\\password=abcdefghijklmnopq' /etc/minv/minv.conf\"\n )\n sudo(\n r\"sed -i '/log_level = INFO/c\\log_level = DEBUG' /etc/minv/minv.conf\"\n )", "def set_config(self, file_path_name):\n level = logging.DEBUG\n format = '%(asctime)s %(levelname)-8s %(message)s' \n datefmt = '%a, %d %b %Y %H:%M:%S'\n filemode = 'a'\n \n\n logging.basicConfig(level = level,\n format = format,\n datefmt = datefmt,\n filename = file_path_name,\n filemode = filemode)", "def fusion_api_configure_remote_syslog(self, body, api=None, headers=None):\n return self.remote_syslog.create(body, api, headers)", "def configure_logging():\n\n class _CustomFormatter(logging.Formatter):\n \"\"\"\n A private formatter class, this is required to provide microsecond precision timestamps and utc\n conversion\n \"\"\"\n converter = dt.datetime.utcfromtimestamp\n\n def formatTime(self, record, datefmt):\n ct = self.converter(record.created)\n return ct.strftime(datefmt)\n\n logging.addLevelName(AUDIT, \"AUDIT\")\n logger = logging.getLogger()\n log_level = config.get_config('LOG_LEVEL')\n logger.setLevel(log_level)\n handler = logging.StreamHandler(sys.stdout)\n\n formatter = _CustomFormatter(\n fmt='[%(asctime)sZ] %(message)s pid=%(process)d LogLevel=%(levelname)s LoggerName=%(name)s',\n datefmt='%Y-%m-%dT%H:%M:%S.%f')\n\n handler.setFormatter(formatter)\n logger.handlers = []\n logger.addHandler(handler)\n\n _check_for_insecure_log_level(log_level)", "def init_logs(self):\n\n handler = logging.FileHandler(self.app.config['LOG'])\n handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))\n self.app.logger.addHandler(handler)\n if self.app.config.get(\"LOG_LEVEL\") == \"DEBUG\":\n self.app.logger.setLevel(logging.DEBUG)\n elif self.app.config.get(\"LOG_LEVEL\") == \"WARN\":\n self.app.logger.setLevel(logging.WARN)\n else:\n self.app.logger.setLevel(logging.INFO)\n self.app.logger.info('Startup with log: %s' % 
self.app.config['LOG'])", "def test_syslog_shortcut_enhanced(self):\n with cleanup_handlers():\n the_expected_message = random_string(50)\n not_an_expected_message = random_string(50)\n coloredlogs.install(syslog='warning')\n logging.info(\"%s\", not_an_expected_message)\n logging.warning(\"%s\", the_expected_message)\n if os.path.isfile(UNIX_SYSTEM_LOG):\n with open(UNIX_SYSTEM_LOG) as handle:\n assert any(the_expected_message in line for line in handle)\n assert not any(not_an_expected_message in line for line in handle)", "def setup_logfile():\r\n from core.general.appinit import log_init\r\n log_init(\r\n 'general',\r\n 'django_api'\r\n )", "def test_set_subsystem_logger_level(self):\n pass", "def logger_settings(self):\n LOG_CONFIG['root']['handlers'].append(self.logmode)\n flask_log = logging.getLogger(DEFAULT_NAME_FLASK_LOGGER)\n flask_log.setLevel(logging.ERROR)\n dictConfig(LOG_CONFIG)\n self.logger = logging.getLogger()", "def vv_flag():\n log.setLevel(logging.DEBUG)", "def syslog_remote_enable(handle, name, hostname,\n severity=\"emergencies\", forwarding_facility=\"local0\"):\n\n from ucsmsdk.mometa.comm.CommSyslogClient import CommSyslogClient\n\n mo = CommSyslogClient(parent_mo_or_dn=\"sys/svc-ext/syslog\",\n forwarding_facility=forwarding_facility,\n hostname=hostname, admin_state=\"enabled\",\n severity=severity, name=name)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo", "def setLogEvents(self, enable):\n if enable == True:\n DPxEnableDinLogEvents()\n else:\n DPxDisableDinLogEvents()", "def handle_adminlogplug(bot, event):\n if not event.rest: event.missing(\"<plugname>\") ; return\n if len(event.rest) < 3: event.reply(\"min 3 chars plz\") ; return\n setlogplug(event.rest)\n event.done()" ]
[ "0.6081552", "0.6067496", "0.6043397", "0.5979013", "0.5853631", "0.5756815", "0.5755001", "0.5656631", "0.5582538", "0.5562076", "0.5491174", "0.5480679", "0.5466313", "0.54245096", "0.5423763", "0.5375122", "0.536843", "0.53460175", "0.53303653", "0.5327427", "0.53229415", "0.5310751", "0.5308233", "0.52927923", "0.5278474", "0.5277301", "0.5271043", "0.52696794", "0.52538115", "0.524978" ]
0.68598795
0
Configure the country code and related options
def set_country_code(zd, option, **kwargs): cfg_option = {'country_code': '', 'channel_optimization': '', 'channel_mode':''} cfg_option.update(option) xloc = LOCATOR_CFG_SYSTEM_COUNTRY_CODE xloc_map = { 'country_code': xloc['country_code_listbox'], 'compatibility': xloc['optimization_for_compatibility_radio'], 'interoperability': xloc['optimization_for_interoperability_radio'], 'performance': xloc['optimization_for_performance_radio'], 'allow_indoor': xloc['allow_indoor_channel_checkbox'], } nav_to(zd) if cfg_option['country_code']: zd.s.select_option(xloc_map['country_code'], re.escape(cfg_option['country_code'])) if cfg_option['channel_optimization']: zd.s.click_and_wait(xloc_map[cfg_option['channel_optimization']]) if cfg_option['channel_mode']: zd.s.click_if_not_checked(xloc_map[cfg_option['channel_mode']]) zd.s.choose_ok_on_next_confirmation() zd.s.click_and_wait(zd.info['loc_cfg_sys_ctrycode_apply_button']) if not zd.s.is_confirmation_present(5): raise Exception("No dialog confirmation for setting country code appeared") zd.s.get_confirmation() logging.info("Change country code option for ZoneDirector to %s successfully" % str(cfg_option))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def domain_settings_set_country(self, country):\n return self._request('domain/settings/set_country', inspect_args_func(inspect.currentframe()))", "def setup_plugins(self):\n super(Site, self).setup_plugins()\n self.plugins.countries.configure(hide_region=True)\n self.plugins.ledger.configure(use_pcmn=True)\n self.plugins.countries.configure(country_code='BE')", "def country_code(self, country_code):\n\n self._country_code = country_code", "def test_default_country_set(self):\n response = self.client.get(reverse(\"billing_info\"))\n self.assertEqual(response.status_code, 200)\n self.assertContains(\n response, '<option value=\"PL\" selected>Poland</option>', html=True\n )", "def set_CountryCode(self, value):\n super(AddressValidationInputSet, self)._set_input('CountryCode', value)", "def country_code(self) -> str | None:\n pass", "def __create_country_dropdown(self):\n return dcc.Dropdown(\n id=\"dd_country\",\n options=self.data_reader.get_country_options(),\n value=\"Canada\",\n )", "def country(self, country):\n\n self._country = country", "def country(self, country):\n\n self._country = country", "def country(self, country):\n\n self._country = country", "def country(self, country):\n\n self._country = country", "def country(self, country):\n\n self._country = country", "def onchange_country(self):\n if self.country_id and self.country_id.code == 'SA':\n self.is_saudi = True\n else:\n self.is_saudi = False", "def test_default_country_set_no_ip(self):\n response = self.client.get(reverse(\"billing_info\"))\n self.assertEqual(response.status_code, 200)\n self.assertContains(\n response, '<option value=\"PL\" selected>Poland</option>', html=True\n )", "def country(alpha_2_code: str) -> None:", "def test_country_overrides(self):\n # Retrieve the registration form description\n with override_settings(REGISTRATION_EXTRA_FIELDS={\"country\": \"required\"}):\n response = self.client.get(self.url)\n self.assertHttpOK(response)\n\n self.assertContains(response, 'Kosovo')", "def set_country_for_search(self, country_name_list):\n self.multiple_items_selection_from_kendo_dropdown(self.country_dropdown_locator, country_name_list)\n self.wait_for_ajax_spinner_load()", "def country_hint(self, value):\n return None", "def configure_geo_type_question(self, question_data):\n self.driver.find_radio_button(GEO_RB).click()\n return self", "def edit_city(g, city_name, option, value):\n city_code = g.convert[city_name]\n \n if(option == \"country\"):\n g.city_dict[city_code].set_country(value)\n \n if(option == \"continent\"):\n g.city_dict[city_code].set_continent(value)\n \n if(option == \"timezone\"):\n g.city_dict[city_code].set_timezone(int(value)) \n \n if(option == \"coordinates\"):\n g.city_dict[city_code].set_coordinates(value) \n \n if(option == \"population\"):\n g.city_dict[city_code].set_population(int(value))\n \n if(option == \"region\"):\n g.city_dict[city_code].set_region(int(value))\n \n return g", "def test_default_country_by_ip_no_settings(self):\n\n response = self.client.get(\n reverse(\"billing_info\"), HTTP_X_FORWARDED_FOR=\"85.214.132.117\"\n )\n self.assertEqual(response.status_code, 200)\n self.assertContains(\n response, '<option value=\"\" selected>---------</option>', html=True\n )", "def __config_attributes(self):\n self.__name = self.__data[self.__code][\"airportName\"]\n self.__country = Country(name=self.__data[self.__code][\"countryName\"],\n code=self.__data[self.__code][\"countryCode\"])\n try:\n self.__city = self.__data[self.__code][\"city\"]\n except Exception:\n 
self.__city = ''", "def with_preset_issuing_country(self, country):\n self.__preset_issuing_country = country\n return self", "def test_default_country_by_ip(self):\n\n response = self.client.get(\n reverse(\"billing_info\"), HTTP_X_FORWARDED_FOR=\"85.214.132.117\"\n )\n self.assertEqual(response.status_code, 200)\n self.assertContains(\n response, '<option value=\"DE\" selected>Germany</option>', html=True\n )\n\n response = self.client.get(\n reverse(\"billing_info\"), REMOTE_ADDR=\"85.214.132.117\"\n )\n self.assertEqual(response.status_code, 200)\n self.assertContains(\n response, '<option value=\"DE\" selected>Germany</option>', html=True\n )", "def test_default_country_unset(self):\n response = self.client.get(reverse(\"billing_info\"))\n self.assertEqual(response.status_code, 200)\n self.assertContains(\n response, '<option value=\"\" selected>---------</option>', html=True\n )", "def set_option(self, field):\n self.lnp.set_option(field, self.controls[field].get())\n self.update_displays()", "def country_code(self):\n return self.__country_code", "def country_code(self):\n return self.__country_code", "def country(self) -> str:\n return pulumi.get(self, \"country\")", "def country(self, country):\n if country is None:\n raise ValueError(\"Invalid value for `country`, must not be `None`\")\n\n self._country = country" ]
[ "0.68588054", "0.667291", "0.64471006", "0.63912165", "0.637591", "0.63027674", "0.6246238", "0.6234282", "0.6234282", "0.6234282", "0.6234282", "0.6234282", "0.61908615", "0.61586636", "0.6097614", "0.6056722", "0.59633374", "0.5948035", "0.5808251", "0.57715803", "0.5738969", "0.5727871", "0.5656625", "0.564544", "0.5640699", "0.5618411", "0.56087816", "0.56087816", "0.55326676", "0.5491732" ]
0.7575244
0
clear and reload the menu with a new set of options. valueList: list of new options; value: initial value to set the optionmenu's menubutton to
def SetMenu(self,valueList,value=None): self['menu'].delete(0,'end') for item in valueList: self['menu'].add_command(label=item, command=_setit(self.variable,item,self.command)) if value: self.variable.set(value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_values( self, values ):\n #self.listbox.configure( values )\n # clear\n #for", "def callback_ResetDropdown(window):\n # set values and value to empty to get rid of previously specified answers\n window['changeMod'].update('Change ___:')\n window['changeOptions'].update(values=[''])\n window['changeOptions'].update(value='')\n\n return None", "def _optionsmenu_restart():\n self.input_box.delete(1.0, END)\n pass", "def reset(self):\n self.menu.removeAll()", "def _clear(self):\n self._items = []\n self.key_listbox.delete(0, tk.END)\n self.value_listbox.delete(0, tk.END)", "def _update_combobox(self):\n self.section_combobox.clear()\n self.section_ids = sorted(self.settings['schematisation']['flooddefence_ids'])\n self.section_combobox.addItems(self.section_ids)\n self.section_combobox.setCurrentText('')", "def reset(self, ):\n self.clear()\n self.create_all_menus()", "def updateacc(cls):\n cls.var_1.set('')\n cls.right_accentry['menu'].delete(0, 'end')\n\n # Insert list of new options (tk._setit hooks them up to var)\n temp = database3.Db03(\"\", \"\")\n new_choices = temp.accounts()\n for choice in new_choices:\n cls.right_accentry['menu'].add_command(label=choice, \\\n command=tk._setit(cls.var_1, choice, God.changedacc))\n try:\n cls.var_1.set(new_choices[0])\n except IndexError:\n cls.var_1.set('None')\n God.changedacc()", "def update_namelist_menu(self):\n new_nml = wx.Menu() # build new menu\n\n # populate entries and bind their selection\n for i,nml in enumerate(self.input_file.namelists.keys()):\n item = new_nml.Append(i, self.input_file.namelists[nml].name)\n self.Bind(wx.EVT_MENU, self.SelectNamelist, item, id=i)\n\n # replace old menu in the 1st position with updated one (0-based indexing)\n self.menubar.Replace(self.nml_menu_index, new_nml, '&Namelists')\n\n # reset the namelist entries that are displayed\n self.nmlpanel.reset(unset_namelist=True) # ensure no namelist is currently selected\n\n self.statusbar.SetStatusText(\"Choose a namelist from the menu\", 1)", "def delete_menu():", "def my_option_menu(master, choice_list, r, c, rsp, csp, px, py) -> object:\n variable = tk.StringVar(master)\n menu = tk.OptionMenu(master, variable, *choice_list)\n menu.grid(row=r, column=c, rowspan=rsp, columnspan=csp, padx=px, pady=py)\n return variable, menu", "def DebugMenuProviderMixin_on_menus_update(self):\n self._DebugMenuProviderMixin_clear_menu_actions() # clear the existing menu actions\n \n ## Update Drivers Menu:\n curr_drivers_items = list(self.connection_man.registered_available_drivers.keys())\n for a_driver_key in curr_drivers_items:\n self.activeMenuReference.active_drivers_menu.addAction(a_driver_key)\n ## Update Drivable Menu:\n curr_drivable_items = list(self.connection_man.registered_available_drivables.keys())\n for a_driveable_key in curr_drivable_items:\n self.activeMenuReference.active_drivables_menu.addAction(a_driveable_key)\n ## Update Connections Menu:\n curr_connections_descriptions = list([a_conn_ref.description for a_conn_ref in self.connection_man.active_connections.values()])\n for a_connection_key in curr_connections_descriptions:\n self.activeMenuReference.active_connections_menu.addAction(a_connection_key)", "def refresh_playlist_menu(self, playlists):\n menu = self.playlist_dropdown[\"menu\"]\n menu.delete(0, \"end\")\n for playlist in playlists:\n name = playlist.playlist_name\n menu.add_command(\n label=name,\n command=lambda value=name: self.intermediate_playlist_select_command(value)\n )\n\n # menu.add_command(\n # label=\"Working Playlist\",\n 
# command=lambda value=name:\n # self.intermediate_playlist_select_command(\"Working Playlist\")\n # )\n\n self.display_data(self.parent.song_object_list)\n self.user.active_group.playlists = playlists", "def setMenuOptions(self, menu_obj, options: [], binding_function):\n\n for item in options:\n menu_obj.addAction(item)\n menu_obj.triggered.connect(binding_function)", "def updateUI(self, updateWindow=False):\n\n global window\n global rsUtility\n\n shaderUtility.update()\n\n grps = shaderUtility.getShaderGroups()\n\n def _selectOptionMenuItem(optionMenu, value):\n items = cmds.optionMenu(optionMenu, query=True,\n itemListShort=True)\n if not items:\n return\n for (index, item) in enumerate(cmds.optionMenu(optionMenu,\n query=True, itemListShort=True)):\n label = cmds.menuItem(item, query=True, label=True)\n if label != value:\n continue\n cmds.optionMenu(optionMenu, edit=True, select=index + 1)\n\n # Menu1\n\n if cmds.optionMenu('%s_optionMenu01' % self.windowID,\n query=True, numberOfItems=True) > 0:\n value = cmds.optionMenu('%s_optionMenu01' % self.windowID,\n query=True, value=True)\n for item in cmds.optionMenu('%s_optionMenu01'\n % self.windowID, query=True, itemListLong=True):\n cmds.deleteUI(item)\n else:\n value = None\n\n if grps.keys() == []:\n return False\n if grps.keys() is None:\n return False\n\n if grps.keys() != []:\n for item in util.natsort(grps.keys()):\n cmds.menuItem(label=item, parent='%s_optionMenu01'\n % self.windowID)\n if value is not None:\n _selectOptionMenuItem('%s_optionMenu01'\n % self.windowID, value)\n\n # Menu2\n\n key = value\n if key is None:\n return False\n\n if cmds.optionMenu('%s_optionMenu02' % self.windowID,\n query=True, numberOfItems=True) > 0:\n value = cmds.optionMenu('%s_optionMenu02' % self.windowID,\n query=True, value=True)\n for item in cmds.optionMenu('%s_optionMenu02'\n % self.windowID, query=True, itemListLong=True):\n cmds.deleteUI(item)\n else:\n value = None\n\n if key in grps.keys():\n for item in util.natsort(grps[key]):\n cmds.menuItem(label=item, parent='%s_optionMenu02'\n % self.windowID)\n if value is not None:\n _selectOptionMenuItem('%s_optionMenu02'\n % self.windowID,\n cmds.textField('%s_textField02'\n % self.windowID, query=True, text=True))\n\n value = cmds.optionMenu('%s_optionMenu03' % self.windowID,\n query=True, value=True)\n if value is None:\n for item in SHADER_TYPES:\n cmds.menuItem(item, label=item, parent='%s_optionMenu03'\n % self.windowID)\n\n # Update the main Render Setup Window to reflect new group assignments\n\n if updateWindow:\n cmds.evalDeferred(window.updateUI)", "def reset_namelist_menu(self):\n new_nml = wx.Menu() # build new menu\n\n # add single element, don't bind it to anything\n nmlItem = new_nml.Append(wx.ID_ANY, '--No File Loaded--', '--No File Loaded--')\n\n # replace the second menu, index=1\n self.menubar.Replace(self.nml_menu_index, new_nml, '&Namelists')\n\n self.namelist = None # there is no longer a current namelist\n self.statusbar.SetStatusText(\"Namelist: --No File Loaded--\", 1)", "def populateComboBox(self, comboBox, contents, \n\t\treplace=True, \n\t\taddEmptyItems=False, \n\t\tsetSelected=None, \n\t\tblockSignals=False):\n\t\tif blockSignals:\n\t\t\tcomboBox.blockSignals(True)\n\n\t\t# Store current value\n\t\tif setSelected is None:\n\t\t\tcurrent = comboBox.currentText()\n\t\telse:\n\t\t\tcurrent = setSelected\n\n\t\t# Clear menu\n\t\tif replace:\n\t\t\tcomboBox.clear()\n\n\t\t# Populate menu\n\t\tif contents:\n\t\t\tfor item in contents:\n\t\t\t\tif 
addEmptyItems:\n\t\t\t\t\tcomboBox.addItem(item)\n\t\t\t\telse:\n\t\t\t\t\tif item:\n\t\t\t\t\t\tcomboBox.addItem(item)\n\n\t\t# Set to current value or specified value\n\t\tindex = comboBox.findText(current)\n\t\tif index == -1:\n\t\t\tcomboBox.setCurrentIndex(0)\n\t\telse:\n\t\t\tcomboBox.setCurrentIndex(index)\n\n\t\tif blockSignals:\n\t\t\tcomboBox.blockSignals(False)", "def popup_empty_menu ( self, control ):\n self._cur_control = control\n control.PopupMenuXY( MakeMenu( self.empty_list_menu, self, True, \n control ).menu, 0, 0 )", "def add(self, data):\n self.menuItems.update(data)", "def __setup_menu(self):\r\n self.menu.clear()\r\n if self.data:\r\n actions = self.original_actions\r\n else:\r\n actions = (self.plugin.new_action, self.plugin.open_action)\r\n self.setFocus() # --> Editor.__get_focus_editortabwidget\r\n add_actions(self.menu, actions + self.additional_actions)\r\n self.close_action.setEnabled( len(self.plugin.editortabwidgets) > 1 )", "def _onoptions(self):\n\n dlg = OptionsDialog(self)\n\n if dlg.exec_():\n self._storeoptions(dlg)", "def update_all(self, event=None):\n key = str(self.comboBox.currentText())\n self.update_combobox()\n if key:\n idx = self.comboBox.findText(key)\n if idx == -1:\n idx = 0\n else:\n idx = 0\n self.comboBox.setCurrentIndex(idx)\n self.update_list()", "def create_menus( self ):", "def __showMenuUpdate(self):\n self.__showMenu.clear()\n self.__showMenuActions = {}\n\n action = QtWidgets.QAction('All Shows', self.__showMenu)\n self.__showMenu.addAction(action)\n self.__showMenuActions['All Shows'] = action\n\n action = QtWidgets.QAction('Clear', self.__showMenu)\n self.__showMenu.addAction(action)\n self.__showMenuActions['Clear'] = action\n self.__showMenu.addSeparator()\n\n try:\n shows = sorted([show.name() for show in opencue.api.getActiveShows()])\n except opencue.exception.CueException as e:\n logger.critical(e)\n shows = []\n\n monitored = [show.name() for show in self.__monitorCue.getShows()]\n\n for show in shows:\n action = QtWidgets.QAction(show, self.__showMenu)\n action.setCheckable(True)\n if show in monitored:\n action.setChecked(True)\n self.__showMenu.addAction(action)\n self.__showMenuActions[show] = action", "def reset(self):\n self.clear()\n self.addItem(self.default_entry)\n self.default_present = True", "def datas(self, newDatas):\n self.__datas = newDatas\n\n self.dropDown.clear()\n self.dropDown.addItems(self.__datas)\n self.update()", "def SetFilters(self):\n \n try:\n \n for Element in self.OptionMenuList:\n \n Element.destroy()\n \n except:\n \n pass\n \n self.OptionMenuList = []\n self.Variables = []\n self.ImagingTypeList = []\n \n self.DefaultList = ['',\n 'All Types',\n 'All Wavelength',\n 'All Powers',\n 'All Gratings',\n 'All Objectifs',\n 'All Durations',\n 'All N. Acquisis.',\n 'All Sample IDs',\n 'All Samples',\n 'All Substrates',\n 'All Sam. Info',\n 'All Sub. 
Info']\n \n if not self.Condensensed == None:\n \n for i in range(1,len(self.Condensensed)):\n \n #create the variable for this drop down\n self.Variables.append(StringVar())\n \n #create the two lists\n self.ImagingTypeList.append([self.DefaultList[i]])\n \n for j in range(0, len(self.Condensensed[i])):\n \n self.ImagingTypeList[-1].append(self.Condensensed[i][j][0])\n \n #Create the two elements\n self.OptionMenuList.append(ttk.OptionMenu(self.FilterFrame,\n self.Variables[-1],\n self.ImagingTypeList[-1][0],\n *self.ImagingTypeList[-1],\n command = self.Filter))\n\n #set it\n self.OptionMenuList[-1].grid(column = (i-1)%6, row = (i-1)/6, sticky = 'ew')\n\n\n for i in range(6):\n\n self.FilterFrame.grid_columnconfigure(i, weight = 1)", "def menuItem(*args):\n\toptionsWindow()", "def Adjust_Menu( self, menuoptions = 0):\r\n pass\r\n #base_tree = 6\r\n #profile_tree = 7\r\n #if( menuoptions == 0 ):\r\n # self.treeview_menu.entryconfig( base_tree , state=\"active\" )\r\n # self.treeview_menu.entryconfig( profile_tree , state=\"disabled\" )\r\n # self.menu.entryconfig( 4 , state=\"active\" )\r\n # self.menu.entryconfig( 5 , state=\"disabled\" )\r\n #elif(menuoptions == 1):\r\n # self.treeview_menu.entryconfig(base_tree ,state=\"disabled\")\r\n # self.treeview_menu.entryconfig(profile_tree ,state=\"active\")\r\n # self.menu.entryconfig(4 ,state=\"disabled\")\r\n # self.menu.entryconfig(5 ,state=\"active\")\r", "def openOptions(self, e):\n\n\t\tself.unBind()\n\t\tself.menu_manager.runOptions()\n\t\tself.main_menu_window.root.destroy()" ]
[ "0.6825974", "0.6282686", "0.6281264", "0.60528725", "0.59343994", "0.58950996", "0.58235645", "0.57722116", "0.5739688", "0.5734824", "0.57013357", "0.56410277", "0.56107605", "0.5551239", "0.5550398", "0.55394113", "0.5537057", "0.543177", "0.54266053", "0.54241765", "0.5370043", "0.5331483", "0.5323069", "0.53021634", "0.52956754", "0.5288695", "0.5285261", "0.5284565", "0.527936", "0.5278407" ]
0.73797125
0
Encodings for "Embarked" column
    2 == "S" == Southampton == 644 people
    0 == "C" == Cherbourg == 168 people
    1 == "Q" == Queenstown == 77 people
    3 == "Unknown" == 2 people

177 records missing age values set to the average age
Missing embark_towns are set to "Other"

Encodings for "Class"
    First class == 0
    Second class == 1
    Third class == 2

Encodings for "Sex"
    1 == male
    0 == female
def prepare_titanic_data(df):
    df.embark_town.fillna('Other', inplace=True)

    # Drop deck and embarked_town
    df.drop(columns=['deck', 'embark_town'], inplace=True)

    # Encoding: Objects (Categorical Variables) to Numeric
    # Use sklearn's LabelEncoder
    encoder = LabelEncoder()

    # Set Unknown and encode Embarked column to numbers
    # 2 == "S" == Southampton == 644 people
    # 0 == "C" == Cherbourg == 168 people
    # 1 == "Q" == Queenstown == 77 people
    # 3 == "Unknown" == 2 people
    df.embarked.fillna('Unknown', inplace=True)
    encoder.fit(df.embarked)
    df.embarked = encoder.transform(df.embarked)

    # Encode the Class (first class, second, etc...)
    # First class == 0
    # Second class == 1
    # Third class == 2
    encoder.fit(df["class"])
    df["class_encoded"] = encoder.transform(df["class"])

    # Encode gender
    # male == 1 == 577 records
    # female == 0 == 314 records
    encoder.fit(df.sex)
    df["sex_encoded"] = encoder.transform(df.sex)

    # Handle the 177 records with missing age values
    average_age = df.age.mean()
    df.age.fillna(average_age, inplace=True)

    scaler = MinMaxScaler()
    scaler.fit(df[['fare']])
    df["fare_scaled"] = scaler.transform(df[['fare']])

    scaler = MinMaxScaler()
    scaler.fit(df[['age']])
    df["age_scaled"] = scaler.transform(df[['age']])

    # Set the index to the passenger id
    df = df.set_index("passenger_id")

    return df
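Below is a minimal, self-contained sketch of how the function above might be called. The small frame is a hypothetical stand-in for the real Titanic source and only carries the columns the function touches (passenger_id, deck, embark_town, embarked, class, sex, age, fare); the LabelEncoder and MinMaxScaler imports match the calls made inside prepare_titanic_data.

# Hypothetical stand-in frame; the values are illustrative, not the real Titanic data.
import pandas as pd
from sklearn.preprocessing import LabelEncoder, MinMaxScaler

raw = pd.DataFrame({
    "passenger_id": [1, 2, 3, 4],
    "deck": ["C", None, "E", None],
    "embark_town": ["Southampton", "Cherbourg", None, "Queenstown"],
    "embarked": ["S", "C", None, "Q"],
    "class": ["First", "Third", "Second", "Third"],
    "sex": ["male", "female", "female", "male"],
    "age": [22.0, 38.0, None, 35.0],
    "fare": [7.25, 71.28, 8.05, 53.10],
})

prepared = prepare_titanic_data(raw)
# Missing embarked becomes "Unknown" (3), class/sex gain *_encoded columns,
# age/fare gain min-max scaled copies, and passenger_id becomes the index.
print(prepared[["embarked", "class_encoded", "sex_encoded", "age_scaled", "fare_scaled"]])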
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pre_process_data(df):\n # setting `passengerID` as Index since it wont be necessary for the analysis\n df = df.set_index(\"PassengerId\")\n\n # convert 'Sex' values\n df['gender'] = df['Sex'].map({'female': 0, 'male': 1}).astype(int)\n\n # We see that 2 passengers embarked data is missing, we fill those in as the most common Embarked value\n df.loc[df.Embarked.isnull(), 'Embarked'] = df['Embarked'].mode()[0]\n\n # Replace missing age values with median ages by gender\n for gender in df['gender'].unique():\n median_age = df[(df['gender'] == gender)].Age.median()\n df.loc[(df['Age'].isnull()) & (df['gender'] == gender), 'Age'] = median_age\n\n # convert 'gender' values to new columns\n df = pd.get_dummies(df, columns=['gender'])\n\n # convert 'Embarked' values to new columns\n df = pd.get_dummies(df, columns=['Embarked'])\n\n # bin Fare into five intervals with equal amount of values\n df['Fare-bin'] = pd.qcut(df['Fare'], 5, labels=[1, 2, 3, 4, 5]).astype(int)\n\n # bin Age into seven intervals with equal amount of values\n # ('baby','child','teenager','young','mid-age','over-50','senior')\n bins = [0, 4, 12, 18, 30, 50, 65, 100]\n age_index = (1, 2, 3, 4, 5, 6, 7)\n df['Age-bin'] = pd.cut(df['Age'], bins, labels=age_index).astype(int)\n\n # create a new column 'family' as a sum of 'SibSp' and 'Parch'\n df['family'] = df['SibSp'] + df['Parch'] + 1\n df['family'] = df['family'].map(lambda x: 4 if x > 4 else x)\n\n # create a new column 'FTicket' as the first character of the 'Ticket'\n df['FTicket'] = df['Ticket'].map(lambda x: x[0])\n # combine smaller categories into one\n df['FTicket'] = df['FTicket'].replace(['W', 'F', 'L', '5', '6', '7', '8', '9'], '4')\n # convert 'FTicket' values to new columns\n df = pd.get_dummies(df, columns=['FTicket'])\n\n # get titles from the name\n df['title'] = df.apply(lambda row: re.split('[,.]+', row['Name'])[1], axis=1)\n\n # convert titles to values\n df['title'] = df['title'].map({' Capt': 'Other', ' Master': 'Master', ' Mr': 'Mr', ' Don': 'Other',\n ' Dona': 'Other', ' Lady': 'Other', ' Col': 'Other', ' Miss': 'Miss',\n ' the Countess': 'Other', ' Dr': 'Other', ' Jonkheer': 'Other', ' Mlle': 'Other',\n ' Sir': 'Other', ' Rev': 'Other', ' Ms': 'Other', ' Mme': 'Other', ' Major': 'Other',\n ' Mrs': 'Mrs'})\n # convert 'title' values to new columns\n df = pd.get_dummies(df, columns=['title'])\n\n df = df.drop(['Name', 'Ticket', 'Cabin', 'Sex', 'Fare', 'Age'], axis=1)\n\n return df", "def encode_features(item):\n item['is_male'] = int(item['Sex'] == 'male')\n del item['Name']\n del item['Sex']\n # del item['Fare']\n del item['Cabin']\n del item['Ticket']\n\n # One-hot encoding: Embarked\n item['embarked_s'] = int(item['Embarked'] == 'S')\n item['embarked_c'] = int(item['Embarked'] == 'C')\n item['embarked_q'] = int(item['Embarked'] == 'Q')\n del item['Embarked']\n\n # One-hot encoding: Title\n item['title_mr'] = int(item['Title'] == 'Mr')\n item['title_miss'] = int(item['Title'] == 'Miss')\n item['title_mrs'] = int(item['Title'] == 'Mrs')\n item['title_master'] = int(item['Title'] == 'Master')\n item['title_other'] = 1 - (item['title_mr'] +\n item['title_miss'] +\n item['title_mrs'] +\n item['title_master'])\n del item['Title']\n return item", "def clean_data(df):\n \n # Put in code here to execute all main cleaning steps:\n # convert missing value codes into NaNs, ...\n count_miss = df.isnull().sum(axis=0).values #find number of nans for each column\n count_miss = [val for val in count_miss]\n \n drop_cols = []\n\n for ind, val in 
enumerate(count_miss):\n if val > 200000:\n drop_cols.append(ind)\n \n df_drop_cols = list(azdias.columns[drop_cols])\n df = df.drop(df_drop_cols, axis=1)\n \n for col in range(df.shape[1]): #loop through columns\n column_name = df.columns[col] #get column name\n missing_list = feat_info.iloc[col,3] #get missing_or_unknown column from feature info\n missing_list = missing_list.replace('[','') #remove left bracket from string\n missing_list = missing_list.replace(']','') #remove right bracket from string\n missing_list = missing_list.split(',') #split into individual strings\n \n #find data that is natually missing and continue loop to omit\n if missing_list == ['']:\n continue\n \n else:\n for dat_type in missing_list: \n if df[column_name].dtype == 'object': #find values that contain x\n df.loc[df[column_name] == dat_type, column_name] = np.nan #replace x with nan\n \n else:\n dat_type = int(dat_type) #if no x, convert to integer and replace with nan\n df.loc[df[column_name] == dat_type, column_name] = np.nan\n \n # select, re-encode, and engineer column values.\n \n # encode OST_WEST_KZ\n df.loc[df['OST_WEST_KZ'] == 'W','OST_WEST_KZ'] = 0\n df.loc[df['OST_WEST_KZ'] == 'O','OST_WEST_KZ'] = 1\n \n # Re-encode categorical variable(s) to be kept in the analysis.\n \n \n #get list of attributes with type categorical\n feat_info[feat_info['type'] == 'categorical']\n \n cat_new_cols = [] #initialize\n for i in feat_info[feat_info['type'] == 'categorical']['attribute']:\n cat_new_cols.append(i)\n \n for cols in df.columns:\n if cols in cat_new_cols:\n if df[cols].nunique(dropna=True) > 2: #if the number of unique values is greater than 2 \n df = df.drop(cols, axis=1) #drop from the analysis\n print(\"more than 2 categories: {}\".format(cols))\n \n else:\n if not df[cols].unique()[0] > 0:\n #if not df[cols].unique()[0] > 0:\n dummies = pd.get_dummies(df[cols], prefix=cols)\n df = df.drop(cols, axis=1) #create dummy variable\n df = df.join(dummies)\n print(\"transformed to dummy variable: {}\".format(cols))\n \n # create variable: MOVEMENT\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([1,3,5,8,10,12,14]),'MOVEMENT'] = 1\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([2,4,6,7,9,11,13,15]),'MOVEMENT'] = 2\n \n #Capture Decade\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([1,2]), 'DECADE'] = 40\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([3,4]), 'DECADE'] = 50\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([5,6,7]), 'DECADE'] = 60\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([8,9]), 'DECADE'] = 70\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([10,11,12,13]), 'DECADE'] = 80\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([14,15]), 'DECADE'] = 90\n \n df['CAMEO_INTL_2015'] = df['CAMEO_INTL_2015'].astype(float)\n\n # create new variable: WEALTH\n df.loc[df['CAMEO_INTL_2015'].isin([51,52,53,54,55]), 'WEALTH'] = 1\n df.loc[df['CAMEO_INTL_2015'].isin([41,42,43,44,45]), 'WEALTH'] = 2\n df.loc[df['CAMEO_INTL_2015'].isin([31,32,33,34,35]), 'WEALTH'] = 3\n df.loc[df['CAMEO_INTL_2015'].isin([21,22,23,24,25]), 'WEALTH'] = 4\n df.loc[df['CAMEO_INTL_2015'].isin([11,12,13,14,15]), 'WEALTH'] = 5\n \n # create new variable: LIFE_STAGE\n df.loc[df['CAMEO_INTL_2015'].isin([11,21,31,41,51]),'LIFE_STAGE'] = 1\n df.loc[df['CAMEO_INTL_2015'].isin([12,22,32,42,52]),'LIFE_STAGE'] = 2\n df.loc[df['CAMEO_INTL_2015'].isin([13,23,33,43,53]),'LIFE_STAGE'] = 3\n df.loc[df['CAMEO_INTL_2015'].isin([14,24,34,44,54]),'LIFE_STAGE'] = 4\n df.loc[df['CAMEO_INTL_2015'].isin([15,25,35,45,55]),'LIFE_STAGE'] = 5\n \n # remove selected columns and rows, ...\n df = 
df.drop('PRAEGENDE_JUGENDJAHRE', axis=1)\n df = df.drop('CAMEO_INTL_2015',axis=1)\n \n # Return the cleaned dataframe.\n return df", "def init():\n\n # Reading the data from the CSV file using the latin1 encoding.\n data_read = pd.read_csv(\"gender-classifier-DFE-791531.csv\", encoding='latin1') # Dataset Size = 20050\n\n # If all the attribute values are empty for any of the rows, we drop them.\n data = data_read.dropna(how='all') # After dropping, data set size is still 20050\n\n # Checking the names of the columns/attributes which contains at least one null value\n columns_containing_missing_values = data.columns[data.isnull().any()].tolist()\n print(\"Column names which has missing values\")\n print(columns_containing_missing_values)\n\n # Since 'gender' is our target variable, we would like to have values for it.\n # So, dropping all the rows which have no values for the 'gender' attribute.\n data = data[data['gender'].notnull()] # After dropping, dataset size = 19953 rows\n # Also, dropping all the rows which have values as 'unknown' for the 'gender' attribute\n data = data[data['gender'] != 'unknown'] # After dropping, dataset size = 18836 rows\n\n male_profile_count = len(data[data['gender'] == 'male'])\n print(\"Male Profile Count \" + str(male_profile_count))\n female_profile_count = len(data[data['gender'] == 'female'])\n print(\"Female Profile Count \" + str(female_profile_count))\n brand_profile_count = len(data[data['gender'] == 'brand'])\n print(\"Brand Profile Count \" + str(brand_profile_count))\n\n return data", "def fix_fields(self):\n males = self.df[\"Sex\"] == \"M\"\n self.df[\"Sex\"] = np.array(males, dtype=int)\n\n logger.debug(\"Fixing bounded values...\")\n self.fix_bounded_values()\n logger.debug(\"Fixing range values...\")\n self.fix_range_fields()\n logger.debug(\"Fixing keyworded fields...\")\n self.fix_keyword_fields()\n logger.debug(\"Fixing temperature fields...\")\n self.fix_temperature_fields()\n logger.debug(\"Fixing nationality fields...\")\n self.fix_nationality_field()\n logger.debug(\"Fixing percentage fields...\")\n self.fix_percentage_fields()\n logger.debug(\"Combining fields...\")\n self.combine_fields()", "def cleanCsv(): \n\n count_neutral = 0\n count_sad = 0\n count_angry = 0\n count_happy = 0\n\n count_session_neutral = 0 \n\n for column_values in raw_data:\n\n if significant_data.fieldnames is None:\n dh = dict((h, h) for h in raw_data.fieldnames)\n significant_data.fieldnames = raw_data.fieldnames\n significant_data.writerow(dh)\n\n if column_values['AOI[Sad_Right]Hit'] == '1':\n significant_data.writerow(column_values)\n count_sad = count_sad + 1\n\n if column_values['AOI[Neutral_Left]Hit'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Neutral_Left]Hit_0'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Neutral_Right]Hit'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Angry_Left]Hit'] == '1':\n significant_data.writerow(column_values)\n count_angry = count_angry + 1\n\n if column_values['AOI[Neutral_Right]Hit_0'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Happy_Right]Hit'] == '1':\n significant_data.writerow(column_values)\n count_happy = count_happy + 1\n\n if column_values['AOI[Neutral_Left]Hit_1'] == '1':\n significant_data.writerow(column_values)\n count_neutral = 
count_neutral + 1\n\n if column_values['AOI[Happy_Left]Hit'] == '1':\n significant_data.writerow(column_values)\n count_happy = count_happy + 1\n\n if column_values['AOI[Neutral_Right]Hit_1'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Sad_Left]Hit'] == '1':\n significant_data.writerow(column_values)\n count_sad = count_sad + 1\n\n if column_values['AOI[Neutral_Right]Hit_2'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Angry_Right]Hit'] == '1':\n significant_data.writerow(column_values)\n count_angry = count_angry + 1\n\n if column_values['AOI[Neutral_Left]Hit_2'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n return {\n 'count_neutral': count_neutral,\n 'count_sad': count_sad,\n 'count_angry': count_angry,\n 'count_happy': count_happy,\n }", "def test_ending(ending):\n sheet = get_sheet()\n rows = get_range(sheet)\n feminine = []\n masculine = []\n unknown = []\n total_count = 0\n\n if not rows:\n return 'No rows'\n for n, row in enumerate(rows):\n if row.GA.endswith(ending) and \\\n 'Noun' in row.PoS:\n total_count += 1\n if 'nf' in row.Gender:\n feminine.append(row.GA)\n if 'nm' in row.Gender:\n masculine.append(row.GA)\n elif 'nf' not in row.Gender:\n unknown.append(row.GA)\n print(f'Sample: {total_count}')\n if not total_count:\n return\n print('Fem: %.2f (%d)' % (len(feminine)/total_count, len(feminine)))\n if len(feminine) < 13:\n print(feminine)\n print('Mas: %.2f (%d)' % (len(masculine)/total_count, len(masculine)))\n if len(masculine) < 13:\n print(masculine)\n if unknown:\n print('Unk: %.2f - %r' % (len(unknown)/total_count, unknown))", "def get_encoded_faces_deaths():\n encoded = {}\n # c=0\n with open(\"unwanted_death.pkl\",\"rb\") as f:\n encoded=pickle.load(f)\n return encoded", "def compressing_ethnicity(data):\n\n data.ethnicity = data.ethnicity.apply(lambda x: 'WHITE'\n if (\"WHITE\" in x) else x)\n\n data.ethnicity = data.ethnicity.apply(lambda x: \"ASIAN\"\n if (\"ASIAN\" in x) else x)\n\n data.ethnicity = data.ethnicity.apply(lambda x: \"HISPANIC/LATINO\"\n if (\"LATINA\" in x) |\n (\"HISPANIC\" in x)\n else x)\n\n data.ethnicity = data.ethnicity.apply(lambda x: \"OTHER/UNKNOWN\"\n if (x == \"AMERICAN INDIAN/ALASKA NATIVE FEDERALLY RECOGNIZED TRIBE\") |\n (x == \"SOUTH AMERICAN\") |\n (x == \"CARIBBEAN ISLAND\") |\n (x == \"NATIVE HAWAIIAN OR OTHER PACIFIC ISLANDER\") |\n (x == \"AMERICAN INDIAN/ALASKA NATIVE\") |\n (x == \"MIDDLE EASTERN\") |\n (x == \"PORTUGUESE\") |\n (x == \"MULTI RACE ETHNICITY\") |\n (x == \"PATIENT DECLINED TO ANSWER\") |\n (x == \"OTHER\") |\n (\"UNKNOWN\" in x) |\n (\"OBTAIN\" in x)\n else x)\n\n data.ethnicity = data.ethnicity.apply(lambda x: \"BLACK_AFRICAN/OTHER\"\n if (\"BLACK\" in x) else x)\n\n return data", "def convert_semeval_2007_4(self):\n e1_tag = [\"<e1>\", \"</e1>\"]\n e2_tag = [\"<e2>\", \"</e2>\"]\n global mismatch\n mismatch = 0\n\n def extract_samples(all_lines, split):\n samples = pd.DataFrame(columns=self.scheme_columns)\n # each sample has three lines of information\n for idx, val in enumerate(all_lines):\n tmp = val.split(\" \", 1)\n if tmp[0].isalnum():\n original_id = copy.deepcopy(tmp[0])\n try:\n context = copy.deepcopy(tmp[1].replace(\"\\\"\", \"\"))\n\n # extracting elements of causal relation\n span1 = self._get_between_text(e1_tag[0], e1_tag[1], context)\n span2 = self._get_between_text(e2_tag[0], e2_tag[1], context)\n\n tmp = 
all_lines[idx + 1].split(\",\")\n if not (\"true\" in tmp[3] or \"false\" in tmp[3]):\n tmp_label = tmp[2].replace(\" \", \"\").replace(\"\\\"\", \"\").split(\"=\")\n relation_type = tmp[1]\n else:\n tmp_label = tmp[3].replace(\" \", \"\").replace(\"\\\"\", \"\").split(\"=\")\n relation_type = tmp[2]\n\n # finding label\n if \"Cause-Effect\" in relation_type and tmp_label[1] == \"true\":\n label = 1\n else:\n label = 0\n\n # finding direction\n # if 0: e1 => e2, if 1: e2 => e1\n if \"e2\" in tmp_label[0]:\n direction = 0\n elif \"e1\" in tmp_label[0]:\n direction = 1\n\n span1_start = context.find(e1_tag[0])\n span1_end = context.find(e1_tag[1]) - len(e1_tag[0])\n span2_start = context.find(e2_tag[0]) - (len(e1_tag[0]) + len(e1_tag[1]))\n span2_end = context.find(e2_tag[1]) - (len(e1_tag[0]) + len(e1_tag[1]) + len(e2_tag[0]))\n\n idx_val = {\"span1\": [[span1_start, span1_end]], \"span2\": [[span2_start, span2_end]],\n \"signal\": []}\n\n # replacing tags with standard tags\n context = context.replace(e1_tag[0], \"\").replace(e1_tag[1], \"\").replace(e2_tag[0], \"\").replace(\n e2_tag[1], \"\")\n\n new_row = {\"original_id\": int(original_id), \"span1\": [span1], \"span2\": [span2], \"signal\": [],\n \"context\": context.strip('\\n'),\n \"idx\": idx_val, \"label\": label, \"direction\": direction,\n \"source\": self.namexid[\"semeval_2007_4\"],\n \"ann_file\": \"\",\n \"split\": split}\n\n # span1_end < span2_start is to make sure e1 always appears first\n # in context and direction is correct\n if self.check_span_indexes(new_row) and span1_end < span2_start:\n samples = samples.append(new_row, ignore_index=True)\n else:\n mismatch += 1\n\n except Exception as e:\n print(\"[crest-log] semeval07-task4. Detail: {}\".format(e))\n return samples\n\n data = pd.DataFrame(columns=self.scheme_columns)\n\n relation_ids = [1, 2, 3, 4, 5, 6, 7]\n\n for relation_id in relation_ids:\n # reading files\n with open(\n self.dir_path + 'semeval_2007_4/task-4-training/relation-{}-train.txt'.format(str(relation_id)),\n mode='r',\n encoding='cp1252') as train:\n train_content = train.readlines()\n\n # this is the test set\n with open(self.dir_path + 'semeval_2007_4/task-4-scoring/relation-{}-score.txt'.format(str(relation_id)),\n mode='r',\n encoding='cp1252') as key:\n test_content = key.readlines()\n\n data = data.append(extract_samples(train_content, 0))\n data = data.append(extract_samples(test_content, 2))\n\n logging.info(\"[crest] semeval_2007_4 is converted.\")\n\n # adding global id to the data frame\n global_ids = [i for i in range(1, len(data) + 1)]\n data.insert(0, 'global_id', global_ids)\n data.reset_index()\n\n return data, mismatch", "def convert_semeval_2010_8(self):\n e1_tag = [\"<e1>\", \"</e1>\"]\n e2_tag = [\"<e2>\", \"</e2>\"]\n global mismatch\n mismatch = 0\n\n def extract_samples(all_lines, split):\n samples = pd.DataFrame(columns=self.scheme_columns)\n # each sample has three lines of information\n for idx, val in enumerate(all_lines):\n tmp = val.split(\"\\t\")\n if tmp[0].isalnum():\n original_id = copy.deepcopy(tmp[0])\n try:\n context = copy.deepcopy(tmp[1].replace(\"\\\"\", \"\"))\n\n # extracting elements of causal relation\n span1 = self._get_between_text(e1_tag[0], e1_tag[1], context)\n span2 = self._get_between_text(e2_tag[0], e2_tag[1], context)\n\n # finding label\n if \"Cause-Effect\" in all_lines[idx + 1]:\n label = 1\n else:\n label = 0\n\n # finding direction\n if \"e1,e2\" in all_lines[idx + 1]:\n direction = 0\n elif \"e2,e1\" in all_lines[idx + 1]:\n direction = 1\n 
else:\n direction = -1\n\n span1_start = context.find(e1_tag[0])\n span1_end = context.find(e1_tag[1]) - len(e1_tag[0])\n span2_start = context.find(e2_tag[0]) - (len(e1_tag[0]) + len(e1_tag[1]))\n span2_end = context.find(e2_tag[1]) - (len(e1_tag[0]) + len(e1_tag[1]) + len(e2_tag[0]))\n\n idx_val = {\"span1\": [[span1_start, span1_end]], \"span2\": [[span2_start, span2_end]],\n \"signal\": []}\n\n # replacing tags with standard tags\n context = context.replace(e1_tag[0], \"\").replace(e1_tag[1], \"\").replace(e2_tag[0],\n \"\").replace(\n e2_tag[1], \"\")\n\n new_row = {\"original_id\": int(original_id), \"span1\": [span1], \"span2\": [span2],\n \"signal\": [],\n \"context\": context.strip('\\n'), \"idx\": idx_val, \"label\": label,\n \"direction\": direction,\n \"source\": self.namexid[\"semeval_2010_8\"], \"ann_file\": \"\", \"split\": split}\n\n if self.check_span_indexes(new_row) and span1_end < span2_start:\n samples = samples.append(new_row, ignore_index=True)\n else:\n mismatch += 1\n\n except Exception as e:\n print(\"[crest-log] Incorrect formatting for semeval10-task8 record. Detail: \" + str(e))\n return samples\n\n # reading files\n with open(self.dir_path + 'semeval_2010_8/SemEval2010_task8_training/TRAIN_FILE.txt', mode='r',\n encoding='cp1252') as train:\n train_content = train.readlines()\n\n with open(self.dir_path + 'semeval_2010_8/SemEval2010_task8_testing_keys/TEST_FILE_FULL.txt',\n mode='r', encoding='cp1252') as key:\n test_content = key.readlines()\n\n data = pd.DataFrame(columns=self.scheme_columns)\n\n data = data.append(extract_samples(train_content, 0))\n data = data.append(extract_samples(test_content, 2))\n\n logging.info(\"[crest] semeval_2010_8 is converted.\")\n\n # adding global id to the data frame\n global_ids = [i for i in range(1, len(data) + 1)]\n data.insert(0, 'global_id', global_ids)\n data.reset_index()\n\n return data, mismatch", "def ex_eight_animals_data_table():\n data_dict = {'Calf': [4, 5, 6, 7, 8],\n 'Sire': [1, 3, 1, 4, 3],\n 'Dam': ['Unknown', 2, 2, 5, 6],\n 'Sex': ['Male', 'Female', 'Female', 'Male', 'Male'],\n 'WWG': [4.5, 2.9, 3.9, 3.5, 5.0]}\n\n df = pd.DataFrame(data_dict)\n\n return(df)", "def classify_face(im):\n faces_death = get_encoded_faces_deaths()\n faces_arrested = get_encoded_faces_arrested()\n faces_wanted = get_encoded_faces_wanted()\n\n faces_encoded_death = list(faces_death.values())\n known_face_names_death = list(faces_death.keys())\n\n faces_encoded_arrested = list(faces_arrested.values())\n known_face_names_arrested = list(faces_arrested.keys())\n\n faces_encoded_wanted = list(faces_wanted.values())\n known_face_names_wanted = list(faces_wanted.keys())\n\n img = cv2.imread(im, 1)\n face_locations = face_recognition.face_locations(img)\n unknown_face_encodings = face_recognition.face_encodings(img,face_locations)\n face_names = []\n find_in_db(im,known_face_names_death,unknown_face_encodings,face_names,faces_encoded_death,\"unnatural_death_images/unnatural_death_images\")\n find_in_db(im,known_face_names_arrested,unknown_face_encodings,face_names,faces_encoded_arrested,\"ArrestPerson_images\")\n find_in_db(im,known_face_names_wanted,unknown_face_encodings,face_names,faces_encoded_wanted,\"wanted\")", "def preprocess(df, edit = False):\n\tdf['binary_income'] = np.where(df['binary_income'].str.contains('-'), 0, 1)\n\ty = df['binary_income']\n\n\tTRAINING_SET_MEAN = y.mean() # For ensembling w/ linear combination.\n\n\tif edit:\n\t\t# If they make over $100 / hr but have no capital gains and losses, assume the wage 
entry is a mistake and replace with median.\n\t\tdf['wage'] = np.where(((df['wage'] > 100) & (df['capital_gains'] == 0) & (df['capital_gains'] == 0)), df['wage'].median(), df['wage'])\n\t\t# Total calculated earnings for the year. \n\t\tdf['ttl_clcltd_ernings'] = 40*df['wage']*df['weeks']\n\n\t\tdf['professional'] = np.where(df['majoroc'].str.contains('Professional|Executive|Sales|Precision',regex = True), 1,0)\n\t\tdf['white'] = np.where(df['mace'].str.contains('White|Asian',regex = True), 1,0)\n\t\tdf['ad_degree'] = np.where(df['education'].str.contains('Bachelors|Doctorate|Masters|Prof',regex = True), 1,0)\n\t\tdf['younger22'] = np.where(df['age']<22, 1,0)\n\t\tdf['older65'] = np.where(df['age']>65, 1,0)\n\t\tdf['occupation2'] = np.where(df['occupation'] == 2, 1,0)\t\t\n\t\t# df['log_cgs'] = np.log(df['capital_gains'])\n\t\tdf['jointu65_tax'] = np.where(df['tax'] == 'Joint both under 65', 1,0)\n\n\treturn df, y, TRAINING_SET_MEAN", "def get_titanic_fea(dataset):\n dataset['Name_length'] = dataset['Name'].apply(len)\n\n # Mapping Sex 不在map定义的 就是NaN\n dataset['Sex'] = dataset['Sex'].map({'female': 0, 'male': 1}).astype(int)\n\n dataset['Has_Cabin'] = dataset['Cabin'].apply(lambda x: 0 if type(x) == float else 1)\n dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1\n\n dataset['IsAlone'] = 0\n dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1\n\n # [Embarked]\n dataset['Embarked'] = dataset['Embarked'].fillna('0')\n dataset['Fare'] = dataset['Fare'].fillna(0)\n # Mapping Embarked\n dataset['Embarked'] = dataset['Embarked'].map({'0': 0, 'S': 1, 'C': 2, 'Q': 3}).astype(int)\n\n # [Fare]\n dataset['CategoricalFare'] = pd.qcut(dataset['Fare'], 4)\n # Mapping Fare\n dataset.loc[dataset['Fare'] <= 7.91, 'Fare'] = 0\n dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1\n dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2\n dataset.loc[dataset['Fare'] > 31, 'Fare'] = 3\n dataset['Fare'] = dataset['Fare'].astype(int)\n\n # [Age]\n age_avg = dataset['Age'].mean()\n age_std = dataset['Age'].std()\n age_null_count = dataset['Age'].isnull().sum()\n age_null_random_list = np.random.randint(age_avg - age_std, age_avg + age_std, size=age_null_count)\n dataset['Age'][np.isnan(dataset['Age'])] = age_null_random_list\n dataset['Age'] = dataset['Age'].astype(int)\n dataset['CategoricalAge'] = pd.cut(dataset['Age'], 5)\n # Mapping Age\n dataset.loc[dataset['Age'] <= 16, 'Age'] = 0\n dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1\n dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2\n dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3\n dataset.loc[dataset['Age'] > 64, 'Age'] = 4\n\n # [Name]\n # 称谓 Mr 、Miss 等\n def get_title(name):\n title_search = re.search(' ([A-Za-z]+)\\.', name)\n # If the title exists, extract and return it.\n if title_search:\n return title_search.group(1)\n return \"\"\n dataset['Title'] = dataset['Name'].apply(get_title)\n\n # 只保留4类Title\n dataset['Title'] = dataset['Title'].replace(\n ['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')\n dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')\n dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')\n dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')\n # Mapping titles\n title_mapping = {\"Mr\": 1, \"Miss\": 2, \"Mrs\": 3, \"Master\": 4, \"Rare\": 5}\n dataset['Title'] = dataset['Title'].map(title_mapping)\n dataset['Title'] = 
dataset['Title'].fillna(0)\n\n # Feature selection\n drop_elements = ['PassengerId', 'Name', 'Ticket', 'Cabin', 'SibSp']\n dataset = dataset.drop(drop_elements, axis=1)\n dataset = dataset.drop(['CategoricalAge', 'CategoricalFare'], axis=1)\n\n return dataset", "def _create_naics_map():\n # Read in list of industry topics.\n naics_codes = pd.read_excel(\n \"https://www.census.gov/eos/www/naics/2017NAICS/2-6%20digit_2017_Codes.xlsx\"\n )\n naics_codes = naics_codes.iloc[:, [1, 2]]\n naics_codes.columns = ['NAICSCode', 'Title']\n\n # Replace all ranges with individual rows. E.g. 31-33 -> 31, 32, 33.\n def range_to_array(read_code):\n if isinstance(read_code, str) and \"-\" in read_code:\n lower, upper = read_code.split(\"-\")\n return list(range(int(lower), int(upper) + 1))\n return read_code\n\n naics_codes = naics_codes.dropna()\n naics_codes['NAICSCode'] = naics_codes['NAICSCode'].apply(range_to_array)\n naics_codes = naics_codes.explode('NAICSCode')\n\n # Add unclassified code which is used in some statistical variables.\n naics_codes = naics_codes.append(\n {\n \"NAICSCode\": 99,\n \"Title\": \"Nonclassifiable\"\n }, ignore_index=True)\n\n # Query for only two digit codes.\n short_codes = naics_codes[naics_codes['NAICSCode'] < 100]\n short_codes = short_codes.set_index(\"NAICSCode\")\n short_codes = short_codes['Title'].to_dict()\n\n # Read in overview codes.\n overview_codes = pd.read_csv(\n \"https://data.bls.gov/cew/doc/titles/industry/high_level_industries.csv\"\n )\n overview_codes.columns = [\"NAICSCode\", \"Title\"]\n overview_codes = overview_codes.set_index(\"NAICSCode\")\n overview_codes = overview_codes['Title'].to_dict()\n\n # Combine the two sources of codes.\n NAICS_MAP = {}\n combined_codes = short_codes\n combined_codes.update(overview_codes)\n\n # Rename industries into Pascal case.\n for code, orig_name in combined_codes.items():\n NAICS_MAP[str(code)] = standard_name_remapper(orig_name)\n\n # Other edge cases.\n NAICS_MAP['00'] = 'Unclassified'\n return NAICS_MAP", "def _encode(self, dataset):\n if self._look_up is None: # if we are encoding training set\n self._look_up = dict() # initialize look-up table as empty\n for col in dataset:\n if not is_numeric_dtype(dataset[col]): # for each column that is not numeric\n for val, label in enumerate(dataset[col].unique()): # attach a encode value for each of its label\n self._look_up[label] = val # add that value to the lookup table\n # Problem: Try other method of pandas for this task\n\n dataset.replace(self._look_up, inplace=True)", "def test_incorrect_data_type():\n \n test_object = fa.read_in_envision(data_csv=list_A, platemap_csv=plate_map_file, data_type='typo', size=384)", "def augment_data(self):\n for char in self.hebrew.letter_li:\n char_path = self.training_folder / char\n img = cv.imread(\n str((self.training_folder / char / f\"{char}_original.jpeg\").resolve())\n ) # read font character\n h, w, _ = img.shape # image height and width\n\n for rep in range(self.repetitions):\n res = elastic_morphing(img, self.amp, self.sigma, h, w) # morph image\n cv.imwrite(\n str(char_path / f\"{char}{rep}.jpeg\"), res\n ) # write result to disk", "def map_diagnosis(df):\r\n\r\n diagnosis_cols = ['diag_1', 'diag_2', 'diag_3']\r\n\r\n for col in diagnosis_cols:\r\n df['tmp'] = np.nan\r\n df.loc[(df[col].str.contains(\"250\")), col] = '250'\r\n df.loc[(df[col].str.startswith('V')) | (df[col].str.startswith('E')), col] = '-999' \r\n\r\n df[col] = df[col].astype(float)\r\n \r\n #convert the correct ranges based on values given in 
paper\r\n df.loc[(((df[col] >=390) & (df[col]<=460)) | (df[col] == 785)), 'tmp'] = 'Circulatory'\r\n df.loc[(((df[col] >=460) & (df[col]<=519)) | (df[col] == 786)), 'tmp'] = 'Respiratory'\r\n df.loc[(((df[col] >=520) & (df[col]<=579)) | (df[col] == 787)), 'tmp'] = 'Digestive'\r\n df.loc[(((df[col] >=580) & (df[col]<=629)) | (df[col] == 788)), 'tmp'] = 'Genitourinary'\r\n df.loc[((df[col] >=800) & (df[col]<=999)), 'tmp'] = 'Injury'\r\n df.loc[((df[col] >=710) & (df[col]<=739)), 'tmp'] = 'Musculoskeletal'\r\n df.loc[((df[col] >=140) & (df[col]<=239)), 'tmp'] = 'Neoplasms'\r\n df.loc[(df[col] == 250), 'tmp'] = 'Diabetes'\r\n \r\n df['tmp'].fillna(value = \"Other\", inplace=True)\r\n \r\n df[col] = df['tmp']\r\n df.drop(columns=['tmp'], inplace=True)\r\n\r\n return df", "def get_age_fields():\n under_18_fields = CensusFields.get_under_18_fields()\n\n age_18_to_29_fields = [ \n 'B01001_007E', # Male:!!18 and 19 years\n 'B01001_008E', # Male:!!20 years\n 'B01001_009E', # Male:!!21 years\n 'B01001_010E', # Male:!!22 to 24 years\n 'B01001_011E', # Male:!!25 to 29 years\n 'B01001_031E', # Female:!!18 and 19 years\n 'B01001_032E', # Female:!!20 years\n 'B01001_033E', # Female:!!21 years\n 'B01001_034E', # Female:!!22 to 24 years\n 'B01001_035E', # Female:!!25 to 29 years\n ]\n age_30_to_39_fields = [\n 'B01001_012E', # Male:!!30 to 34 years\n 'B01001_013E', # Male:!!35 to 39 years\n 'B01001_036E', # Female:!!30 to 34 years\n 'B01001_037E', # Female:!!35 to 39 years\n ]\n age_40_to_49_fields = [\n 'B01001_014E', # Male:!!40 to 44 years\n 'B01001_038E', # Female:!!40 to 44 years\n 'B01001_015E', # Male:!!45 to 49 years\n 'B01001_039E', # Female:!!45 to 49 years\n\n ]\n age_50_to_59_fields = [\n 'B01001_016E', # Male:!!50 to 54 years\n 'B01001_017E', # Male:!!55 to 59 years\n 'B01001_040E', # Female:!!50 to 54 years\n 'B01001_041E', # Female:!!55 to 59 years\n\n ]\n age_60_to_69_fields = [\n 'B01001_018E', # Male:!!60 and 61 years\n 'B01001_019E', # Male:!!62 to 64 years\n 'B01001_020E', # Male:!!65 and 66 years\n 'B01001_021E', # Male:!!67 to 69 years\n 'B01001_042E', # Female:!!60 and 61 years\n 'B01001_043E', # Female:!!62 to 64 years\n 'B01001_044E', # Female:!!65 and 66 years\n 'B01001_045E', # Female:!!67 to 69 years\n ]\n age_70_to_79_fields = [\n 'B01001_022E', # Male:!!70 to 74 years\n 'B01001_023E', # Male:!!75 to 79 years\n 'B01001_046E', # Female:!!70 to 74 years\n 'B01001_047E', # Female:!!75 to 79 years\n ]\n age_81_plus_fields = [\n 'B01001_024E', # Male:!!80 to 84 years\n 'B01001_025E', # Male:!!85 years and over\n 'B01001_048E', # Female:!!80 to 84 years\n 'B01001_049E', # Female:!!85 years and over\n ]\n \n age_fields = OrderedDict()\n age_fields[ 'age_18_to_29' ] = { 'label': '18-29', 'fields': age_18_to_29_fields }\n age_fields[ 'age_30_to_39' ] = { 'label': '30s', 'fields': age_30_to_39_fields }\n age_fields[ 'age_40_to_49' ] = { 'label': '40s', 'fields': age_40_to_49_fields }\n age_fields[ 'age_50_to_59' ] = { 'label': '50s', 'fields': age_50_to_59_fields }\n age_fields[ 'age_60_to_69' ] = { 'label': '60s', 'fields': age_60_to_69_fields } \n age_fields[ 'age_70_to_79' ] = { 'label': '70s', 'fields': age_70_to_79_fields }\n age_fields[ 'age_81_plus' ] = { 'label': '80+', 'fields': age_81_plus_fields }\n\n return age_fields", "def field_digitization(): \n tt_file.Sex = tt_file.Sex.map({\"male\":0, \"female\":1})\n tt_file.Embarked = tt_file.Embarked.map({\"C\":0, \"Q\":1, \"S\":2})\n #df[‘column1’].map(lambda x: 10+x), this will add 10 to each element of column1.\n tt_file.Age = 
tt_file.Age.map(lambda x: int(round(float(x) / 10, 0)))\n tt_file.Fare = tt_file.Fare.map(lambda x: int(round(float(x) / 10, 0)))", "def get_edu_fields():\n edu_hs_men_fields = [\n 'B15002_003E', #\tMale:!!No schooling completed\t\n 'B15002_004E', #\tMale:!!Nursery to 4th grade\t\n 'B15002_005E', #\tMale:!!5th and 6th grade\t\n 'B15002_006E', #\tMale:!!7th and 8th grade\t\n 'B15002_007E', #\tMale:!!9th grade\t\n 'B15002_008E', #\tMale:!!10th grade\t\n 'B15002_009E', #\tMale:!!11th grade\t\n 'B15002_010E', #\tMale:!!12th grade, no diploma\t\n 'B15002_011E', #\tMale:!!High school graduate (includes equivalency)\n ]\n edu_hs_women_fields = [\n 'B15002_020E', #\tFemale:!!No schooling completed\t\n 'B15002_021E', #\tFemale:!!Nursery to 4th grade\t\n 'B15002_022E', #\tFemale:!!5th and 6th grade\t\n 'B15002_023E', #\tFemale:!!7th and 8th grade\t\n 'B15002_024E', #\tFemale:!!9th grade\t\n 'B15002_025E', #\tFemale:!!10th grade\t\n 'B15002_026E', #\tFemale:!!11th grade\t\n 'B15002_027E', #\tFemale:!!12th grade, no diploma\t\n 'B15002_028E', #\tFemale:!!High school graduate (includes equivalency)\t\n ]\n edu_some_college_men_fields = [\n 'B15002_012E', #\tMale:!!Some college, less than 1 year\t\n 'B15002_013E', #\tMale:!!Some college, 1 or more years, no degree\t\n 'B15002_014E', #\tMale:!!Associate's degree\t\n ]\n edu_some_college_women_fields = [\n 'B15002_029E', #\tFemale:!!Some college, less than 1 year\t\n 'B15002_030E', #\tFemale:!!Some college, 1 or more years, no degree\t\n 'B15002_031E', #\tFemale:!!Associate's degree\n ]\n edu_college_men_fields = [\n 'B15002_015E', #\tMale:!!Bachelor's degree\n ]\n edu_college_women_fields = [\n 'B15002_032E', #\tFemale:!!Bachelor's degree\n ]\n edu_postgrad_men_fields = [\n 'B15002_016E', #\tMale:!!Master's degree\n 'B15002_018E', #\tMale:!!Doctorate degree\n ]\n edu_postgrad_women_fields = [\n 'B15002_033E', #\tFemale:!!Master's degree\n 'B15002_035E', #\tFemale:!!Doctorate degree\t\n ]\n\n edu_fields = OrderedDict()\n\n return edu_fields", "def filter_gisaid(database, outfile, trim_left=0, trim_right=0,\n max_prop_n=0.05, minlen=29000):\n # lower-case label in place of country identifies non-human samples\n pat = re.compile('^[^/]+/[a-z]')\n pat2 = re.compile(\"^[HhCcOoVv]+-19/[A-Z][^/]+/[^/]+/[0-9-]+\\|[^|]+\\|[0-9]{4}-[0-9]+-[0-9]+\")\n pat3 = re.compile('^-*')\n pat4 = re.compile('-*$')\n\n accessions = {}\n discards = {'nonhuman': [], 'ambiguous': [], 'short': [],\n 'duplicates': [], 'mangled header': []}\n\n for h, s in get_aligned(database):\n if not type(h)==str or not type(s)==str:\n print(\"Error: entry {} not string type: sequence {}\".format(h, s))\n continue\n\n if pat.findall(h):\n discards['nonhuman'].append(h)\n continue\n \n if len(s) < minlen:\n discards['short'].append(h)\n continue\n\n # apply sequence trims\n seq = s[trim_left:(-trim_right)]\n\n # this is conservative - all internal gaps are interpreted as deletions\n gap_prefix = len(pat3.findall(seq)[0])\n gap_suffix = len(pat4.findall(seq)[0])\n seqlen = len(seq) - gap_prefix - gap_suffix\n\n n_ambig = seq.count('?') + seq.count('N') + gap_prefix + gap_suffix\n if n_ambig / float(len(seq)) > max_prop_n:\n discards['ambiguous'].append(h)\n continue\n\n if pat2.search(h) is None:\n discards['mangled header'].append(h)\n continue\n\n desc, accn, coldate = h.split('|')\n if accn in accessions:\n discards['duplicates'].append(h)\n continue\n accessions.update({accn: desc})\n\n # write genome to output file\n _ = outfile.write('>{}\\n{}\\n'.format(h, seq))\n\n return discards", "def 
latin_america_countries():\r\n latin_america_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in latin_america:\r\n latin_america_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in latin_america_data:\r\n if idx != None and idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians", "def test_parse_classic_otu_table_consensus_lineage(self):\r\n data = \"\"\"#Full OTU Counts\r\n#OTU ID\tFing\tKey\tNA\tconsensusLineage\r\n0\t19111\t44536\t42\tBacteria; Actinobacteria; Actinobacteridae; Propionibacterineae; Propionibacterium\r\n1\t1216\t3500\t6\tBacteria; Firmicutes; Alicyclobacillaceae; Bacilli; Lactobacillales; Lactobacillales; Streptococcaceae; Streptococcus\r\n2\t1803\t1184\t2\tBacteria; Actinobacteria; Actinobacteridae; Gordoniaceae; Corynebacteriaceae\r\n3\t1722\t4903\t17\tBacteria; Firmicutes; Alicyclobacillaceae; Bacilli; Staphylococcaceae\r\n4\t589\t2074\t34\tBacteria; Cyanobacteria; Chloroplasts; vectors\"\"\"\r\n data_f = StringIO(data)\r\n obs = parse_classic_otu_table(data_f)\r\n exp = (['Fing', 'Key', 'NA'],\r\n ['0', '1', '2', '3', '4'],\r\n array([[19111, 44536, 42], [1216, 3500, 6], [1803, 1184, 2],\r\n [1722, 4903, 17], [589, 2074, 34]]),\r\n [['Bacteria', 'Actinobacteria', 'Actinobacteridae', 'Propionibacterineae', 'Propionibacterium'],\r\n ['Bacteria',\r\n 'Firmicutes',\r\n 'Alicyclobacillaceae',\r\n 'Bacilli',\r\n 'Lactobacillales',\r\n 'Lactobacillales',\r\n 'Streptococcaceae',\r\n 'Streptococcus'],\r\n ['Bacteria',\r\n 'Actinobacteria',\r\n 'Actinobacteridae',\r\n 'Gordoniaceae',\r\n 'Corynebacteriaceae'],\r\n ['Bacteria',\r\n 'Firmicutes',\r\n 'Alicyclobacillaceae',\r\n 'Bacilli',\r\n 'Staphylococcaceae'],\r\n ['Bacteria', 'Cyanobacteria', 'Chloroplasts', 'vectors']])\r\n self.assertEqual(obs[0], exp[0])\r\n self.assertEqual(obs[1], exp[1])\r\n assert_almost_equal(obs[2], exp[2])\r\n self.assertEqual(obs[3], exp[3])", "def test_checkBigamy(self):\n\n # No Bigamy\n fam: Dict = {'F23':\n {'fam': 'F23', 'MARR': '14 FEB 1980', 'HUSB': 'I01', 'WIFE': 'I07',\n 'CHIL': ['I19', 'I26', 'I30']},\n 'F16': {'fam': 'F16', 'MARR': '12 DEC 2007'}}\n\n indi: Dict = {\n 'I01': {'id': 'I01', 'name': 'Joe /Smith/', 'BIRT': '15 JUL 1960', 'sex': 'M', 'family': 'F23',\n 'DEAT': '31 DEC 2013'},\n 'I07': {'id': 'I07', 'name': 'Jennifer /Smith/', 'BIRT': '23 SEP 1960', 'sex': 'F',\n 'family': 'F23'},\n 'I19': {'id': 'I19', 'name': 'Dick /Smith/', 'BIRT': '13 FEB 1981', 'sex': 'M', 'family': 'F23'},\n 'I26': {'id': 'I26', 'name': 'Jane /Smith/', 'BIRT': '13 FEB 1981', 'sex': 'F', 'family': 'F23'},\n 'I30': {'id': 'I30', 'name': 'Mary /Test/', 'BIRT': '13 FEB 1981', 'sex': 'F', 'family': 'F23'},\n 'I32': {'id': 'I32', 'name': 'Nick /Tary/', 'BIRT': '13 FEB 1981', 'sex': 'M', 'family': 'F23'},\n 'I44': {'id': 'I44', 'name': 'Cersi /Lanister/', 'BIRT': '13 FEB 1981', 'sex': 'F',\n 'family': 'F23'}}\n\n # bigamy (same husband)\n fam2: Dict = {'F23':\n {'fam': 'F23', 'MARR': '14 FEB 1980', 'HUSB': 'I01', 'WIFE': 'I07',\n 'CHIL': ['I19', 'I26', 'I30']},\n 'F16': {'fam': 'F16', 'MARR': '12 DEC 2007', 'HUSB': 'I01'}}\n\n indi2: Dict = {\n 'I01': {'id': 'I01', 'name': 'Joe /Smith/', 'BIRT': '15 JUL 1960', 'sex': 'M', 'family': 'F23',\n 'DEAT': '31 DEC 2013'},\n 'I07': {'id': 'I07', 'name': 'Jennifer /Smith/', 'BIRT': '23 SEP 1960', 'sex': 'F',\n 'family': 'F23'},\n 'I19': {'id': 'I19', 'name': 
'Dick /Smith/', 'BIRT': '13 FEB 1981', 'sex': 'M', 'family': 'F23'},\n 'I26': {'id': 'I26', 'name': 'Jane /Smith/', 'BIRT': '13 FEB 1981', 'sex': 'F', 'family': 'F23'},\n 'I30': {'id': 'I30', 'name': 'Mary /Test/', 'BIRT': '13 FEB 1981', 'sex': 'F', 'family': 'F23'},\n 'I32': {'id': 'I32', 'name': 'Nick /Tary/', 'BIRT': '13 FEB 1981', 'sex': 'M', 'family': 'F23'},\n 'I44': {'id': 'I44', 'name': 'Cersi /Lanister/', 'BIRT': '13 FEB 1981', 'sex': 'F',\n 'family': 'F23'}}\n\n # bigamy (same wife)\n fam3: Dict = {'F23':\n {'fam': 'F23', 'MARR': '14 FEB 1980', 'HUSB': 'I01', 'WIFE': 'I07',\n 'CHIL': ['I19', 'I26', 'I30']},\n 'F16': {'fam': 'F16', 'MARR': '12 DEC 2007', 'WIFE': 'I07'}}\n\n indi3: Dict = {\n 'I01': {'id': 'I01', 'name': 'Joe /Smith/', 'BIRT': '15 JUL 1960', 'sex': 'M', 'family': 'F23',\n 'DEAT': '31 DEC 2013'},\n 'I07': {'id': 'I07', 'name': 'Jennifer /Smith/', 'BIRT': '23 SEP 1960', 'sex': 'F',\n 'family': 'F23'},\n 'I19': {'id': 'I19', 'name': 'Dick /Smith/', 'BIRT': '13 FEB 1981', 'sex': 'M', 'family': 'F23'},\n 'I26': {'id': 'I26', 'name': 'Jane /Smith/', 'BIRT': '13 FEB 1981', 'sex': 'F', 'family': 'F23'},\n 'I30': {'id': 'I30', 'name': 'Mary /Test/', 'BIRT': '13 FEB 1981', 'sex': 'F', 'family': 'F23'},\n 'I32': {'id': 'I32', 'name': 'Nick /Tary/', 'BIRT': '13 FEB 1981', 'sex': 'M', 'family': 'F23'},\n 'I44': {'id': 'I44', 'name': 'Cersi /Lanister/', 'BIRT': '13 FEB 1981', 'sex': 'F',\n 'family': 'F23'}}\n\n us.checkBigamy(fam)\n self.assertTrue(('I01' in indi))\n self.assertTrue(('I01' == fam['F23']['HUSB']))\n us.checkBigamy(fam2)\n self.assertTrue(('I01' in indi2))\n self.assertTrue(('I01' in fam2['F23']['HUSB']))\n us.checkBigamy(fam3)\n self.assertTrue(('I07' in indi3))\n self.assertTrue(('WIFE' in fam3['F23']))", "def encode_augmentation_type(data):\n lookup = {'none': 0, 'inplace': 1, 'full': 2}\n return [lookup[datum['augmentation_type']] for datum in data]", "def clean_data(data):\n \n cols = data.columns\n \n #these columns had some extra characters in the strings becuase of encoding issues\n list_to_strip=[\n 'attributes_alcohol',\n 'attributes_restaurantsattire',\n 'attributes_wifi',\n 'attributes_smoking',\n 'attributes_noiselevel',\n ]\n #this removes quotation marks and u's from strings\n \n for col in list_to_strip:\n data[col]=data[col].str.strip(\"u\\'\")\n \n #this replaces the strings None and none with Nan objects\n for col in cols:\n data[col]=data[col].where(data[col]!='None')\n data[col]=data[col].where(data[col]!='none')\n \n #this creates a list of categorical and numerical features\n categorical_features = cols.drop([\n 'review_count',\n 'restaurant',\n 'latitude',\n 'longitude',\n 'business_id',\n 'meanfunny',\n 'meanuseful',\n 'avgwordcount',\n 'maxwordcount',\n 'minwordcount',\n 'avgfunnywordcount',\n 'maxfunnywordcount',\n 'avgusefulwordcount',\n 'maxusefulwordcount',\n 'medianwordcount',\n 'upperquartilewordcount',\n 'lowerquartilewordcount',\n 'target'])\n \n \n numerical_features = [\n 'review_count',\n 'latitude',\n 'longitude',\n 'meanfunny',\n 'meanuseful',\n 'avgwordcount',\n 'maxwordcount',\n 'minwordcount',\n 'avgfunnywordcount',\n 'maxfunnywordcount',\n 'avgusefulwordcount',\n 'maxusefulwordcount',\n 'medianwordcount',\n 'upperquartilewordcount',\n 'lowerquartilewordcount']\n \n #this replaces the categorial nans with 9 as a placeholder and fills numerical nans with 0\n data[categorical_features]=data[categorical_features].fillna(9)\n data[numerical_features]=data[numerical_features].fillna(0)\n \n #this makes all the categorical 
columns strings\n data[categorical_features]=data[categorical_features].astype(str)\n data = data\n \n return data, numerical_features, categorical_features", "def metadata(filename):\n import numpy as np\n import pandas as pd\n\n infos = \"\"\"IGRAID 1- 11 Character\nWMOID 13- 17 Integer\nNAME 19- 48 Character\nNAMFLAG 50- 50 Character\nLATITUDE 52- 60 Real\nLATFLAG 62- 62 Character\nLONGITUDE 64- 72 Real\nLONFLAG 74- 74 Character\nELEVATION 76- 81 Real\nELVFLAG 83- 83 Character\nYEAR 85- 88 Integer\nMONTH 90- 91 Integer\nDAY 93- 94 Integer\nHOUR 96- 97 Integer\nDATEIND 99- 99 Integer\nEVENT 101-119 Character\nALTIND 121-122 Character\nBEFINFO 124-163 Character\nBEFFLAG 164-164 Character\nLINK 166-167 Character\nAFTINFO 169-208 Character\nAFTFLAG 209-209 Character\nREFERENCE 211-235 Character\nCOMMENT 236-315 Character\nUPDCOM 316-346 Character\nUPDDATE 348-354 Character\n\"\"\"\n\n colspecs = []\n header = []\n types = {}\n for iline in infos.splitlines():\n if iline == '':\n continue\n ih = iline[0:11].strip().lower()\n header.append(ih)\n ii = int(iline[13:16]) - 1\n ij = int(iline[17:20])\n colspecs.append((ii, ij))\n it = iline[22:].strip()\n if it == 'Character':\n it = 'str'\n\n elif it == 'Real':\n it = 'float'\n\n else:\n it = 'int'\n\n types[ih] = it\n\n data = pd.read_fwf(filename, colspecs=colspecs, header=None, dtype=types, names=header)\n data = data.replace('nan', '')\n data['date'] = pd.to_datetime((data.year * 1000000 +\n np.where(data.month.values == 99, 6, data.month.values) * 10000 +\n np.where(data.day.values == 99, 15, data.day.values) * 100 +\n np.where(data.hour.values == 99, 0, data.hour.values)).apply(str), format='%Y%m%d%H')\n return data" ]
[ "0.585451", "0.5384007", "0.5313047", "0.53103215", "0.52985805", "0.5278899", "0.522569", "0.52152556", "0.520184", "0.51793325", "0.5156087", "0.50948936", "0.50056297", "0.49981564", "0.49954587", "0.4945608", "0.4944251", "0.49428275", "0.49331096", "0.49324706", "0.49301165", "0.4929805", "0.49108028", "0.4909888", "0.4900002", "0.4882283", "0.48802418", "0.4878332", "0.48779747", "0.4864754" ]
0.58713156
0
0 == 'setosa'
1 == 'versicolor'
2 == 'virginica'

This function will encode the species by default, but can optionally show the
species name as a string when the second argument is False.

prepare_iris_data(df)        returns encoded species name
prepare_iris_data(df, False) returns species name
def prepare_iris_data(df, encode=True):
    # Drop primary/foreign keys
    df = df.drop(columns=["measurement_id", "species_id"])

    # Rename "species_name" to species
    df = df.rename(columns={"species_name": "species"})

    if(encode):
        encoder = LabelEncoder()
        encoder.fit(df.species)
        df.species = encoder.transform(df.species)

    return df
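A short usage sketch for prepare_iris_data, assuming a source frame with the measurement_id, species_id, and species_name columns the function expects; the rows below are hypothetical.

# Hypothetical stand-in for the iris source table.
import pandas as pd
from sklearn.preprocessing import LabelEncoder

raw = pd.DataFrame({
    "measurement_id": [1, 2, 3],
    "species_id": [1, 2, 3],
    "species_name": ["setosa", "versicolor", "virginica"],
    "petal_length": [1.4, 4.5, 6.0],
})

print(prepare_iris_data(raw).species.tolist())         # [0, 1, 2]
print(prepare_iris_data(raw, False).species.tolist())  # ['setosa', 'versicolor', 'virginica']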
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rawSpecies(df, specie = \"Caenorhabditis elegans OX=6239\"):\n species = df[df[\"PG.Organisms\"] == specie]\n return species", "def prepare_iris_data(data):\n\n # One-Hot Encode target variable y\n \n X = data.iloc[:, 0:4]\n y = data.iloc[:,-1]\n Y = pd.get_dummies(y)\n \n # Recombine X and Y\n \n data_w_dummies = pd.concat([X, Y], axis=1)\n data_w_dummies.head()\n \n \n # Split data into train and test sets and \n # Make data iterable by batches with DataLoader\n \n train, test = train_test_split(data_w_dummies, test_size=0.2, shuffle=True)\n \n \n trainset = torch.Tensor(train.values)\n \n testset = torch.Tensor(test.values)\n \n return trainset, testset", "def preprocess_iris(self):\n\n print('[ INFO ]: Preprocessing iris data...')\n\n # Rename headers of data frame\n iris_data = pd.read_csv(self.iris_path, header=None)\n iris_data.columns = ['sepal_length','sepal_width','petal_length','petal_width', 'iris_class']\n\n df_columns = [iris_data.columns[j] for j in range(len(iris_data.columns)) if iris_data.columns[j] != 'iris_class']\n\n # Place classes into list\n classes = iris_data['iris_class'].unique().tolist()\n\n return iris_data, df_columns, classes", "def create_species_encode():\n\tdata = pd.read_csv(\"../train.csv\")\n\tspecies = sorted(data.species.unique())\n\tspecies_dict = {species: index for index, species in enumerate(species)}\n\treturn species_dict", "def iris_data():\n X, y = load_iris()['data'], load_iris()['target']\n y[y == 2.] = 0 # N.B. make binary, TODO simulate a competition dataset\n return BasicExamplesProvider(X, y)", "def known_species(self):\n # Import #\n from forest_puller.conversion.tree_species_info import df as species_info\n # Filter #\n df = species_info[['genus', 'species']]\n # Return #\n return df", "def setName(self, *args):\n return _libsbml.PossibleSpeciesFeatureValue_setName(self, *args)", "def iris():\n return IrisDataset()", "def encode_data(column: str, data):\n return label.fit_transform(data[column])", "def species_lookup_by_data_provider(self, provider):\n return self.species_name_lookup(provider)", "def construct_dataset_name(self, *args):\n raise NotImplementedError", "def setQualitativeSpecies(self, *args):\n return _libsbml.Output_setQualitativeSpecies(self, *args)", "def setName(self, *args):\n return _libsbml.SpeciesFeature_setName(self, *args)", "def train_data():\n raw = datasets.load_iris()\n iris = pd.DataFrame(raw.data, columns=raw.feature_names)\n iris = iris.join(pd.DataFrame(raw.target))\n iris.columns = [\"SepalLength\", \"SepalWidth\", \"PetalLength\", \"PetalWidth\", \"Species\"]\n iris[\"Species\"] = iris[\"Species\"].astype(\"category\")\n iris.Species.cat.categories = raw.target_names\n return iris.iloc[:, 0:4], iris[\"Species\"]", "def get_dataset() -> pd.DataFrame:\n\n data = load_iris(as_frame=True)\n\n dataset = data.frame\n dataset.rename(\n columns=lambda colname: colname.strip(' (cm)').replace(' ', '_'),\n inplace=True\n )\n\n return dataset", "def prepare_names(series_function: Callable[['CompanyCleaner', pd.Series], pd.Series]):\n def inner(self, series: pd.Series) -> pd.Series:\n series = series.fillna(\"\")\n if self.drop_invalid_utf8:\n series = series.str.encode('utf-8', 'ignore').str.decode('utf-8', 'ignore')\n if self.map_to_ascii:\n series = series.map(unidecode.unidecode) # can introduce capitalisation\n return series_function(self, series.str.lower())\n\n return inner", "def setName(self, *args):\n return _libsbml.QualitativeSpecies_setName(self, *args)", "def iris_generator(context, 
format=\"csv\"):\n iris = load_iris()\n iris_dataset = pd.DataFrame(data=iris.data, columns=iris.feature_names)\n iris_labels = pd.DataFrame(data=iris.target, columns=[\"label\"])\n iris_dataset = pd.concat([iris_dataset, iris_labels], axis=1)\n\n context.logger.info(\"saving iris dataframe to {}\".format(context.artifact_path))\n context.log_dataset(\"iris_dataset\", df=iris_dataset, format=format, index=False)", "def encoding_categorical_feature(dataset_dict: dict, feature_name: str,\n print_results: Union[bool, int] = True,\n print_counter: int = 0) -> dict:\n\n # Replacing the missing values with a special hash value to avoid having the same class of missing values and\n # non-missing values.\n hash_missing_value = hex(random.getrandbits(128))\n logger.debug(f\"The hash for the missing values is {hash_missing_value}\")\n\n # Concatenate datasets if needed\n\n if isinstance(print_results, bool) and print_results:\n print(f\"there are {len(dataset_dict)} datasets provided\")\n elif isinstance(print_results, int):\n if print_counter < print_results:\n print(f\"there are {len(dataset_dict)} datasets provided\")\n\n # the check here is not a good idea because it check each column alone which is not efficient\n valid_dataset_list = []\n valid_dataset_keys = []\n\n for key_i, dataseries in dataset_dict.items():\n if dataseries.shape[0] > 0:\n valid_dataset_list.append(dataseries) # get the dataframes\n valid_dataset_keys.append(key_i) # get the keys\n\n if len(valid_dataset_list) > 1:\n x_original = pd.concat(valid_dataset_list, axis=0)\n elif len(valid_dataset_list) == 1:\n x_original = valid_dataset_list[0]\n else:\n raise ValueError(\"No valid dataset was provided\")\n\n # define the encoder\n label_encoder = preprocessing.LabelEncoder()\n label_encoder.fit(x_original.fillna(hash_missing_value))\n\n dataset_dict_encoded = {}\n\n for dataset_key in valid_dataset_keys:\n dataset_dict_encoded[dataset_key] = label_encoder.transform(\n dataset_dict[dataset_key].fillna(hash_missing_value)\n )\n\n labels_nr = len(list(label_encoder.classes_))\n if isinstance(print_results, bool) and print_results:\n print(f\"encoding the feature in the dataset {dataset_key}\")\n print(f\"the number of classes in {feature_name} feature is: {labels_nr}\")\n\n elif isinstance(print_results, int):\n if print_counter < print_results:\n print(f\"encoding the feature in the dataset {dataset_key}\")\n print(f\"the number of classes in {feature_name} feature is: {labels_nr}\")\n\n logger.info(f\"Encoding categorical feature {feature_name} process is finished!\")\n return dataset_dict_encoded", "def read_iris_data():\n\n # Tomamos los datos del dataset\n # Esta es la parte en la que copio codigo de la fuente mencionada\n iris_dataset = datasets.load_iris()\n\n # Separamos caracteristicas de las clases\n data = iris_dataset.data\n classes = iris_dataset.target\n feature_names = iris_dataset.feature_names # Para saber el nombre de las caracteristicas\n target_names = iris_dataset.target_names # Los nombres de las flores que consideramos:\n # Son los nombres de las clases\n\n # Nos quedamos solo con la primera y tercera caracteristica que corresponden\n # a los indices 0 y 2\n data = [data[indx][0:3:2] for indx in range(len(data))]\n\n # Del mismo modo solo me quedo con los nombres de las caracteristicas con\n # las que me quedo en el paso anterior\n feature_names = [feature_names[0], feature_names[1]]\n\n return data, classes, feature_names, target_names", "def setQualitativeSpecies(self, *args):\n return 
_libsbml.Input_setQualitativeSpecies(self, *args)", "def setName(self, *args):\n return _libsbml.Species_setName(self, *args)", "def encode_sequential_labels(dataframe, variable_to_encode):\n # set name of encoded variable in dataframe \n encoded_variable_name = 'encoded_' + variable_to_encode\n # initialize encoder\n encoder = sklearn.preprocessing.LabelEncoder()\n # encode variable and add it to dataframe\n dataframe[encoded_variable_name] = encoder.fit_transform(dataframe[variable_to_encode].values)\n return encoder, dataframe", "def data_prep(df, params, if_resample=False):\n\n if if_resample and (params['balanced'] in ['Bootstrap', 'Handsample']):\n if params['balanced'] == 'Bootstrap':\n df = resample(df=df, balance=params['balanced'], nclass=params['classnum'])\n elif params['balanced'] == 'Handsample':\n df = resample(df=df, balance=params['balanced'], nclass=params['classnum'])\n\n if params['classnum'] == 6:\n df.drop(df[df['label']=='PTSD'].index, axis=0, inplace=True)\n\n data = list(df.dialog)\n label_encode = LabelEncoder()\n output = dict()\n output['data'] = data\n output['encoded_label'] = label_encode.fit_transform(df.label)\n output['binary_label'] = label_binarize(y=output['encoded_label'], classes=np.arange(params['classnum']))\n return output, label_encode", "def test_get_iris_setosa_data(self):\n iris = get_iris_setosa_data()\n self.assertEqual(len(iris.data), 150)\n self.assertEqual(len(iris.labels), 150)", "def pre_process_data(df):\n\n # one-hot encode categorical values\n df = pd.get_dummies(df)\n\n return df", "def setName(self, *args):\n return _libsbml.SpeciesFeatureType_setName(self, *args)", "def test_004_when_df_is_named() -> None:\n df = generate_test_data()\n df.name = \"Named dataframe\"\n skim(df)", "def featureprepare(self,df):\n try:\n df = self.featureselection(df)\n emp_len_dict= pickleload(self.dict_employ_len) # Load emp len\n df['emp_length'] = df['emp_length'].map(emp_len_dict)\n df['Long_emp_length'] = df['emp_length'].apply(lambda x: 'Yes' if x == 10 else 'No') # creating new feature\n df[\"emp_title\"].fillna('Missing', inplace=True)\n\n # Handling missing numerical value\n dict_Mean_var = pickleload(self.dict_Mean_var)\n for col, mean_val in dict_Mean_var.items():\n df[col].fillna(mean_val, inplace=True)\n\n # Handling rare values\n Freqlabels = pickleload(self.Freqlabels)\n for variable, frequent_labels in Freqlabels.items():\n df[variable] = np.where(df[variable].isin(frequent_labels), df[variable], 'Rare')\n\n # Encoding Categorical features\n x = pickleload(self.labelEncoder)\n for features, labels in x.items():\n df.loc[:, features] = labels.transform(df.loc[:, features])\n return df\n except Exception as e:\n self._Logger.error(\"Error in feature preparation: {}\".format(e))", "def dataset(name):\n t = \"unknown\"\n if name ==\"boston\":\n # regression (506x13feat)\n from sklearn.datasets import load_boston\n X, y = load_boston(return_X_y=True)\n t = \"R\"\n #X,y = shap.datasets.boston()\n #return X,y\n elif name == \"iris\":\n # classification (150x4featx3classes)\n from sklearn.datasets import load_iris\n data = load_iris()\n X = data.data\n y = data.target\n t = \"C\"\n elif name == \"diabetes\":\n # regression (442x10feat)\n from sklearn.datasets import load_diabetes\n X, y = load_diabetes(return_X_y=True)\n t = \"R\"\n elif name == \"digits\":\n # classification (1797x64featx10classes)\n from sklearn.datasets import load_digits\n X, y = load_digits(return_X_y=True)\n t = \"C\"\n elif name == \"wine\":\n # classification 
(178x13featuresx3classes)\n from sklearn.datasets import load_wine\n X, y = load_wine(return_X_y=True)\n t = \"C\"\n elif name == \"breast_cancer\":\n # classification (569x30featx2classes)\n from sklearn.datasets import load_breast_cancer\n X, y = load_breast_cancer(return_X_y=True)\n t = \"C\"\n elif name ==\"nhanesi\":\n X,y = shap.datasets.nhanesi()\n t = \"R\"\n elif name == \"segments\":\n X,y = make_led()\n t = \"C\"\n elif name == \"segments_sampled\":\n X,y = make_led_sample()\n t = \"C\"\n elif name == \"friedman1\":\n from sklearn.datasets import make_friedman1\n X,y= make_friedman1(n_samples=500, random_state=0)\n print('Done')\n X = pd.DataFrame(X, columns=list(range(X.shape[1])))\n t = 'R'\n elif name == \"friedman2\":\n from sklearn.datasets import make_friedman2\n X,y= make_friedman2(random_state=0)\n t = 'R'\n elif name == 'linear':\n X, y, t = draw_linear_function()\n elif name == \"linear2\":\n importlib.reload(lreg)\n X,y,t = lreg.lf_dataset(nsamples=5000, with_vimp=False)\n elif name == 'friendman3':\n X, y, t = friedman_modified()\n else:\n raise ValueError(\"dataset `{}` not implemented\".format(name))\n return X,y,t" ]
[ "0.5924976", "0.57568926", "0.55721927", "0.54730135", "0.5446134", "0.5353316", "0.5202682", "0.5201156", "0.515606", "0.512597", "0.5125128", "0.5123189", "0.5102725", "0.5086768", "0.50764376", "0.50676197", "0.5054898", "0.5050417", "0.5038685", "0.5023871", "0.49641922", "0.4953939", "0.49421608", "0.49073502", "0.48927647", "0.48731616", "0.48626676", "0.48594305", "0.48414844", "0.48250416" ]
0.74515724
0
Gets the snapchat IDs that have already been downloaded and returns them in a set.
def get_downloaded():
    result = set()
    for name in os.listdir(PATH):
        filename, ext = name.split('.')
        if ext not in EXTENSIONS:
            continue
        ts, username, id = filename.split('+')
        result.add(id)
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_id_set(self):\n s = set()\n for player in Player.select(Player.player_id):\n s.add(player.player_id)\n return s", "def filter_seen_messages(self, messages):\n seen_uids = set()\n for uid in messages:\n key = \"%s_%s_%s\" % (self.opt_pop3_server,\n self.opt_global_account[\"username\"], uid.split()[1])\n if self.helper.get_check_point(key) is not None:\n seen_uids.add(uid)\n new_uids = set(messages) - seen_uids\n self.helper.log_debug(\n 'filter_seen_messages: uids on pop3 %s' %\n set(messages))\n self.helper.log_debug(\n 'filter_seen_messages: uids in checkp %s' %\n seen_uids)\n self.helper.log_debug(\n 'filter_seen_messages: uids new %s' %\n new_uids)\n return new_uids", "def __init__(self):\n self.ids_seen = set()", "def remote_get_ids(self):\n return self.smultiengine.get_ids()", "def ids(self):\n return frozenset([seq.id for seq in self])", "def get_background_ids(self):\n with self.get_lock().read_lock():\n return frozenset(self._bg_cid_set) \\\n if self._bg_cid_set is not None \\\n else frozenset()", "def get_ids(self) -> List[str]:", "def getIDs():", "def bulk_has(self, ids_):\n with self._db_connection() as connection:\n existing = set(connection.datasets_intersection(ids_))\n\n return [x in existing for x in\n map((lambda x: UUID(x) if isinstance(x, str) else x), ids_)]", "def getTraceIDs(self, ids):\n traceids = set()\n\n with self.instanceTraceIDsLock:\n for id in ids:\n if id in self.instanceTraceIDs:\n traceids.update(self.instanceTraceIDs[id])\n\n return traceids", "def get_uids():\n DB_NAME = 'cloud_storage.db'\n DB_DIRECTORY = 'server_side_storage/'\n db = sqlite3.connect('{}{}'.format(DB_DIRECTORY, DB_NAME))\n cursor = db.cursor()\n cursor.execute(\"SELECT uid FROM user_ids\")\n all_uids = cursor.fetchall()\n db.commit()\n cursor.close()\n db.close()\n all_uids = list(itertools.chain(*all_uids))\n return all_uids", "def get_ids(self):\n return self._ids", "def _remove_seen_ids(ctrl, id_set):\n partition_range = range(0, ctrl.config[\"partitions\"])\n unseen_ids = id_set.copy()\n for partition_idx in partition_range:\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_idx)\n for seen_id in id_set:\n if lower_bound <= seen_id <= upper_bound:\n unseen_ids.remove(seen_id)\n return unseen_ids", "def get_sid_set(sources):\n sid_list = []\n for source_dict in sources:\n sid = source_dict['SID']\n sid_list.append(sid)\n sid_set = set(sid_list)\n\n assert len(sid_set) == len(sid_set), \"Duplicate SID detected\"\n return sid_set", "def get_ids(self):\n return self.redis.hkeys(self.feed_items)", "def remove_duplicates_for_fetch(items: list, last_fetched_ids: list) -> list:\n return [\n item\n for item in items\n if item.get('id') and item.get('id') not in last_fetched_ids\n ]", "def check_datastore(self):\n query = db.Query(RabiaStore)\n query.filter(\"url IN \", self.urls)\n\n # The fetch method requires a limit argument\n results = query.fetch(limit=len(self.urls))\n\n # List comprehension to build a list of results\n urls_from_datastore = [each.url for each in results]\n\n # Turn it into a Python set\n self.urls_from_datastore = Set(urls_from_datastore)", "def get_item_set(tag_set):\n s = set()\n tags = tag_set\n for itemKey in items[\"data\"]:\n item = items[\"data\"][itemKey]\n if \"tags\" in item:\n if not tags.isdisjoint(set(item[\"tags\"])):\n s.add(item[\"id\"])\n\n return s", "def get_tsIDs(self):\n tsIDs = set()\n for er in self.exercise_recordings:\n if er.tsID not in tsIDs:\n tsIDs.add(er.tsID)\n return list(tsIDs)", "def 
getMissingIds(self):\n return self._missingIds", "async def have_seen_events(\n self, room_id: str, event_ids: Iterable[str]\n ) -> Set[str]:\n\n # @cachedList chomps lots of memory if you call it with a big list, so\n # we break it down. However, each batch requires its own index scan, so we make\n # the batches as big as possible.\n\n results: Set[str] = set()\n for event_ids_chunk in batch_iter(event_ids, 500):\n events_seen_dict = await self._have_seen_events_dict(\n room_id, event_ids_chunk\n )\n results.update(\n eid for (eid, have_event) in events_seen_dict.items() if have_event\n )\n\n return results", "def _get_all_app_ids(config, client):\n rv = set()\n total_pages = client.get_published_apps(config.username, 0).json()[\"total_pages\"]\n for current_page in range(total_pages):\n current_page_results = client.get_published_apps(config.username, current_page).json()['results']\n for result in current_page_results:\n rv.add(result['id'])\n return rv", "def get_missing_seat_ids(boarding_passes: list) -> set:\n seat_ids = {get_seat_id(boarding_pass) for boarding_pass in boarding_passes}\n all_seat_ids = set(range(min(seat_ids), max(seat_ids) + 1))\n return all_seat_ids - seat_ids", "def getIds(self) -> List[int]:\n return list(self.users.keys())", "def ids(self):\n return self._ids", "def manually_screened_ids():\n # These have no meaning here.\n # low_resolution_meters = (35466301, 82218621, 15720078, 80690326, 65630886, \n # 13824685, 87785213, 12645122, 89454871)\n # missing_data = (73122950, 39281849, 99260911, 92959456, 79042288, 97564405,\n # 8751522)\n # return set(itertools.chain(low_resolution_meters, missing_data))\n return False", "def get_distinct_uuids_for_chunks(chunks_cache):\n return set(link.friendly_uuid() for link in chunks_cache.values()\n if link)", "def getUserIds(self):\n raise BorkedGetUserIds", "def getSet(unique_name):", "def getSet(unique_name):" ]
[ "0.61409533", "0.6138558", "0.5750224", "0.56933486", "0.5687567", "0.56794363", "0.5625306", "0.5603618", "0.5598983", "0.5564271", "0.55617774", "0.5561327", "0.55588824", "0.55514014", "0.5539512", "0.5530868", "0.55293196", "0.5450162", "0.5425994", "0.54205567", "0.5385508", "0.5379944", "0.5379906", "0.5373057", "0.5365015", "0.53408563", "0.5327397", "0.53170943", "0.5294992", "0.5294992" ]
0.64585745
0
Download a specific snap, given output from s.get_snaps().
def download(s, snap):
    id = snap['id']
    name = snap['sender']
    ts = str(snap['sent']).replace(':', '-')
    result = s.get_media(id)
    if not result:
        return False
    ext = s.is_media(result)
    filename = '{}+{}+{}.{}'.format(ts, name, id, ext)
    path = PATH + filename
    with open(path, 'wb') as fout:
        fout.write(result)
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_snaps(s):\n\n existing = get_downloaded()\n\n snaps = s.get_snaps()\n for snap in snaps:\n id = snap['id']\n if id[-1] == 's' or id in existing:\n print 'Skipping:', id\n continue\n\n result = download(s, snap)\n\n if not result:\n print 'FAILED:', id\n else:\n print 'Downloaded:', id", "async def download_snapshot(self, slug, output_path):\n command = COMMAND_SNAPSHOT_DOWNLOAD.format(slug=slug)\n\n try:\n with async_timeout.timeout(self._backup_timeout):\n request = await self._hassio.websession.request(\n \"get\",\n f\"http://{self._ip}{command}\",\n headers={X_HASSIO: os.environ.get(\"HASSIO_TOKEN\", \"\")},\n timeout=None,\n )\n\n if request.status not in (200, 400):\n _LOGGER.error(\"%s return code %d.\", command, request.status)\n raise HassioAPIError()\n\n with open(output_path, \"wb\") as file:\n file.write(await request.read())\n\n _LOGGER.info(\"Downloaded snapshot '%s' to '%s'\", slug, output_path)\n return\n\n except asyncio.TimeoutError:\n _LOGGER.error(\"Timeout on %s request\", command)\n\n except aiohttp.ClientError as err:\n _LOGGER.error(\"Client error on %s request %s\", command, err)\n\n except IOError:\n _LOGGER.error(\"Failed to download snapshot '%s' to '%s'\", slug, output_path)\n\n raise HassioAPIError(\"Snapshot download failed.\")", "def snap(self, path=None):\n if path is None:\n path = \"/tmp\"\n else:\n path = path.rstrip(\"/\")\n day_dir = datetime.datetime.now().strftime(\"%d%m%Y\")\n hour_dir = datetime.datetime.now().strftime(\"%H%M\")\n ensure_snapshot_dir(path+\"/\"+self.cam_id+\"/\"+day_dir+\"/\"+hour_dir)\n f_path = \"{0}/{1}/{2}/{3}/{4}.jpg\".format(\n path,\n self.cam_id,\n day_dir,\n hour_dir,\n datetime.datetime.now().strftime(\"%S\"),\n )\n\n urllib.urlretrieve(\n 'http://{0}/snapshot.cgi?user={1}&pwd={2}'.format(\n self.address, \n self.user, \n self.pswd,\n ),\n f_path,\n )\n #print resp[1]['Content-disposition'].replace(\"filename=\\\"\",\"\")[:-1]", "def open_snap_file(self, ifile):\n fname = \"%s/snapdir_%03d/%s_%03d.%d\" % (self.basedir, self.isnap, \n self.basename, self.isnap, ifile)\n return gadget_snapshot.open(fname)", "def snap(self, data, file_name, dev=None, folder=None):\n return self.action_api_based(dev, data, file_name, \"snap\", folder)", "def snap_info(mnode, snapname=\"\", volname=\"\"):\n\n if snapname != \"\" and volname != \"\":\n g.log.error(\"Incorrect cmd. 
snap info cli accepts either \"\n \"snapname or volname\")\n return (-1, None, None)\n\n if volname != '':\n volname = \"volume %s\" % volname\n\n cmd = \"gluster snapshot info %s %s\" % (snapname, volname)\n return g.run(mnode, cmd)", "def jail_snapshot(action, jnid = ''):\n\n jails = jails_list()\n\n try:\n int(jnid) == True\n testid = 0\n except ValueError:\n testid = 1\n\n if testid == 0:\n if jnid in jails[2]:\n j = jails[2].index(jnid)\n jnid = jails[1][j]\n else:\n msg = \" ERROR: Jail with ID '%s' not found!\" % (jnid)\n log(msg)\n return False\n\n if jnid == 'BASE':\n print \" ERROR: 'BASE' jail cannot have snapshots!\"\n return False\n\n if jnid in jails[1]:\n\n# check current time/date\n dt = str(datetime.utcnow().isoformat())\n# check if jail is skeleton based\n if 'BASE-' in jnid:\n jzfssnap = \"%s/BASE-RW/%s@%s\" % (jzfs, jnid, dt)\n else:\n jzfssnap = \"%s/%s@%s\" % (jzfs, jnid, dt)\n\n if action == \"createsnap\":\n os.system(\"zfs snapshot \"+jzfssnap)\n# print and add to log file \n logmsg = \" INFO: New snapshot '%s' was created for '%s'!\" % (jzfssnap, jnid)\n log(logmsg) \n \n return False\n\n if action == \"listsnap\":\n if jail_snapshot_list(jnid) == False:\n return False\n\n lmen = jail_snapshot_list(jnid)[1]\n jsnn = jail_snapshot_list(jnid)[0]\n print tabulate(jsnn, lmen)\n\n if action == \"rmsnap\":\n if jail_snapshot_list(jnid) == False:\n return False\n\n lmen = jail_snapshot_list(jnid)[1]\n jsnn = jail_snapshot_list(jnid)[0]\n print tabulate(jsnn, lmen)\n\n print \" \" \n while True:\n rmsnap = raw_input(\"snapshot number or (!) :> \")\n if rmsnap == \"!\":\n print \" INFO: Interrupted by user\"\n return False\n \n try:\n int(rmsnap)\n except ValueError:\n print \" ERROR: Please use only numbers!\"\n continue\n \n sn = []\n for i in jsnn:\n sn.append(i[0])\n\n if int(rmsnap) in sn:\n print \" WARNING: %s snapshot will be removed!\" % (jsnn[int(rmsnap)][1])\n yn = raw_input(\"confirm (y):> \")\n \n if yn == \"y\":\n os.system(\"zfs destroy \"+jsnn[int(rmsnap)][1])\n# prtin and add to log file \n logmsg = \" INFO: '%s' jail snapshot '%s' was removed!\" % (jnid, jsnn[int(rmsnap)][1])\n log(logmsg) \n return False \n else:\n print \" INFO: Interrupted by user\"\n return False\n \n else:\n print \" ERROR: Snapshot number '%s' not exist!\" % (rmsnap)\n continue\n\n if action == \"restoresnap\":\n if jail_snapshot_list(jnid) == False:\n return False\n\n lmen = jail_snapshot_list(jnid)[1]\n jsnn = jail_snapshot_list(jnid)[0]\n print tabulate(jsnn, lmen) \n print \" \"\n \n while True:\n snap = raw_input(\"snapshot number or (!) 
:> \")\n if snap == \"!\":\n print \" INFO: Interrupted by user\"\n return False\n\n try:\n int(snap)\n except ValueError:\n print \" ERROR: Please use only numbers!\"\n continue\n \n sn = []\n for i in jsnn:\n sn.append(i[0])\n \n if int(snap) in sn:\n print \" WARNING: '%s' will be restored from '%s' snapshot!\" % (jnid, jsnn[int(snap)][1])\n print \" WARNING: all snapshots newer than '%s' will be removed too!\" % (jsnn[int(snap)][1])\n yn = raw_input(\"confirm (y):> \")\n\n if yn == \"y\":\n# check if jail is running\n if jnid in jails[1]:\n if jail_isrun(jnid) == 1:\n while True:\n print \" WARNING: Jail '%s' should be stopped!\" % (jnid)\n yn = raw_input(\"stop '%s' jail (y):> \" % jnid)\n if yn not in \"yY\" or yn in ['', ' ', '\t']:\n return False\n else:\n# print and add to log file \n logmsg = \" INFO: modify jail> Jadm stop jail '%s'\" % (jnid)\n log(logmsg) \n os.system(\"jail -r %s\" % jnid)\n break \n if jail_isrun(jnid) == 'dying':\n print \" WARNING: Jail '%s' dying at the moment!\" % (jnid)\n return False\n\n os.system(\"zfs rollback -r %s\" % (jsnn[int(snap)][1]))\n logmsg = \" INFO: '%s' jail was restored from '%s' snapshot!\" % (jnid, jsnn[int(snap)][1])\n log(logmsg)\n return False\n else:\n print \" INFO: Interrupted by user\"\n return False\n else:\n print \" ERROR: Snapshot number '%s' not exist!\" % (snap)\n continue\n \n else:\n print \" ERROR: Jail with name '%s' not found!\" % (jnid)\n return False", "def download_screenshot_command():\n # 1. Get input scan id and resolution from Demisto\n scanid = demisto.args().get('scanid')\n resolution = demisto.args().get('resolution')\n # 2. Get the forensic webpage screenshot from SlashNext API\n response = download_screenshot(scanid=scanid, resolution=resolution)\n if response.get('errorNo') != 0:\n return\n # 3. Parse and format the response\n sc_base64 = response.get('scData').get('scBase64')\n sc_data = base64.b64decode(sc_base64)\n\n sc_file = fileResult('slashnext_{}.jpg'.format(scanid), sc_data, entryTypes['image'])\n\n demisto.results({\n 'Type': entryTypes['image'],\n 'ContentsFormat': formats['text'],\n 'Contents': 'Forensics: Webpage Screenshot for URL Scan ID = {}'.format(scanid),\n 'File': sc_file.get('File'),\n 'FileID': sc_file.get('FileID')\n })", "def get_snap_info_by_snapname(mnode, snapname):\n\n snap_info_list = get_snap_info(mnode)\n if not snap_info_list:\n g.log.error(\"Failed to parse snap info in \"\n \"get_snap_info_by_snapname()\")\n return None\n\n for snap_info in snap_info_list:\n if \"name\" in snap_info:\n if snap_info[\"name\"] == snapname:\n return snap_info\n g.log.error(\"The snap %s not found\" % (snapname))\n return None", "def link_snapshot(argstr):\n pass", "def snap_restore(mnode, snapname):\n\n cmd = \"gluster snapshot restore %s --mode=script\" % snapname\n return g.run(mnode, cmd)", "def _get_spectra_snap(self, snap, base):\n #If savefile exists, reload. 
Otherwise do not.\n def mkspec(snap, base, cofm, axis, rf):\n \"\"\"Helper function\"\"\"\n return spectra.Spectra(snap, base, cofm, axis, res=self.pix_res, savefile=self.savefile,spec_res = self.spec_res, reload_file=rf,sf_neutral=False,quiet=True, load_snapshot=rf)\n #First try to get data from the savefile, and if we can't, try the snapshot.\n try:\n ss = mkspec(snap, base, None, None, rf=False)\n if not self._check_redshift(ss.red):\n return None\n except OSError:\n #Check the redshift is ok\n red = 1./_get_header_attr_from_snap(\"Time\", snap, base)-1.\n if not self._check_redshift(red):\n return None\n #Make sure we have sightlines\n (cofm, axis) = self._get_cofm(snap, base)\n ss = mkspec(snap, base, cofm, axis, rf=True)\n #Get optical depths and save\n _ = ss.get_tau(\"H\",1,1215)\n ss.save_file()\n #Check we have the same spectra\n try:\n assert np.all(ss.cofm == self.cofm)\n except AttributeError:\n #If this is the first load, we just want to use the snapshot values.\n (self.cofm, self.axis) = (ss.cofm, ss.axis)\n return ss", "def get(isamAppliance, check_mode=False, force=False):\n return isamAppliance.invoke_get(\"Retrieving snapshots\", \"/snapshots\")", "def test_link_snapshot_by_snap_id(self):\n snapshot_info, sg_name = self.create_sg_snapshot()\n target_sg = \"{sg}_lnk\".format(sg=sg_name)\n snap_name = snapshot_info.get('name')\n snap_id = snapshot_info.get('snapid')\n self.assertIsNotNone(snap_id)\n self.replication.link_snapshot_by_snap_id(\n sg_name, target_sg, snap_name, snap_id)\n snap_details = self._test_get_ss_snapid_detail(\n sg_name, snap_name, snap_id, check_linked=True)\n self.assertTrue(snap_details.get('linked'))\n self.replication.modify_storage_group_snapshot_by_snap_id(\n sg_name, target_sg, snap_name, snap_id, unlink=True)\n snap_details = self._test_get_ss_snapid_detail(\n sg_name, snap_name, snap_id, check_unlinked=True)\n self.assertFalse(snap_details.get('linked'))\n self.provisioning.delete_storage_group(target_sg)", "def pull_snapshot(edition, force):\n from docker import DockerClient\n from docker.errors import ImageNotFound\n docker = DockerClient.from_env(version=\"auto\")\n artifact = resolve_artifact_name(edition)\n if force:\n return download_snapshot_artifact(artifact)\n else:\n derived = derive_image_tag(artifact)\n try:\n docker.images.get(derived)\n except ImageNotFound:\n return download_snapshot_artifact(artifact)\n else:\n return derived", "def download_SRA(SRA):\n\n print(\"Downloading SRA archive\")\n output = subprocess.run(['prefetch', '-f', 'yes', SRA], stderr=subprocess.STDOUT)\n\n print(\"Extracting FASTQ data\")\n output = subprocess.run(['fastq-dump', '--gzip', NCBI_DIR+SRA+'.sra'], stderr=subprocess.STDOUT)", "def cli(date, path, mission):\n download.main(path, mission, date)", "async def async_snapshot(self, switchinput):\n if self._state == STATE_UNAVAILABLE:\n return\n\n if not self._slave_mode:\n self._snapshot_active = True\n self._snap_source = self._source\n self._snap_state = self._state\n self._snap_nometa = self._nometa\n self._snap_playing_mediabrowser = self._playing_mediabrowser\n self._snap_media_source_uri = self._media_source_uri\n self._snap_playhead_position = self._playhead_position\n\n if self._playing_localfile or self._playing_spotify or self._playing_webplaylist:\n if self._state in [STATE_PLAYING, STATE_PAUSED]:\n self._snap_seek = True\n\n elif self._playing_stream or self._playing_mediabrowser:\n if self._state in [STATE_PLAYING, STATE_PAUSED] and self._playing_mediabrowser:\n self._snap_seek = 
True\n\n _LOGGER.debug(\"For %s SNAPSHOT, source: %s, volume: %s, uri: %s, seek: %s, pos: %s\", self.name, self._source, self._snap_volume, self._media_uri_final, self._snap_seek, self._playhead_position)\n\n if self._source == \"Network\":\n self._snap_uri = self._media_uri_final\n \n\n if self._playing_spotify:\n if not switchinput:\n await self.async_preset_snap_via_upnp(str(self._preset_key))\n await self.async_call_linkplay_httpapi(\"setPlayerCmd:stop\", None)\n else:\n self._snap_spotify_volumeonly = True\n self._snap_spotify = True\n self._snap_volume = int(self._volume)\n return\n\n elif self._playing_mass:\n await self.hass.services.async_call(\"mass\",\"queue_command\", service_data = {\"entity_id\": self.entity_id, \"command\": \"snapshot_create\"})\n self._snap_mass = True\n self._snap_volume = int(self._volume)\n\n elif self._state == STATE_IDLE:\n self._snap_volume = int(self._volume)\n\n elif switchinput and not self._playing_stream:\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:switchmode:wifi\", None)\n await asyncio.sleep(0.2)\n await self.async_call_linkplay_httpapi(\"setPlayerCmd:stop\", None)\n if value == \"OK\":\n await asyncio.sleep(2) # have to wait for the sound fade-in of the unit when physical source is changed, otherwise volume value will be incorrect\n await self.async_get_status()\n if self._player_statdata is not None:\n try:\n self._snap_volume = int(self._player_statdata['vol'])\n except ValueError:\n _LOGGER.warning(\"Erroneous JSON during snapshot volume reading: %s, %s\", self.entity_id, self._name)\n self._snap_volume = 0\n else:\n self._snap_volume = 0\n else:\n self._snap_volume = 0\n else:\n self._snap_volume = int(self._volume)\n if self._playing_stream:\n if self._fwvercheck(self._fw_ver) >= self._fwvercheck(FW_SLOW_STREAMS):\n await self.async_call_linkplay_httpapi(\"setPlayerCmd:pause\", None)\n else:\n await self.async_call_linkplay_httpapi(\"setPlayerCmd:stop\", None)\n else:\n return\n #await self._master.async_snapshot(switchinput)", "def show_snapshot(self, snapshot_id):\n url = \"snapshots/%s\" % snapshot_id\n resp, body = self.get(url)\n body = json.loads(body)\n self.validate_response(schema.show_snapshot, resp, body)\n return rest_client.ResponseBody(resp, body)", "def get_snapshot_object(session, key, snapshot=None):\n # type: (Session, Text, Optional[Text]) -> Any\n url_tail = \"/{}/{}/{}/{}/{}\".format(\n CoordConstsV2.RSC_NETWORKS,\n session.network,\n CoordConstsV2.RSC_SNAPSHOTS,\n session.get_snapshot(snapshot),\n CoordConstsV2.RSC_OBJECTS,\n )\n return _get_stream(session, url_tail, {CoordConstsV2.QP_KEY: key})", "def getVCDRPGSnaps(**kwargs):\n strVCDRProdURL = kwargs['strVCDRProdURL']\n sessiontoken = kwargs['sessiontoken']\n if kwargs['cloud_fs_id'] is None:\n print(\"Please specify the ID of the cloud file system using '-cloud-fs-id'\")\n sys.exit(1)\n if kwargs['protection_group_id'] is None:\n print(\"Please specify the ID of the protection group using '-protection-group-id'\")\n sys.exit(1)\n cloud_fs_id = kwargs['cloud_fs_id']\n pg_id = kwargs['protection_group_id']\n if kwargs['protection_group_snap_id'] is None:\n json_response = get_vcdr_pg_snaps_json(strVCDRProdURL, cloud_fs_id, pg_id, sessiontoken)\n if json_response == None:\n print(\"API Error\")\n sys.exit(1)\n snaps = json_response[\"snapshots\"]\n table = PrettyTable(['Snapshot Name', 'Snaphot ID'])\n for i in snaps:\n table.add_row([i['name'], i['id']])\n print(table)\n else:\n snap_id = kwargs['protection_group_snap_id']\n json_response = 
get_vcdr_pg_snap_details_json(strVCDRProdURL, cloud_fs_id, pg_id, snap_id, sessiontoken)\n if json_response == None:\n print(\"API Error\")\n sys.exit(1)\n create_stamp_int = int(json_response['creation_timestamp'])\n create_stamp = datetime.utcfromtimestamp(create_stamp_int/1e9)\n expire_stamp_int = int(json_response['expiration_timestamp'])\n expire_stamp = datetime.utcfromtimestamp(expire_stamp_int/1e9)\n print(\" \")\n print(f\"Snapshot Name: {json_response['name']}\")\n # print(f\"Snapshot Creation: {json_response['creation_timestamp']}\")\n print(f\"Snapshot Creation: {create_stamp}\")\n print(f\"Snapshot Expiration: {expire_stamp}\")\n print(f\"Snapshot Trigger: {json_response['trigger_type']}\")\n print(f\"Number of VM: {json_response['vm_count']}\")\n print(\" \")", "def test_restore_snapshot_by_snap_id(self):\n snapshot_info, sg_name = self.create_sg_snapshot()\n snap_name = snapshot_info.get('name')\n snap_id = snapshot_info.get('snapid')\n self.replication.restore_snapshot_by_snap_id(\n sg_name, snap_name, snap_id)\n snap_details = self._test_get_ss_snapid_detail(\n sg_name, snap_name, snap_id, check_restored=True)\n self.assertTrue('Restored' in snap_details.get('state'))", "def get_snapshot(project, zone, instance):\n snapshot_disks(project, zone, *get_disks(instance))", "def get_from_snap_id(self):\n raise NotImplementedError()", "def get_snapshot(self, name=None, snapshot_id=None):\n if snapshot_id:\n return self._search_snapshot(key=\"snapshot_id\", value=snapshot_id)\n elif name:\n return self._search_snapshot(key=\"name\", value=name)\n else:\n raise ValueError(\"name or snapshot_id must be provided\")", "def screenGrab():\n box = (x_pad+1, y_pad+1, 796, 825)\n save_directory = os.getcwd()\n time_stamp = int(time.time())\n image_file_name = '{}\\\\full_snap__{}.png'.format(save_directory, time_stamp)\n im = ImageGrab.grab(box)\n im.save(image_file_name, 'PNG')", "async def get_file(self, link, name, md5, session):\n if os.path.exists(name) or md5 in opts.archived_md5:\n self.count += 1\n return\n\n async with session.get(link) as media:\n # Open file initially with .part suffix\n with open(f\"{name}.part\", \"wb\") as f:\n while True:\n chunk = await media.content.read(1024)\n if not chunk:\n break\n f.write(chunk)\n\n # Remove .part suffix once complete\n # After this point file won't get removed if script gets interrupted\n os.rename(f\"{name}.part\", name)\n\n if opts.archive:\n log_hash(md5)\n self.count += 1\n msg(f\"{self.fetch_progress()} {self.board}/{self.dir}/{name}\")", "def download_scn(self, unq_id):\n if not os.path.exists(self.baseDownloadPath):\n raise EODataDownException(\"The download path does not exist, please create and run again.\")\n\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n\n logger.debug(\"Perform query to find scenes which need downloading.\")\n query_result = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.PID == unq_id,\n EDDSentinel1ASF.Downloaded == False).filter(\n EDDSentinel1ASF.Remote_URL is not None).all()\n ses.close()\n success = False\n if query_result is not None:\n if len(query_result) == 1:\n record = query_result[0]\n logger.debug(\"Building download info for '\" + record.Remote_URL + \"'\")\n scn_lcl_dwnld_path = os.path.join(self.baseDownloadPath,\n \"{}_{}\".format(record.Product_File_ID, record.PID))\n if not os.path.exists(scn_lcl_dwnld_path):\n 
os.mkdir(scn_lcl_dwnld_path)\n out_filename = record.Remote_FileName\n _download_scn_asf([record.PID, record.Product_File_ID, record.Remote_URL, self.db_info_obj,\n os.path.join(scn_lcl_dwnld_path, out_filename), self.asfUser, self.asfPass])\n success = True\n elif len(query_result) == 0:\n logger.info(\"PID {0} is either not available or already been downloaded.\".format(unq_id))\n else:\n logger.error(\"PID {0} has returned more than 1 scene - must be unique something really wrong.\".\n format(unq_id))\n raise EODataDownException(\"There was more than 1 scene which has been found - \"\n \"something has gone really wrong!\")\n else:\n logger.error(\"PID {0} has not returned a scene - check inputs.\".format(unq_id))\n raise EODataDownException(\"PID {0} has not returned a scene - check inputs.\".format(unq_id))\n return success", "def main(args):\n args = parse_args(args)\n setup_logging(args.loglevel)\n\n dir = 'snapshots/' + args.name\n Path(dir).mkdir(parents=True, exist_ok=True)\n\n session = sql_session(dir)\n today = session.query(Sunrise).filter_by(date=date.today()).first()\n if today is None:\n today = add_sunrise(session, args.name, args.latitude, args.longitude)\n now = datetime.utcnow()\n username, password = password_from_netrc(args.url)\n\n if args.force == True or (now > today.sunrise_time and now < today.sunset_time):\n where = tzwhere.tzwhere()\n timezone_str = where.tzNameAt(float(args.latitude), float(args.longitude))\n local_timezone = pytz.timezone(timezone_str)\n utcnow = now.replace(tzinfo=pytz.utc)\n local_now = utcnow.astimezone(local_timezone)\n path = local_now.strftime('%Y%m%d-%H%M%S%z') + '_' + args.name + '.jpg'\n\n r = requests.get(args.url, auth=HTTPDigestAuth(username, password), verify=False, stream=True)\n if r.status_code == requests.codes.ok:\n with open(dir + '/' + path, 'wb') as f:\n for chunk in r.iter_content(4096):\n f.write(chunk)", "def download(self):\n\n if not self.plateifu:\n return None\n\n plate, ifu = self.plateifu.split('-')\n dir3d = self._get_image_dir()\n\n name = 'mangaimage'\n\n return super(Image, self).download(name, ifu=ifu, dir3d=dir3d,\n drpver=self._drpver, plate=plate)" ]
[ "0.73581344", "0.6964329", "0.6664417", "0.59816575", "0.58126813", "0.58090883", "0.5769466", "0.56545335", "0.5624676", "0.562231", "0.55915815", "0.5590657", "0.5560722", "0.55601645", "0.55212283", "0.55028254", "0.5490585", "0.5475541", "0.5471203", "0.5432722", "0.5416679", "0.5401287", "0.534606", "0.53455687", "0.53313893", "0.53181356", "0.5290474", "0.5273842", "0.5251784", "0.52295214" ]
0.7079678
1