query (string) | document (string) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string) | document_rank (string, 2 classes)
---|---|---|---|---|---|---|
Context processor to access all products from user wishlist | def user_wishlist(request):
    if request.user.is_authenticated:
        wishlist = get_object_or_404(Wishlist, user=request.user)
        # Pagination: show 12 products per page
        paginator = Paginator(wishlist.products.all().order_by('id'), 12)
        page = request.GET.get('page')
        try:
            all_wishlist = paginator.page(page)
        except PageNotAnInteger:
            # If page is not an integer, deliver first page.
            all_wishlist = paginator.page(1)
        except EmptyPage:
            # If page is out of range (e.g. 9999),
            # deliver last page of results.
            all_wishlist = paginator.page(paginator.num_pages)
        # Pagination was inspired by, modified and
        # adapted to this project from this
        # credited code:
        # https://www.youtube.com/watch?v=MAIFJ3_bcCY
        index = all_wishlist.number - 1
        max_index = len(paginator.page_range)
        start_index = index - 2 if index >= 2 else 0
        end_index = index + 3 if index <= max_index - 3 else max_index
        page_range = paginator.page_range[start_index:end_index]
        context = {'wishlist': all_wishlist,
                   'page_range': page_range, }
    else:
        context = {
            'wishlist': [],
        }
    return context | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wishlist(request):\n items = []\n user = get_object_or_404(UserProfile, user=request.user)\n wishlist = Wishlist.objects.get_or_create(user=user)\n wishlist_user = wishlist[0]\n existingWishlist = WishlistItem.objects.filter(\n wishlist=wishlist_user).exists()\n\n if existingWishlist:\n user_wishlist = get_list_or_404(WishlistItem, wishlist=wishlist_user)\n for obj in user_wishlist:\n product = get_object_or_404(Product, name=obj)\n items.append(product)\n context = {\n 'wishlist': True,\n 'products': items\n }\n return render(request, 'wishlist/wishlist.html', context)\n\n else:\n context = {\n 'wishlist': False,\n }\n return render(request, 'wishlist/wishlist.html', context)",
"def __iter__(self):\n products_ids = self.wishlist.keys()\n # get the products objects and add them to the wishlist\n products = Product.objects.filter(id__in=products_ids)\n\n wishlist_session = self.wishlist.copy()\n wishlist = {}\n for product in products:\n wishlist[str(product.id)] = {'product': product}\n\n for item, item_s in zip(wishlist.values(), wishlist_session.values()):\n item['per_now'] = 0\n item['price'] = Decimal(item_s['price'])\n if product.price != item['price']:\n new_price = Decimal(item['product'].price)\n old_price = Decimal(item['price'])\n per_now = (new_price - old_price) / old_price * Decimal('100')\n item['per_now'] = int(per_now)\n yield item",
"def see_products_for_rent_handler():\n\n products = ShowProductsAndCustomers()\n my_list = products.see_products_for_rent()\n my_result_list = []\n for product in my_list:\n my_result_list.append(product)\n print(product)\n return my_result_list",
"def __iter__(self):\n product_ids = self.wishlist.keys()\n products = Product.objects.filter(id__in=product_ids)\n for product in products:\n self.wishlist[str(product.id)]['product'] = product\n\n for item in self.wishlist.values():\n yield item",
"def index(self, user):\n\n cart_products = CartProduct.index(user)\n CartProductsView.index(cart_products)",
"def see_favorits(request):\n user_name = request.user\n print(user_name)\n # product = UserFavorite.objects.filter(user_name=user_name)\n list_favorits = UserFavorite.objects.all().filter(user_name=user_name)\n favorits_query = list_favorits\n favorits_list = []\n for favorite in favorits_query:\n favorits_list.append(Product.objects.get(pk=favorite.product.id))\n print(favorits_list)\n context = {\n # 'product' : product,\n 'user_name' : user_name,\n 'product' : favorits_list\n }\n\n\n return render(request,\"favorits.html\",context)",
"def get_all_products(self):\n\t\tpass",
"def productactivate():\n pass",
"def products(request):\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n products = Product.objects.all()\n template = \"auctionsmng/products.html\"\n\n context = {\n 'products': products\n }\n\n return render(request, template, context)",
"def extra_products(self, target):\r\n return []",
"def see_all_different_products_handler():\n\n products = ShowProductsAndCustomers()\n my_list = products.see_all_different_products()\n my_result_list = []\n for product in my_list:\n my_result_list.append(product)\n print(product)\n return my_result_list",
"def item_view_wished(request):\n r = {}\n u = request.user\n\n p = Product.objects.get_by_sku(request.POST['sku'])\n if p is not None:\n #r = p.details(u)\n\n wished = Wishlist.objects.filter(product=p).exclude(party=u)\n r['wished'] = [w.get_json(me=u) for w in wished]\n else:\n r['result'] = '0'\n\n return JSONHttpResponse(r)",
"def my_wishlist_view(request):\n data = {'success': False, 'msg': '', 'wishlist': []}\n if request.method == 'GET':\n # check if the user has already logged in.\n # if user has not logged in, return an error msg to frontend.\n # if user has logged in, let user view his/her wishlist\n if not request.session.get('login_flag', None):\n data['msg'] = 'user does not log in'\n return JsonResponse(data)\n # else use is logged in\n user_name = request.session.get('name', None)\n # return user_obj by user_name from login.models.User database\n try:\n user_obj = login.models.User.objects.get(name=user_name)\n except ObjectDoesNotExist:\n data['msg'] = 'does not have user: ' + str(user_name)\n return JsonResponse(data)\n\n data['success'] = True\n data['msg'] = 'successfully get wishlist of the current user'\n\n movie_id_list = list(\n models.Wish_list.objects.filter(user__exact=user_obj).order_by('movie').values_list('movie_id', flat=True))\n useful_keys = {'mid', 'name', 'region', 'released_date', 'average_rating', 'poster'}\n for mid in movie_id_list:\n movie_obj = models.Movie.objects.get(mid=mid)\n movie_dict = movie_to_dict(movie_obj, request)\n data['wishlist'].append({key: value for key, value in movie_dict.items() if key in useful_keys})\n\n return JsonResponse(data)\n\n else:\n data['msg'] = 'please use GET'\n return JsonResponse(data)",
"def ListProducts(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def browse_hot(request):\n\n result = {}\n\n u = request.user\n\n popular_products = TransactionLineItem.objects.values('product').annotate(num_bought=Count('product')).order_by('-num_bought')[:5] \n popular_wishes = Wishlist.objects.values('product').annotate(num_wishes=Count('product')).order_by('-num_wishes')[:5]\n\n result['bought'] = [Product.objects.get(id=p['product']).details(u) for p in popular_products]\n\n result['wished'] = [Product.objects.get(id=p['product']).details(u) for p in popular_wishes]\n\n return JSONHttpResponse(result)",
"def display(auth_context):\n\n products = product_catalog.list_products()\n # Get promoted products recommended by the AutoML model.\n promos = product_catalog.get_promos()\n return render_template('product_catalog.html',\n products=products,\n promos=promos,\n auth_context=auth_context,\n bucket=product_catalog.BUCKET)",
"def processProductsRequest(self):\n\n\t\t# Use the simple page renderer to create the body content\n\t\treturn self.render_simple_page('Products')",
"def go_product_ingredients_page(self, driver, product_id):\n pass",
"def get_wish_lists():\n flash(\"The Wish list feature is under construction! Please check back soon!\")\n return render_template('index.html')",
"def shop_products(request):\n\n shop = Shop.objects.get(user=request.user)\n products = Products.objects.filter(shop_rel=shop)\n paginator = pagination.PageNumberPagination()\n paginator.page_size = 7\n result_page = paginator.paginate_queryset(products, request=request)\n serializer = ProductSerializer(result_page, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def _getSessionsInWishlist(self):\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n profile = self._getProfileFromUser()\n # Fetch the entities and return them\n return ndb.get_multi(profile.sessionWishlist)",
"def products(self):\r\n return self._products",
"def item_view_bought(request):\n\n r = {}\n u = request.user\n\n p = Product.objects.get_by_sku(request.POST['sku'])\n if p is not None:\n #r = p.details(u)\n\n if u.experiment.id in [1,3]:\n purchases = TransactionLineItem.objects.filter(product=p).exclude(transaction__party=u)\n r['people'] = [pu.transaction.party.get_json() for pu in purchases]\n else:\n purchases = TransactionLineItem.objects.filter(product=p, transaction__party__in=u.friends()).exclude(transaction__party=u)\n r['people'] = [pu.transaction.party.get_json(level=1) for pu in purchases]\n\n #reviews = Review.objects.filter(product=p)\n #r['reviews'] = {'count': str(reviews.count()),\n # 'reviews': [rev.get_json(me=u) for rev in reviews]}\n else:\n r['result'] = '0'\n\n return JSONHttpResponse(r)",
"def add_to_wishlist(request, product_id):\n redirect_url = request.POST.get('redirect_url')\n\n user = get_object_or_404(UserProfile, user=request.user)\n wishlist = Wishlist.objects.get_or_create(user=user)\n wishlist_user = wishlist[0]\n\n product = Product.objects.get(pk=product_id)\n if request.POST:\n existingWishlistItem = WishlistItem.objects.filter(\n wishlist=wishlist_user, product=product).exists()\n if existingWishlistItem:\n messages.error(request, \"Item already in your wishlist\")\n return redirect(redirect_url)\n\n else:\n added_item = WishlistItem(\n wishlist=wishlist_user, product=product, date_added=timezone.now())\n added_item.save()\n messages.success(request, \"Product added to your wishlist\")\n return redirect(redirect_url)\n else:\n messages.error(request, \"Click 'Add to wishlist' to add a item \")\n return render(request, 'home/index.html')",
"def products_list(driver, login_action, open_products_page, products_page, logger):\n try:\n return products_page.all_products_list()\n except logger.on_exception(exception, driver):\n print(exception)",
"def grab_mApe_wishList(id_string) :\n returned_data = \"\"\n mape_wishList_url = 'https://www.mightyape.co.nz/wishlist/'+id_string\n print(\"Grabbing results from:\",mape_wishList_url)\n page = requests.get(mape_wishList_url)\n print(\"Status:\",page.status_code)\n #print(page.content)\n tree = html.fromstring(page.content)\n\n products = tree.xpath('//div[@class=\"product wishlist-item\"]') # <--- WORKS\n\n lineNum = 1\n for product in products:\n base_xpath = product.xpath('div[@class=\"detail\"]/a')\n alt_text = base_xpath[0].text+\" \"\n url = product.xpath('div[@class=\"detail\"]/a/@href')[0]+\" \"\n title = product.xpath('div[@class=\"detail\"]/a')[0].text+\" \"\n format = product.xpath('div[@class=\"detail\"]/div[@class=\"format\"]')[0].text + \" \"\n image_url_sml = product.xpath('div[@class=\"item\"]/div[@class=\"image\"]/a/img/@src')\n #print(\"[Image url]\",image_url_sml,format)\n #image_url_lrg = product.xpath('div[@class=\"detail\"]/a/@href')[0] + \" \"\n\n current_price = product.xpath('div[@class=\"pricing\"]/div[@class=\"product-price\"]/span')[0].text+\" \"\n previous_price = product.xpath('div[@class=\"pricing\"]/div[@class=\"saving\"]/s')\n if len(previous_price) > 0 :\n formatted_prev_price = float(str(previous_price[0].text).replace(\"$\",\"\"))\n price_difference = formatted_prev_price - float(str(current_price).replace(\"$\",\"\"))\n percent_off = price_difference/formatted_prev_price * 100\n formatted_price_difference = str(round(price_difference,2))+\" \"\n formatted_percent_off = str(round(percent_off,2))+\"%\"\n this_line = str('<p id=\"line'+str(lineNum)+'\" class=\"output-data\"> ') + str(title)+str(url)+str(alt_text)+\"~ On sale \"+str(current_price)+\"reduced from $\"+str(formatted_prev_price)+\" saving of $\"+str(formatted_price_difference)+str(formatted_percent_off)+\" off </p>\"\n #print(title, url, alt_text,\"~ On sale\", current_price,\"reduced from \"+previous_price[0].text, \"saving of $\"+str(formatted_price_difference),str(formatted_percent_off)+\" off\")\n print(this_line)\n returned_data = returned_data + this_line\n else :\n this_line = str('<p id=\"line'+str(lineNum)+'\" class=\"output-data\"> ') + str(title)+str(url)+str(alt_text)+str(current_price)+\"Normal Price </p>\"\n #print(title, url, alt_text, current_price, \"Normal Price\")\n print(this_line)\n returned_data = returned_data + this_line\n lineNum += 1\n\n return returned_data",
"def recommend_products(request):\n response, status_code = get_recommend_products(request)\n if status_code != 200:\n return JsonResponse(response, status=status_code)\n else:\n serialize_data = ProductSerializer(response, many=True).data\n return JsonResponse(serialize_data, status=200, safe=False)",
"def test_list_available_product(self):\n view = AvailableProductListView.as_view({'get': 'list'})\n uri = reverse('products:list-available-products')\n request = self.factory.get(uri, HTTP_AUTHORIZATION='Token {}'.format(self.token_user.key))\n request.user = self.user['user']\n response = view(request)\n self.assertEqual(response.status_code, 200,\n f'Expected Response Code 200, received {response.status_code} instead.')",
"def remove_from_wishlist(request, product_id):\n\n redirect_url = request.POST.get('redirect_url')\n\n user = get_object_or_404(UserProfile, user=request.user)\n wishlist = Wishlist.objects.get_or_create(user=user)\n wishlist_user = wishlist[0]\n if request.POST:\n product = Product.objects.get(pk=product_id)\n\n # look for item in the user's wishlistItem - returns true if it exists\n existingWishlistItem = WishlistItem.objects.filter(\n product=product).exists()\n\n if existingWishlistItem:\n product = WishlistItem.objects.get(product=product)\n product.delete()\n messages.success(request, \"Item removed from wishlist\")\n return redirect(redirect_url)\n\n if existingWishlistItem is None:\n messages.error(\n request, \"You can not delete a item thats not in the wishlist\")\n return redirect(redirect_url)\n else:\n messages.error(request, 'Item can not be deleted from your wishlist')\n return render(request, 'home/index.html')",
"def add_to_wishlist(request, product_id):\n product = get_object_or_404(Product, pk=product_id)\n wishlist = get_object_or_404(Wishlist, user=request.user)\n\n if product not in wishlist.products.all():\n wishlist.products.add(product)\n messages.info(request,\n f\"{product.name} has been added to your wishlist.\")\n else:\n messages.error(request,\n \"Error, you already have this item in your wishlist!\")\n return redirect(reverse(\"product_detail\", args=[product_id]))"
]
| [
"0.68520015",
"0.64964706",
"0.64570814",
"0.64333427",
"0.6225109",
"0.62219924",
"0.6135836",
"0.59208906",
"0.586585",
"0.5846289",
"0.58390373",
"0.58213365",
"0.57715636",
"0.5741449",
"0.56791115",
"0.5651308",
"0.5636304",
"0.5611469",
"0.5594619",
"0.55830675",
"0.55456823",
"0.55428064",
"0.5538935",
"0.55253434",
"0.5520183",
"0.5513082",
"0.550987",
"0.5501672",
"0.54995066",
"0.54946643"
]
| 0.6923329 | 0 |
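The `user_wishlist` function in the row above is a Django context processor: it only returns a dict, and Django injects that dict into every template once the function is registered under the template engine's `context_processors` option. A minimal sketch of that registration is shown below; the module path `wishlist.contexts.user_wishlist` is an assumption for illustration, not something stated in the row.

```python
# settings.py -- hypothetical project settings, trimmed to the relevant option.
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                # Assumed dotted path to the user_wishlist function shown above.
                "wishlist.contexts.user_wishlist",
            ],
        },
    },
]
```

With that entry in place, templates can read `{{ wishlist }}` and `{{ page_range }}` directly, which is what the paginated context returned above is intended for.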
Unquantize the weight before updating it, to avoid training turbulence. | def unquant_weight(m):
    global _QUANT_HANDLE
    try:
        # Restore the full-precision weight that was saved before quantization.
        m.weight.data = m.weight_origin
    except AttributeError:
        pass
    except TypeError:
        pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_weights(self):\n\t\tpass",
"def normalize_weight(self, Z):\n self.weight /= Z",
"def update_weights(self):\n self._weights = self._weights + self.update_weights_value\n self.weights_clipping()",
"def update_weights(self):\n\n self.weights -= self.loss_grads\n self.loss_grads = np.zeros(self.weights.shape)",
"def normalize_weights(self):\n total_weight = sum(self.weights)\n self.norm_weights = self.weights / float(total_weight)",
"def update_weights(self):\n self._weights = self._weights + self.update_weights_value",
"def _weight_changed(self, value):\r\n # update internal data\r\n self._weight = value",
"def test_weight_decrease(self):\n new_weight = (1 - 0.05) * self.herb.weight\n self.herb.weightloss()\n nt.assert_equal(round(self.herb.weight, 7), round(new_weight, 7))",
"def updateWeights(self,weightUpdate):\n\t\n\t\tbranches = self.collectAllBranches()\n\n\t\tfor i in range(self.nBranches):\n\n\t\t\tbranches[i].weight -= weightUpdate[i]",
"def normalizeWeights(self):\n for wt in self.weights:\n wt[wt>1] = 1\n wt[wt<-1] = -1\n for bs in self.bias:\n bs[bs>1] = 1\n bs[bs<-1] = -1",
"def reset_weights(self):\r\n self._weights = deepcopy(self._tmp_weights)\r\n self._tmp_weights = None",
"def normalize_weights(self):\n \n # Set negative weights to zero\n # Normalize to sum to one.\n \n\n\n self.new_weight=[]\n for i in self._weights:\n if any(i < 0 for i in self._weights):\n self.new_weight = [0,1]\n\n elif all(i == 0 for i in self._weights):\n i = 1/len(self._weights)\n self.new_weight.append(i)\n else:\n i = i/sum(self._weights)\n self.new_weight.append(i)\n\n # If the weights are all zeros, set weights equal to 1/k, where k is the number\n # of components.\n self._weights = self.new_weight\n self._weights = np.round(self._weights,3)",
"def update_weights_negative(self):\n eta = self.config.eta\n self.w_xh -= eta * (self.x.T @ self.h)\n self.w_th -= eta * (self.t.T @ self.h)\n self.w_ho -= eta * (self.h.T @ self.o) \n self.w_hz -= eta * (self.h.T @ self.z)",
"def update_weights(self, example):\n pred = self.predict(example)\n if pred != example.label:\n self.weights[example.label] = self.weights[example.label] + example.fvector\n self.weights[pred] = self.weights[pred] - example.fvector",
"def update_weight(self,ctr,new_weight):\n self.sum1 -= self.data_set[ctr].weight\n self.data_set[ctr].weight = new_weight\n self.sum1 += new_weight",
"def reset_weights(self):\n self.head.reset_weights()",
"def update_w(self, w):\n # Need to update the scaled weights\n if self.scale_weights:\n self._scale_weights_to_degree(w)\n self._generate_weighted_adj_matrices()\n # once we get new DW matrices, multiply by weights\n super().update_w(w)\n self._degree_weight_weighted_matrices()",
"def _reweight(self):\n self._seed_weights = [self._graph.degree(seed) for seed in self._seeds]\n weight_sum = np.sum(self._seed_weights)\n self._seed_weights = [float(weight)/weight_sum for weight in self._seed_weights]",
"def _update_global_weights(self, global_weights):\n self.global_transform.from_vector_inplace(global_weights)",
"def modify_weights_after_load(model):\n # Prune heads if needed\n if model.config.pruned_heads:\n model.prune_heads(model.config.pruned_heads)\n\n # Tie weights if needed\n model.tie_weights()",
"def _recalculate(self):\n self.weighted_variants = [n for n in self.variants\n for i in range(self.variants[n])]\n default = self.control\n default_weight = self.variants[self.control]\n for name, weight in self.variants.items():\n if weight > default_weight:\n default, default_weight = name, weight\n self.default_variant = default",
"def normalize_weights(self, w):\n n = w.astype(np.float64, copy=True)\n c = float(np.sum(w))\n n /= c\n return n",
"def change_weight(self, new_weight):\r\n self.old_weight = self.weight\r\n self.weight = new_weight",
"def update_weights(self):\r\n\r\n inedges=self.in_edges\r\n for edge in inedges:\r\n weight=edge.weight+self.learning_rate*self.delta*(edge.source.activation)\r\n edge.change_weight(weight)",
"def test_weight_decrease_carn(self):\n new_weight = (1 - 0.125) * self.carn.weight\n self.carn.weightloss()\n nt.assert_equal(self.carn.weight, new_weight)",
"def calculate_prep_weight(weight, size):\n r = find_recovery_on_size(size)\n return weight / r",
"def update_weights(self):\n\n\n self.w += self.learn_rate * (self.X.T.dot(self.T - self.Y)\n - self.reg_L1 * np.sign(self.w)\n - self.reg_L2 * 2*self.w)",
"def update_weights(self):\n activation, activation_p = self.feedforward()\n # initialize delta_weights\n delta_w = np.zeros(2)\n # simultaneous calculate delta_weights\n for i, element in enumerate(self.y):\n delta_w += (activation[i]-element)*(activation_p[i])*self.X[i]\n # update weight\n self.weights -= self.alfa*delta_w",
"def restore(self):\n self.weight = self._backup_weight",
"def reset_weights(self):\n self.policy_backbone.reset_weights()\n self.value_backbone.reset_weights()\n self.action_head.reset_weights()\n self.critic_head.reset_weights()"
]
| [
"0.69327414",
"0.69287616",
"0.6750582",
"0.66629714",
"0.6540235",
"0.64524895",
"0.63917345",
"0.63575107",
"0.6343039",
"0.63287055",
"0.6307151",
"0.63064945",
"0.62739104",
"0.6245761",
"0.6244238",
"0.61442715",
"0.61392426",
"0.60856277",
"0.6049837",
"0.6031831",
"0.60162383",
"0.59733486",
"0.59556055",
"0.5922227",
"0.5908607",
"0.58914727",
"0.58854616",
"0.5845082",
"0.5823457",
"0.5807758"
]
| 0.7209583 | 0 |
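A hook like `unquant_weight` above is usually paired with a `quant_weight` counterpart and applied to every sub-module around the optimizer step, so gradients computed on quantized weights update the stored full-precision copies. The sketch below shows one plausible placement using `torch.nn.Module.apply`; the `quant_weight` helper and the overall loop structure are assumptions for illustration, not taken from the row above.

```python
def train_step(model, batch, loss_fn, optimizer, quant_weight, unquant_weight):
    """One quantization-aware training step (illustrative sketch)."""
    inputs, targets = batch
    loss = loss_fn(model(inputs), targets)  # forward/backward run on quantized weights
    optimizer.zero_grad()
    loss.backward()
    model.apply(unquant_weight)  # swap the full-precision weights back in
    optimizer.step()             # the update lands on the full-precision copy
    model.apply(quant_weight)    # re-quantize (and re-save weight_origin) for the next step
    return loss.item()
```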
Test GFPG library QuantAndDeQuantGPU. | def test():
    quant_handle = QuantAndDeQuantGPU()
    import torch
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    tensor = torch.Tensor(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])).cuda()
    logging.info("Origin Data: ")
    logging.info(tensor)
    start_time = datetime.datetime.now()
    quant_tensor = quant_handle(tensor)
    end_time = datetime.datetime.now()
    logging.info("Quant Data: ")
    logging.info(quant_tensor)
    data_expected = np.array([
        0.0000000000, 1.0000000000, 2.0000000000, 2.9536523819, 4.0000000000,
        4.9674310684, 5.9073047638, 7.0250086784, 8.0000000000, 8.7240619659
    ])
    logging.info("Data expected: ")
    logging.info(" ".join([str(v) for v in data_expected]))
    data_diff = quant_tensor.data.detach().cpu().numpy() - data_expected
    flag = "success."
    for num in data_diff:
        if abs(num) > 0.000000001:
            flag = "failed."
    run_time = end_time - start_time
    logging.info("QuantAndDeQuantGPU time: %s", str(run_time))
    logging.info("QuantAndDeQuantGPU %s", flag) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_qc_quantize_op_gpu(self):\n graph = tf.Graph()\n config = tf.compat.v1.ConfigProto(log_device_placement=False)\n sess = tf.compat.v1.Session(graph=graph, config=config)\n bitwidth = 8\n use_symm_encoding = False\n with graph.as_default():\n\n inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')\n tensor_quantizer = libpymo.TensorQuantizer(libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED,\n libpymo.RoundingMode.ROUND_NEAREST)\n tensor_quantizer_val = libpymo.PtrToInt64(tensor_quantizer)\n tensor_quant_ref = tf.Variable(initial_value=tensor_quantizer_val, trainable=False, dtype=tf.int64)\n\n mode_var = tf.Variable(initial_value=int(libpymo.TensorQuantizerOpMode.updateStats),\n trainable=False, dtype=tf.int32)\n\n encoding_min = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)\n encoding_max = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)\n bit_width = tf.Variable(initial_value=bitwidth, trainable=False, dtype=tf.int8)\n use_symmetric_encoding = tf.Variable(initial_value=use_symm_encoding, trainable=False, dtype=tf.bool)\n is_int_data_type = tf.Variable(initial_value=True, trainable=False, dtype=tf.bool)\n\n sess.run([mode_var.initializer, tensor_quant_ref.initializer, encoding_min.initializer,\n encoding_max.initializer, bit_width.initializer, use_symmetric_encoding.initializer,\n is_int_data_type.initializer])\n\n # place holder for the input\n with tf.device(\"/device:GPU:0\"):\n\n pass_through_op_output = zero_out_module.qc_quantize(name='quant_op', in_tensor=inp,\n op_mode=mode_var,\n tensor_quantizer_reference=tensor_quant_ref,\n encoding_min=encoding_min,\n encoding_max=encoding_max,\n bit_width=bit_width,\n use_symmetric_encoding=use_symmetric_encoding,\n is_int_data_type=is_int_data_type)\n\n inp_tensor = sess.graph.get_tensor_by_name('input:0')\n inp_data = np.random.rand(10)\n\n # get the output\n\n print(\"inp_data\", inp_data)\n with tf.device(\"/device:GPU:0\"):\n out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})\n print(\"out_data\", out_data)\n\n # compare qc_quantize op's output with input\n assert np.allclose(out_data, inp_data)\n\n # compute encodings\n assert not tensor_quantizer.isEncodingValid\n encoding = tensor_quantizer.computeEncoding(bitwidth, use_symm_encoding)\n assert tensor_quantizer.isEncodingValid\n print('min=', encoding.min, ', max=', encoding.max)\n\n # get the output\n inp_data = np.random.rand(10) * 2\n print(\"inp_data\", inp_data)\n mode_var.load(int(libpymo.TensorQuantizerOpMode.quantizeDequantize), sess)\n with tf.device(\"/device:GPU:0\"):\n out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})\n print(\"out_data\", out_data)\n\n # compare qc_quantize op's output with input\n assert not np.allclose(out_data, inp_data)\n\n sess.close()",
"def test_qc_quantize_op_gpu_fp16(self):\n graph = tf.Graph()\n config = tf.compat.v1.ConfigProto(log_device_placement=False)\n sess = tf.compat.v1.Session(graph=graph, config=config)\n bitwidth = 8\n use_symm_encoding = False\n with graph.as_default():\n inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')\n tensor_quantizer = libpymo.TensorQuantizer(libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED,\n libpymo.RoundingMode.ROUND_NEAREST)\n tensor_quantizer_val = libpymo.PtrToInt64(tensor_quantizer)\n tensor_quant_ref = tf.Variable(initial_value=tensor_quantizer_val, trainable=False, dtype=tf.int64)\n\n mode_var = tf.Variable(initial_value=int(libpymo.TensorQuantizerOpMode.quantizeDequantize),\n trainable=False, dtype=tf.int32)\n\n encoding_min = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)\n encoding_max = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)\n bit_width = tf.Variable(initial_value=bitwidth, trainable=False, dtype=tf.int8)\n use_symmetric_encoding = tf.Variable(initial_value=use_symm_encoding, trainable=False, dtype=tf.bool)\n is_int_data_type = tf.Variable(initial_value=False, trainable=False, dtype=tf.bool)\n\n sess.run([mode_var.initializer, tensor_quant_ref.initializer, encoding_min.initializer,\n encoding_max.initializer, bit_width.initializer, use_symmetric_encoding.initializer,\n is_int_data_type.initializer])\n\n # place holder for the input\n with tf.device(\"/device:GPU:0\"):\n pass_through_op_output = zero_out_module.qc_quantize(name='quant_op', in_tensor=inp,\n op_mode=mode_var,\n tensor_quantizer_reference=tensor_quant_ref,\n encoding_min=encoding_min,\n encoding_max=encoding_max,\n bit_width=bit_width,\n use_symmetric_encoding=use_symmetric_encoding,\n is_int_data_type=is_int_data_type)\n\n inp_tensor = sess.graph.get_tensor_by_name('input:0')\n inp_data = np.array([0.78027299, 0.44164284, 0.6942797, 0.69774088, 0.55863863, 0.29553034, 0.219199,\n 0.09483732, 0.55075674, 0.6348504], dtype=np.float32)\n\n out_exp = np.array([0.78027344, 0.4416504, 0.69433594, 0.6977539, 0.55859375, 0.29541016, 0.21923828,\n 0.09484863, 0.55078125, 0.6347656], dtype=np.float32)\n\n print(\"inp_data\", inp_data)\n with tf.device(\"/device:GPU:0\"):\n out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})\n print(\"out_data\", out_data)\n assert np.allclose(out_data, out_exp)\n sess.close()",
"def test_gpu_cuda_code() -> None:\n if get_from_environ(\"DISABLE_GPU_FOR_TESTING\") is not None:\n print(\"GPU payload disabled for testing\")\n return\n\n # if the command exists it can run on the hardware below\n proc = subprocess.Popen([\"nvidia-smi\"], stdout=subprocess.PIPE)\n stdout, _ = proc.communicate()\n str_stdout = stdout.decode()\n assert \"NVIDIA-SMI\" in str_stdout, str_stdout\n assert proc.returncode == 0\n # search the history for the CUDA implementation",
"def __call__(self, tensor, mode=0):\n\n return tensor\n data_cuda_array = cuda.as_cuda_array(tensor.data.detach())\n data_p = data_cuda_array.device_ctypes_pointer\n self._param.mode = mode\n ret = self._libquant.HI_GFPQ_QuantAndDeQuant_GPU_PY(\n data_p, data_cuda_array.size, self._bit_width,\n ctypes.byref(self._param), self._stream.handle,\n self._cublas_handle)\n assert ret == 0, \"HI_GFPQ_QuantAndDeQuant failed(%d)\\n\" % (ret)\n return tensor",
"def test_qc_quantize_static_op_gpu(self):\n graph = tf.Graph()\n config = tf.compat.v1.ConfigProto(log_device_placement=False)\n sess = tf.compat.v1.Session(graph=graph, config=config)\n\n with graph.as_default():\n # place holder for the input\n with tf.device(\"/device:GPU:0\"):\n inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')\n\n pass_through_op_output = zero_out_module.qc_quantize_static(name='quant_op', in_tensor=inp,\n encoding_min=-1.0,\n encoding_max=1.0,\n bitwidth=8,\n quant_scheme=libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED,\n op_mode=libpymo.TensorQuantizerOpMode.passThrough,\n is_symmetric=False)\n\n inp_tensor = sess.graph.get_tensor_by_name('input:0')\n inp_data = np.random.rand(10).astype(np.float32)\n\n # get the output\n print(inp_data)\n out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})\n print(out_data)\n\n # compare qc_quantize op's output with input\n assert np.allclose(out_data, inp_data, atol=1e-6)\n sess.close()\n\n graph = tf.Graph()\n config = tf.compat.v1.ConfigProto(log_device_placement=False)\n sess = tf.compat.v1.Session(graph=graph, config=config)\n\n with graph.as_default():\n # place holder for the input\n with tf.device(\"/device:GPU:0\"):\n inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')\n\n pass_through_op_output = zero_out_module.qc_quantize_static(name='quant_op', in_tensor=inp,\n encoding_min=-1.0,\n encoding_max=0.5,\n bitwidth=8,\n quant_scheme=libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED,\n op_mode=libpymo.TensorQuantizerOpMode.quantizeDequantize,\n is_symmetric=False)\n\n inp_tensor = sess.graph.get_tensor_by_name('input:0')\n inp_data = np.random.rand(10).astype(np.float32)\n\n # get the output\n print(inp_data)\n out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})\n print(out_data)\n\n # compare qc_quantize op's output with input\n assert not np.allclose(out_data, inp_data, atol=1e-1)\n sess.close()\n\n graph = tf.Graph()\n config = tf.compat.v1.ConfigProto(log_device_placement=False)\n sess = tf.compat.v1.Session(graph=graph, config=config)\n\n with graph.as_default():\n # place holder for the input\n with tf.device(\"/device:GPU:0\"):\n inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')\n\n pass_through_op_output = zero_out_module.qc_quantize_static(name='quant_op', in_tensor=inp,\n encoding_min=-1.0,\n encoding_max=1.0,\n bitwidth=8,\n quant_scheme=libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED,\n op_mode=libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize,\n is_symmetric=False)\n\n inp_tensor = sess.graph.get_tensor_by_name('input:0')\n inp_data = np.random.rand(10).astype(np.float32)\n\n # get the output\n print(inp_data)\n out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})\n print(out_data)\n\n # compare qc_quantize op's output with input\n assert not np.allclose(out_data, inp_data, atol=1e-3)\n\n sess.close()",
"def benchmark_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(num_gpus=1)\n self._run_benchmark(params)",
"def test_qc_quantize_op_cpu_fp16_quantize_dequantize(self):\n graph = tf.Graph()\n config = tf.compat.v1.ConfigProto(log_device_placement=False)\n sess = tf.compat.v1.Session(graph=graph, config=config)\n bitwidth = 8\n use_symm_encoding = True\n\n with graph.as_default():\n # place holder for the input\n with tf.device(\"/device:CPU:0\"):\n inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')\n tensor_quantizer = libpymo.TensorQuantizer(libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED,\n libpymo.RoundingMode.ROUND_NEAREST)\n tensor_quantizer_val = libpymo.PtrToInt64(tensor_quantizer)\n tensor_quant_ref = tf.Variable(initial_value=tensor_quantizer_val, trainable=False, dtype=tf.int64)\n\n encoding_min = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)\n encoding_max = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)\n bit_width = tf.Variable(initial_value=bitwidth, trainable=False, dtype=tf.int8)\n use_symmetric_encoding = tf.Variable(initial_value=use_symm_encoding, trainable=False, dtype=tf.bool)\n is_int_data_type = tf.Variable(initial_value=False, trainable=False, dtype=tf.bool)\n mode_var = tf.Variable(initial_value=int(libpymo.TensorQuantizerOpMode.quantizeDequantize),\n trainable=False, dtype=tf.int32)\n\n sess.run([mode_var.initializer, tensor_quant_ref.initializer, encoding_min.initializer,\n encoding_max.initializer, bit_width.initializer, use_symmetric_encoding.initializer,\n is_int_data_type.initializer])\n\n pass_through_op_output = zero_out_module.qc_quantize(name='quant_op', in_tensor=inp,\n op_mode=mode_var,\n tensor_quantizer_reference=tensor_quant_ref,\n encoding_min=encoding_min,\n encoding_max=encoding_max,\n bit_width=bit_width,\n use_symmetric_encoding=use_symmetric_encoding,\n is_int_data_type=is_int_data_type)\n\n inp_tensor = sess.graph.get_tensor_by_name('input:0')\n\n inp_data = np.array([0.78027299, 0.44164284, 0.6942797, 0.69774088, 0.55863863, 0.29553034, 0.219199,\n 0.09483732, 0.55075674, 0.6348504], dtype=np.float32)\n\n out_exp = np.array([0.78027344, 0.4416504, 0.69433594, 0.6977539, 0.55859375, 0.29541016, 0.21923828,\n 0.09484863, 0.55078125, 0.6347656], dtype=np.float32)\n\n # get the output\n print(\"inp_data\", inp_data)\n out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})\n print(\"out_data\", out_data)\n\n # compare qc_quantize op's output with expected output\n assert np.allclose(out_data, out_exp)\n\n sess.close()",
"def benchmark_fp16_synth_1gpu_gpuparams(self):\n params = self._shared_params_fp16()._replace(num_gpus=1)\n self._run_benchmark(params)",
"def test_qc_quantize_op_cpu(self):\n graph = tf.Graph()\n config = tf.compat.v1.ConfigProto(log_device_placement=False)\n sess = tf.compat.v1.Session(graph=graph, config=config)\n bitwidth = 8\n use_symm_encoding = True\n\n with graph.as_default():\n # place holder for the input\n with tf.device(\"/device:CPU:0\"):\n inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')\n tensor_quantizer = libpymo.TensorQuantizer(libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED,\n libpymo.RoundingMode.ROUND_NEAREST)\n tensor_quantizer_val = libpymo.PtrToInt64(tensor_quantizer)\n tensor_quant_ref = tf.Variable(initial_value=tensor_quantizer_val, trainable=False, dtype=tf.int64)\n\n encoding_min = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)\n encoding_max = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)\n bit_width = tf.Variable(initial_value=bitwidth, trainable=False, dtype=tf.int8)\n use_symmetric_encoding = tf.Variable(initial_value=use_symm_encoding, trainable=False, dtype=tf.bool)\n is_int_data_type = tf.Variable(initial_value=True, trainable=False, dtype=tf.bool)\n\n mode_var = tf.Variable(initial_value=int(libpymo.TensorQuantizerOpMode.updateStats),\n trainable=False, dtype=tf.int32)\n\n sess.run([mode_var.initializer, tensor_quant_ref.initializer, encoding_min.initializer,\n encoding_max.initializer, bit_width.initializer, use_symmetric_encoding.initializer,\n is_int_data_type.initializer])\n\n pass_through_op_output = zero_out_module.qc_quantize(name='quant_op', in_tensor=inp,\n op_mode=mode_var,\n tensor_quantizer_reference=tensor_quant_ref,\n encoding_min=encoding_min,\n encoding_max=encoding_max,\n bit_width=bit_width,\n use_symmetric_encoding=use_symmetric_encoding,\n is_int_data_type=is_int_data_type)\n\n inp_tensor = sess.graph.get_tensor_by_name('input:0')\n inp_data = np.random.rand(10)\n\n # get the output\n print(\"inp_data\", inp_data)\n out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})\n print(\"out_data\", out_data)\n\n # compare qc_quantize op's output with input\n assert np.allclose(out_data, inp_data)\n\n # compute encodings\n assert not tensor_quantizer.isEncodingValid\n encoding = tensor_quantizer.computeEncoding(bitwidth, use_symm_encoding)\n assert tensor_quantizer.isEncodingValid\n print('min=', encoding.min, ', max=', encoding.max)\n\n # get the output\n inp_data = np.random.rand(10) * 2\n print(inp_data)\n mode_var.load(int(libpymo.TensorQuantizerOpMode.quantizeDequantize), sess)\n out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})\n print(out_data)\n\n # compare qc_quantize op's output with input\n assert not np.allclose(out_data, inp_data)\n sess.close()",
"def benchmark_fake_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, data_dir=self.fake_data_dir, data_name='imagenet')\n self._run_benchmark(params)",
"def test_qc_quantize_op_oneshot_cpu(self):\n graph = tf.Graph()\n config = tf.compat.v1.ConfigProto(log_device_placement=False)\n sess = tf.compat.v1.Session(graph=graph, config=config)\n bitwidth = 8\n use_symm_encoding = False\n with graph.as_default():\n # place holder for the input\n with tf.device(\"/device:CPU:0\"):\n inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')\n tensor_quantizer = libpymo.TensorQuantizer(libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED,\n libpymo.RoundingMode.ROUND_NEAREST)\n tensor_quantizer_val = libpymo.PtrToInt64(tensor_quantizer)\n tensor_quant_ref = tf.Variable(initial_value=tensor_quantizer_val, trainable=False, dtype=tf.int64)\n\n mode_var = tf.Variable(initial_value=int(libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize),\n trainable=False, dtype=tf.int32)\n\n encoding_min = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)\n encoding_max = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)\n bit_width = tf.Variable(initial_value=bitwidth, trainable=False, dtype=tf.int8)\n use_symmetric_encoding = tf.Variable(initial_value=use_symm_encoding, trainable=False, dtype=tf.bool)\n is_int_data_type = tf.Variable(initial_value=True, trainable=False, dtype=tf.bool)\n\n sess.run([mode_var.initializer, tensor_quant_ref.initializer, encoding_min.initializer,\n encoding_max.initializer, bit_width.initializer, use_symmetric_encoding.initializer,\n is_int_data_type.initializer])\n\n pass_through_op_output = zero_out_module.qc_quantize(name='quant_op', in_tensor=inp,\n op_mode=mode_var,\n tensor_quantizer_reference=tensor_quant_ref,\n encoding_min=encoding_min,\n encoding_max=encoding_max,\n bit_width=bit_width,\n use_symmetric_encoding=use_symmetric_encoding,\n is_int_data_type=is_int_data_type)\n\n inp_tensor = sess.graph.get_tensor_by_name('input:0')\n inp_data = np.random.rand(10) * 256\n\n # get the output\n print(inp_data)\n out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})\n print(out_data)\n\n assert tensor_quantizer.isEncodingValid\n encoding = tensor_quantizer.computeEncoding(bitwidth, use_symm_encoding)\n\n print('min=', encoding.min, ', max=', encoding.max)\n\n # compare qc_quantize op's output with input\n assert not np.allclose(out_data, inp_data)\n\n sess.close()",
"def benchmark_fp16_fake_1gpu_gpuparams(self):\n params = self._shared_params_fp16()._replace(\n num_gpus=1, data_dir=self.fake_data_dir, data_name='imagenet')\n self._run_benchmark(params)",
"def test_svm_quantique():\n backend = BasicAer.get_backend('statevector_simulator')\n random_seed = 10598\n\n quantum_instance = QuantumInstance(backend, seed=random_seed, seed_transpiler=random_seed)\n pres = \"Test pour des donnรฉes gรฉnรฉrรฉes par ordinateur quantique (facile, quantique)\"\n print(pres)\n _, samp_train, samp_test, labels = ad_hoc_data(15, 10, 2, 0.3, True)\n sample_m, sample_p = stock_get(20, 0.3)\n\n labels_me = [-1, 1]\n samp_train_me = {-1: np.array(sample_m[:15]), 1: np.array(sample_p[:15])}\n samp_test_me = {-1: np.array(sample_m[15:]), 1: np.array(sample_p[15:])}\n print(samp_train)\n print(samp_train_me)\n print(samp_test)\n print(samp_test_me)\n classical_kernel_estimation(samp_train, samp_test, labels)\n classical_kernel_estimation(samp_train_me, samp_test_me, labels_me)\n\n # Generate the feature map\n feature_map = FirstOrderExpansion(feature_dimension=2, depth=2)\n\n # Run the Quantum Kernel Estimator and classify the test data\n qsvm = QSVM(feature_map=feature_map, training_dataset=samp_train,\n test_dataset=samp_test)\n qsvm_me = QSVM(feature_map=feature_map, training_dataset=samp_train_me,\n test_dataset=samp_test_me)\n\n result = qsvm.run(quantum_instance)\n result_me = qsvm_me.run(quantum_instance)\n print(\"Success of the FirstOrder feature map kernel:\")\n print(result['testing_accuracy'])\n print(result_me['testing_accuracy'])\n\n # Generate the feature map\n feature_map = SecondOrderExpansion(feature_dimension=2, depth=2)\n\n # Run the Quantum Kernel Estimator and classify the test data\n qsvm = QSVM(feature_map=feature_map, training_dataset=samp_train,\n test_dataset=samp_test)\n qsvm_me = QSVM(feature_map=feature_map, training_dataset=samp_train_me,\n test_dataset=samp_test_me)\n\n result = qsvm.run(quantum_instance)\n result_me = qsvm_me.run(quantum_instance)\n print(\"Success of the SecondOrder feature map kernel:\")\n print(result['testing_accuracy'])\n print(result_me['testing_accuracy'])\n\n # Last implementation using the custom circuit generator\n print(\"Success for my implementation (second order):\")\n my_impl(samp_train, samp_test, labels)\n my_impl(samp_train_me, samp_test_me, labels_me)\n\n feature_map = CustomExpansion(num_qubits=2, constructor_function=custom_constr, feature_param=[1])\n\n qsvm = QSVM(feature_map=feature_map, training_dataset=samp_train,\n test_dataset=samp_test)\n qsvm_me = QSVM(feature_map=feature_map, training_dataset=samp_train_me,\n test_dataset=samp_test_me)\n\n result = qsvm.run(quantum_instance)\n result_me = qsvm_me.run(quantum_instance)\n print(\"Success of the Custom feature map kernel:\")\n print(result['testing_accuracy'])\n print(result_me['testing_accuracy'])",
"def test_qpu_0_shots():\n _aws_device(wires=2, shots=0)",
"def benchmark_fp16_fake_1gpu_gpuparams(self):\n params = self._shared_params_fp16()._replace(\n num_gpus=1,\n data_dir=self.fake_data_dir,\n data_name='imagenet',\n variable_update='parameter_server')\n self._run_benchmark(params)",
"def benchmark_fp16_batch128_synth_1gpu_gpuparams(self):\n params = self._shared_params_fp16()._replace(num_gpus=1, batch_size=128)\n self._run_benchmark(params)",
"def test_fastqc():\n fastqc.FastQC(\"fastqc\")",
"def setup(self, num_qubit, fusion_enable, use_cu1):",
"def benchmark_fp16_xla_synth_1gpu_gpuparams(self):\n params = self._shared_params_fp16()._replace(num_gpus=1, xla=True)\n self._run_benchmark(params)",
"def benchmark_fp16_synth_1gpu_gpuparams(self):\n params = self._shared_params_fp16()._replace(\n num_gpus=1, variable_update='parameter_server')\n self._run_benchmark(params)",
"def benchmark_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, variable_update='parameter_server')\n self._run_benchmark(params)",
"def benchmark_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, variable_update='parameter_server')\n self._run_benchmark(params)",
"def benchmark_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, variable_update='parameter_server')\n self._run_benchmark(params)",
"def cuda_test():\n # This flag enable the inbuilt cudnn auto-tuner\n torch.backends.cudnn.benchmark = True\n\n print('\\n__Python VERSION :', sys.version)\n print('__pyTorch VERSION :', torch.__version__)\n print('__CUDA VERSION : ', torch.version.cuda)\n print('__CUDNN VERSION : ', torch.backends.cudnn.version())\n print('__Number CUDA Devices : ', torch.cuda.device_count())\n print('__Devices : ')\n\n call([\"nvidia-smi\", \"--format=csv\", \n \"--query-gpu=index,name,driver_version,memory.total,memory.used,memory.free\"])\n\n print('Active CUDA Device: GPU', torch.cuda.current_device())\n print ('Available devices ', torch.cuda.device_count())\n print ('Current cuda device ', torch.cuda.current_device())\n\n return torch.cuda.is_available()",
"def benchmark_fp16_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, use_fp16=True, variable_update='parameter_server')\n self._run_benchmark(params)",
"def benchmark_fp16_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, use_fp16=True, variable_update='parameter_server')\n self._run_benchmark(params)",
"def benchmark_fp16_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, use_fp16=True, variable_update='parameter_server')\n self._run_benchmark(params)",
"def benchmark_fp16_xla_compile_fake_1gpu_gpuparams(self):\n params = self._shared_params_fp16()._replace(\n num_gpus=1,\n data_dir=self.fake_data_dir,\n data_name='imagenet',\n xla_compile=True)\n self._run_benchmark(params)",
"def test_qc_quantize_op_cpu_fp16_pass_through(self):\n graph = tf.Graph()\n config = tf.compat.v1.ConfigProto(log_device_placement=False)\n sess = tf.compat.v1.Session(graph=graph, config=config)\n bitwidth = 8\n use_symm_encoding = True\n\n with graph.as_default():\n # place holder for the input\n with tf.device(\"/device:CPU:0\"):\n inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')\n tensor_quantizer = libpymo.TensorQuantizer(libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED,\n libpymo.RoundingMode.ROUND_NEAREST)\n tensor_quantizer_val = libpymo.PtrToInt64(tensor_quantizer)\n tensor_quant_ref = tf.Variable(initial_value=tensor_quantizer_val, trainable=False, dtype=tf.int64)\n\n encoding_min = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)\n encoding_max = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)\n bit_width = tf.Variable(initial_value=bitwidth, trainable=False, dtype=tf.int8)\n use_symmetric_encoding = tf.Variable(initial_value=use_symm_encoding, trainable=False, dtype=tf.bool)\n is_int_data_type = tf.Variable(initial_value=False, trainable=False, dtype=tf.bool)\n mode_var = tf.Variable(initial_value=int(libpymo.TensorQuantizerOpMode.passThrough),\n trainable=False, dtype=tf.int32)\n\n sess.run([mode_var.initializer, tensor_quant_ref.initializer, encoding_min.initializer,\n encoding_max.initializer, bit_width.initializer, use_symmetric_encoding.initializer,\n is_int_data_type.initializer])\n\n pass_through_op_output = zero_out_module.qc_quantize(name='quant_op', in_tensor=inp,\n op_mode=mode_var,\n tensor_quantizer_reference=tensor_quant_ref,\n encoding_min=encoding_min,\n encoding_max=encoding_max,\n bit_width=bit_width,\n use_symmetric_encoding=use_symmetric_encoding,\n is_int_data_type=is_int_data_type)\n\n inp_tensor = sess.graph.get_tensor_by_name('input:0')\n\n inp_data = np.array([0.78027299, 0.44164284, 0.6942797, 0.69774088, 0.55863863, 0.29553034, 0.219199,\n 0.09483732, 0.55075674, 0.6348504], dtype=np.float32)\n\n # get the output\n out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})\n\n # compare qc_quantize op's output with expected output\n assert np.allclose(out_data, inp_data)\n\n sess.close()",
"def utest_SGD_Test():\n model_fname = \"../work/model\"\n # test binary classification.\n if False:\n #test_fname = \"../work/train.bz2\"\n test_fname = \"../work/rcv1_test.binary.bz2\"\n if True:\n test_fname = \"../work/iris_multi.train\"\n test_logreg(model_fname,test_fname,prob=True,acc=True)\n pass"
]
| [
"0.6797666",
"0.6711396",
"0.6471242",
"0.63600224",
"0.6304373",
"0.62627554",
"0.61734736",
"0.61478865",
"0.61421573",
"0.6140544",
"0.6102282",
"0.6074853",
"0.5933609",
"0.59066427",
"0.58761114",
"0.58625305",
"0.58616734",
"0.583168",
"0.5812284",
"0.57926136",
"0.5761621",
"0.5761621",
"0.5761621",
"0.5752682",
"0.5744057",
"0.5744057",
"0.5744057",
"0.57318944",
"0.5711163",
"0.5690565"
]
| 0.75784075 | 0 |
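The test above flags a failure whenever any element of `data_diff` exceeds an absolute tolerance of 1e-9, using a hand-written loop. The same criterion can be expressed with NumPy; the helper below is a sketch of that equivalent comparison, not part of the original test.

```python
import numpy as np

def quant_matches_expected(quant_tensor, data_expected, atol=1e-9):
    """Return True when the dequantized GPU output matches the reference values."""
    result = quant_tensor.data.detach().cpu().numpy()
    # Same criterion as the loop above: every |result - expected| must stay within atol.
    return np.allclose(result, data_expected, rtol=0.0, atol=atol)
```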
Check that the 'FUN' and 'BBQ' variable values are unique with respect to each other. | def check_fun_bbq_not_unique(fun, bbq):
    fun_set = set(str(fun))
    bbq_set = set(str(bbq))
    # FUN has three distinct digits and BBQ only two (its first two digits repeat),
    # so the union has exactly 5 members only when no digit is shared between them.
    if len(fun_set.union(bbq_set)) != 5:
        return True
    else:
        return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_summer_not_unique(fun, bbq, summer):\n fun_set = set(str(fun))\n bbq_set = set(str(bbq))\n summer_set = set(str(summer))\n\n summer_fun_union = fun_set.union(summer_set)\n summer_bbq_union = bbq_set.union(summer_set)\n\n if len(summer_fun_union) != 7 or len(summer_bbq_union) != 7 or len(summer_set) != 5:\n return True\n else:\n return False",
"def check_unique(self):\n pass",
"def assert_unique_cols_unique(self, df):\n assert not df.duplicated(self.unique_cols).any()",
"def verifyDistinct( options, data ):\n tot = 0\n for c in data.chrNames:\n s = set()\n d = mafDataOrNone( data.mafBlocksByChrom, c )\n if d is None:\n continue\n for mb in d:\n for i in xrange( mb.refStart, mb.refEnd + 1):\n if i in s:\n sys.stderr.write('duplicate base found! %s %d [%d-%d], %s [%d-%d]\\n'\n % (mb.refChr, i, mb.refStart, mb.refEnd, \n mb.pairChr, mb.pairStart, mb.pairEnd ))\n sys.exit( 1 )\n else:\n s.add( i )\n tot += len( s )\n sys.stderr.write( 'Verify all bases sent to be binned are distinct: Found %s distinct bases in the alignment to the reference genome, no duplicates, OK.\\n' % tot)",
"def has_uniquely_named_variables(formula: Formula) -> bool:\r\n forbidden_variables = set(formula.free_variables())\r\n def has_uniquely_named_variables_helper(formula: Formula) -> bool:\r\n if is_unary(formula.root):\r\n return has_uniquely_named_variables_helper(formula.first)\r\n elif is_binary(formula.root):\r\n return has_uniquely_named_variables_helper(formula.first) and \\\r\n has_uniquely_named_variables_helper(formula.second)\r\n elif is_quantifier(formula.root):\r\n if formula.variable in forbidden_variables:\r\n return False\r\n forbidden_variables.add(formula.variable)\r\n return has_uniquely_named_variables_helper(formula.predicate)\r\n else:\r\n assert is_relation(formula.root) or is_equality(formula.root)\r\n return True\r\n\r\n return has_uniquely_named_variables_helper(formula)",
"def test_uniq(self):\n\n test_cases = [\n Case(\n description=\"lists of strings\",\n val=[\"a\", \"b\", \"b\", \"a\"],\n args=[],\n kwargs={},\n expect=[\"a\", \"b\"],\n ),\n Case(\n description=\"lists of things\",\n val=[\"a\", \"b\", 1, 1],\n args=[],\n kwargs={},\n expect=[\"a\", \"b\", 1],\n ),\n Case(\n description=\"empty list\",\n val=[],\n args=[],\n kwargs={},\n expect=[],\n ),\n Case(\n description=\"unhashable items\",\n val=[\"a\", \"b\", [], {}],\n args=[\", \"],\n kwargs={},\n expect=FilterArgumentError,\n ),\n Case(\n description=\"unexpected argument\",\n val=[\"a\", \"b\"],\n args=[\", \"],\n kwargs={},\n expect=FilterArgumentError,\n ),\n Case(\n description=\"value not an array\",\n val=\"a, b\",\n args=[],\n kwargs={},\n expect=FilterValueError,\n ),\n Case(\n description=\"undefined left value\",\n val=self.env.undefined(\"test\"),\n args=[],\n kwargs={},\n expect=[],\n ),\n ]\n\n self._test(Uniq, test_cases)",
"def _test_obsdup(t):\n return t.shape[0] != len(set(t.ids(axis='observation')))",
"def is_unique(x):\n return len(set(x)) == len(x)",
"def _valid_sbu_combination(self, incidence, sbu_set):\n if incidence is None:\n return len([i for i in sbu_set if i.is_metal]) == \\\n self.options.metal_sbu_per_structure\n else:\n if set(sorted([i.degree for i in sbu_set])) == set(sorted(incidence)):\n return len([i for i in sbu_set if i.is_metal]) == \\\n self.options.metal_sbu_per_structure\n else:\n return False",
"def test_check_unique_var(self):\n\n for component_name in [\"var\", \"raw.var\"]:\n with self.subTest(component_name=component_name):\n\n # Resetting validator\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n\n # Duplicate 1st row in var and assign it to 2nd\n component = Validator.getattr_anndata(\n self.validator.adata, component_name\n )\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ],\n )",
"def unique():\n\n def _apply_fn(dataset):\n return dataset.unique()\n\n return _apply_fn",
"def __Unique_restriction_correct_ndarray(self):\n strTestName = 'Uniqness of elements in Numpy array (correct)'\n RxCSObject = _RxCSobject()\n\n # Let us define a Numpy Array\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramUnique('parameter1')\n\n RxCSObject.parameter1 = np.unique(np.random.randint(1, 1e6, 1e6))\n \n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def _check_duplicates(self):\n # check variables\n counter = Counter(self.variables())\n duplicates = [key for key, value in counter.items() if value > 1]\n if duplicates != []:\n raise DuplicateVariables(duplicates)\n\n # check parameters\n counter = Counter(self.parameters())\n duplicates = [key for key, value in counter.items() if value > 1]\n if duplicates != []:\n raise DuplicateParameters(duplicates)",
"def unique_field_value(verifield, unique_to_check):\n from polyglot.pyapi.unique import value_combo_exists\n return not value_combo_exists(verifield, **unique_to_check)",
"def test_check_bc_duplicates_var_len_no_dupes(self):\r\n\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGTA', 'AAAA', '1', 's1&data'],\r\n ['s2', 'ACGT', 'TAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_bc_duplicates(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=True,\r\n variable_len_barcodes=True,\r\n added_demultiplex_field=None)\r\n\r\n # combination of primer seq and barcodes to match largest barcode\r\n # present is ACGTA and ACGTT, so should not get a duplicate hit.\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)",
"def unique(combo, out):\n # This lets us find only minimally covering payments (you should never add cards to a payment that already\n # satisfies the charge)\n for el in out:\n if set(el).issubset(combo):\n return False\n return True",
"def consistency_checker(model,universals,existentials):\n universal_set=set(universals)\n existential_set=set(existentials)\n #Additionally to the universal and existential variables the model may\n #contain additional auxiliary variables -- e.g. for setting default values.\n #We consider these variables such as the existential variables.\n auxiliary_variables_in_model={abs(l) for clause in model for l in clause \n if (not abs(l) in universal_set) and (not abs(l) in existential_set)}\n existential_set = existential_set.union(auxiliary_variables_in_model)\n result, certificate = checkModelQBF(model, universal_set, existential_set)\n return result",
"def different_values_constraint(A, a, B, b):\r\n return a != b",
"def __Unique_restriction_incorrect_ndarray(self):\n strTestName = 'Uniqness of elements in Numpy array (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Let us define a Numpy Array\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramUnique('parameter1')\n\n RxCSObject.parameter1 = np.random.randint(1, 1e3, (1e2, 1e2))\n\n self.__parametersCheck_error(RxCSObject, UniqnessError, strTestName)",
"def _encode_check_unknown(values, uniques, return_mask=False):\n uniques_set = set(uniques)\n diff = list(set(values) - uniques_set)\n if return_mask:\n if diff:\n valid_mask = [val in uniques_set for val in values]\n else:\n valid_mask = [True] * len(values)\n return diff, valid_mask\n else:\n return diff",
"def consistent(self, assignment):\n # print(\"Entered consistent Function\")\n # print(\"assignment\")\n # print(assignment)\n\n overlaps = self.crossword.overlaps\n value_set = set()\n for variable in assignment: \n #checking overlaps with neighbors\n neighbors = self.crossword.neighbors(variable)\n for neighbor in neighbors:\n overlap = overlaps[(variable, neighbor)]\n if (neighbor in assignment):\n # print(\"var 1 overlap letter\")\n # print(assignment[variable][overlap[0]])\n # print(\"var 2 overlap letter\")\n # print(assignment[neighbor][overlap[1]])\n if (assignment[variable][overlap[0]] is not assignment[neighbor][overlap[1]]):\n return False\n \n # print(\"neighbors\")\n # print(neighbors)\n\n #checking that the assignment is the correct length for the variable\n if (variable.length != len(assignment[variable])):\n return False\n\n #the set to check for distinct variables later\n value_set.add(assignment[variable])\n\n #Checking that all variables are distinct\n #these should be the same length unless two or more variables share an value\n if( len(value_set) is not len(assignment)): \n return False\n \n return True\n\n # raise NotImplementedError",
"def unique_list(var):\n return len([x for x in set(var)]) == len(var)",
"def test_multiple_char_not_unique(self):\n self.assertFalse(all_unique_chars_no_set(\"aa\"))\n self.assertFalse(all_unique_chars_no_set(\"alabama\"))\n self.assertFalse(all_unique_chars_no_set(\"Ricardio\"))\n self.assertFalse(all_unique_chars_no_set(\"aardvark\"))\n self.assertFalse(all_unique_chars_no_set(\"Zimbabwe\"))\n self.assertFalse(all_unique_chars_no_set(\"....What?....\"))",
"def _check_picks_uniqueness(info, picks):\n info = pick_info(info, picks, copy=True)\n if len(info.get_channel_types(unique=True)) != 1:\n ch_types = info.get_channel_types(unique=False)\n ch_types, counts = np.unique(ch_types, return_counts=True)\n channels_msg = \", \".join(\n \"%s '%s' channel(s)\" % t for t in zip(counts, ch_types)\n )\n raise ValueError(\n \"Only one datatype can be selected, but 'picks' \"\n f\"results in {channels_msg}.\"\n )",
"def _test_sampdup(t):\n return t.shape[1] != len(set(t.ids(axis='sample')))",
"def _validate_unique_merge_col(self):\n msg = (\"Duplicate {}s were found. This is likely due to resource \"\n \"class binning, which is not supported at this time. \"\n \"Please re-run supply curve aggregation without \"\n \"resource class binning and ensure there are no duplicate \"\n \"values in {!r}. File: {!r}\")\n\n mc = ColNameFormatter.fmt(MERGE_COLUMN)\n for ds, cols, fp in zip([self.solar_meta, self.wind_meta],\n [self.__solar_cols, self.__wind_cols],\n [self.solar_fpath, self.wind_fpath]):\n merge_col = ds.columns[cols == mc].item()\n if not ds[merge_col].is_unique:\n e = msg.format(merge_col, merge_col, fp)\n logger.error(e)\n raise FileInputError(e)",
"def f3(a, b): \n return not a and b",
"def check_uniqueness_in_group(tX_grouped, unwanted_value):\n masks_check = []\n counts_check = []\n for i in range(len(tX_grouped)):\n unwanted_value_check = 1 * (tX_grouped[i] == unwanted_value)\n masks_and_counts = np.unique(unwanted_value_check, return_counts=True, axis=0)\n masks_check.append(masks_and_counts[0])\n counts_check.append(masks_and_counts[1])\n print(masks_check)\n print(counts_check)\n return None",
"def is_unique(s):\n\ta = s.to_numpy() # s.values (pandas<0.24)\n\treturn (a[0] == a).all()",
"def check_unique(self, data: Union['LedGroup', 'Sequencer', 'Step'], datatype: str, seq: Optional['Sequencer']) \\\n -> bool:\n if datatype == 'LedGroup':\n return data.Name.lower() not in self.get_group_list()\n elif datatype == 'Sequencer':\n return data.Name.lower() not in self.get_seq_names()\n else:\n if seq is not None:\n return data.Name.lower() not in seq.get_steps_names()\n return False"
]
| [
"0.73765635",
"0.6097503",
"0.6023298",
"0.58524156",
"0.56967366",
"0.56548667",
"0.55882883",
"0.558621",
"0.55075127",
"0.5472332",
"0.5461498",
"0.5419203",
"0.5411763",
"0.5389069",
"0.53699887",
"0.53594816",
"0.5310973",
"0.5302523",
"0.5223545",
"0.5216425",
"0.5207916",
"0.5206327",
"0.5188208",
"0.5186915",
"0.5164687",
"0.51525897",
"0.51509094",
"0.5128647",
"0.51194364",
"0.51085514"
]
| 0.83220375 | 0 |
Check that 'SUMMER' variable values are almost unique to 'FUN' (excluding the 'U' variable) and completely unique to 'BBQ'. Also check that the 'SUMMER' variable values are unique to each other. | def check_summer_not_unique(fun, bbq, summer):
    # Work with the distinct digits of each candidate value.
    fun_set = set(str(fun))
    bbq_set = set(str(bbq))
    summer_set = set(str(summer))
    # SUMMER needs 5 distinct digits; its union with FUN must have 7 digits (they share only
    # the 'U' digit) and its union with BBQ must also have 7 digits (no shared digits at all).
    summer_fun_union = fun_set.union(summer_set)
    summer_bbq_union = bbq_set.union(summer_set)
    if len(summer_fun_union) != 7 or len(summer_bbq_union) != 7 or len(summer_set) != 5:
        return True
    else:
        return False
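For illustration, a minimal usage sketch of this check; the digit assignments below are made-up candidates, not values from the original puzzle:

# SUMMER shares the digit 3 with BBQ, so the candidate is flagged as not unique.
assert check_summer_not_unique(fun=120, bbq=335, summer=36789)
# SUMMER has 5 distinct digits, overlaps FUN in exactly one digit and BBQ in none.
assert not check_summer_not_unique(fun=120, bbq=335, summer=16789)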
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_fun_bbq_not_unique(fun, bbq):\n fun_set = set(str(fun))\n bbq_set = set(str(bbq))\n\n if len(fun_set.union(bbq_set)) != 5:\n return True\n else:\n return False",
"def _valid_sbu_combination(self, incidence, sbu_set):\n if incidence is None:\n return len([i for i in sbu_set if i.is_metal]) == \\\n self.options.metal_sbu_per_structure\n else:\n if set(sorted([i.degree for i in sbu_set])) == set(sorted(incidence)):\n return len([i for i in sbu_set if i.is_metal]) == \\\n self.options.metal_sbu_per_structure\n else:\n return False",
"def verifyDistinct( options, data ):\n tot = 0\n for c in data.chrNames:\n s = set()\n d = mafDataOrNone( data.mafBlocksByChrom, c )\n if d is None:\n continue\n for mb in d:\n for i in xrange( mb.refStart, mb.refEnd + 1):\n if i in s:\n sys.stderr.write('duplicate base found! %s %d [%d-%d], %s [%d-%d]\\n'\n % (mb.refChr, i, mb.refStart, mb.refEnd, \n mb.pairChr, mb.pairStart, mb.pairEnd ))\n sys.exit( 1 )\n else:\n s.add( i )\n tot += len( s )\n sys.stderr.write( 'Verify all bases sent to be binned are distinct: Found %s distinct bases in the alignment to the reference genome, no duplicates, OK.\\n' % tot)",
"def _test_obsdup(t):\n return t.shape[0] != len(set(t.ids(axis='observation')))",
"def test_special_U(self):\n self.check_oneq_special_cases(U3Gate(0.0, 0.1, -0.1).to_matrix(), \"U\", {})\n self.check_oneq_special_cases(U3Gate(0.0, 0.1, 0.2).to_matrix(), \"U\", {\"u\": 1})\n self.check_oneq_special_cases(U3Gate(np.pi / 2, 0.2, 0.0).to_matrix(), \"U\", {\"u\": 1})\n self.check_oneq_special_cases(U3Gate(np.pi / 2, 0.0, 0.2).to_matrix(), \"U\", {\"u\": 1})\n self.check_oneq_special_cases(U3Gate(0.1, 0.2, 0.3).to_matrix(), \"U\", {\"u\": 1})",
"def validate_accumulator_uniqueness(self, combiner, data, expected):\n acc = combiner.compute(data)\n acc2 = combiner.compute(data)\n self.assertIsNot(acc, acc2)\n self.assertAllClose(expected, combiner.extract(acc))",
"def test_u_statistic(self):\n for seed in range(5):\n\n random_state = np.random.RandomState(seed)\n\n for i in range(4, self.test_max_size + 1):\n arr1 = random_state.rand(i, 1)\n arr2 = random_state.rand(i, 1)\n\n u_stat = dcor_internals._u_distance_correlation_sqr_naive(\n arr1, arr2)\n u_stat_fast = dcor_internals._u_distance_correlation_sqr_fast(\n arr1, arr2)\n\n self.assertAlmostEqual(u_stat, u_stat_fast)",
"def check_unique(self):\n pass",
"def validate_unique_mof_names():\n names = list(FRAMEWORKS_DF['name'].str.lower()) + list(FRAMEWORKS_DF['alternative names'].dropna().str.lower())\n names = [ n for l in names for n in l.split(',') if l ]\n names = [ n.lower().replace('-', ' ') for n in names ]\n\n duplicates = [item for item, count in collections.Counter(list(names)).items() if count > 1]\n\n if duplicates:\n print('Warning: Duplicate CURATED-MOF names detected: {}'.format(duplicates))\n sys.exit(1)\n\n print('No duplicate CURATED-MOF names found.')",
"def check_uniqueness_in_group(tX_grouped, unwanted_value):\n masks_check = []\n counts_check = []\n for i in range(len(tX_grouped)):\n unwanted_value_check = 1 * (tX_grouped[i] == unwanted_value)\n masks_and_counts = np.unique(unwanted_value_check, return_counts=True, axis=0)\n masks_check.append(masks_and_counts[0])\n counts_check.append(masks_and_counts[1])\n print(masks_check)\n print(counts_check)\n return None",
"def test_uniq(self):\n\n test_cases = [\n Case(\n description=\"lists of strings\",\n val=[\"a\", \"b\", \"b\", \"a\"],\n args=[],\n kwargs={},\n expect=[\"a\", \"b\"],\n ),\n Case(\n description=\"lists of things\",\n val=[\"a\", \"b\", 1, 1],\n args=[],\n kwargs={},\n expect=[\"a\", \"b\", 1],\n ),\n Case(\n description=\"empty list\",\n val=[],\n args=[],\n kwargs={},\n expect=[],\n ),\n Case(\n description=\"unhashable items\",\n val=[\"a\", \"b\", [], {}],\n args=[\", \"],\n kwargs={},\n expect=FilterArgumentError,\n ),\n Case(\n description=\"unexpected argument\",\n val=[\"a\", \"b\"],\n args=[\", \"],\n kwargs={},\n expect=FilterArgumentError,\n ),\n Case(\n description=\"value not an array\",\n val=\"a, b\",\n args=[],\n kwargs={},\n expect=FilterValueError,\n ),\n Case(\n description=\"undefined left value\",\n val=self.env.undefined(\"test\"),\n args=[],\n kwargs={},\n expect=[],\n ),\n ]\n\n self._test(Uniq, test_cases)",
"def test_special_U321(self):\n self.check_oneq_special_cases(U3Gate(0.0, 0.1, -0.1).to_matrix(), \"U321\", {})\n self.check_oneq_special_cases(U3Gate(0.0, 0.11, 0.2).to_matrix(), \"U321\", {\"u1\": 1})\n self.check_oneq_special_cases(U3Gate(np.pi / 2, 0.2, 0.0).to_matrix(), \"U321\", {\"u2\": 1})\n self.check_oneq_special_cases(U3Gate(np.pi / 2, 0.0, 0.2).to_matrix(), \"U321\", {\"u2\": 1})\n self.check_oneq_special_cases(U3Gate(0.11, 0.27, 0.3).to_matrix(), \"U321\", {\"u3\": 1})",
"def unique(combo, out):\n # This lets us find only minimally covering payments (you should never add cards to a payment that already\n # satisfies the charge)\n for el in out:\n if set(el).issubset(combo):\n return False\n return True",
"def consistency_checker(model,universals,existentials):\n universal_set=set(universals)\n existential_set=set(existentials)\n #Additionally to the universal and existential variables the model may\n #contain additional auxiliary variables -- e.g. for setting default values.\n #We consider these variables such as the existential variables.\n auxiliary_variables_in_model={abs(l) for clause in model for l in clause \n if (not abs(l) in universal_set) and (not abs(l) in existential_set)}\n existential_set = existential_set.union(auxiliary_variables_in_model)\n result, certificate = checkModelQBF(model, universal_set, existential_set)\n return result",
"def assert_unique_cols_unique(self, df):\n assert not df.duplicated(self.unique_cols).any()",
"def test_fix_unique():\n\n orca = ORCA()\n\n calc = Calculation(name='tmp',\n molecule=test_mol,\n method=orca,\n keywords=orca.keywords.sp)\n calc._fix_unique()\n assert calc.name == 'tmp_orca'\n\n # Should generate a register\n assert os.path.exists('.autode_calculations')\n assert len(open('.autode_calculations', 'r').readlines()) == 1\n\n calc = Calculation(name='tmp',\n molecule=test_mol,\n method=orca,\n keywords=orca.keywords.opt)\n calc._fix_unique()\n assert calc.name != 'tmp_orca'\n assert calc.name == 'tmp_orca0'\n\n # no need to fix unique if the name is different\n calc = Calculation(name='tmp2',\n molecule=test_mol,\n method=orca,\n keywords=orca.keywords.opt)\n calc._fix_unique()\n assert calc.name == 'tmp2_orca'",
"def __Unique_restriction_correct_ndarray(self):\n strTestName = 'Uniqness of elements in Numpy array (correct)'\n RxCSObject = _RxCSobject()\n\n # Let us define a Numpy Array\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramUnique('parameter1')\n\n RxCSObject.parameter1 = np.unique(np.random.randint(1, 1e6, 1e6))\n \n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def test_multiple_char_not_unique(self):\n self.assertFalse(all_unique_chars_no_set(\"aa\"))\n self.assertFalse(all_unique_chars_no_set(\"alabama\"))\n self.assertFalse(all_unique_chars_no_set(\"Ricardio\"))\n self.assertFalse(all_unique_chars_no_set(\"aardvark\"))\n self.assertFalse(all_unique_chars_no_set(\"Zimbabwe\"))\n self.assertFalse(all_unique_chars_no_set(\"....What?....\"))",
"def verify_rn(u_count, u_x, t_count, t_x, r_count, r_x):\r\n \r\n if (compar_f(f_range) >= pdf(f_range)).sum() == f_range.shape[0]:\r\n print (':) Comparison funciton is always greater than pdf')\r\n \r\n # needs to be compacted\r\n t_x = t_x[:-1] # remove the exatra edge from the end\r\n t_x += (t_x[1]-t_x[0])/2 # center each bin x value\r\n r_x = r_x[:-1] # remove the exatra edge from the end\r\n r_x += (r_x[1]-r_x[0])/2 # center each bin x value\r\n u_x = r_x[:-1] # remove the exatra edge from the end\r\n u_x += (r_x[1]-r_x[0])/2 # center each bin x value\r\n \r\n u_dif = (u_count - n/res)**2\r\n t_dif = (t_count - 0.5*np.sin(t_x))**2\r\n r_dif = (r_count - (2/np.pi) * np.sin(r_x)**2)**2\r\n print('\\nThe mean squared difference between distribution generated and expcted are:')\r\n print('uniform %.5f' % (u_dif.mean()/n), '\\n0.5Sin(x) %.5f' % t_dif.mean(), '\\n2/pi*sin^2(x) %.5f' % r_dif.mean() )",
"def is_unique(x):\n return len(set(x)) == len(x)",
"def com_adobe_fonts_check_family_consistent_upm(ttFonts):\n upm_set = set()\n for ttFont in ttFonts:\n upm_set.add(ttFont['head'].unitsPerEm)\n if len(upm_set) > 1:\n yield FAIL,\\\n Message(\"inconsistent-upem\",\n f\"Fonts have different units per em: {sorted(upm_set)}.\")\n else:\n yield PASS, \"Fonts have consistent units per em.\"",
"def has_uniquely_named_variables(formula: Formula) -> bool:\r\n forbidden_variables = set(formula.free_variables())\r\n def has_uniquely_named_variables_helper(formula: Formula) -> bool:\r\n if is_unary(formula.root):\r\n return has_uniquely_named_variables_helper(formula.first)\r\n elif is_binary(formula.root):\r\n return has_uniquely_named_variables_helper(formula.first) and \\\r\n has_uniquely_named_variables_helper(formula.second)\r\n elif is_quantifier(formula.root):\r\n if formula.variable in forbidden_variables:\r\n return False\r\n forbidden_variables.add(formula.variable)\r\n return has_uniquely_named_variables_helper(formula.predicate)\r\n else:\r\n assert is_relation(formula.root) or is_equality(formula.root)\r\n return True\r\n\r\n return has_uniquely_named_variables_helper(formula)",
"def _encode_check_unknown(values, uniques, return_mask=False):\n uniques_set = set(uniques)\n diff = list(set(values) - uniques_set)\n if return_mask:\n if diff:\n valid_mask = [val in uniques_set for val in values]\n else:\n valid_mask = [True] * len(values)\n return diff, valid_mask\n else:\n return diff",
"def test_check_bc_duplicates_var_len_no_dupes(self):\r\n\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGTA', 'AAAA', '1', 's1&data'],\r\n ['s2', 'ACGT', 'TAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_bc_duplicates(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=True,\r\n variable_len_barcodes=True,\r\n added_demultiplex_field=None)\r\n\r\n # combination of primer seq and barcodes to match largest barcode\r\n # present is ACGTA and ACGTT, so should not get a duplicate hit.\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)",
"def estimate_uncertainties(self, model, obs, sig, mu=1.0):\n\n \n syn, J = self.synthesize_rf(model, mu=mu)\n\n error = model*0\n ny, nx = error.shape[0:2]\n \n for yy in range(ny):\n for xx in range(nx):\n \n for kk in range(9):\n J[yy,xx,kk] /= sig\n \n\n Hdiag = (J[yy,xx,:]**2).sum(axis=(1,2))\n error[yy,xx,:] = (((obs[yy,xx]-syn[yy,xx]) / sig )**2).sum()\n\n for kk in range(9):\n error[yy,xx,kk] /= Hdiag[kk]\n\n error *= 2.0 / 9.0\n \n return np.sqrt(error)",
"def badMuons(self, allmuons, allvertices):\n\n muons = list(m for m in allmuons) # make it a python list\n goodMuon = []\n\n if len(allvertices) < 1: raise RuntimeError\n PV = allvertices[0].position()\n \n out = [] \n for mu in muons:\n if (not(mu.isPFMuon()) or mu.innerTrack().isNull()):\n goodMuon.append(-1); # bad but we don't care\n continue;\n if (self.preselection(mu)):\n dxypv = abs(mu.innerTrack().dxy(PV));\n dzpv = abs(mu.innerTrack().dz(PV));\n if (self.tighterId(mu)):\n ipLoose = ((dxypv < 0.5 and dzpv < 2.0) or mu.innerTrack().hitPattern().pixelLayersWithMeasurement() >= 2);\n goodMuon.append(ipLoose or (not(self.selectClones_) and self.tightGlobal(mu)));\n elif (self.safeId(mu)):\n ipTight = (dxypv < 0.2 and dzpv < 0.5);\n goodMuon.append(ipTight);\n else:\n goodMuon.append(0);\n else:\n goodMuon.append(3); # maybe good, maybe bad, but we don't care\n\n n = len(muons)\n for i in range(n):\n if (muons[i].pt() < self.ptCut_ or goodMuon[i] != 0): continue;\n bad = True;\n if (self.selectClones_):\n bad = False; # unless proven otherwise\n n1 = muons[i].numberOfMatches(ROOT.reco.Muon.SegmentArbitration);\n for j in range(n):\n if (j == i or goodMuon[j] <= 0 or not(self.partnerId(muons[j]))): continue\n n2 = muons[j].numberOfMatches(ROOT.reco.Muon.SegmentArbitration);\n if (deltaR(muons[i],muons[j]) < 0.4 or (n1 > 0 and n2 > 0 and ROOT.muon.sharedSegments(muons[i],muons[j]) >= 0.5*min(n1,n2))):\n bad = True;\n break;\n if (bad):\n out.append(muons[i]);\n return out",
"def test_check_unique_var(self):\n\n for component_name in [\"var\", \"raw.var\"]:\n with self.subTest(component_name=component_name):\n\n # Resetting validator\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n\n # Duplicate 1st row in var and assign it to 2nd\n component = Validator.getattr_anndata(\n self.validator.adata, component_name\n )\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ],\n )",
"def ucb_check(self, e):\n\n s1, s2 = e\n k = self._find_focal_coord(s1, s2)\n if self.mu[k][s1] > self.mu[k][s2]:\n better_strat = s1\n worse_strat = s2\n else:\n better_strat = s2\n worse_strat = s1\n\n ucb = self._ucb(worse_strat, k)\n lcb = self._lcb(better_strat, k)\n\n return (ucb < lcb), (worse_strat, better_strat)",
"def is_special_sum_set(A):\n len_A = len(A)\n for len_B in xrange(1,len_A/2 + 1):\n for B in combinations(A, len_B):\n sum_B = sum(B)\n A_comp_B = A - set(B)\n for len_C in xrange(2,len_A - len_B + 1):\n for C in combinations(A_comp_B, len_C):\n sum_C = sum(C)\n #rule1: for any 2 subsets (disjoint--no element is shared, and nonempty) called B C, sum(B) != sum(C)\n if sum_B == sum_C:\n return False\n #rule2: if len(B) > len(C), then sum(B) > sum(C)\n if (len_B > len_C and sum_B <= sum_C):\n return False\n if (len_C > len_B and sum_C <= sum_B):\n return False\n\n return True",
"def _test_sampdup(t):\n return t.shape[1] != len(set(t.ids(axis='sample')))"
]
| [
"0.73538435",
"0.5752358",
"0.56996167",
"0.54556",
"0.5418842",
"0.54076654",
"0.537638",
"0.53074366",
"0.52388746",
"0.5164242",
"0.51392543",
"0.5134561",
"0.5127542",
"0.51243865",
"0.5114154",
"0.5074979",
"0.50513005",
"0.5024604",
"0.4982424",
"0.49569768",
"0.49510577",
"0.49400774",
"0.49387047",
"0.49377286",
"0.4937464",
"0.49136457",
"0.48955202",
"0.488841",
"0.48772353",
"0.48586825"
]
| 0.7588708 | 0 |
download_data downloads the training data archive from the specified remote path via fsspec into tmpdir, extracts it, and returns the path of the extracted data directory. | def download_data(remote_path: str, tmpdir: str) -> str:
tar_path = os.path.join(tmpdir, "data.tar.gz")
print(f"downloading dataset from {remote_path} to {tar_path}...")
fs, _, rpaths = fsspec.get_fs_token_paths(remote_path)
assert len(rpaths) == 1, "must have single path"
fs.get(rpaths[0], tar_path)
data_path = os.path.join(tmpdir, "data")
print(f"extracting {tar_path} to {data_path}...")
with tarfile.open(tar_path, mode="r") as f:
f.extractall(data_path)
return data_path | {
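A minimal calling sketch, assuming the fsspec, tarfile, and os imports implied by the function body are available; the URI below is only a placeholder:

import tempfile

with tempfile.TemporaryDirectory() as tmpdir:
    # Any fsspec-compatible URI works here, e.g. file://, s3:// or gs://.
    data_path = download_data("file:///tmp/data.tar.gz", tmpdir)
    print(os.listdir(data_path))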
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def maybe_download_and_extract():\n dest_directory = FLAGS.data_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n r = requests.Session().get(DATA_URL)\n with open(filepath, 'wb') as fd:\n for chunk in r.iter_content(500):\n fd.write(chunk)\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')\n if not os.path.exists(extracted_dir_path):\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def download(data_root, version):\n if version not in GroceriesReal.GROCERIES_REAL_DATASET_TABLES.keys():\n raise ValueError(\n f\"A valid dataset version is required. Available versions are:\"\n f\"{GroceriesReal.GROCERIES_REAL_DATASET_TABLES.keys()}\"\n )\n dest_path = os.path.join(\n data_root, GroceriesReal.LOCAL_PATH, f\"{version}.zip\"\n )\n expected_checksum = GroceriesReal.GROCERIES_REAL_DATASET_TABLES[\n version\n ].checksum\n extract_folder = os.path.join(data_root, GroceriesReal.LOCAL_PATH)\n if os.path.exists(dest_path):\n logger.info(\"The dataset file exists. Skip download.\")\n try:\n validate_checksum(dest_path, expected_checksum)\n except ChecksumError:\n logger.info(\n \"The checksum of the previous dataset mismatches. \"\n \"Delete the previously downloaded dataset.\"\n )\n os.remove(dest_path)\n if not os.path.exists(dest_path):\n source_uri = GroceriesReal.GROCERIES_REAL_DATASET_TABLES[\n version\n ].source_uri\n GroceriesReal._download_http(source_uri, dest_path, version)\n GroceriesReal._extract_file(dest_path, extract_folder)",
"def maybe_download_and_extract():\n dest_directory = FLAGS.data_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (filename,\n float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')\n if not os.path.exists(extracted_dir_path):\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def maybe_download_and_extract():\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath,\n reporthook=_progress)\n print()\n statinfo = os.stat(filepath)\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def maybe_download_and_extract():\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def maybe_download_and_extract():\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def maybe_download_and_extract():\n dest_directory = FLAGS['model_dir']\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' %\n (filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def download(dataset_name,dataset_url):\n directory = \"tmp\"\n if not os.path.exists(os.path.join(directory,dataset_name)):\n os.makedirs(os.path.join(directory,dataset_name))\n for url, filename in get_all_data(dataset_url):\n if not os.path.exists(os.path.join(directory,dataset_name,filename)):\n print(\"Downloading \"+filename+\":\",)\n ul.urlretrieve(url,os.path.join(directory,dataset_name,filename),reporthook)\n unzip_ecco_tcp_xmls(os.path.join(directory, dataset_name), os.path.join(directory, dataset_name + \"_unzipped\"))\n shutil.rmtree(os.path.join(directory, dataset_name))\n shutil.move(os.path.join(directory, dataset_name + \"_unzipped\"), os.path.join(directory, dataset_name))\n headers_to_csv(directory, dataset_name)\n corpus_to_csv(directory, dataset_name)\n erase_all_files_with_extension(directory, dataset_name, \".hdr\")\n erase_all_files_with_extension(directory, dataset_name, \".xml\")",
"def maybe_download():\n\n print(\"Downloading Inception 5h Model ...\")\n download.maybe_download_and_extract(url=data_url, download_dir=data_dir)",
"def download_dataset(self):\n dataset_name = ADE20K_URL.split(\"/\")[-1].split(\".\")[0]\n req = urllib.request.Request(ADE20K_URL, method=\"HEAD\")\n size_file = urllib.request.urlopen(req).headers[\"Content-Length\"]\n download = \"n\"\n while download != \"y\":\n if not self.yes_all:\n download = input(f\"You are about to download {dataset_name} ({size_file} bytes) to the temporary folder {self.tmp_path}. Do you want to continue? [y/n] \\n\")\n if self.yes_all or download == \"y\":\n logger.info(f\"Downloading dataset {dataset_name} at {ADE20K_URL} to temporary folder {self.tmp_path}...\")\n zip_path, hdrs = urllib.request.urlretrieve(ADE20K_URL, f\"{self.tmp_path}/{dataset_name}.zip\")\n logger.info(f\"Extracting {zip_path} to temporary folder {self.tmp_path}...\")\n with zipfile.ZipFile(f\"{zip_path}\", 'r') as z:\n z.extractall(f\"{self.tmp_path}\")\n self.input_data_path = zip_path[:-4]\n break\n elif download == \"n\":\n logger.error(f\"Cannot pursue without downloading the dataset.\")\n sys.exit()\n else:\n logger.error(\"Please enter a valid answer (y or n).\")",
"def download_models_and_data():\n\n for file in DATA_FILES:\n download_file(file[\"url\"], file[\"path\"])",
"def download_training_data(data_dir, task):\n\n COMMENTS_FILE = \"%s_annotated_comments.tsv\" % task\n LABELS_FILE = \"%s_annotations.tsv\" % task\n\n if task == \"attack\":\n download_file(ATTACK_ANNOTATED_COMMENTS_URL,\n os.path.join(data_dir, COMMENTS_FILE))\n download_file(ATTACK_ANNOTATIONS_URL, os.path.join(data_dir,\n LABELS_FILE))\n elif task == \"recipient_attack\":\n download_file(ATTACK_ANNOTATED_COMMENTS_URL,\n os.path.join(data_dir, COMMENTS_FILE))\n download_file(ATTACK_ANNOTATIONS_URL, os.path.join(data_dir,\n LABELS_FILE))\n elif task == \"aggression\":\n download_file(AGGRESSION_ANNOTATED_COMMENTS_URL,\n os.path.join(data_dir, COMMENTS_FILE))\n download_file(AGGRESSION_ANNOTATIONS_URL,\n os.path.join(data_dir, LABELS_FILE))\n elif task == \"toxicity\":\n download_file(TOXICITY_ANNOTATED_COMMENTS_URL,\n os.path.join(data_dir, COMMENTS_FILE))\n download_file(TOXICITY_ANNOTATIONS_URL,\n os.path.join(data_dir, LABELS_FILE))\n else:\n print(\"No training data for task: \", task)",
"def do_download( data_url, dest_dir,\n skip_download=False,\n skip_decompress=False,\n remove_zip_after=False ) :\n zip_file_path = dest_dir + '/tmp_file.zip'\n\n #%%\n if not skip_download :\n import urllib.request\n response = urllib.request.urlopen(data_url)\n\n chunk_size = 1024 * 64\n read_bytes = 0\n #%%\n with open( zip_file_path, 'wb') as f_out:\n for chunk in read_in_chunks( response, chunk_size ) :\n read_bytes += len( chunk )\n print( \"%d bytes read\" % read_bytes )\n f_out.write( chunk )\n else :\n print( \"skipping download\" )\n\n if not skip_decompress :\n print( \"Decompressing tmp zip file: \" + zip_file_path )\n zip_ref = zipfile.ZipFile(zip_file_path, 'r')\n #%%\n zip_ref.extractall( dest_dir )\n zip_ref.close()\n print( \"Done decompressing.\\nListing destination dir: \" + dest_dir )\n print( pformat( os.listdir( dest_dir ) ) )\n else :\n print( \"skipping decompress\" )\n\n if remove_zip_after :\n os.remove( zip_file_path )\n\n\n print('making train test dirs and distributing images in them')\n make_train_test_dirs( base_dir = dest_dir,\n orig_data_subdir = 'FullIJCNN2013',\n max_id=42,\n train_prop=0.8,\n seed=1337)",
"def download_and_extract_data(tmp_dir, dataset):\n url = dataset[0]\n print(dataset)\n compressed_filename = os.path.basename(url)\n compressed_file = generator_utils.maybe_download(\n tmp_dir, compressed_filename, url)\n\n for file in dataset[1]:\n tf.logging.info(\"Reading file: %s\" % file)\n filepath = os.path.join(tmp_dir, file)\n\n # Extract from tar if needed.\n if not tf.gfile.Exists(filepath):\n with tarfile.open(compressed_file, \"r:gz\") as corpus_tar:\n corpus_tar.extractall(tmp_dir)\n\n documents_filename, labels_filename = dataset[1]\n documents_filepath = os.path.join(tmp_dir, documents_filename)\n labels_filepath = os.path.join(tmp_dir, labels_filename)\n return documents_filepath, labels_filepath",
"def fetch_the_data():\n subprocess.run([\"wget\", \"https://storage.googleapis.com/recipe-box/recipes_raw.zip\"])\n subprocess.run([\"unzip\", \"recipes_raw.zip\", \"-d\", RECIPES_DIRPATH])\n subprocess.run([\"rm\", \"recipes_raw.zip\"])",
"def maybe_download(train_data, test_data):\r\n if train_data:\r\n train_file_name = train_data\r\n else:\r\n train_file = tempfile.NamedTemporaryFile(delete=False)\r\n urllib.request.urlretrieve('http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.data', train_file.name)\r\n train_file_name = train_file.name\r\n train_file.close()\r\n print('Training data is downloaded to %s' % train_file_name)\r\n\r\n if test_data:\r\n test_file_name = test_data\r\n else:\r\n test_file = tempfile.NamedTemporaryFile(delete=False)\r\n urllib.request.urlretrieve('http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.test', test_file.name)\r\n test_file_name = test_file.name\r\n test_file.close()\r\n print('Test data is downloaded to %s' % test_file_name)\r\n\r\n return train_file_name, test_file_name",
"def download(self):\n if not self.url:\n raise RuntimeError(self.tips)\n\n download_file_name = os.path.join(\n self.raw_path, os.path.splitext(os.path.basename(self.url))[0]\n )\n file_format = self.url.split(\".\")[-1]\n if \"amazon\" in self.url:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.json.{file_format}\"\n )\n else:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if \"1drv.ms\" in self.url:\n file_format = \"zip\"\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if not os.path.exists(raw_file_path):\n print(f\"download_file: url: {self.url}, raw_file_path: {raw_file_path}\")\n download_file(self.url, raw_file_path)\n if \"amazon\" in raw_file_path:\n # amazon dataset do not unzip\n print(\"amazon dataset do not decompress\")\n return\n elif file_format == \"gz\":\n file_name = raw_file_path.replace(\".gz\", \"\")\n with gzip.open(raw_file_path, \"rb\") as fin:\n with open(file_name, \"wb\") as fout:\n shutil.copyfileobj(fin, fout)\n else:\n shutil.unpack_archive(\n raw_file_path, self.raw_path, format=get_format(file_format)\n )\n\n if not os.path.exists(download_file_name):\n return\n elif os.path.isdir(download_file_name):\n os.rename(\n download_file_name, os.path.join(self.raw_path, self.dataset_name)\n )\n else:\n os.rename(\n download_file_name,\n os.path.join(\n self.raw_path,\n f'{self.dataset_name}.{download_file_name.split(\".\")[-1]}',\n ),\n )",
"def download_and_extract(self, data_path=None):\n if data_path is None:\n data_path = 'data'\n\n if not os.path.exists(data_path):\n os.makedirs(data_path)\n\n filename = self.DATA_URL.split('/')[-1]\n filepath = os.path.join(data_path, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename,\n float(count * block_size) / float(total_size) * 100.0)\n )\n\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(CifarData.DATA_URL,\n filepath,\n _progress)\n statinfo = os.stat(filepath)\n self._verbose_print('Successfully downloaded', filename,\n statinfo.st_size, 'bytes.')\n\n with tarfile.open(filepath, 'r:gz') as t:\n dataset_dir = os.path.join(data_path, t.getmembers()[0].name)\n t.extractall(data_path)\n\n return dataset_dir",
"def get_data( name=None, force_download=False, version=19, target_extension='.csv' ):\n os.makedirs(DATA_PATH, exist_ok=True)\n\n def download_data( version ):\n url = \"https://ndownloader.figshare.com/articles/14766102/versions/\" + str(version)\n target_file_name = \"14766102.zip\"\n target_file_name_path = tf.keras.utils.get_file(target_file_name, url,\n cache_subdir=DATA_PATH, extract = True )\n os.remove( DATA_PATH + target_file_name )\n\n if force_download:\n download_data( version = version )\n\n\n files = []\n for fname in os.listdir(DATA_PATH):\n if ( fname.endswith(target_extension) ) :\n fname = os.path.join(DATA_PATH, fname)\n files.append(fname)\n\n if len( files ) == 0 :\n download_data( version = version )\n for fname in os.listdir(DATA_PATH):\n if ( fname.endswith(target_extension) ) :\n fname = os.path.join(DATA_PATH, fname)\n files.append(fname)\n\n if name == 'all':\n return files\n\n datapath = None\n\n for fname in os.listdir(DATA_PATH):\n mystem = (Path(fname).resolve().stem)\n mystem = (Path(mystem).resolve().stem)\n mystem = (Path(mystem).resolve().stem)\n if ( name == mystem and fname.endswith(target_extension) ) :\n datapath = os.path.join(DATA_PATH, fname)\n\n if datapath is None:\n raise ValueError('File doesnt exist. Options: ' , os.listdir(DATA_PATH))\n return datapath",
"def _download(self, path):\n self.logger.info('Getting Million Song Dataset...')\n self.logger.info('Downloading Echo Nest Taste Subprofile train data...')\n base_url = 'http://millionsongdataset.com/sites/default/files/challenge/'\n\n download_dataset(\n base_url + 'train_triplets.txt.zip',\n join(self.data_folder, 'train.zip')\n )\n rename(join(self.data_folder, 'train'), path)\n\n self.logger.info('Downloading evaluation data for MSD Challenge...')\n download_dataset(\n base_url + 'EvalDataYear1MSDWebsite.zip',\n join(path, 'eval.zip')\n )\n rename(\n join(path, 'EvalDataYear1MSDWebsite'),\n join(path, 'evaluation')\n )\n\n self.logger.info('Downloading list of matching errors...')\n url = 'http://millionsongdataset.com/sites/default/files/tasteprofile/sid_mismatches.txt'\n download_url(url, join(path, 'sid_mismatches.txt'))",
"def download_dataset(dataset):\n\n if dataset not in URLS:\n print(f\"unknown dataset {dataset}\")\n sys.exit(0)\n\n filename = f'{dataset}.tar.gz'\n url = URLS[dataset]\n\n if not os.path.exists(filename):\n print(f'downloading dataset \"{dataset}\"')\n os.system(f'curl \"{url}\" -o {filename}')\n else:\n print(f'zipfile \"{filename}\" already exists, remove it if you want to re-download.')\n\n if not os.path.exists(dataset):\n print(f'extracting \"{filename}\"')\n os.system(f'tar -xvf {filename}')\n else:\n print(f'folder \"{dataset}\" already exists, remove it if you want to re-create.')\n\n image_chips = f'{dataset}/image-chips'\n label_chips = f'{dataset}/label-chips'\n if not os.path.exists(image_chips) and not os.path.exists(label_chips):\n print(\"creating chips\")\n libs.images2chips.run(dataset)\n else:\n print(f'chip folders \"{image_chips}\" and \"{label_chips}\" already exist, remove them to recreate chips.')",
"def _DownloadData(data_dir, data_path, vm):\n\n vm.Install('google_cloud_sdk')\n vm.RemoteCommand(\n 'if [ ! -d \\\"{data_path}\\\" ]; then '\n ' sudo mkdir -p {data_path} && '\n ' sudo chmod a+w {data_path} && '\n ' {gsutil_path} -m cp -r {data_dir}/* {data_path} ;'\n 'fi'.format(\n data_dir=data_dir,\n gsutil_path=google_cloud_sdk.GSUTIL_PATH,\n data_path=data_path))",
"def download_dailydialog(daily_raw_fname: str, data_path: str):\n wget.download(daily_raw_fname, data_path)\n # Manually unzip the train/dev/test files",
"def download_coco_dataset():\n # Create file structure\n os.makedirs(os.path.join(\"data\", \"coco\", \"train\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"dev\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"test\"), exist_ok=True)\n # Download the train, dev and test datasets\n print(\"Downloading COCO dataset.\")\n url = \"http://images.cocodataset.org/zips/train2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"train2014.zip\"))\n url = \"http://images.cocodataset.org/zips/val2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"val2014.zip\"))\n url = \"http://images.cocodataset.org/zips/test2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"test2014.zip\"))\n print(\"Done downloading COCO dataset.\")\n # Unzip the files\n print(\"Extracting COCO dataset.\")\n # Extract Train dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"train2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"train2014\"),\n os.path.join(\"data\", \"coco\", \"train\", \"dummy\"),\n )\n # Extract Validation dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"val2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"val2014\"),\n os.path.join(\"data\", \"coco\", \"dev\", \"dummy\"),\n )\n # Extract Test dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"test2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"test2014\"),\n os.path.join(\"data\", \"coco\", \"test\", \"dummy\"),\n )\n print(\"Done extracting COCO dataset.\")",
"def download_data_files(self, dest_directory):\n\t\tif not os.path.exists(dest_directory):\n\t\t\tos.makedirs(dest_directory)\n\t\tfilename = DATA_URL.split('/')[-1]\n\t\tfilepath = os.path.join(dest_directory, filename)\n\t\tif not os.path.exists(filepath):\n\t\t\tdef _progress(count, block_size, total_size):\n\t\t\t\tsys.stdout.write('\\r>> Downloading %s %.1f%%' % (filename,\n\t\t\t\t\t\tfloat(count * block_size) / float(total_size) * 100.0))\n\t\t\t\tsys.stdout.flush()\n\t\t\tfilepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n\t\t\tprint()\n\t\t\tstatinfo = os.stat(filepath)\n\t\t\tprint('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n\t\textracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')\n\t\tif not os.path.exists(extracted_dir_path):\n\t\t\ttarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def maybe_download_and_extract():\n dest_directory = MODEL_DIR\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def _download(self) -> None:\n download_url(\n self.url,\n self.root,\n filename=self.data_dir,\n md5=self.md5 if self.checksum else None,\n )\n self._extract()",
"def download_datasets():\n if not os.path.exists(\"__data__/cornell/movie_conversations.txt\") \\\n or not os.path.exists(\"__data__/cornell/movie_lines.txt\"):\n subprocess.call(['scripts/download_cornell.sh'])\n if not os.path.isdir('__data__/opensubs'):\n subprocess.call(['scripts/download_opensubs.sh'])",
"def _build_hf_dataset_from_remote(\n cfg: DictConfig, tokenizer: PreTrainedTokenizerBase\n) -> Union[hf_datasets.DatasetDict, hf_datasets.Dataset,\n hf_datasets.IterableDatasetDict, hf_datasets.IterableDataset]:\n supported_extensions = ['jsonl', 'csv', 'parquet']\n finetune_dir = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n f'downloaded_finetuning_data/{cfg.dataset.split}')\n os.makedirs(finetune_dir, exist_ok=True)\n for extension in supported_extensions:\n name = f'{cfg.dataset.hf_name.strip(\"/\")}/{cfg.dataset.split}.{extension}'\n destination = str(\n os.path.abspath(f'{finetune_dir}/{cfg.dataset.split}.{extension}'))\n # Since we don't know exactly what the extension will be, since it is one of a list\n # use a signal file to wait for instead of the desired file\n signal_file_path = os.path.join(finetune_dir, '.the_eagle_has_landed')\n if dist.get_local_rank() == 0:\n try:\n get_file(name, destination, overwrite=True)\n except FileNotFoundError as e:\n if extension == supported_extensions[-1]:\n raise FileNotFoundError(\n f'Could not find a {cfg.dataset.split} file with any of ' + \\\n f'the supported extensions: {supported_extensions}\\n' + \\\n f'at {cfg.dataset.hf_name}/{cfg.dataset.split}'\n ) from e\n else:\n print(\n f'Could not find {name}, looking for another extension')\n continue\n\n os.makedirs(os.path.dirname(signal_file_path), exist_ok=True)\n with open(signal_file_path, 'wb') as f:\n f.write(b'local_rank0_completed_download')\n\n # Avoid the collective call until the local rank zero has finished trying to download the checkpoint\n # so that we don't timeout for large downloads. This syncs all processes on the node\n with dist.local_rank_zero_download_and_wait(signal_file_path):\n # Then, wait to ensure every node has finished downloading the checkpoint\n dist.barrier()\n\n # clean up signal file\n if dist.get_local_rank() == 0:\n os.remove(signal_file_path)\n dist.barrier()\n\n cfg.dataset.hf_name = finetune_dir\n print(cfg.dataset)\n dataset = dataset_constructor.build_from_hf(\n cfg.dataset,\n max_seq_len=cfg.dataset.max_seq_len,\n tokenizer=tokenizer,\n )\n return dataset",
"def download_data(self, filename=None):\n if (filename is None): filename = ['Public','Gathering.dat']\n elif (type(filename) is str): filename = [filename]\n elif (type(filename) is list): pass\n else: raise TypeError('Require the file path (\\'Public/Gathering.dat\\')')\n\n self.newportxps.ftpconn.connect(**self.newportxps.ftpargs)\n remote_path = posixpath.join(self.newportxps.ftphome, *filename)\n self.newportxps.ftpconn.cwd(remote_path)\n self.newportxps.ftpconn.save(posixpath.basename(remote_path), posixpath.basename(remote_path))\n self.newportxps.ftpconn.close()"
]
| [
"0.6259458",
"0.62141436",
"0.61821884",
"0.61650014",
"0.61649317",
"0.61506605",
"0.6042444",
"0.6038",
"0.59566534",
"0.5953423",
"0.5948335",
"0.5939772",
"0.5936615",
"0.59077805",
"0.58687586",
"0.5842238",
"0.5826728",
"0.5821514",
"0.57949364",
"0.5789982",
"0.5730511",
"0.57288533",
"0.5727541",
"0.57082874",
"0.57053614",
"0.5688339",
"0.56875646",
"0.5679076",
"0.5666957",
"0.56653273"
]
| 0.6341184 | 0 |
Fills the given path with randomly generated 64x64 images. This can be used for quick testing of the workflow of the model. Does NOT pack the files into a tar, but does preprocess them. | def create_random_data(output_path: str, num_images: int = 5) -> None:
train_path = os.path.join(output_path, "train")
class1_train_path = os.path.join(train_path, "class1")
class2_train_path = os.path.join(train_path, "class2")
val_path = os.path.join(output_path, "val")
class1_val_path = os.path.join(val_path, "class1")
class2_val_path = os.path.join(val_path, "class2")
test_path = os.path.join(output_path, "test")
class1_test_path = os.path.join(test_path, "class1")
class2_test_path = os.path.join(test_path, "class2")
paths = [
class1_train_path,
class1_val_path,
class1_test_path,
class2_train_path,
class2_val_path,
class2_test_path,
]
for path in paths:
try:
os.makedirs(path)
except FileExistsError:
pass
for i in range(num_images):
pixels = numpy.random.rand(64, 64, 3) * 255
im = Image.fromarray(pixels.astype("uint8")).convert("RGB")
im.save(os.path.join(path, f"rand_image_{i}.jpeg"))
process_images(output_path) | {
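As a hedged smoke test (assumes numpy, PIL.Image, and the process_images helper referenced above are importable in the caller's environment):

import tempfile

with tempfile.TemporaryDirectory() as out_dir:
    create_random_data(out_dir, num_images=2)
    # Expect train/, val/ and test/ splits, each with class1/ and class2/ holding 2 random JPEGs.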
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mock_raw_data(tmp_dir, raw_dim=1024, num_channels=3, num_images=1):\n\n tf.gfile.MakeDirs(tmp_dir)\n\n for image_id in range(num_images):\n\n raw_image_path = os.path.join(tmp_dir, \"%s.jpg\" % image_id)\n\n mock_raw_image(x_dim=raw_dim, y_dim=raw_dim,\n num_channels=num_channels,\n output_path=raw_image_path)",
"def getRandomImage(path):\n folders = list(filter(lambda x: os.path.isdir(os.path.join(path, x)), os.listdir(path)))\n random_directory = np.random.randint(0,len(folders))\n path_class = folders[random_directory]\n print(\"Class - \" + five_celeb_dict_n[str(path_class)])\n file_path = path + path_class\n file_names = [f for f in listdir(file_path) if isfile(join(file_path, f))]\n random_file_index = np.random.randint(0,len(file_names))\n image_name = file_names[random_file_index]\n return cv2.imread(file_path+\"/\"+image_name)",
"def fixture_image_data(tmp_path_factory, request):\n # Make root dir\n root = tmp_path_factory.mktemp(\"data\")\n\n # Set params\n num_images = request.param\n\n # Create image files\n paths = [root / Path(f\"{idx}.png\") for idx in range(num_images)]\n dimensions = [(idx % 10 + 1, (10 - idx) % 10 + 1) for idx in range(num_images)]\n for path, dim in zip(paths, dimensions):\n image = Image.new(mode=\"RGB\", size=dim)\n if not path.parent.exists():\n path.parent.mkdir(parents=True)\n with open(path, \"wb\") as img_file:\n image.save(img_file)\n return root",
"def make_image(self, path):\n\t\treturn None",
"def populate_train_test_val_dirs_randomly(root_dir=(os.getcwd()), val_ratio=0.15, test_ratio=0.05):\n\n ''' Creating partitions of the data after shuffling '''\n # Folder to copy images from\n src = root_dir # The folder to copy images from\n\n all_file_names = [f for f in os.listdir(src) if isfile(join(src, f))]\n\n np.random.shuffle(all_file_names)\n\n train_file_names, val_file_names, test_file_names = np.split(np.array(all_file_names),\n [int(len(all_file_names) * (\n 1 - val_ratio + test_ratio)),\n int(len(all_file_names) * (1 - test_ratio))])\n ''' Print the file distribution amongst the folders '''\n print_file_distribution(len(all_file_names), len(train_file_names), len(val_file_names), len(test_file_names))\n\n print(train_file_names)\n\n ''' Copy-Pasting Images '''\n for name in train_file_names:\n shutil.copy(join(root_dir, 'CoregisteredImages', name), root_dir + '/train/CoregisteredImages')\n shutil.copy(join(root_dir, 'BlurryImages', name), root_dir + '/train/BlurryImages')\n for name in val_file_names:\n shutil.copy(join(root_dir, 'CoregisteredImages', name), root_dir + '/val/CoregisteredImages')\n shutil.copy(join(root_dir, 'BlurryImages', name), root_dir + '/val/BlurryImages')\n for name in test_file_names:\n shutil.copy(join(root_dir, 'CoregisteredImages', name), root_dir + '/test/CoregisteredImages')\n shutil.copy(join(root_dir, 'BlurryImages', name), root_dir + '/test/BlurryImages')",
"def test_generator(self, test_path):\n\n img_list = os.scandir(test_path)\n for img_entry in img_list:\n\n img = cv2.imread(img_entry.path, COLOR_TO_OPENCV[self.color_mode])\n if img.shape[-1] == 3:\n orig_shape = img.shape[-2::-1]\n else:\n orig_shape = img.shape[::-1]\n\n\n img = cv2.resize(img, tuple(self.target_size))\n img = img / 255\n if self.color_mode == \"grayscale\":\n img = np.reshape(img, img.shape + (1,))\n img = np.reshape(img, (1,) + img.shape)\n yield img, img_entry, orig_shape",
"def create_image(path, pxcount):\n img = Image.open(path, 'r').convert('L')\n pixels = img.load()\n for i in range(pxcount):\n x = randint(0, img.size[0]-1)\n y = randint(0, img.size[0]-1)\n if pixels[x, y] == 0:\n pixels[x, y] = 255\n else:\n pixels[x, y] = 0\n return img",
"def packDir(self, path='', recursive=True, autorotate=False, debug=False):\n\n Console.info('Packing sprites in: %s' % os.path.join(self.base, path))\n Console.indent()\n \n self.files = []\n self.addDir(path, recursive=recursive)\n Console.info('Found %d images' % len(self.files))\n\n if len(self.files) > 0:\n self.generate(path, autorotate, debug)\n \n Console.outdent()",
"def get_rand_img():\n import urllib\n import os\n import glob\n\n pics = glob.glob('/home/cody_techngs/PycharmProjects/ProjTest/ActiveAMT/ActiveAMT_FLASK/static/images/HITs/rand*')\n nums = []\n\n for pic in pics:\n nums.append(int(pic.split('rand_img')[1].split('.')[0]))\n\n unique_num = False\n new_rand_num = 0\n\n while not unique_num:\n new_rand_num = random.randrange(1, 2000)\n if new_rand_num not in nums:\n unique_num = True\n\n img_name = 'rand_img{}.jpg'.format(new_rand_num)\n dl_location = os.getcwd() + '/ActiveAMT/ActiveAMT_FLASK/static/images/HITs/' + img_name\n url = 'https://unsplash.it/400/300/?random'\n urllib.urlretrieve(url, dl_location)\n\n return 'static/images/HITs/{}'.format(img_name)",
"def populate_train_test_val_dirs_nonrandomly(root_dir, val_ratio=0.15, test_ratio=0.05, preliminary_clahe=True,\n apply_masks=True):\n\n ''' Creating partitions of the data after shuffling '''\n # Folder to copy images from\n src = join(root_dir, 'CoregisteredBlurryImages')\n\n all_file_names = [f for f in os.listdir(src) if isfile(join(src, f))]\n\n if val_ratio == 0.0:\n # Select the number of images to skip between validation images\n val_skip_number = len(all_file_names) + 1\n else:\n # Select the number of images to skip between validation images\n val_skip_number = len(all_file_names) / (val_ratio * len(all_file_names))\n\n if test_ratio == 0.0:\n # Select the number of images to skip between test images\n test_skip_number = len(all_file_names) + 1\n else:\n # Select the number of images to skip between test images\n test_skip_number = len(all_file_names) / (test_ratio * len(all_file_names))\n\n # Get the list of validation file names, test file names, and train file names\n val_file_names = all_file_names[::int(val_skip_number)]\n test_file_names = [filename for filename in all_file_names[::int(test_skip_number + 1)]\n if filename not in val_file_names]\n train_file_names = [filename for filename in all_file_names\n if filename not in val_file_names and filename not in test_file_names]\n\n # Print the file distribution among the folders\n logger.print_file_distribution(len(all_file_names), len(train_file_names), len(val_file_names),\n len(test_file_names))\n\n # Copy-Pasting images into train dataset\n for name in train_file_names:\n shutil.copy(join(root_dir, 'CoregisteredBlurryImages', name), root_dir + '/train/CoregisteredBlurryImages')\n shutil.copy(join(root_dir, 'ClearImages', name), root_dir + '/train/ClearImages')\n if apply_masks:\n shutil.copy(join(root_dir, 'Masks', name), root_dir + '/train/Masks')\n\n # Copy-Pasting images into val dataset\n for name in val_file_names:\n shutil.copy(join(root_dir, 'CoregisteredBlurryImages', name), root_dir + '/val/CoregisteredBlurryImages')\n shutil.copy(join(root_dir, 'ClearImages', name), root_dir + '/val/ClearImages')\n if apply_masks:\n shutil.copy(join(root_dir, 'Masks', name), root_dir + '/val/Masks')\n\n # Copy-Pasting images into test dataset\n for name in test_file_names:\n shutil.copy(join(root_dir, 'CoregisteredBlurryImages', name), root_dir + '/test/CoregisteredBlurryImages')\n shutil.copy(join(root_dir, 'ClearImages', name), root_dir + '/test/ClearImages')\n if apply_masks:\n shutil.copy(join(root_dir, 'Masks', name), root_dir + '/test/Masks')\n\n ''' Augment the images in each new folder '''\n # If we want to use preliminary adaptive equalization...\n if preliminary_clahe:\n pass\n # ... then first, apply Contrast Limited Adaptive Histogram Equalization to clear images in all folders\n CLAHE_image_folder(root_dir + '/train/ClearImages')\n CLAHE_image_folder(root_dir + '/val/ClearImages')\n CLAHE_image_folder(root_dir + '/test/ClearImages')\n\n # Then, apply histogram equalization to make the blurry images' histogram match that of the clear images\n hist_match_image_folder(root_dir=join(root_dir, 'train'),\n clear_dir_name='ClearImages',\n blurry_dir_name='CoregisteredBlurryImages',\n match_to_clear=True)\n hist_match_image_folder(root_dir=join(root_dir, 'val'),\n clear_dir_name='ClearImages',\n blurry_dir_name='CoregisteredBlurryImages',\n match_to_clear=True)\n hist_match_image_folder(root_dir=join(root_dir, 'test'),\n clear_dir_name='ClearImages',\n blurry_dir_name='CoregisteredBlurryImages',\n match_to_clear=True)",
"def populate_train_test_val_dirs_nonrandomly(root_dir=(os.getcwd()), val_ratio=0.15, test_ratio=0.05):\n\n ''' Creating partitions of the data after shuffling '''\n # Folder to copy images from\n src = join(root_dir, 'CoregisteredImages')\n\n all_file_names = [f for f in os.listdir(src) if isfile(join(src, f))]\n\n # Select the number of images to skip between validation images\n val_skip_number = len(all_file_names) / (val_ratio * len(all_file_names))\n\n # Select the number of images to skip between test images\n test_skip_number = len(all_file_names) / (test_ratio * len(all_file_names))\n\n # Get the list of validation file names, test file names, and train file names\n val_file_names = all_file_names[::int(val_skip_number)]\n test_file_names = [filename for filename in all_file_names[::int(test_skip_number + 1)] if filename not in val_file_names]\n train_file_names = [filename for filename in all_file_names if filename not in val_file_names and filename not in test_file_names]\n\n ''' Print the file distribution amongst the folders '''\n print_file_distribution(len(all_file_names), len(train_file_names), len(val_file_names), len(test_file_names))\n\n print(train_file_names)\n\n ''' Copy-Pasting Images '''\n for name in train_file_names:\n shutil.copy(join(root_dir, 'CoregisteredImages', name), root_dir + '/train/CoregisteredImages')\n shutil.copy(join(root_dir, 'BlurryImages', name), root_dir + '/train/BlurryImages')\n for name in val_file_names:\n shutil.copy(join(root_dir, 'CoregisteredImages', name), root_dir + '/val/CoregisteredImages')\n shutil.copy(join(root_dir, 'BlurryImages', name), root_dir + '/val/BlurryImages')\n for name in test_file_names:\n shutil.copy(join(root_dir, 'CoregisteredImages', name), root_dir + '/test/CoregisteredImages')\n shutil.copy(join(root_dir, 'BlurryImages', name), root_dir + '/test/BlurryImages')",
"def generate_standard_dataset(dir_path):\n filenames = []\n \n # Normal Training data\n for pathAndFileName in glob.iglob(os.path.join(dir_path, '*.jpg')):\n filenames.append(pathAndFileName)\n \n filename_queue = tf.train.string_input_producer(filenames, shuffle=None)\n \n reader = tf.WholeFileReader()\n \n _, value = reader.read(filename_queue)\n \n image = tf.image.decode_jpeg(value, 3)\n \n image = preprocess_image(image, height=34, width=34)\n \n return image, filenames",
"def test_RawRun_imagepaths():\n p1 = r.imagepaths[0]\n path = 'tests/data/synced/r11_07_06c/cam1/img_0001.jpg'\n assert(os.path.samefile(p1, path))\n assert_equal(len(r.imagepaths), 6)",
"def get_random_image_path(imgs_path):\n img_files = os.listdir(imgs_path)\n\n if len(img_files) < 1:\n raise Exception(\"No images found pertaining to the given make and mode.\")\n\n img_path = imgs_path + \"/\" + str(img_files[random.randrange(0, len(img_files))])\n\n return img_path",
"def populate_train_test_val_dirs_randomly(root_dir=(os.getcwd()), val_ratio=0.15, test_ratio=0.05):\n # Creating partitions of the data after shuffling\n # Folder to copy images from\n src = root_dir # The folder to copy images from\n\n all_file_names = [f for f in os.listdir(src) if isfile(join(src, f))]\n\n np.random.shuffle(all_file_names)\n\n train_file_names, val_file_names, test_file_names = np.split(np.array(all_file_names),\n [int(len(all_file_names) * (\n 1 - val_ratio + test_ratio)),\n int(len(all_file_names) * (1 - test_ratio))])\n # Print the file distribution amongst the folders\n logger.print_file_distribution(len(all_file_names), len(train_file_names), len(val_file_names),\n len(test_file_names))\n\n print(train_file_names)\n\n # Copy-Pasting Images\n for name in train_file_names:\n shutil.copy(join(root_dir, 'CoregisteredBlurryImages', name), root_dir + '/train/CoregisteredBlurryImages')\n shutil.copy(join(root_dir, 'ClearImages', name), root_dir + '/train/ClearImages')\n for name in val_file_names:\n shutil.copy(join(root_dir, 'CoregisteredBlurryImages', name), root_dir + '/val/CoregisteredBlurryImages')\n shutil.copy(join(root_dir, 'ClearImages', name), root_dir + '/val/ClearImages')\n for name in test_file_names:\n shutil.copy(join(root_dir, 'CoregisteredBlurryImages', name), root_dir + '/test/CoregisteredBlurryImages')\n shutil.copy(join(root_dir, 'ClearImages', name), root_dir + '/test/ClearImages')",
"def generate_random_patches(filenames, size, seed=0, per_image=1):\n from copy import copy\n import itertools as itr\n\n filenames = copy(filenames)\n randgen = np.random.RandomState(seed)\n randgen.shuffle(filenames)\n failures = 0\n for fn in itr.cycle(filenames):\n img = asgray(load_image(fn))\n\n for l in range(per_image):\n # Random position\n x_to = img.shape[0]-size[0]+1\n y_to = img.shape[1]-size[1]+1\n\n if x_to >= 1 and y_to >= 1:\n x = randgen.randint(x_to) \n y = randgen.randint(y_to)\n yield img[x:x+size[0], y:y+size[1]]\n \n failures = 0\n else:\n failures += 1\n\n # The images are too small, let's stop iterating\n if failures >= 30:\n return",
"def main():\n\n os.system(\"rm -rf images; mkdir images\")\n\n if (len(sys.argv) > 1):\n N = int(sys.argv[1])\n else:\n N = 10\n\n x_test = np.load(\"../../../../data/mnist/mnist_test_images.npy\")\n\n for i in range(N):\n r,c = random.randint(6,12), random.randint(6,12)\n g = np.zeros(r*c)\n for j in range(r*c):\n if (random.random() < 0.15):\n g[j] = 1\n g = g.reshape((r,c))\n g[:,0] = g[0,:] = g[:,-1] = g[-1,:] = 0\n\n img = np.zeros((28*r,28*c), dtype=\"uint8\")\n for x in range(r):\n for y in range(c):\n if (g[x,y] == 1):\n n = random.randint(0, x_test.shape[0])\n im = x_test[n]\n img[28*x:(28*x+28), 28*y:(28*y+28)] = im\n \n Image.fromarray(img).save(\"images/image_%04d.png\" % i)",
"def setup_image_folder(path_to_images):\n\n print(\"setup images folder...\")\n\n if os.path.isdir(path_to_images):\n print(\"folder already exists: remove...\")\n shutil.rmtree(path_to_images)\n\n os.mkdir(path_to_images)\n print(\"folder created\")",
"def load_mnist(path='./', test_size=0.3, random_state = 123):\n \n np.random.seed(random_state)\n if 'X_train.npy' not in os.listdir(path=path) or 'y_train.npy' not in os.listdir(path=path):\n print (\"Train dataset not found. Downloading...\")\n os.system(\"curl -L -o train.zip {}\".format(TRAIN_DATA_LINK))\n os.system(\"unzip train.zip\")\n os.system(\"tar -xf trainingSet.tar.gz\")\n images = []\n labels = []\n for class_name in os.listdir('./trainingSet'):\n if 'ipynb' not in class_name and '.DS' not in class_name:\n for image_name in os.listdir('./trainingSet/{}'.format(class_name)):\n image = imread('./trainingSet/{}/{}'.format(class_name, image_name))\n images.append(image)\n labels.append(int(class_name))\n X_train = np.array(images)\n y_train = np.array(labels)\n\n permutation = np.random.permutation(X_train.shape[0])\n X_train = X_train[permutation]\n y_train = y_train[permutation]\n\n with open('X_train.npy', 'wb') as f:\n np.save(f, X_train)\n with open('y_train.npy', 'wb') as f:\n np.save(f, y_train)\n os.system(\"rm -rf trainingSet\")\n os.system(\"rm -rf train.zip\")\n os.system(\"rm -rf trainingSet.tar.gz\")\n else:\n X_train = np.load('X_train.npy')\n y_train = np.load('y_train.npy')\n\n if 'X_test.npy' not in os.listdir(path=path) or 'y_test.npy' not in os.listdir(path=path):\n print (\"Test dataset not found. Downloading...\")\n os.system(\"curl -L -o test.zip {}\".format(TEST_DATA_LINK))\n os.system(\"unzip test.zip\")\n os.system(\"tar -xf trainingSample.tar.gz\")\n images = []\n labels = []\n for class_name in os.listdir('./trainingSample'):\n if 'ipynb' not in class_name and '.DS' not in class_name:\n for image_name in os.listdir('./trainingSample/{}'.format(class_name)):\n image = imread('./trainingSample/{}/{}'.format(class_name, image_name))\n images.append(image)\n labels.append(int(class_name))\n X_test = np.array(images)\n y_test = np.array(labels)\n with open('X_test.npy', 'wb') as f:\n np.save(f, X_test)\n with open('y_test.npy', 'wb') as f:\n np.save(f, y_test)\n\n os.system(\"rm -rf trainingSample\")\n os.system(\"rm -rf test.zip\")\n os.system(\"rm -rf trainingSet.tar.gz\")\n\n else:\n X_test = np.load('X_test.npy')\n y_test = np.load('y_test.npy')\n\n return X_train, X_test, y_train, y_test",
"def feed(self, reset=True): \n if self.reuse:\n image_subdirs = get_random_image_sample(IMAGE_PACKAGE_SIZE, self.image_location, [])\n else:\n image_subdirs = get_random_image_sample(IMAGE_PACKAGE_SIZE, self.image_location, self.used_images)\n if reset:\n reset_directory(self.feed_location, self.image_location)\n images = self.move_images(image_subdirs, self.feed_location, folders=True)\n self.used_images.extend(images)\n return image_subdirs",
"def load_images(input_dir=\"/tmp/mapswipe/project-1\", n_images=2000, seed=1):\n class_map = {1: \"1\", 0: \"5\"}\n output_dir = \"/Users/thead/git/dreamview/data/\"\n\n X_ = []\n y_ = []\n for new_klass in class_map:\n images = []\n for klass in class_map[new_klass]:\n for img in glob.glob(input_dir + \"/%s/*/*/*/aerial.jpeg\" % klass):\n if os.stat(img).st_size > 0:\n images.append(img)\n\n images = shuffle(images, random_state=seed+42+new_klass)\n images = images[:n_images]\n X_ += images\n y_ += [new_klass] * len(images)\n\n # XXX deduce array size from an actual image\n X = np.zeros((2*n_images, 256*256), dtype=np.ubyte)\n y = np.zeros(2*n_images, dtype=np.int)\n\n for n, (img_path, klass) in enumerate(zip(X_, y_)):\n # the order of these OPs has been chosen on purpose, don't mess\n # without checking what happens\n img = imread(img_path)\n img = equalize_adapthist(img)\n img = rgb2grey(img)\n img = img_as_ubyte(img)\n\n if not n % 10:\n fname = os.path.split(img_path)[:-1]\n fname = os.path.join(*fname, \"aerial-processed.jpeg\")\n imsave(fname, img)\n\n X[n,:] = img.ravel()\n y[n] = klass\n\n return X, y",
"def seed(path):\n return os.path.join(os.path.split(os.path.realpath(__file__))[0], path)",
"def random_img(path):\n fullpath = os.path.join(settings.MEDIA_ROOT, path)\n filenames = [f for f in os.listdir(fullpath) if is_image_file(f)]\n pick = random.choice(filenames)\n return posixpath.join(settings.MEDIA_URL, path, pick)",
"def custom_data_generator(img_paths, final_height, final_width):\n for img_path in img_paths:\n image = Image.open(img_path)\n resized_image = image.resize((final_width, final_height), Image.ANTIALIAS) # Image.LANCZOS\n img = np.array(resized_image)\n img = tf.image.convert_image_dtype(img, tf.float32)\n yield img, tf.constant([[]], dtype=tf.float32), tf.constant([], dtype=tf.int32)",
"def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n # all_files = tf.gfile.Glob(os.path.join(input_dir, '*.png'))\n # test_files = [all_files[idx] for x in np.random.choice(len(all_files), 200, replace=False)]\n # for filepath in test_files:\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images",
"def setUp(self):\n self.image = np.random.randint(\n 0, 256, size=(10, 10, 3)).astype('uint8')",
"def load_images(self, image_paths):\n \n fill_list = []\n \n for idx in tqdm(range(len(image_paths))):\n path = image_paths[idx]\n yield cv2.imread(path)",
"def __init__(self, width, height, rand_seed=None, filepath=None):\r\n self.height = height\r\n self.width = width\r\n\r\n if rand_seed:\r\n seed(rand_seed)",
"def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap(i, 0, x_size, -1, 1)\n y = remap(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n\n im.save(filename)",
"def get_image_path_label(all_paths):\r\n n_folders_int = random.sample(range(0, len(all_paths)), n_way)\r\n image_labels = [[(glob.glob(all_paths[n] + '\\*')[k], n) # (path, label)\r\n for n in n_folders_int\r\n for k in random.sample(range(0, len(glob.glob(all_paths[n] + '\\*'))), k_shot+1)\r\n ] for b in range(batch_size)] \r\n return image_labels"
]
| [
"0.60784703",
"0.59843904",
"0.5849876",
"0.5831968",
"0.5792602",
"0.57504994",
"0.56419593",
"0.5597036",
"0.5579867",
"0.5567797",
"0.5550139",
"0.55407375",
"0.5509547",
"0.5507798",
"0.5497652",
"0.5475824",
"0.54651845",
"0.5463205",
"0.5461314",
"0.5449749",
"0.5435942",
"0.5417159",
"0.5402434",
"0.5398665",
"0.5367033",
"0.5358561",
"0.53389835",
"0.5335965",
"0.53103906",
"0.52960426"
]
| 0.60208035 | 1 |
Return a tuple of types for a field, or `default` if there is no type information and a default is specified. All fields will return a single-element tuple, unless the field was configured with a `UnionKind`, in which case all types in the union will be returned. If `unwrap` is True, then the tuple of types will be returned. If it is False, then the tuple of type proxies will be returned; these could be `UnionKind`, `ImmediateKind`, or `DeferredKind` instances. | def field_type(f, default=MISSING, *, unwrap=True) -> Union[tuple, Any]:
return _field_type(f, TYPE, default, unwrap=unwrap) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _or_types(field):\n return '|'.join(field.get('type', {}).get('names', []))",
"def field_subtype(f, default=MISSING, *, unwrap=True):\n return _field_type(f, SUBTYPE, default, unwrap=unwrap)",
"def build_defaults(self, fields, defaults):\n # assert '__iter__' in dir(defaults), iterReq('defaults', defaults)\n if not defaults or '__iter__' not in dir(defaults):\n defaults = []\n if len(defaults) != len(fields):\n print 'WARNING: mismatched lengths of defaults and expected_types'\n print 'Found (%d) instead of (%d)' % (len(defaults), len(fields))\n print '>>> OVERRIDING DEFAULTS TO EXPECTED TYPES W/O ARGS'\n defaults = [ self.expected[f]() for f in self.expected ]\n\n return defaults",
"def namedtuple_with_two_defaults(typename, field_names, default_values=('', '')):\n T = collections.namedtuple(typename, field_names)\n T.__new__.__defaults__ = default_values\n return T",
"def namedtuple_with_defaults(typename,\n field_names,\n default_values=(),\n units=None):\n T = collections.namedtuple(typename, field_names)\n T.__new__.__defaults__ = (None, ) * len(T._fields)\n if isinstance(default_values, collections.abc.Mapping):\n prototype = T(**default_values)\n else:\n prototype = T(*default_values)\n T.__new__.__defaults__ = tuple(prototype)\n if units is None:\n T.units = ('-') * len(T._fields)\n else:\n T.units = units\n return T",
"def _default_field_value(field):\n return field.default or ([field.value_cls()] if field.is_list else field.value_cls())",
"def get_return_fields(self, all_types: \"Dict[str, SchemaType]\") -> Tuple[SelectedField]:\n if self._return_fields is None:\n from qlient import helpers\n self._return_fields = helpers.adapt_return_fields(\n self.return_type,\n all_types,\n self.settings.max_recursion_depth\n )\n return self._return_fields",
"def get_field_type(connection, table_name, row):\n field_params = OrderedDict()\n field_notes = []\n is_geometry = False\n try:\n field_type = connection.introspection.get_field_type(row[1], row)\n except KeyError:\n field_type = 'TextField'\n field_notes.append('This field type is a guess.')\n\n # This is a hook for data_types_reverse to return a tuple of\n # (field_type, field_params_dict).\n if type(field_type) is tuple:\n field_type, new_params = field_type\n field_params.update(new_params)\n\n # Add max_length for all CharFields.\n if field_type == 'CharField' and row[3]:\n field_params['max_length'] = int(row[3])\n\n if field_type == 'DecimalField':\n if row[4] is None or row[5] is None:\n field_notes.append(\n 'max_digits and decimal_places have been guessed, as this '\n 'database handles decimal fields as float')\n field_params['max_digits'] = row[4] if row[4] is not None else 10\n field_params['decimal_places'] = row[\n 5] if row[5] is not None else 5\n else:\n field_params['max_digits'] = row[4]\n field_params['decimal_places'] = row[5]\n\n if field_type == 'GeometryField':\n geo_col = row[0]\n # Getting a more specific field type and any additional parameters\n # from the `get_geometry_type` routine for the spatial backend.\n field_type, geo_params = connection.introspection.get_geometry_type(\n table_name, geo_col)\n field_params.update(geo_params)\n is_geometry = True\n\n return field_type, field_params, is_geometry\n # return getattr(models.fields, field_type), field_params",
"def _or_types(field):\n return rst.escape('|'.join(field.get('type', {}).get('names', [])))",
"def _compute_out_types(fields, type_list):\n if fields == 'all':\n return type_list\n return [type_list[i] for i in fields]",
"def get_expected_type(self) -> Type[Any]:\n types = get_type_hints(self.model.record)\n\n if self.pivot:\n return Dict[types[self.pivot], types[self.field]] # type: ignore\n\n return List[types[self.field]] # type: ignore",
"def _split(\n _fields: Dict[str, Union[Annotation, Input, Output]]\n ) -> Tuple[Dict[str, Union[Annotation, Input, Output]], Dict[str, Union[Annotation, Input, Output]]]:\n _no_defaults_fields, _defaults_fields = {}, {}\n seen_default = False\n for key, val in _fields.items():\n if val.get(\"default\", None) or seen_default:\n seen_default = True\n _defaults_fields[key] = val\n else:\n _no_defaults_fields[key] = val\n return _no_defaults_fields, _defaults_fields",
"def get_field_type(self, field):\n for mapping in self.mappings:\n if isinstance(field, mapping[1]):\n return mapping[0]\n return None",
"def _cast_tuple(self, values):\n result = []\n for i, value in enumerate(values):\n if i < len(self.field_types):\n result.append(self._cast_field(self.field_types[i], value))\n else:\n result.append(self._cast_field(self.field_types[-1], value))\n\n return tuple(result)",
"def __parse_field(self, field, tuple_descriptor, alias_on_complex_types, make_visible):\r\n alias = None\r\n field_type = None\r\n return_type = None\r\n underlying_fields = None\r\n aggregate_factory = None\r\n literal_value = None\r\n func_factory = None\r\n fields_to_verify = []\r\n parsed_fds = []\r\n field_backup = list(field)\r\n self.__clean_list(field)\r\n \r\n # parse aliases if they exist\r\n if (len(field) >= 4) and (field[-2] == QueryTokens.AS):\r\n alias = field[-1]\r\n field = field[:-2]\r\n if (field[0] == QueryTokens.STRING_LITERAL) or \\\r\n (field[0] == QueryTokens.INTEGER_LITERAL) or \\\r\n (field[0] == QueryTokens.FLOAT_LITERAL): \r\n alias = self.unnamed_operator_name()\r\n underlying_fields = []\r\n field_type = FieldType.LITERAL\r\n literal_value = field[1]\r\n if field[0] == QueryTokens.STRING_LITERAL:\r\n return_type = ReturnType.STRING\r\n elif field[0] == QueryTokens.INTEGER_LITERAL:\r\n return_type = ReturnType.INTEGER\r\n literal_value = int(literal_value)\r\n elif field[0] == QueryTokens.FLOAT_LITERAL:\r\n return_type = ReturnType.FLOAT\r\n literal_value = float(literal_value)\r\n elif field[0] == QueryTokens.COLUMN_NAME: # field or alias\r\n if alias == None:\r\n alias = field[1]\r\n field_descriptor = tuple_descriptor.get_descriptor(field[1])\r\n if field_descriptor == None: # underlying field not yet defined. mark to check later\r\n field_type = FieldType.UNDEFINED\r\n underlying_fields = [field[1]]\r\n # check alias and underlying once this process is done to\r\n # find yet-undefined fields\r\n fields_to_verify.append(field[1])\r\n fields_to_verify.append(alias)\r\n else: # field found, copy information\r\n field_type = field_descriptor.field_type\r\n return_type = field_descriptor.return_type\r\n underlying_fields = field_descriptor.underlying_fields\r\n aggregate_factory = field_descriptor.aggregate_factory\r\n func_factory = field_descriptor.func_factory\r\n elif field[0] == QueryTokens.FUNCTION_OR_AGGREGATE: # function or aggregate \r\n if alias == None:\r\n if alias_on_complex_types:\r\n raise QueryException(\"Must specify alias (AS clause) for '%s'\" % (field[1]))\r\n else:\r\n alias = self.unnamed_operator_name()\r\n underlying_field_list = field[2:]\r\n underlying_fields = []\r\n for underlying in underlying_field_list:\r\n (parsed_fd_list, parsed_verify) = self.__parse_field(underlying, tuple_descriptor, False, False)\r\n for parsed_fd in parsed_fd_list:\r\n parsed_fd.visible = False\r\n fields_to_verify.extend(parsed_verify)\r\n parsed_fds.extend(parsed_fd_list)\r\n underlying_fields.append(parsed_fd_list[0].alias)\r\n aggregate_factory = get_aggregate_factory(field[1])\r\n if aggregate_factory != None: # found an aggregate function\r\n field_type = FieldType.AGGREGATE\r\n return_type = ReturnType.FLOAT\r\n else:\r\n function_information = self.function_registry.get_function(field[1])\r\n if function_information != None:\r\n field_type = FieldType.FUNCTION\r\n func_factory = function_information.func_factory\r\n return_type = function_information.return_type\r\n else:\r\n raise QueryException(\"'%s' is neither an aggregate or a registered function\" % (field[1]))\r\n else:\r\n raise QueryException(\"Empty field clause found: %s\" % (\"\".join(field_backup)))\r\n fd = FieldDescriptor(alias, underlying_fields, field_type, return_type, aggregate_factory, func_factory, literal_value)\r\n fd.visible = make_visible\r\n parsed_fds.insert(0, fd)\r\n return (parsed_fds, fields_to_verify)",
"def field_type(name):\n if name not in field_types:\n field_types[name] = records.fields_get([name], attributes=['type'])[name]['type']\n return field_types.get(name)",
"def _format_field_val(\n self,\n field: str,\n field_type: str,\n value: Any,\n ) -> str | int | bool | list | None:\n\n # If the field is empty, no need to format.\n if value is None:\n return None\n\n # TODO(DanielRyanSmith): Write checks to ensure enum values are valid.\n if field_type == 'emails' or field_type == 'split_str':\n list_val = self._split_list_input(field, field_type, value, ',')\n if field == 'blink_components' and len(value) == 0:\n return [settings.DEFAULT_COMPONENT]\n return list_val\n elif field_type == 'link':\n return self._extract_link(value)\n elif field_type == 'links':\n list_val = self._split_list_input(field, field_type, value)\n # Filter out any URLs that do not conform to the proper pattern.\n return [self._extract_link(link)\n for link in list_val if link]\n elif field_type == 'int':\n # Int fields can be unset by giving null or nothing in the input field.\n if value == '' or value is None:\n return None\n try:\n return int(value)\n except ValueError:\n self._abort_invalid_data_type(field, field_type, value)\n elif field_type == 'bool':\n return bool(value)\n return str(value)",
"def get_fields(\n schema: Union[Config, Schema], types: Union[Type, Tuple[Type]] = None\n) -> List[Tuple[str, BaseField]]:\n\n fields = list(schema._fields.items())\n if isinstance(schema, Config):\n fields += list(schema._schema._fields.items())\n\n if types:\n fields = [item for item in fields if isinstance(item[1], types)]\n return fields",
"def guess_type_value_type (none = True) :\n return [ None, str, int, float ] if none else [ str, int, float ]",
"def _get_defaults_for_field_type(cls, field_type):\n defaults = cls._ATTRIBUTE_DEFAULTS['*'].copy()\n defaults.update(cls._ATTRIBUTE_DEFAULTS.get(field_type, {}))\n\n return defaults",
"def _get_defaults_for_field_type(cls, field_type):\n defaults = cls._ATTRIBUTE_DEFAULTS['*'].copy()\n defaults.update(cls._ATTRIBUTE_DEFAULTS.get(field_type, {}))\n\n return defaults",
"def get_field_dtype(self, field=None):\n\n if field in self._fields_dtypes:\n return self._fields_dtypes[field]\n\n # initialize dbtypes for all fields\n field_type = pd.read_sql(\n 'select distinct column_name, type '\n 'from fields',\n self._get_db_engine())\n\n for row in field_type.itertuples():\n self._fields_dtypes[row.column_name] = row.type\n\n return self._fields_dtypes[field] if field in self._fields_dtypes else None",
"def get_default_factory_for_field(\n field: ModelField,\n) -> Union[NoArgAnyCallable, UnsetType]:\n default_factory = field.default_factory\n default = field.default\n\n has_factory = default_factory is not None and default_factory is not UNSET\n has_default = default is not None and default is not UNSET\n\n # defining both default and default_factory is not supported\n\n if has_factory and has_default:\n default_factory = cast(NoArgAnyCallable, default_factory)\n\n raise BothDefaultAndDefaultFactoryDefinedError(\n default=default, default_factory=default_factory\n )\n\n # if we have a default_factory, we should return it\n\n if has_factory:\n default_factory = cast(NoArgAnyCallable, default_factory)\n\n return default_factory\n\n # if we have a default, we should return it\n\n if has_default:\n return lambda: smart_deepcopy(default)\n\n # if we don't have default or default_factory, but the field is not required,\n # we should return a factory that returns None\n\n if not field.required:\n return lambda: None\n\n return UNSET",
"def field_types(self):\n if self._field_types is None:\n with open(self.csv_path, encoding=\"utf8\") as f:\n reader = csv.DictReader(f)\n for i, row in enumerate(reader):\n if i == 0:\n self._field_types = {k: v for k, v in row.items()}\n return self._field_types",
"def gather_types(self):\n\n def gather_subfields(field: Field) -> List[Field]:\n fields = [field]\n\n if isinstance(field, CompositeField):\n for f in field.fields:\n fields.extend(gather_subfields(f))\n elif isinstance(field, ArrayField):\n fields = []\n fields.extend(gather_subfields(field.itemtype))\n\n return fields\n\n types = []\n for method in self.methods:\n types.extend([method.request, method.response])\n for field in method.request.fields:\n types.extend(gather_subfields(field))\n for field in method.response.fields:\n types.extend(gather_subfields(field))\n return types",
"def make_tuple(*fields):\n fields2 = []\n for (idx, f) in zip(range(len(fields)), fields):\n if isinstance(f, (_StructField, pb.StructField)):\n fields2.append(f)\n if isinstance(f, pb.SQLType):\n fields2.append(pb.StructField(\n field_name=\"_%s\" % str(idx),\n field_type=f))\n raise ValueError(\"Could not understand type %s for %s\" % (type(f), f))\n return StructType(fields2)",
"def get_field_type(\n self, field_type: Union[Type, str], collection_name: str\n ) -> SchemaFieldDataType:\n TypeClass: Optional[Type] = _field_type_mapping.get(field_type)\n\n if TypeClass is None:\n self.report.report_warning(\n collection_name, f\"unable to map type {field_type} to metadata schema\"\n )\n TypeClass = NullTypeClass\n\n return SchemaFieldDataType(type=TypeClass())",
"def fields(class_or_instance: Union[Type[_DT], _DT]) -> Tuple[Var[Any, Any]]:\n # Might it be worth caching this, per class?\n try:\n fields = getattr(class_or_instance, \"fields\")\n meta = getattr(class_or_instance, \"meta\")\n meta_vars = meta[\"vars\"]\n except AttributeError or KeyError:\n raise TypeError(\"must be called with a declared type or instance\")\n\n # Exclude pseudo-fields. Note that fields is sorted by insertion\n # order, so the order of the tuple is as the fields were defined.\n out = []\n for f in fields:\n var = meta_vars[f]\n out.append(var)\n return tuple(out)",
"def _set_defaults(defaults, fillvalue, named_tuple):\n\n if defaults is not None:\n # Default values specified in interface kwarg take priority.\n defaults = tuple(defaults)\n if len(defaults) > len(named_tuple._fields):\n raise ValueError(\n \"Received more default values ({}) than field names ({}).\".format(\n len(defaults), len(named_tuple._fields)\n )\n )\n elif len(named_tuple._fields) > len(defaults):\n padded_defaults = [fillvalue] * (len(named_tuple._fields) - len(defaults))\n padded_defaults.extend(defaults)\n defaults = tuple(padded_defaults)\n else:\n # Defaults attribute can be called `_field_defaults` or `_fields_defaults`.\n nt_defaults = getattr(named_tuple, \"_fields_defaults\", None) or getattr(\n named_tuple, \"_field_defaults\", None\n )\n if nt_defaults: # Can be empty dict.\n defaults = tuple(\n nt_defaults.get(field, fillvalue) for field in (named_tuple._fields)\n )\n return defaults",
"def _type_def_helper(name, args, env: Env) -> typing.Tuple[Basic, typing.Dict[str, Undecided]]:\n\n new_basic = make_basic(name)\n env.set_named_type(name, new_basic)\n _ty_args = OrderedDict((arg, Undecided()) for arg in args)\n env.undecided_types.update(_ty_args)\n return new_basic, _ty_args"
]
| [
"0.5224334",
"0.52030015",
"0.4946775",
"0.4809755",
"0.47901797",
"0.47571266",
"0.4754597",
"0.4746675",
"0.47141576",
"0.4618402",
"0.46020648",
"0.45992017",
"0.44853425",
"0.44204098",
"0.44177788",
"0.4409449",
"0.44036892",
"0.4402199",
"0.43875858",
"0.4387374",
"0.4387374",
"0.43761858",
"0.43232515",
"0.43038324",
"0.42714342",
"0.42673296",
"0.42410603",
"0.42327368",
"0.42223978",
"0.41957402"
]
| 0.71920294 | 0 |
For a given seed and shot, generate a config file based on a template config file that is used for training/evaluation. You can extend or modify this function to fit your use case. | def get_config(seed, shot):
if args.coco:
# COCO
assert args.two_stage, 'Only supports novel weights for COCO now'
if args.novel_finetune:
# Fine-tune novel classifier
ITERS = {
1: (10000, 500),
2: (10000, 1500),
3: (10000, 1500),
5: (10000, 1500),
10: (10000, 2000),
30: (10000, 6000),
}
mode = 'novel'
assert not args.fc and not args.unfreeze
else:
# Fine-tune entire classifier
ITERS = {
1: (14400, 16000),
2: (28800, 32000),
3: (43200, 48000),
5: (72000, 80000),
10: (144000, 160000),
30: (216000, 240000),
}
mode = 'all'
split = temp_split = ''
temp_mode = mode
config_dir = 'configs/COCO-detection'
ckpt_dir = 'checkpoints/coco/faster_rcnn'
base_cfg = '../../Base-RCNN-FPN.yaml'
else:
# PASCAL VOC
assert not args.two_stage, 'Only supports random weights for PASCAL now'
ITERS = {
1: (3500, 4000),
2: (7000, 8000),
3: (10500, 12000),
5: (17500, 20000),
10: (35000, 40000),
}
split = 'split{}'.format(args.split)
mode = 'all{}'.format(args.split)
# temp_split = 'split1'
# temp_mode = 'all1'
        temp_split = split
temp_mode = mode
config_dir = 'configs/PascalVOC-detection'
ckpt_dir = 'checkpoints/voc/faster_rcnn'
base_cfg = '../../../Base-RCNN-FPN.yaml'
seed_str = 'seed{}'.format(seed) if seed != 0 else ''
fc = '_fc' if args.fc else ''
unfreeze = '_unfreeze' if args.unfreeze else ''
# Read an example config file for the config parameters
temp = os.path.join(
temp_split, 'faster_rcnn_R_101_FPN_ft{}_{}_1shot{}'.format(
fc, temp_mode, unfreeze)
)
print('temp_file:', temp)
config = os.path.join(args.root, config_dir, temp + '.yaml')
print('config_file:', config)
prefix = 'faster_rcnn_R_101_FPN_ft{}_{}_{}shot{}{}'.format(
fc, mode, shot, unfreeze, args.suffix)
print('prefix_file:', prefix)
output_dir = os.path.join(args.root, ckpt_dir, seed_str)
print('output_dir',output_dir)
os.makedirs(output_dir, exist_ok=True)
save_dir = os.path.join(
args.root, config_dir, split, seed_str,
)
print('save_dir',save_dir)
os.makedirs(save_dir, exist_ok=True)
save_file = os.path.join(save_dir, prefix + '.yaml')
    print('save_file', save_file)
configs = load_yaml_file(config)
print('reading from this config file ',config)
configs['_BASE_'] = base_cfg
configs['DATASETS']['TRAIN'] = make_tuple(configs['DATASETS']['TRAIN'])
configs['DATASETS']['TEST'] = make_tuple(configs['DATASETS']['TEST'])
if args.coco and not args.novel_finetune:
ckpt_path = os.path.join(output_dir, prefix, 'model_reset_combine.pth')
if not os.path.exists(ckpt_path):
src2 = os.path.join(
output_dir, 'faster_rcnn_R_101_FPN_ft_novel_{}shot{}'.format(
shot, args.suffix),
'model_final.pth',
)
if not os.path.exists(src2):
print('Novel weights do not exist. Please run with the ' + \
'--novel-finetune flag first.')
assert False
combine_cmd = 'python tools/ckpt_surgery.py --coco --method ' + \
'combine --src1 checkpoints/coco/faster_rcnn/faster_rcnn' + \
'_R_101_FPN_base/model_final.pth --src2 {}'.format(src2) + \
' --save-dir {}'.format(os.path.join(output_dir, prefix))
run_cmd(combine_cmd)
assert os.path.exists(ckpt_path)
configs['MODEL']['WEIGHTS'] = ckpt_path
elif not args.coco:
configs['MODEL']['WEIGHTS'] = configs['MODEL']['WEIGHTS'].replace(
'base1', 'base' + str(args.split))
for dset in ['TRAIN', 'TEST']:
configs['DATASETS'][dset] = (
configs['DATASETS'][dset][0].replace(
temp_mode, 'all' + str(args.split)),
)
configs['DATASETS']['TRAIN'] = (
configs['DATASETS']['TRAIN'][0].replace(
'1shot', str(shot) + 'shot'
) + ('_{}'.format(seed_str) if seed_str != '' else ''),
)
configs['SOLVER']['BASE_LR'] = args.lr
configs['SOLVER']['MAX_ITER'] = ITERS[shot][1]
configs['SOLVER']['STEPS'] = (ITERS[shot][0],)
configs['SOLVER']['CHECKPOINT_PERIOD'] = ITERS[shot][1] // args.ckpt_freq
configs['OUTPUT_DIR'] = os.path.join(output_dir, prefix)
if seed != 0:
with open(save_file, 'w') as fp:
yaml.dump(configs, fp)
return save_file, configs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dump_config_and_makefile(_config):\n experiment_dir = Path(_config['trainer']['storage_dir'])\n makefile_path = Path(experiment_dir) / \"Makefile\"\n\n if not makefile_path.exists():\n from padertorch.contrib.examples.source_separation.tasnet.templates import \\\n MAKEFILE_TEMPLATE_TRAIN\n\n config_path = experiment_dir / \"config.json\"\n pt.io.dump_config(_config, config_path)\n\n makefile_path.write_text(\n MAKEFILE_TEMPLATE_TRAIN.format(\n main_python_path=pt.configurable.resolve_main_python_path(),\n experiment_name=experiment_name,\n eval_python_path=('.'.join(\n pt.configurable.resolve_main_python_path().split('.')[:-1]\n ) + '.evaluate')\n )\n )",
"def generate_config(self):\n\n cfgmgr = ConfigManager()\n\n script_dir = os.path.join(cfgmgr.getRoot(), 'rules')\n\n if not os.path.exists(script_dir):\n print('Creating rules directory \\\"{0}\\\"'.format(script_dir))\n\n os.makedirs(script_dir)\n else:\n if not self.getArgs().force:\n sys.stderr.write('Script directory \\\"{0}\\\" already exists.\\n'\n 'Use --force to overwrite current'\n ' scripts\\n'.format(script_dir))\n\n sys.exit(1)\n\n print('Overwriting any scripts in directory \\\"{0}\\\"'.format(\n script_dir))\n\n # Determine UGE cell directory from environment\n if not os.getenv('SGE_ROOT') or not os.getenv('SGE_CELL'):\n print('Error: UGE environment is not sourced', file=sys.stderr)\n\n sys.exit(1)\n\n cell_dir = os.path.join(os.getenv('SGE_ROOT'), os.getenv('SGE_CELL'))\n\n template_vars = {\n 'tortuga_root': cfgmgr.getRoot(),\n 'uge_cell_dir': cell_dir,\n 'script_dir': script_dir,\n 'burst_swprofile': self.getArgs().software_profile,\n 'burst_hwprofile': self.getArgs().hardware_profile,\n 'burst_queue': 'burst.q',\n 'polling_interval': self.getArgs().polling_interval,\n 'slots_per_host': self.getArgs().slots_per_host,\n }\n\n env = Environment(loader=FileSystemLoader('templates'),\n undefined=StrictUndefined)\n\n for filename in glob.glob('templates/*.j2'):\n# print('Processing template {0}'.format(\n# os.path.basename(filename)))\n\n template = env.get_template(os.path.basename(filename))\n\n dstfile = os.path.join(\n script_dir,\n os.path.splitext(os.path.basename(filename))[0])\n\n print(' - writing {0}'.format(os.path.basename(dstfile)))\n\n with open(dstfile, 'w') as outfp:\n template.stream(template_vars).dump(outfp)",
"def generateConfig(run,subrun,conditions):\n \n configname = (conditions.numcdir + \"/\" + str(run) + \"/\" + str(subrun)\n + \"/numc_config_\" + str(run) + \"_\" + str(subrun) + \".cfg\")\n \n configContents = \"\"\n \n configContents += \"[software]\\n\"\n if conditions.oldneut:\n configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.1.4.2_nd280_ROOTv5r34p09n01/src/neutgeom/setup.sh\\n\"\n elif conditions.newoldneut:\n configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.1.4.3_nd280/src/neutgeom/setup.sh\\n\"\n else:\n #configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.3.1_nd280/src/neutgeom/setup.sh\\n\"\n #configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.3.1_nd280_wBBBA05/src/neutgeom/setup.sh\\n\"\n configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.3.2_nd280/src/neutgeom/setup.sh\\n\"\n \n configContents += \"[geometry]\\n\"\n\n configContents += \"baseline = \" + conditions.geometry +\"\\n\"\n if conditions.waterair == \"water\":\n configContents += \"p0d_water_fill = 1\\n\"\n else:\n configContents += \"p0d_water_fill = 0\\n\"\n \n configContents += \"\"\"\n \n[configuration]\nmodule_list = neutMC\n\n[filenaming]\n\"\"\"\n configContents += \"comment = \" + conditions.comment + \"\\n\"\n configContents += \"run_number = \" + str(run) +\"\\n\"\n configContents += \"subrun = \" + str(subrun) + \"\\n\"\n\n if conditions.oldneut:\n configContents += \"\"\" \n\n[neutrino]\nneut_card = /project/t/tanaka/T2K/neut/branches/5.1.4.2_nd280_ROOTv5r34p09n01/src/neutgeom/neut.card\n\"\"\"\n elif conditions.newoldneut:\n configContents += \"\"\" \n\n[neutrino]\nneut_card = /project/t/tanaka/T2K/neut/branches/5.1.4.3_nd280/src/neutgeom/neut.card\n\"\"\"\n else:\n configContents += \"\"\" \n\n[neutrino]\nneut_card = /project/t/tanaka/T2K/neut/branches/5.3.2_nd280/src/neutgeom/neut.card\n\"\"\"\n\n configContents += \"flux_file = \" + conditions.ram_disk + \"/\" + conditions.flux_base + \"\\n\"\n\n#flux_file = flux_file\n#\"\"\"\n\n# configContents += \"flux_file_path = \" + conditions.ram_disk + \"/\" + conditions.flux_base\n\n# configContents += \"\"\" \n#flux_file_start = 1\n#flux_file_stop = 300\n#\"\"\"\n\n configContents += \"maxint_file = \" + conditions.maxint_file_local + \"\\n\"\n\n# default: 5e17 but for basket MC special production higher\n configContents += \"\"\" \npot = 5.0e17\nneutrino_type = beam\n\"\"\"\n if conditions.baskmagn == \"basket\":\n configContents += \"\"\" \nflux_region = basket\nmaster_volume = Basket \nrandom_start = 1\n\"\"\"\n elif conditions.baskmagn == \"magnet\":\n configContents += \"\"\" \nflux_region = magnet\nmaster_volume = Magnet \nrandom_start = 1\n\"\"\"\n else:\n print \"Unknown basket/magnet condition\"\n \n\n configContents += \"random_seed = \" + str(getRandom()) +\"\\n\"\n configContents += \"neut_seed1 = \" + str(getRandom())+\"\\n\" \n configContents += \"neut_seed2 = \" + str(getRandom())+\"\\n\" \n configContents += \"neut_seed3 = \" + str(getRandom())+\"\\n\" \n\n configContents += \"\\n\"\n configContents += \"[nd280mc]\\n\"\n configContents += \"mc_type=Neut_RooTracker \\n\"\n\n #print configContents\n\n try:\n macFile = open(configname,\"w\")\n macFile.write(configContents)\n \n except:\n print \"can't write config file\" \n \n\n return configname",
"def generate_config(args):\n default_config = resource_string('webrpg', 'scripts/templates/default_config.txt').decode('utf-8')\n if args.sqla_connection_string:\n default_config = default_config.replace('%(sqlalchemy_url)s', args.sqla_connection_string)\n else:\n default_config = default_config.replace('%(sqlalchemy_url)s', get_user_parameter('SQL Alchemy Connection String', 'sqlite:///%(here)s/pyire_test.db'))\n\n with open(args.filename, 'w') as out_f:\n out_f.write(default_config)",
"def generate_cfg():\n \n if not os.path.exists(cfg_path):\n os.mkdir(cfg_path)\n \n for img_path in get_template_paths():\n extractor = BlockExtractor(img_path)\n extractor.get_cfg()\n for block in extractor.get_blocks():\n img = BlockParser(img_path, block).block_image()\n #cv.imshow(\"Block\", img)\n #cv.waitKey() & 0xFF",
"def _setup_run(cfg: Dict) -> Dict:\n now = datetime.now()\n day = f\"{now.day}\".zfill(2)\n month = f\"{now.month}\".zfill(2)\n hour = f\"{now.hour}\".zfill(2)\n minute = f\"{now.minute}\".zfill(2)\n run_name = f'run_{day}{month}_{hour}{minute}_seed{cfg[\"seed\"]}'\n # cfg[\"run_dir\"] = Path(__file__).absolute().parent / \"runs\" / run_name\n cfg[\"run_dir\"] = cfg[\"run_dir\"] / run_name\n if not cfg[\"run_dir\"].is_dir():\n cfg[\"train_dir\"] = cfg[\"run_dir\"] / \"data\" / \"train\"\n cfg[\"train_dir\"].mkdir(parents=True)\n cfg[\"val_dir\"] = cfg[\"run_dir\"] / \"data\" / \"val\"\n cfg[\"val_dir\"].mkdir(parents=True)\n else:\n raise RuntimeError(f\"There is already a folder at {cfg['run_dir']}\")\n\n # dump a copy of cfg to run directory\n with (cfg[\"run_dir\"] / \"cfg.json\").open(\"w\") as fp:\n temp_cfg = {}\n for key, val in cfg.items():\n if isinstance(val, PosixPath):\n temp_cfg[key] = str(val)\n elif isinstance(val, Dict):\n for k in val:\n if isinstance(val[k], PosixPath):\n val[k] = str(val[k])\n elif isinstance(val, pd.Timestamp):\n temp_cfg[key] = val.strftime(format=\"%d%m%Y\")\n else:\n temp_cfg[key] = val\n json.dump(temp_cfg, fp, sort_keys=True, indent=4)\n\n return cfg",
"def _create_config(env_path):\n s2e_yaml = 's2e.yaml'\n version_path = os.path.join(os.path.dirname(__file__), '..', 'dat', 'VERSION')\n\n with open(version_path, 'r', encoding='utf-8') as fp:\n context = {\n 'creation_time': str(datetime.datetime.now()),\n 'version': fp.read().strip(),\n }\n\n render_template(context, s2e_yaml, os.path.join(env_path, s2e_yaml))",
"def config():\n experiment_dir = './experiments'\n simulation_steps = 1000\n device = 'cpu'\n path_to_molecules = os.path.join(experiment_dir, 'data/ethanol.xyz')\n simulation_dir = os.path.join(experiment_dir, 'simulation')\n training_dir = os.path.join(experiment_dir, 'training')\n model_path = os.path.join(training_dir, 'best_model')\n overwrite = True",
"def create_custom_config(watch, start_cmd, stop_cmd, match_cmd):\n with open(TEMPLATE_LOCATION) as template:\n output = template.read()\n output = output.format(\n process_name=watch, match_clause='MATCHING {}'.format(match_cmd),\n group=watch, start_line=start_cmd, stop_line=stop_cmd)\n\n config_file = os.path.join(MONIT_CONFIG_DIR, 'appscale-{}.cfg'.format(watch))\n file_io.write(config_file, output)",
"def createConfiguration(self, input):\n resolvedInputName = envString.resolve(input)\n if self.opts.verbose:\n print(\"creating configuration using \", resolvedInputName)\n template = TemplateWriter()\n substitutes = self.defaults.copy()\n for key in self.commandLineDefaults:\n val = self.commandLineDefaults[key]\n if val is not None:\n substitutes[key] = self.commandLineDefaults[key]\n\n substitutes[\"CTRL_EXECUTE_SETUP_PACKAGES\"] = self.getSetupPackages()\n\n configDir = os.path.join(substitutes[\"LOCAL_SCRATCH\"], \"configs\")\n if not os.path.exists(configDir):\n os.mkdir(configDir)\n self.outputFileName = os.path.join(configDir, \"%s.config\" % (self.runid))\n if self.opts.verbose:\n print(\"writing new configuration to \", self.outputFileName)\n template.rewrite(resolvedInputName, self.outputFileName, substitutes)\n return self.outputFileName",
"def write_fit_config( input_path, output_path, output_config_path, config_path =\"./\" ) :\n\n\n\tfit_template = \"\"\"\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<config>\n\n\t<!-- Job to run -->\n\t<jobType>TofEffFitter</jobType>\n\t<Task name=\"TofEffFitter\" type=\"TofEffFitter\" config=\"\" nodePath=\"TofEffFitter\" />\n\n\t<TofEffFitter>\n\t\t<Logger color=\"true\" globalLogLevel=\"info\" logLevel=\"all\" />\n\t\t<Reporter> <output url=\"{report_file}\" width=\"700\" height=\"500\" /> </Reporter>\n\n\t\t<input url=\"{input_path}\"/> \n\n\t\t<output path=\"{output_path}\">\n\t\t\t<data>{product_file}</data>\n\t\t\t<params>{params_file}</params>\n\t\t</output>\n\n\t\t<!-- the bins into which the 9 centrality bins are mapped. -->\n\t\t<Include url=\"../common/centralityMap.xml\" />\n\n\t\t<Style>\n\t\t\t<TofEff lw=\"3\" ms=\"1\" mst=\"8\" />\n\t\t</Style>\n\n\t</TofEffFitter>\n\n</config>\"\"\"\n\n\n\treport = pjoin( output_path, \"rp_\" + t_product_file.format( ext=\"pdf\" ) )\n\tproduct = t_product_file.format( ext=\"root\" )\n\tparams = pjoin( output_config_path, t_product_file.format( ext=\"xml\" ) )\n\n\twith open( pjoin( config_path, 'fit.xml' ), 'w' ) as f :\n\t\tf.write( fit_template.format( input_path = output_path, output_path = output_path, params_file = params, product_file=product, report_file=report ) )",
"def generate_config(container_data, file_path):\n pass",
"def mkconfig():\n basedir = os.path.join(os.path.expanduser('~'), '.strikepackage')\n\n # Try to populate dirs\n defaultdirs = [os.path.join(basedir, leaf)\n for leaf in ['examples', 'keys', 'templates']]\n\n for dirpath in defaultdirs:\n if not os.path.exists(dirpath):\n try:\n os.makedirs(dirpath, 0755)\n except (os.error, IOError) as ex:\n warn(\"Error while creating default directory: {}\".format(ex))\n\n # Try to place example confs if not present\n exdir = os.path.join(basedir, 'examples')\n exfiles = [(os.path.join(exdir, exfile[0]), exfile[1])\n for exfile in [('config.yaml', config_src),\n ('metadata.jinja2', metadata_src),\n ('userdata.jinja2', userdata_src)]]\n for exfile in exfiles:\n if not os.path.isfile(exfile[0]):\n try:\n with open(exfile[1], 'r') as f:\n src = f.read()\n with open(exfile[0], 'w+') as f:\n f.write(src)\n except IOError as ex:\n warn(\"Error writing example file: {}\".format(ex))",
"def generate(ctx: Context):\n try_to_load_agent_config(ctx)",
"def process_config(json_file):\n config, _ = get_config_from_json(json_file)\n print(\" THE Configuration of your experiment ..\")\n pprint(config)\n print(\" *************************************** \")\n try:\n config.summary_dir = os.path.join(\"experiments\", config.exp_name, \"summaries/\")\n config.checkpoint_dir = os.path.join(\"experiments\", config.exp_name, \"checkpoints/\")\n config.out_dir = os.path.join(\"experiments\", config.exp_name, \"out/\")\n create_dirs([config.summary_dir, config.checkpoint_dir, config.out_dir])\n except AttributeError as e:\n print(\"ERROR!!..Please provide the exp_name in json file..\")\n exit(-1)\n return config",
"def generate_config(self):\n\n # Change crypto-config.yaml and add organizations\n yaml = YAML()\n with open(os.path.join(self.config_path, \"crypto-config-template.yaml\"), \"r\") as crypto_config_file:\n config = yaml.load(crypto_config_file)\n\n config[\"OrdererOrgs\"][0][\"Specs\"] = []\n for orderer_index in range(1, self.num_validators + 1):\n orderer_host, _ = self.experiment.get_peer_ip_port_by_id(orderer_index)\n config[\"OrdererOrgs\"][0][\"Specs\"].append({\n \"Hostname\": \"orderer%d\" % orderer_index,\n \"SANS\": [orderer_host]\n })\n\n config[\"PeerOrgs\"] = []\n for organization_index in range(1, self.num_validators + 1):\n organization_host, _ = self.experiment.get_peer_ip_port_by_id(organization_index)\n organization_config = {\n \"Name\": \"Org%d\" % organization_index,\n \"Domain\": \"org%d.example.com\" % organization_index,\n \"EnableNodeOUs\": True,\n \"Template\": {\n \"Count\": 1,\n \"SANS\": [organization_host]\n },\n \"Users\": {\n \"Count\": 1\n }\n }\n config[\"PeerOrgs\"].append(organization_config)\n\n with open(os.path.join(self.config_path, \"crypto-config.yaml\"), \"w\") as crypto_config_file:\n yaml.dump(config, crypto_config_file)\n\n # Change configtx.yaml\n yaml = YAML()\n with open(os.path.join(self.config_path, \"configtx-template.yaml\"), \"r\") as configtx_file:\n config = yaml.load(configtx_file)\n\n config[\"Profiles\"][\"TwoOrgsChannel\"][\"Application\"][\"Organizations\"] = []\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Consortiums\"][\"SampleConsortium\"][\"Organizations\"] = []\n\n for organization_index in range(1, self.num_validators + 1):\n org_admin = \"Org%dMSP.admin\" % organization_index\n org_peer = \"Org%dMSP.peer\" % organization_index\n org_client = \"Org%dMSP.client\" % organization_index\n organization_host, _ = self.experiment.get_peer_ip_port_by_id(organization_index)\n\n organization_config = {\n \"Name\": \"Org%dMSP\" % organization_index,\n \"ID\": \"Org%dMSP\" % organization_index,\n \"MSPDir\": \"crypto-config/peerOrganizations/org%d.example.com/msp\" % organization_index,\n \"Policies\": {\n \"Readers\": {\n \"Type\": \"Signature\",\n \"Rule\": \"OR('%s', '%s', '%s')\" % (org_admin, org_peer, org_client)\n },\n \"Writers\": {\n \"Type\": \"Signature\",\n \"Rule\": \"OR('%s', '%s')\" % (org_admin, org_peer)\n },\n \"Admins\": {\n \"Type\": \"Signature\",\n \"Rule\": \"OR('%s')\" % (org_admin)\n }\n },\n \"AnchorPeers\": [{\n \"Host\": organization_host,\n \"Port\": 7000 + organization_index\n }]\n }\n\n commented_map = CommentedMap(organization_config)\n commented_map.yaml_set_anchor(\"Org%d\" % organization_index, always_dump=True)\n config[\"Organizations\"].append(commented_map)\n config[\"Profiles\"][\"TwoOrgsChannel\"][\"Application\"][\"Organizations\"].append(commented_map)\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Consortiums\"][\"SampleConsortium\"][\"Organizations\"]\\\n .append(commented_map)\n\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"EtcdRaft\"][\"Consenters\"] = []\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"Addresses\"] = []\n\n for organization_index in range(1, self.num_validators + 1):\n organization_host, _ = self.experiment.get_peer_ip_port_by_id(organization_index)\n consenter_port = 7000 + organization_index\n consenter_info = {\n \"Host\": organization_host,\n \"Port\": consenter_port,\n \"ClientTLSCert\": \"crypto-config/ordererOrganizations/example.com/orderers/\"\n \"orderer%d.example.com/tls/server.crt\" % 
organization_index,\n \"ServerTLSCert\": \"crypto-config/ordererOrganizations/example.com/orderers/\"\n \"orderer%d.example.com/tls/server.crt\" % organization_index\n }\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"EtcdRaft\"][\"Consenters\"].append(consenter_info)\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"Addresses\"].append(\n \"%s:%d\" % (organization_host, consenter_port))\n\n with open(os.path.join(self.config_path, \"configtx.yaml\"), \"w\") as configtx_file:\n round_trip_dump(config, configtx_file, Dumper=RoundTripDumper)",
"def make_config(config, out_dir=None, pism_root=pism_root):\n\n # ensure that config is a list\n if type(config) is str:\n config = [config]\n\n # initialize netCDF dataset\n nc_path = os.path.join(out_dir, 'config.nc')\n nc = Dataset(nc_path, 'w')\n var = nc.createVariable('pism_overrides', 'i1')\n\n # loop on config files\n for c in config:\n c_path = '%s/config/%s.txt' % (pism_root, c)\n\n # fill in pism overrides\n with open(c_path) as f:\n for line in f:\n\n # ignore what follows '//'\n line = line.split('//', 1)[0].strip()\n\n # parse non-empty lines and overwrite existing values\n if line:\n k, v = line.split(':', 1)\n k = k.strip()\n v = v.strip().strip('\"')\n try:\n v = float(v)\n except ValueError:\n pass\n var.setncattr(k, v)\n\n # close and return path to output file\n nc.close()\n return nc_path",
"def update_config_external_template(config):\r\n\r\n # best parameters from the paper\r\n config['train_batch_size'] = 16384\r\n config['lr'] = 3e-4\r\n config['sgd_minibatch_size'] = 4096\r\n config['num_sgd_iter'] = 4\r\n config['rollout_fragment_length'] = 100\r\n\r\n # run ID to communicate to the http trainer\r\n config['run_uid'] = '_setme'\r\n\r\n # stable baselines accepts full episodes\r\n config[\"batch_mode\"] = \"complete_episodes\"\r\n\r\n # stable baselines server address\r\n config[\"http_remote_port\"] = \"http://127.0.0.1:50001\"\r\n\r\n # no gpus, stable baselines might use them\r\n config['num_gpus'] = 0\r\n\r\n # set trainer class\r\n config['_trainer'] = \"External\"\r\n config['_policy'] = \"PPO\"\r\n\r\n # tuned\r\n config['num_envs_per_worker'] = 10\r\n config['num_workers'] = 3\r\n return config",
"def genConfig():\n\n cfg = open('/home/sevudan/Scripts/projects/topogen/result.cfg','w')\n template = getTemplate()\n G = topo.topology()\n gen_config_lo(G, cfg)\n # Get node from list nodes.\n for node in sorted(G.nodes):\n d = dict(G[node])\n hostname = node\n # Get attributes for node.\n peer = d.keys()\n for peer_node in peer:\n params = d.get(peer_node)\n conf = template.render(\n node=hostname,\n description = peer_node,\n ifd = params.get('ifd'),\n local_ifl = params.get('local_ifl'),\n peer_ifl = params.get('peer_ifl'),\n ifa = params.get('ip_address')\n )\n result = '{}{}'.format(conf,'\\n')\n cfg.write(result)\n cfg.close()",
"def generate_config_template():\n lines = ['# Lines starting with # will be skipped.']\n lines.append('# Only one argument on each line.')\n lines.append('#-s This option is always assumed to be true.')\n lines.append('#-p')\n lines.append('#-m')\n lines.append('#-o')\n lines.append('#-c')\n lines.append('-l')\n lines.append('#-a')\n lines.append('#-d')\n\n with open('export_config.txt', 'wb') as f_new:\n f_new.write('\\r\\n'.join(lines))\n print 'Template generated. Edit this file as you please and call this script '\\\n 'with the -f option enabled.'",
"def get_valid_config(args):\n source = confuse.YamlSource(args.config)\n config = confuse.RootView([source])\n\n job_template = {\n \"job\": {\n \"name\": str,\n \"dir\": confuse.Optional(\n FilenameValidate(\n cwd=str(pathlib.Path(__file__).parent.absolute())),\n default=str(pathlib.Path(__file__).parent.absolute())\n ),\n }\n }\n job_config = config.get(job_template)\n\n logging_template = confuse.Optional(\n confuse.MappingTemplate({\n 'ids': confuse.StrSeq(),\n 'data': confuse.Sequence(\n confuse.Choice(['objectives', 'state', 'variables'])),\n 'timestamped': confuse.Optional(bool, default=True),\n \"to_file\": confuse.Optional(bool, default=True),\n \"to_console\": confuse.Optional(bool, default=False)\n })\n )\n\n sumo_template = {\n \"dir\": FilenameValidate(\n cwd=job_config.job.dir),\n \"gui\": confuse.Optional(bool, default=True),\n \"max_steps\": confuse.Optional(int, default=10e5),\n \"network\": FilenameValidate(relative_to=\"dir\"),\n }\n sumo_config = config.get({\"sumo\": sumo_template})\n sumo_template[\"additional\"] = confuse.Sequence(\n FilenameValidate(cwd=sumo_config.sumo.dir))\n sumo_template[\"route\"] = confuse.Sequence(\n FilenameValidate(cwd=sumo_config.sumo.dir))\n\n tls_template = confuse.Sequence({\n \"id\": str,\n \"controller\": confuse.Choice(\n TLSFactory.get_registered_keys()),\n \"constants\": confuse.MappingValues(\n confuse.OneOf([\n confuse.Number(),\n AllowedContainers(list),\n AllowedContainers(dict),\n FilenameValidate(cwd=job_config.job.dir),\n ExecutableValidate()\n ])\n ),\n \"variables\": confuse.MappingValues(\n confuse.OneOf([\n confuse.Number(),\n AllowedContainers(list)\n ])\n ),\n \"extract\": {\n \"user_data\": confuse.Sequence({\n \"feature\": confuse.Choice(\n [\"count\", \"speed\", \"eta\", \"delay\", \"waiting_time\"]),\n \"user_class\": confuse.Choice(\n [\"bicycle\", \"passenger\", \"pedestrian\", \"bus\", \"truck\", \"moped\"]),\n \"at\": confuse.Choice(\n [\"lane\", \"detector\", \"phase\"]),\n \"mapping\": AllowedContainers(dict)\n }),\n \"tls_data\": confuse.Sequence({\n \"feature\": confuse.Choice(\n [\"elapsed_time\", \"integer_phase\", \"binary_phase\"]),\n \"to_variable\": str\n })\n }\n })\n\n full_template = {\n \"logging\": logging_template,\n \"sumo\": sumo_template,\n \"tls\": tls_template,\n }\n job_template.update(full_template)\n valid_config = config.get(job_template)\n\n # second round of sumo validation\n assert len(valid_config.sumo.route) > 0, \\\n \"No demand definition: sumo.route is an empty list, expected at least one *.rou.xml\"\n \n # second round of logger validation, look if ids are given\n if valid_config.logging:\n if valid_config.logging.ids and valid_config.logging.data:\n output_dir = os.path.join(valid_config.job.dir, \"output\")\n os.makedirs(output_dir, exist_ok=True)\n valid_config.logging.update({\"dir\": output_dir})\n else:\n del valid_config['logging']\n\n return valid_config",
"def build_config():\n if not os.path.exists(config_path):\n # generate key pair\n priv_key, pub_key = crypt.ecdsa_generate()\n if not priv_key or not pub_key:\n log.error(\"Unable to generate public/private keypair....\")\n exit(0)\n else:\n # fill default config with generated keypair\n base_config['key']['pub'] = pub_key\n base_config['key']['priv'] = priv_key\n\n # dump default config\n log.info(\"Dumping initial config to: %s\", config_path)\n with open(config_path, 'w') as fp:\n json.dump(base_config, fp, sort_keys=True, indent=2)\n return True\n else:\n return False",
"def generate_nnie_config(nnie_cfg, config, nnie_out_path='./config.json', tensor_type='float'):\n u8_start = False if tensor_type == 'float' else False\n default_config = {\n \"default_net_type_token\": \"nnie\",\n \"rand_input\": False,\n \"data_num\": 100,\n \"input_path_map\": {\n \"data\": \"./image_bins\",\n },\n \"nnie\": {\n \"max_batch\": 1,\n \"output_names\": [],\n \"mapper_version\": 11,\n \"u8_start\": u8_start,\n \"device\": \"gpu\",\n \"verbose\": False,\n \"image_path_list\": [\"./image_list.txt\"],\n \"mean\": [128, 128, 128],\n \"std\": [1, 1, 1]\n }\n }\n image_path_list = nnie_cfg['image_path_list']\n assert os.path.exists(image_path_list)\n with open(image_path_list, 'r') as f:\n image_list = [item.strip() for item in f.readlines()]\n\n mean = config.to_kestrel.get('pixel_means', [123.675, 116.28, 103.53])\n std = config.to_kestrel.get('pixel_stds', [58.395, 57.12, 57.375])\n resize_hw = config.to_kestrel.get('resize_hw', (224, 224))\n resize_hw = tuple(resize_hw)\n data_num = len(image_list)\n image_bin_path = generate_image_bins(image_list, mean, std, resize_hw)\n default_config['data_num'] = data_num\n default_config['input_path_map']['data'] = image_bin_path\n default_config['nnie']['max_batch'] = nnie_cfg.get('max_batch', 1)\n default_config['nnie']['mapper_version'] = nnie_cfg.get('mapper_version', 11)\n default_config['nnie']['image_path_list'] = [image_path_list]\n default_config['nnie']['mean'] = [128] * len(std)\n default_config['nnie']['std'] = [1] * len(std)\n with open(nnie_out_path, \"w\") as f:\n json.dump(default_config, f, indent=2)\n\n return nnie_out_path",
"def create_net(args):\n\n # Load config file for this experiment\n xinfo = yaml.load(open(args.exp)) # experiment info\n\n # copy config to run directory\n assert osp.isdir(args.cache_dir), 'Working directory not found: ' + args.cache_dir\n # output config file\n yaml.dump(xinfo, open(args.exp_config_path, 'w'),\n default_flow_style=False)\n\n # Load dataset config file\n dcfg_path = osp.join(args.data_config_path, xinfo['INPUT']['DATASET'])\n dinfo = yaml.load(open(dcfg_path)) # dataset info\n data_dir = dinfo['ROOT']\n\n layout = xinfo['INPUT']['LAYOUT']\n inps = [s.strip() for l in layout for s in l.split(',')]\n outs = [s.strip() for s in xinfo['REFINE']['TARGETS'].split(',')]\n\n supports = ['seg', 'flow', 'norm', 'rgb', 'depth']\n\n nets = {}\n for split in ['train', 'test']:\n net_inps = []\n net_outs = []\n for inp in inps:\n match = re.search('^(gt|pr)({})'.format('|'.join(supports)), inp)\n assert match is not None, 'Error in config INPUT-LAYOUT: ' + inp\n\n modality = match.group(2)\n nchannels = dinfo[modality]['n']\n path = osp.join(data_dir, dinfo[modality][match.group(1) + '-' + split])\n\n assert osp.exists(path), 'File not found: ' + path\n net_inps.append((inp, path, nchannels))\n\n for out in outs:\n # TODO: read target type: zero couplings, tight, loose couplings\n match = re.search('({})'.format('|'.join(supports)), out)\n assert match is not None, 'Error in config REFINE-TARGET: '+ out\n\n modality = match.group(1)\n nchannels = dinfo[modality]['n']\n path = osp.join(data_dir, dinfo[modality]['gt-' + split])\n\n assert osp.exists(path), 'File not found: ' + path\n net_outs.append((out, path, nchannels))\n\n loss_params = dict()\n mapping = None\n if 'mapping' in dinfo['seg']:\n idx = dinfo['seg']['mapping']\n mapping = dict(zip(idx, xrange(len(idx))))\n\n if split == 'train':\n\n # if the class weights is not in the dataset config file\n if 'gt-train-weights' not in dinfo['seg']:\n print 'Generating median frequency balancing weights.'\n (weights, mapping) = gcw.get_mfb(osp.join(dinfo['ROOT'], dinfo['seg']['gt-train']),\n dinfo['seg']['ignore_label'],\n mapping)\n # save back to dataset config\n dinfo['seg']['gt-train-weights'] = weights\n yaml.dump(dinfo, open(dcfg_path, 'w'), default_flow_style=False)\n else:\n weights = dinfo['seg']['gt-train-weights']\n # update data\n # update loss parameter\n ignore_label = dinfo['seg']['ignore_label']\n ignore_label = mapping[ignore_label] if mapping is not None else ignore_label\n loss_params['loss_param'] = {\n 'ignore_label': ignore_label,\n 'class_weighting': weights\n }\n\n # generate net prototxt\n loader = dinfo['NAME'] + '_loader'\n net_proto = arch.create_net(net_inps, net_outs, split, loader, layout, mapping, **loss_params)\n\n # output to file\n path = osp.join(args.cache_dir, getattr(args, 'exp_{}_path'.format(split)))\n open(path, 'w').write(str(net_proto))\n nets[split] = net_proto\n\n return nets",
"def createConfig():\n\twith open(configPath, 'w', encoding='utf-8') as file:\n\t\tjson.dump(default_config, file, indent=3)",
"def config_path(request):\n submissions_directory = tempfile.mkdtemp()\n holding_directory = tempfile.mkdtemp()\n\n test_config = {\n 'secret_key': 'itsasecret',\n 'sqlalchemy_database_uri': 'sqlite://',\n 'iron': {\n 'project_id': 'notnecessary'\n },\n 'submissions_directory': submissions_directory,\n 'holding_directory': holding_directory\n }\n\n opened_file_descriptor, filepath = tempfile.mkstemp()\n opened_file = os.fdopen(opened_file_descriptor, 'w')\n yaml.dump(test_config, opened_file)\n opened_file.close()\n\n def fin():\n os.unlink(filepath)\n shutil.rmtree(submissions_directory)\n shutil.rmtree(holding_directory)\n\n request.addfinalizer(fin)\n return filepath",
"def make_config():\n # find date of data obtained\n current_pathname = os.path.basename(os.getcwd())\n guess_date = extract_date(current_pathname)\n\n while(True):\n if guess_date is None:\n prompt = 'YYYYMMDD'\n else:\n prompt = guess_date\n\n string = input('Date of observation [{}]: '.format(prompt))\n input_date = extract_date(string)\n if input_date is None:\n if guess_date is None:\n continue\n else:\n input_date = guess_date\n break\n else:\n break\n \n input_datetime = datetime.datetime.strptime(input_date, '%Y-%m-%d')\n\n # create config object\n config = configparser.ConfigParser()\n\n config.add_section('data')\n\n config.set('data', 'telescope', 'Keck-I')\n config.set('data', 'instrument', 'HIRES')\n config.set('data', 'rawpath', 'rawdata')\n #config.set('data', 'statime_key', statime_key)\n #config.set('data', 'exptime_key', exptime_key)\n\n config.add_section('reduce')\n config.set('reduce', 'midpath', 'midproc')\n config.set('reduce', 'figpath', 'images')\n config.set('reduce', 'odspath', 'onedspec')\n config.set('reduce', 'mode', 'normal')\n config.set('reduce', 'oned_suffix', 'ods')\n config.set('reduce', 'fig_format', 'png')\n \n config.add_section('reduce.bias')\n config.set('reduce.bias', 'bias_file', '${reduce:midpath}/bias.fits')\n config.set('reduce.bias', 'cosmic_clip', str(10))\n config.set('reduce.bias', 'maxiter', str(5))\n config.set('reduce.bias', 'smooth', 'yes')\n config.set('reduce.bias', 'smooth_method', 'gaussian')\n config.set('reduce.bias', 'smooth_sigma', str(3))\n config.set('reduce.bias', 'smooth_mode', 'nearest')\n\n config.add_section('reduce.trace')\n config.set('reduce.trace', 'minimum', str(1e-3))\n config.set('reduce.trace', 'scan_step', str(100))\n config.set('reduce.trace', 'separation', '100:84, 1500:45, 3000:14')\n config.set('reduce.trace', 'filling', str(0.2))\n config.set('reduce.trace', 'align_deg', str(2))\n config.set('reduce.trace', 'display', 'no')\n config.set('reduce.trace', 'degree', str(4))\n config.set('reduce.trace', 'file', '${reduce:midpath}/trace.fits')\n\n config.add_section('reduce.flat')\n config.set('reduce.flat', 'file', '${reduce:midpath}/flat.fits')\n\n # write to config file\n filename = 'HIRES.{}.cfg'.format(input_date)\n outfile = open(filename, 'w')\n for section in config.sections():\n maxkeylen = max([len(key) for key in config[section].keys()])\n outfile.write('[{}]'.format(section)+os.linesep)\n fmt = '{{:{}s}} = {{}}'.format(maxkeylen)\n for key, value in config[section].items():\n outfile.write(fmt.format(key, value)+os.linesep)\n outfile.write(os.linesep)\n outfile.close()\n\n print('Config file written to {}'.format(filename))",
"def _setup_configfiles(self, Testboard):\n\n # Delete all root files which are already in the directory\n root_files = glob.glob(Testboard.testdir+'/*.root')\n for f in root_files:\n os.remove(f)\n # Change testboard name\n\tif Testboard.DTB and os.path.isfile(Testboard.testdir + \"/tb\"):\n self._config_file_content_substitute(Testboard.testdir + \"/tb\", {\"id\":Testboard.address})\n else:\n self._config_file_content_substitute(Testboard.testdir + \"/configParameters.dat\", {\"testboardName\":Testboard.address})\n\n # Get test specific config parameters (if available)\n params = ()\n try:\n params = self.init.items(\"Test \" + self.test.testname)\n except:\n return\n for par in params:\n file = par[0]\n if '.cfg' in file:\n section,pair = par[1].split(':')\n key,value = pair.split('=')\n config_file = BetterConfigParser()\n config_file.read(Testboard.testdir + \"/\" + file)\n config_file.set(section,key,value)\n write_file = open(Testboard.testdir + \"/\" + file, 'write')\n config_file.write(write_file)\n write_file.close()\n continue\n # Check for valid keys that represent config files\n elif \"testParameters\" in file or \"dacParameters\" in file or \"configParameters\" in file:\n pass\n elif \"tbmParameters\" in file or \"tbParameters\" in file:\n pass\n else:\n continue\n\n encoded_keys = par[1].split(\",\")\n keys = {}\n for key in encoded_keys:\n key = key.split(\"=\", 2)\n if len(key) != 2:\n continue\n keys[key[0]] = key[1]\n if len(file) < 4 or file[-4:] != \".dat\":\n file += \".dat\"\n self._config_file_content_substitute(Testboard.testdir + \"/\" + file, keys)",
"def setup(args):\n cfg = get_cfg()\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n # customize reszied parameters\n # cfg['INPUT']['MIN_SIZE_TRAIN'] = (20,)\n # cfg['INPUT']['MAX_SIZE_TRAIN'] = 50\n cfg.freeze()\n default_setup(\n cfg, args\n ) # if you don't like any of the default setup, write your own setup code\n return cfg",
"def get_config():\n\n parser = argparse.ArgumentParser(\n description='ZoomingSloMo or only Slo-Mo training argument parser')\n parser.add_argument('--cfg', default=\"./config.yaml\")\n args, _ = parser.parse_known_args()\n conf = read_yaml(args.cfg)\n\n parser.add_argument('--lmdb-data-gt', type=str, default=\"datasets/\",\n help='Path to HR frames lmdb for training')\n\n parser.add_argument('--lmdb-data-lq', type=str, default=\"datasets/\",\n help='Path to LR frames lmdb for training')\n\n parser.add_argument('--output-dir', type=str, default=\"models/\",\n help='Path to store trained models')\n\n parser.add_argument('--batch-size', type=int, default=\"12\",\n help='Maximum number of iterations for training')\n\n parser.add_argument('--gt-size', type=int, default=128,\n help='Ground truth frame size')\n\n parser.add_argument('--only-slomo', action='store_true', default=False,\n help='If True, network will train for Slo-Mo only (No Zooming)')\n\n args = parser.parse_args()\n\n # Refine config file variables\n conf.data.lmdb_data_gt = args.lmdb_data_gt\n conf.data.lmdb_data_lq = args.lmdb_data_lq\n conf.data.output_dir = args.output_dir\n conf.train.batch_size = args.batch_size\n conf.train.only_slomo = args.only_slomo\n conf.data.gt_size = args.gt_size if not args.only_slomo else args.gt_size // 4\n conf.data.lr_size = args.gt_size // 4\n\n return conf"
]
| [
"0.6447288",
"0.6440364",
"0.6314993",
"0.63084924",
"0.62926704",
"0.61100346",
"0.6052603",
"0.6044389",
"0.59434706",
"0.59244305",
"0.59235203",
"0.59207696",
"0.5917525",
"0.5906047",
"0.58772135",
"0.58684087",
"0.5858659",
"0.5848748",
"0.58348626",
"0.58281535",
"0.58206755",
"0.5808289",
"0.57735306",
"0.5766891",
"0.57489705",
"0.57425404",
"0.5735072",
"0.57270855",
"0.5710682",
"0.5698444"
]
| 0.7042954 | 0 |
Redirect non-www requests to www. | def redirect_nonwww():
urlparts = urlparse(request.url)
if urlparts.netloc != 'www.mealscount.com':
return redirect('https://www.mealscount.com/', code=301) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_www(hostname: str) -> str:\n if hostname.startswith(\"www.\"):\n return hostname[4:]\n return hostname",
"def ssl_redirect():\n if request.get_header('X-Forwarded-Proto', 'http') != 'https':\n redirect(request.url.replace('http://', 'https://', 1), code=301)",
"def first_request():\n heroku_url: str = 'https://justice-ndou.herokuapp.com/'\n registered_domain: str = 'https://justice-ndou.herokuapp.com/'\n\n if request.host_url.lower().startswith(heroku_url):\n return redirect(request.host_url.lower().replace(heroku_url, registered_domain)), 301",
"def redirect_heroku():\n urlparts = urlparse(request.url)\n domain_name = \"rsvp.tiks-ultimate.in\"\n old_domain_name = \"thatte-idli-rsvp.herokuapp.com\"\n fly_domain_name = \"tiks-ultimate-rsvp.fly.dev\"\n if urlparts.netloc in {old_domain_name, fly_domain_name}:\n urlparts_list = list(urlparts)\n urlparts_list[1] = domain_name\n return redirect(urlunparse(urlparts_list), code=301)",
"def _MaybeRedirectToBrandedDomain(self, request, project_name):\n if request.params.get('redir'):\n return # Avoid any chance of a redirect loop.\n if not project_name:\n return\n needed_domain = framework_helpers.GetNeededDomain(\n project_name, request.host)\n if not needed_domain:\n return\n\n url = 'https://%s%s' % (needed_domain, request.path_qs)\n if '?' in url:\n url += '&redir=1'\n else:\n url += '?redir=1'\n logging.info('branding redirect to url %r', url)\n self.redirect(url, abort=True)",
"def redirect_handler_factory():\n\n class RedirectHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):\n def do_GET(self):\n self.send_response(301)\n domain = self.headers['host']\n if ':' in domain:\n domain = domain.split(':')[0]\n self.send_header('Location', \"https://\" + domain + self.path)\n self.end_headers()\n\n return RedirectHandler",
"def filter_nossl(request):\n if request.scheme == 'http':\n return True\n else:\n return False",
"def _send_301(self, new_url):\n try:\n self.send_response(301)\n self.send_header('Location', new_url)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n except UnicodeEncodeError:\n self._send_internal_server_error()",
"def filter_domain(name):\n def wrapped(request):\n \"\"\" Function used to filter request\n \"\"\"\n if request.environ.get('HTTP_HOST'):\n url = request.environ['HTTP_HOST']\n else:\n url = request.environ['SERVER_NAME']\n if url.lower() == name.lower():\n return True\n\n return False\n return wrapped",
"def redirect_to_ssl(self, domains):\n for dom in domains:\n try:\n self.installer.enhance(dom, \"redirect\")\n except errors.LetsEncryptConfiguratorError:\n logging.warn(\"Unable to perform redirect for %s\", dom)\n\n self.installer.save(\"Add Redirects\")\n self.installer.restart()",
"def test_redirects_shortlink_without_http_scheme(self):\n rv = self.post('www.seinfeld.com')\n assert '<a href=\"TheStakeOut\">TheStakeOut</a> is now short for <a href=\"www.seinfeld.com\">www.seinfeld.com</a>!' in rv.data\n rv = self.app.get('/TheStakeOut')\n assert rv.status_code == 302\n assert rv.location == 'http://www.seinfeld.com'",
"def skip_cross_domain_referer_check(request):\n is_secure_default = request.is_secure\n request.is_secure = lambda: False\n try:\n yield\n finally:\n request.is_secure = is_secure_default",
"def fix_website(raw_website):\n if url_is_good(raw_website):\n return raw_website\n else:\n return \"http://\" + raw_website",
"def redirect_to_security_check(redirect_to, request):\n netloc = urlparse.urlparse(redirect_to)[1]\n # Light security check -- make sure redirect_to isn't garbage.\n if not redirect_to or ' ' in redirect_to:\n return False\n # Heavier security check -- don't allow redirection to a diff host.\n elif netloc and netloc != request.get_host():\n return False\n return True",
"def test_load_https_uri_with_www_prefix(self):\n\n url = \"https://www.%s\" % (self.https_uri)\n\n self.browser.proxy_client.new_har(\"page\")\n\n po = self.catalog.load_pageobject('GenericPage')\n po.goto_page(url)\n\n har_entry = self.browser.page_load_details(url)\n\n assert har_entry is not None, \\\n \"failed to load the uri %s. http archive unavailable.\" \\\n % (url)\n\n assert self.browser.error_loading_page(har_entry) is False, \\\n \"failed to load the uri %s. http archive follows:\\n%s\" \\\n % (url,pprint.pformat(har_entry))",
"def host_valid_strict(self, host: str) -> bool:\n host = host[4:] if host.startswith('www.') else 'www.' + host\n return host in self.root_domains",
"def redirect(url):",
"def test_link_is_tracked_true_with_www(self):\n self.assertTrue(link_is_tracked(\"https://www.test.com/testurl\"))",
"def add_to_request_dict(request, request_dict, params):\n\n if not is_good_request(request, params):\n return\n\n if params['ignore_www']:\n if re.match(r\"www\\.\", request.url):\n # Remove www. from url ????????????????????\n request.url = request.url[4:]\n\n request_dict[request.url][0] += 1\n request_dict[request.url][1] += int(request.responce_time)",
"def test_forward(self):\n short_url = ShortURL.objects.create(url='http://example.com')\n response = self.client.get('/%s'%(short_url.key))\n self.assertEqual(response.status_code, 301)",
"def redirect(to):\r\n def _redirect(environ, start_response):\r\n args, kwargs = environ['wsgiorg.routing_args']\r\n start_response('301 MOVED PERMANENTLY',\r\n [('Location', to.format(*args, **kwargs))])\r\n return []\r\n return _redirect",
"def filter_ssl(request):\n if request.scheme == 'https':\n return True\n else:\n return False",
"def wrapped(request):\n if request.environ.get('HTTP_HOST'):\n url = request.environ['HTTP_HOST']\n else:\n url = request.environ['SERVER_NAME']\n if url.lower() == name.lower():\n return True\n\n return False",
"def test_apache_port_80_redirect_to_443(host):\n args = \"url=http://127.0.0.1 follow_redirects=none validate_certs=no\"\n request = host.ansible(\"uri\", args, check=False)\n assert request[\"status\"] == 301",
"def resolveRedirect(self, useHEAD=False):\n conn = self.getConnection()\n try:\n if useHEAD:\n conn.request('HEAD', '%s%s' % (self.path, self.query), None,\n self.header)\n else:\n conn.request('GET', '%s%s' % (self.path, self.query), None,\n self.header)\n self.response = conn.getresponse()\n # read the server's encoding, in case we need it later\n self.readEncodingFromResponse(self.response)\n except httplib.BadStatusLine:\n # Some servers don't seem to handle HEAD requests properly,\n # e.g. http://www.radiorus.ru/ which is running on a very old\n # Apache server. Using GET instead works on these (but it uses\n # more bandwidth).\n if useHEAD:\n return self.resolveRedirect(useHEAD=False)\n else:\n raise\n if self.response.status >= 300 and self.response.status <= 399:\n # to debug, print response.getheaders()\n redirTarget = self.response.getheader('Location')\n if redirTarget:\n try:\n redirTarget.encode('ascii')\n except UnicodeError:\n redirTarget = redirTarget.decode(\n self.getEncodingUsedByServer())\n if redirTarget.startswith('http://') or \\\n redirTarget.startswith('https://'):\n self.changeUrl(redirTarget)\n return True\n elif redirTarget.startswith('/'):\n self.changeUrl(u'%s://%s%s'\n % (self.protocol, self.host, redirTarget))\n return True\n else: # redirect to relative position\n # cut off filename\n directory = self.path[:self.path.rindex('/') + 1]\n # handle redirect to parent directory\n while redirTarget.startswith('../'):\n redirTarget = redirTarget[3:]\n # some servers redirect to .. although we are already\n # in the root directory; ignore this.\n if directory != '/':\n # change /foo/bar/ to /foo/\n directory = directory[:-1]\n directory = directory[:directory.rindex('/') + 1]\n self.changeUrl('%s://%s%s%s'\n % (self.protocol, self.host, directory,\n redirTarget))\n return True\n else:\n return False # not a redirect",
"def redirect(self):\n new_url = self.server.url + options.script_alias + '/'\n self.send_response(301, \"Moved (redirection follows)\")\n self.send_header(\"Content-type\", \"text/html\")\n self.send_header(\"Location\", new_url)\n self.end_headers()\n self.wfile.write(\"\"\"<html>\n<head>\n<meta http-equiv=\"refresh\" content=\"1; URL=%s\">\n</head>\n<body>\n<h1>Redirection to <a href=\"%s\">ViewVC</a></h1>\nWait a second. You will be automatically redirected to <b>ViewVC</b>.\nIf this doesn't work, please click on the link above.\n</body>\n</html>\n\"\"\" % tuple([new_url]*2))",
"def test_load_http_uri_with_www_prefix(self):\n\n url = \"http://www.%s\" % (self.http_uri)\n\n self.browser.proxy_client.new_har(\"page\")\n\n po = self.catalog.load_pageobject('GenericPage')\n po.goto_page(url)\n\n har_entry = self.browser.page_load_details(url)\n\n assert har_entry is not None, \\\n \"failed to load the uri %s. http archive unavailable.\" \\\n % (url)\n\n assert self.browser.error_loading_page(har_entry) is False, \\\n \"failed to load the uri %s. http archive follows:\\n%s\" \\\n % (url,pprint.pformat(har_entry))",
"def test_wsgi_script_name_on_domain_url(self):\n lh = LambdaHandler(\"tests.test_wsgi_script_name_settings\")\n\n event = {\n \"body\": \"\",\n \"resource\": \"/{proxy+}\",\n \"requestContext\": {},\n \"queryStringParameters\": {},\n \"headers\": {\n \"Host\": \"example.com\",\n },\n \"pathParameters\": {\"proxy\": \"return/request/url\"},\n \"httpMethod\": \"GET\",\n \"stageVariables\": {},\n \"path\": \"/return/request/url\",\n }\n response = lh.handler(event, None)\n\n self.assertEqual(response[\"statusCode\"], 200)\n self.assertEqual(response[\"body\"], \"https://example.com/return/request/url\")",
"def __addDDOSBypass(self, exchangeName):\n #bypassing cloudflare with cookies\n url = self.__exchanges[exchangeName].urls['www']\n tokens, user_agent = cfscrape.get_tokens(url)\n self.__exchanges[exchangeName].headers = {\n 'cookie': '; '.join([key + '=' + tokens[key] for key in tokens]),\n 'user-agent': user_agent,\n }",
"def safe_website(self):\n if not self.website.startswith(\"http://\"):\n return \"http://%s\" % self.website\n return self.website"
]
| [
"0.6378573",
"0.62721723",
"0.5859779",
"0.5771819",
"0.5680137",
"0.56411994",
"0.54752874",
"0.5462076",
"0.5380044",
"0.5353241",
"0.5254475",
"0.5163623",
"0.51251495",
"0.50596863",
"0.501662",
"0.4976819",
"0.49192634",
"0.4908197",
"0.48885605",
"0.4851343",
"0.48468718",
"0.4836696",
"0.4756232",
"0.47467652",
"0.47459173",
"0.47359008",
"0.47230688",
"0.47039765",
"0.469952",
"0.46696883"
]
| 0.8099394 | 0 |
Returns a list of neighbor positions: Position objects whose directiontomoveto is set to the direction the empty square moved. | def neighbors(self):
# find 0 - blank square
x0 = None
y0 = None
for i in range(4):
for j in range(4):
if self.get_tile(i,j) == 0:
y0 = i
x0 = j
if x0 is None or y0 is None:
return []
neighbor_list = []
# move 0 to the right
if x0 < 3:
new_position = Position(self.tiles)
temp = new_position.get_tile(y0,x0+1)
new_position.set_tile(y0,x0+1,0)
new_position.set_tile(y0,x0,temp)
new_position.directiontomoveto = 'r'
neighbor_list.append(new_position)
# move 0 to the left
if x0 > 0:
new_position = Position(self.tiles)
temp = new_position.get_tile(y0,x0-1)
new_position.set_tile(y0,x0-1,0)
new_position.set_tile(y0,x0,temp)
new_position.directiontomoveto = 'l'
neighbor_list.append(new_position)
# move 0 up
if y0 > 0:
new_position = Position(self.tiles)
temp = new_position.get_tile(y0-1,x0)
new_position.set_tile(y0-1,x0,0)
new_position.set_tile(y0,x0,temp)
new_position.directiontomoveto = 'u'
neighbor_list.append(new_position)
# move 0 down
if y0 < 3:
new_position = Position(self.tiles)
temp = new_position.get_tile(y0+1,x0)
new_position.set_tile(y0+1,x0,0)
new_position.set_tile(y0,x0,temp)
new_position.directiontomoveto = 'd'
neighbor_list.append(new_position)
return neighbor_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_neighbors(self):\n return list(map(self.game.square, [self.position - self.game.rules[\"row_len\"], self.position + 1, self.position + self.game.rules[\"row_len\"], self.position - 1]))",
"def empty_spots(self):\n\t\tret = []\n\t\tfor i in range(0, self.size):\n\t\t\tfor j in range(0, self.size):\n\t\t\t\tif(self.grid[i][j] == self.terminal):\n\t\t\t\t\tret.append((i,j))\n\t\treturn ret",
"def findNeighbours(self):\n neighbours = []\n\n for i in range(self.xCoordinate - 1, self.xCoordinate + 2):\n for j in range(self.yCoordinate - 1, self.yCoordinate + 2):\n if (not (i == self.xCoordinate and j == self.yCoordinate)) and (0 <= i <= 394 and 0 <= j <= 499):\n neighbours.append(PixelPosition(i, j))\n\n return neighbours",
"def update_neighbors(self):\n neighbors = []\n for i in range(-1, 2):\n for j in range(-1, 2):\n if (i, j) == (0, 0):\n continue\n try:\n y, x = self.loc[0]+i, self.loc[1]+j\n neighbor = self.board.array[y, x]\n if neighbor > 0:\n neighbors.append(neighbor)\n except:\n continue\n \n self.neighbors = neighbors",
"def get_neighbours(self):\n return []",
"def _get_neighbours(self, position):\n grid = self._grid\n x, y = position\n neighbours = []\n offsets = [(0,1),(1,0),(0,-1),(-1,0)]\n shuffle(offsets)\n for offset in offsets:\n i, j = offset\n position = (x + i, y + j)\n if grid.valid_position(position) and position not in self.shots:\n neighbours.append(position)\n return neighbours",
"def find_neighbors(self):\n x, y = self.position\n\n for i in range(3):\n for j in range(3):\n try:\n self.neighbors.append(self.stitches[(x - 1 + i, y - 1 + j)].position)\n except:\n pass\n\n # this cell will be added by default so we must delete at the end\n self.neighbors.remove(self.position)",
"def get_neighbours(self, cell: Position) -> Iterable[Position]:\n x, y = cell\n\n return [\n (x - 1, y - 1), (x, y - 1), (x + 1, y - 1),\n (x - 1, y), (x + 1, y),\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1),\n ]",
"def get_neighbors(self) -> List['games.saloon.tile.Tile']:\n neighbors = []\n\n for direction in Tile.directions:\n neighbor = getattr(self, \"tile_\" + direction.lower())\n if neighbor:\n neighbors.append(neighbor)\n\n return neighbors",
"def get_neighbors(start_square, visited=[]):\n neighbors = []\n\n # loop over possible x values\n for i in [start_square.x - 1, start_square.x, start_square.x + 1]:\n\n # drop neighbors outside of our region of interest\n if i < 0 or i > MAX_X:\n continue\n\n # loop over possible y values\n for j in [start_square.y - 1, start_square.y, start_square.y + 1]:\n\n # drop neighbors outside of our region of interest\n if j < 0 or j > MAX_Y:\n continue\n\n # Ignore ourself\n if i == start_square.x and j == start_square.y:\n continue\n\n # Ignore corner pieces\n if i == start_square.x - 1 and j != start_square.y:\n continue\n if i == start_square.x + 1 and j != start_square.y:\n continue\n\n # Deal with barriers\n found = False\n for square in visited:\n if square.pos == [i, j]:\n found = True\n break\n if found:\n continue\n\n neighbors.append(Square(i, j))\n\n return neighbors",
"def neighbors(\n self, state: Grid2D.State\n ) -> Iterable[Tuple[Grid2D.Action, Grid2D.State]]:\n # pylint: disable=invalid-name\n for a, cell in self.adjacent_coordinates(cell=state.agent_position):\n if not self.is_wall(cell):\n yield (a, Grid2D.State(cell))",
"def get_neighbours_and_directions(self, from_position):\n \n # Transform index into board matrix into index into index into neighbour matrix\n from_row_index = self.board_to_connection_index(from_position)\n row = self.connection_matrix[from_row_index]\n \n neighbours = []\n for col_num in range(0, len(row)): \n if row[col_num]:\n # Transform index into board index\n board_index = self.connection_to_board_index(col_num)\n if self.board[board_index[0]][board_index[1]].state != PegState.EMPTY:\n neighbours.append((board_index, row[col_num])) # Store board index and direction in neighbours\n return neighbours",
"def get_neighbors(self, pos):\n y, x = pos\n neighbors = []\n\n # Check if sarting position for dijsktra.\n if y == sys.maxsize:\n neighbors = [(self.size-1,i) for i in range(self.size)]\n elif y == -sys.maxsize:\n neighbors = [(0,i) for i in range(self.size)]\n elif x == sys.maxsize:\n neighbors = [(i,self.size-1) for i in range(self.size)]\n elif x == -sys.maxsize:\n neighbors = [(i,0) for i in range(self.size)]\n # Position inside board\n else:\n if y-1 >= 0:\n neighbors.append((y-1, x))\n if y+1 < self.size:\n neighbors.append((y+1, x))\n if y-1 >= 0 and x+1 <= self.size-1:\n neighbors.append((y-1, x+1))\n if y+1 < self.size and x-1 >= 0:\n neighbors.append((y+1, x-1))\n if x+1 < self.size:\n neighbors.append((y, x+1))\n if x-1 >= 0:\n neighbors.append((y, x-1))\n return neighbors",
"def get_move_list(self):\n return [\n tuple(x) for x in np.argwhere(self.board == HexBoard.EMPTY).tolist()\n ]",
"def get_all_neighbors(self):\n m, n = self.board.shape\n return as_strided(self.expanded_board,\n shape = (m,n,3,3), \n strides = self.expanded_board.strides + self.expanded_board.strides)",
"def neighbours(self):\n\n neighbours = []\n root = self.root\n if self == root:\n return neighbours\n\n ########################\n # IMMEDIATELY ADJACENT #\n sizes = [self.maxs[0] - self.mins[0], self.maxs[1] - self.mins[1]]\n coords = [(self.mins[0] + sizes[0] / 2, self.maxs[1] + sizes[1] / 2,),\n (self.maxs[0] + sizes[0] / 2, self.mins[1] + sizes[1] / 2,),\n (self.mins[0] + sizes[0] / 2, self.mins[1] - sizes[1] / 2,),\n (self.maxs[0] - sizes[0] / 2, self.mins[1] + sizes[1] / 2,),]\n # loop through top, right, bottom, left\n for i in range(4):\n x, y = coords[i]\n query_quad = root.query_xy(x, y)\n if query_quad is not None:\n same_size_idx = query_quad.location[: self.tree_depth]\n same_size_quad = root[same_size_idx]\n neighbours += list(self._get_border_children(same_size_quad, i))\n\n #############\n # DIAGONALS #\n root_sizes = [root.maxs[0] - root.mins[0], root.maxs[1] - root.mins[1]]\n xs, ys = (root_sizes / 2 ** root.max_tree_depth) / 2\n neighbours += [\n root.query_xy(self.mins[0] - xs, self.mins[1] - ys), # TL\n root.query_xy(self.maxs[0] + xs, self.mins[1] - ys), # TR\n root.query_xy(self.mins[0] - xs, self.maxs[1] + ys), # BL\n root.query_xy(self.maxs[0] + xs, self.maxs[1] + ys), # BR\n ]\n\n unique_neighbours = list(set(neighbours))\n try:\n unique_neighbours.remove(self)\n except ValueError:\n pass\n\n return unique_neighbours",
"def emptyNeighborsList(board, row, column):\n\n neighboursList = list(BoardUtils.neighborsList(board, 1, row, column))\n emptyNeighbours = []\n for row, column in neighboursList:\n if BoardUtils.isEmpty(board, row, column):\n emptyNeighbours.append((row, column))\n return emptyNeighbours",
"def __get_neighbors(self, goal):\n neighbors = set()\n start = self.__get_position(0, self.puzzle)\n # start_x = start[0]\n # start_y = start[1]\n # Get the below neighbor.\n if(start[0] - 1 >= 0):\n temp = self.__swap(start[0], start[1], start[0] - 1, start[1])\n neighbors.add(State(temp, self.g + 1, 'D', goal))\n # Get the above neighbor\n if(start[0] + 1 <= len(self.puzzle) -1):\n temp = self.__swap(start[0], start[1], start[0] + 1, start[1])\n neighbors.add(State(temp, self.g + 1, 'U', goal))\n # Get the right neighbor\n if(start[1] - 1 >= 0):\n temp = self.__swap(start[0], start[1], start[0], start[1] - 1)\n neighbors.add(State(temp, self.g + 1, 'R', goal))\n # Get the left neighbor\n if(start[1] + 1 <= len(self.puzzle[0]) -1):\n temp = self.__swap(start[0], start[1], start[0], start[1] + 1)\n neighbors.add(State(temp, self.g + 1, 'L', goal))\n\n return neighbors",
"def neighbours(self):\n seen = set()\n return [l.other(self) for l in self.dovetails \\\n if id(l) not in seen and not seen.add(id(l))]",
"def available_moves(self):\n moves = []\n for x, y in self.available_boards:\n moves.extend([self.to_position(x, y, i, j) for (i, j)\n in self.boards[x][y].empty_squares])\n return moves",
"def get_neighbours(x, y, board):\n return [get_left(x, y, board), get_upper(x, y, board), get_right(x, y, board), get_lower(x, y, board)]",
"def _find_neighbours(self):\n\n neighbours = []\n for i, p in enumerate(self.frame_0):\n nearests = np.where(np.linalg.norm(self.frame_0 - p, axis=1) <= self.R_n)[0]\n # delete self index\n index = np.argwhere(nearests==i)\n nearests = np.delete(nearests, index)\n neighbours.append(nearests)\n\n return neighbours",
"def get_neighbors(self, row, col):\n neighbors = set()\n for d in [-1,1]:\n if row+d >= 0 and row+d < self._height and \\\n (row+d,col) in self._empty_spaces:\n neighbors.add((row+d,col))\n if col+d >= 0 and col+d < self._width and \\\n (row,col+d) in self._empty_spaces:\n neighbors.add((row,col+d))\n return neighbors",
"def findImmediateNeighbours(self):\n immediateNeighbours = []\n\n if self.xCoordinate - 1 > 0:\n immediateNeighbours.append(PixelPosition(self.xCoordinate - 1, self.yCoordinate))\n\n if self.xCoordinate + 1 < 395:\n immediateNeighbours.append(PixelPosition(self.xCoordinate + 1, self.yCoordinate))\n\n if self.yCoordinate + 1 < 500:\n immediateNeighbours.append(PixelPosition(self.xCoordinate, self.yCoordinate + 1))\n\n if self.yCoordinate - 1 > 0:\n immediateNeighbours.append(PixelPosition(self.xCoordinate, self.yCoordinate - 1))\n\n return immediateNeighbours",
"def get_neighbours(self, cell):\n\t\tx,y = cell.find_id()\n\t\tlength = self.space.shape[1]\n\t\twidth = self.space.shape[0]\n\t\tif (length == 0 or width == 0 or x < 0 or x >= length or y < 0 or y >= width):\n\t\t\treturn []\n\t\tneighs = [(i,j) for i in range(y-1,y+2) if 0<=i<width for j in range(x-1,x+2) if 0<=j<length]\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tneighbours.append(self.space[neigh[0],neigh[1]])\n\t\treturn neighbours",
"def list_neighbors(current_row, current_col, grid_size):\n neighbors = []\n for row_offset, col_offset in [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1),\n (1, -1), (1, 0), (1, 1)]:\n new_row = current_row + row_offset\n new_col = current_col + col_offset\n if (new_row >= 0 and new_row < grid_size and new_col >= 0\n and new_col < grid_size):\n neighbors.append((new_row, new_col))\n return neighbors",
"def get_neighbors(self, pos):\r\n neighbors = []\r\n if pos[0] + 1 < self.size:\r\n neighbors.append((pos[0] + 1, pos[1]))\r\n if pos[0] - 1 >= 0:\r\n neighbors.append((pos[0] - 1, pos[1]))\r\n if pos[1] + 1 < self.size:\r\n neighbors.append((pos[0], pos[1] + 1))\r\n if pos[1] - 1 >= 0:\r\n neighbors.append((pos[0], pos[1] - 1))\r\n return neighbors",
"def get_neighbors(grid, x, y):\n out = []\n if x > 0:\n out.append(grid[x-1, y])\n if y > 0:\n out.append(grid[x, y-1])\n if y < grid.shape[1] - 1:\n out.append(grid[x, y+1])\n if x < grid.shape[0] - 1:\n out.append(grid[x+1, y])\n return out",
"def neighbours(pos):\r\n\t\tnbs = []\r\n\t\tfor direction in directions:\r\n\t\t\tnb = add(pos, direction)\r\n\t\t\tif is_inside(nb):\r\n\t\t\t\tnbs.append(nb)\r\n\t\treturn nbs",
"def get_legal_moves(self):\n moves = []\n if self.player_locations[self.whose_turn] is None:\n return self.get_blank_locations()\n matrix = [(1,0), (-1,0), (0,1), (0,-1), (1,1), (1,-1), (-1, 1), (-1,-1)]\n\n for dx, dy in matrix:\n x,y = self.player_locations[self.whose_turn]\n while x+dx <= xdim and x+dx >= 0 and y+dy <= ydim and y+dy >= 0:\n x = x+dx\n y = y+dx\n if self.board[x][y] : break\n moves.append((x,y))\n return moves"
]
| [
"0.7583234",
"0.73333657",
"0.72945344",
"0.7293684",
"0.725854",
"0.72296697",
"0.72086686",
"0.71733797",
"0.71366847",
"0.71238005",
"0.707542",
"0.7062021",
"0.7032389",
"0.70102596",
"0.6965794",
"0.6941387",
"0.6917996",
"0.6900293",
"0.6844919",
"0.6826912",
"0.68257666",
"0.6819659",
"0.68142366",
"0.6794661",
"0.6777948",
"0.6770957",
"0.67692715",
"0.6765685",
"0.67654103",
"0.6741333"
]
| 0.8379654 | 0 |
Add to the DB a league that does not yet exist in it. | def add_league(inp_to_add, type_to_add, con, host, root, password):
with con.cursor() as cur:
if type_to_add == "url":
league_soup = BeautifulSoup(requests.get(inp_to_add).text, 'html.parser')
league_site = inp_to_add
elif type_to_add == "country":
midterm_url = get_countries_dict()[inp_to_add]
league_soup = BeautifulSoup(requests.get(midterm_url).text, 'html.parser')
league_site = SOCCER_URL + league_soup.find('ul', class_="left-tree").li.a["href"]
else:
league_soup, league_site = get_first_search_result(
SOCCER_URL + "/search/competitions/?q=" + inp_to_add)
if league_soup:
cur.execute("SELECT MAX(id) FROM leagues")
league_id = cur.fetchall()[0][0]
addition = (league_soup.body.h1.text, league_soup.body.h2.text, league_site)
cur.execute("""INSERT INTO leagues (name, country, url) VALUES (%s, %s, %s)""", addition)
con.commit()
league_dict = {league_id: {'name': addition[0], 'url': addition[2]}}
add_all_teams_and_players_in_league(league_dict, con, host, root, password) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_game():\n\n if (request.json or \"teams\") not in request.json:\n abort(400)\n game = {\n \"id\": games[-1][\"id\"] + 1,\n \"teams\": request.json[\"teams\"],\n \"score\": request.json.get(\"score\", \"\"),\n \"city\": request.json.get(\"city\", \"\"),\n \"date\": request.json.get(\"date\", \"\"),\n }\n games.append(game)\n return (jsonify({\"game\": game}), 201)",
"async def giveaway_add(self, ctx):\n\n file = ctx.message.attachments[0]\n content = await file.read()\n data = json.loads(content)\n await self._insert_games(data)\n await ctx.reply(\"Games were added to the DB!\")",
"def add_team(inp_to_add, type_to_add, host, root, password):\r\n team_name = \"\"\r\n\r\n if type_to_add == \"url\":\r\n team_soup = BeautifulSoup(requests.get(inp_to_add).text, 'html.parser')\r\n team_site = inp_to_add\r\n else:\r\n team_soup, team_site = get_first_search_result(\r\n SOCCER_URL + \"/search/teams/?q=\" + inp_to_add)\r\n\r\n if team_soup:\r\n # Need to examine if league already exists, if not - add it. Then, get its LEAGUE_ID\r\n league_url = SOCCER_URL + team_soup.find('div', id=\"page_team_1_block_team_table_9-wrapper\").h2.a[\"href\"]\r\n find_league({league_url}, \"url\", host, root, password)\r\n\r\n team_name = team_soup.find(\"table\", class_=\"leaguetable sortable table\").tbody.find_all(\r\n 'tr', class_=[\"odd highlight team_rank\", \"even highlight team_rank\"])[0].find(\r\n 'td', class_=\"text team large-link\").a.text\r\n\r\n return team_name",
"def add_games(self):\n with open(self.csv_log, newline='') as f:\n # Open the csv\n reader = csv.reader(f)\n for line in reader:\n # Get the game data\n game = line[self.game_loc]\n if len(game) > 0:\n try:\n # Add it if it is unique\n g = Game(name=game)\n g.save()\n except IntegrityError:\n pass",
"def addTeam(teaminfo):\r\n team, auto, rc_comp, spirit_comp, video_comp = teaminfo\r\n if team_exists(team): # Team already exists\r\n print(\"Team\", team, \"already exists.\")\r\n else:\r\n with sqlite3.connect(database_file) as conn:\r\n #(teamname TEXT, autonomous TEXT, rc TEXT, spirit INT, video INT)\r\n conn.execute(\"INSERT INTO scores(teamname, autonomous, rc, spirit, video)\\\r\n VALUES('{0}', '{1}', '{2}', '{3}', '{4}');\".format(team, auto, rc_comp, spirit_comp, video_comp))",
"def add_game(user, date_played, level, was_won, score, time_taken):\n\n game = Game.objects.get_or_create(user=user, date_played=date_played)[0]\n game.level = level\n game.was_won = was_won\n game.score = score\n game.time_taken = time_taken\n\n game.save()\n return game",
"def add_teams(self, data):\n for k, v in data.items():\n try:\n self._db_cur.execute(\"insert or ignore into team_data \\\n (team_id, team_name) values (?, ?)\", (v, k))\n self._db_conn.commit()\n except sqlite3.Error as er:\n print er",
"def getLeagueByEspn(self, name):\n\n league, created = Leagues.objects.get_or_create(name_espn=name)\n return int(league.id)",
"def add_team():\n if request.method == 'POST':\n result = request.form\n teamImage = request.files['teamImage'].read()\n team = Team.query.filter_by(team_name=result['team_name']).first()\n if not team:\n team1 = Team(team_name=result['team_name'], team_image=teamImage)\n db.session.add(team1)\n db.session.commit()\n flash(result['team_name'] + ' is added successfully')\n teams = get_team()\n return render_template('team-players.html', teams=teams)\n else:\n flash(result['team_name'] + ' is already present')\n return render_template('addteam.html')",
"def add_team(self, team):\n return self \\\n .team_identifier(team.identifier) \\\n .fold() \\\n .coalesce(\n # The team exists.\n __.unfold()\n .project('vertex', 'exists')\n .by(__.identity().elementMap())\n .by(__.constant(True)),\n # The team does not exist.\n __.addV('Team')\n .property(T.id, str(uuid.uuid4()))\n .property(Cardinality.single, 'identifier', team.identifier)\n .property(Cardinality.single, 'name', team.name)\n .project('vertex', 'exists')\n .by(__.identity().elementMap())\n .by(__.constant(False)),\n )",
"async def add(self, ctx, game):\n\n user = ctx.message.author\n\n if add(game, user.id):\n await self.bot.say(\"{}, {} was added to your library.\".format(user.mention, game))\n else:\n await self.bot.say(\"{}, you already have this game in your library.\".format(user.mention))",
"def add_players(game: LolGame, players: List[dict], add_page_id: bool = False) -> LolGame:\n\n for team_side in game[\"teams\"]:\n team_side_leaguepedia = \"1\" if team_side == \"BLUE\" else \"2\"\n\n for idx, game_player in enumerate(game[\"teams\"][team_side][\"players\"]):\n try:\n # We get the player object from the Leaguepedia players list\n player_latest_data = next(\n p\n for p in players\n if p[\"Side\"] == team_side_leaguepedia\n and lit.get_id(p[\"Champion\"], object_type=\"champion\") == game_player[\"championId\"]\n )\n\n game_player[\"role\"] = role_translation[player_latest_data[\"gameRoleNumber\"]]\n\n unique_identifiers = LeaguepediaPlayerIdentifier(\n name=player_latest_data.get(\"currentGameName\"),\n irlName=player_latest_data.get(\"irlName\"),\n country=player_latest_data.get(\"Country\"),\n residency=player_latest_data.get(\"Residency\"),\n age=player_latest_data.get(\"Age\"),\n role=player_latest_data.get(\"Role\"),\n team=player_latest_data.get(\"Team\"),\n kills=player_latest_data.get(\"Kills\"),\n deaths=player_latest_data.get(\"Deaths\"),\n assists=player_latest_data.get(\"Assists\"),\n ss=player_latest_data.get(\"SummonerSpells\"),\n gold=player_latest_data.get(\"Gold\"),\n cs=player_latest_data.get(\"CS\"),\n items=player_latest_data.get(\"Items\"),\n trinket=player_latest_data.get(\"Trinket\"),\n keystoneMastery=player_latest_data.get(\"KeystoneMastery\"),\n keystoneRune=player_latest_data.get(\"KeystoneRune\"),\n runes=player_latest_data.get(\"Runes\"),\n )\n\n if add_page_id:\n unique_identifiers[\"pageId\"] = int(player_latest_data[\"pageId\"])\n\n game_player[\"uniqueIdentifiers\"] = {\"leaguepedia\": unique_identifiers}\n\n except StopIteration:\n # Since we cannot get the role properly, we try to infer it\n game_player[\"role\"] = list(role_translation.values())[idx]\n\n return game",
"def add_user(username):\n db_names = []\n db_scores = []\n new_user = models.Joined(username=username, score=100)\n print(\"New user\", new_user)\n #we need to see if the user already exists in the database\n exists = bool(\n models.Joined.query.filter_by(username=username).first())\n print(exists)\n flag = True #pylint explained that this was the best practice\n if exists != flag: #gets if user is already in DB\n add_user_name(new_user)\n\n all_people = models.Joined.query.order_by(models.Joined.score.desc()).all()\n for people in all_people:\n db_names.append(people.username) #appends username to database\n db_scores.append(people.score)\n return db_names, db_scores",
"def add_league_teams(league_diction, team_count, host, root, password):\r\n teams_diction = scrape_teams(league_diction, team_count)\r\n create_teams(host, root, password, dict_to_read=teams_diction)\r\n\r\n return teams_diction",
"async def saveleague(self, ctx, *, league_id):\n if is_support_guild(ctx.guild.id):\n await ctx.send('Sorry, this discord does not allow update, saveid, '\n 'leaderboard, and series commands so as not to overload me. '\n 'Try `!careerstats` or `!yearlystats` with your customer ID to test '\n 'or go to #invite-link to bring the bot to your discord for all functionality')\n return\n\n await self.save_league.call(ctx, league_id)",
"def add_team(display_name, indexed_name, game, division, fingerprint, now=None):\n\ttry:\n\t\tteam = session.query(Team).filter(Team.fingerprint == fingerprint).one()\n\t\tteam.display_name = display_name\n\t\tteam.indexed_name = indexed_name\n\texcept sa_orm.exc.NoResultFound:\n\t\tteam = Team(display_name=display_name, indexed_name=indexed_name,\n\t\t\t\tgame=game, division=division, fingerprint=fingerprint)\n\t\tsession.add(team)\n\n\tsession.commit()\n\treturn team.id",
"def add_match(team1_id, team2_id, time, game, division, fingerprint, now=None):\n\ttry:\n\t\tmatch_id = session.query(Match)\\\n\t\t\t\t.filter(Match.fingerprint == fingerprint)\\\n\t\t\t\t.one()\\\n\t\t\t\t.id\n\t\treturn match_id\n\texcept sa_orm.exc.NoResultFound:\n\t\t# This match does not exist; continue to add the match.\n\t\tsession.rollback()\n\n\ttry:\n\t\t# Add the match.\n\t\tmatch = Match(team1_id=team1_id, team2_id=team2_id, game=game, division=division, time=time,\n\t\t\t\tfingerprint=fingerprint)\n\t\tsession.add(match)\n\t\tsession.flush()\n\t\tmatch_id = match.id\n\n\t\t# Add each opponent for the match.\n\t\tmatch_opponent1 = MatchOpponent(\n\t\t\t\tteam_id=team1_id, match_id=match_id, time=time, opponent_id=team2_id)\n\t\tmatch_opponent2 = MatchOpponent(\n\t\t\t\tteam_id=team2_id, match_id=match_id, time=time, opponent_id=team1_id)\n\t\tsession.add(match_opponent1)\n\t\tsession.add(match_opponent2)\n\t\tsession.commit()\n\n\t\treturn match.id\n\texcept sa.exc.IntegrityError:\n\t\t# The commit failed because teams with the given identifiers are missing.\n\t\tsession.rollback()\n\t\traise common_db.DbException._chain()",
"def add(self, PlugLead):\n\n self.check_conflicts(PlugLead)\n self.plugleads.append(PlugLead)",
"def _addplayer(opteid, optrid, optplayer):\n\n # everything looks good so lets prep to add. # 2330|1163|tom brady|tom|brady|TM||PRT|\n optplayer = _sanitizeName(optplayer) # sanitize.\n namesplit = optplayer.split() # now we have to split the optplayer into first, last. (name needs to be parsed before)\n fndm = doublemetaphone(namesplit[0]) # dm first.\n lndm = doublemetaphone(namesplit[1]) # dm last.\n # connect to the db and finally add.\n with sqlite3.connect(DB) as db:\n try:\n cursor = db.cursor()\n cursor.execute(\"INSERT INTO players VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\", (opteid, optrid, optplayer, namesplit[0], namesplit[1], fndm[0], fndm[1], lndm[0], lndm[1]))\n db.commit()\n #return(\"I have successfully added player {0}({1}).\".format(optplayer, opteid))\n return True\n except sqlite3.Error, e:\n print(\"ERROR: I cannot add {0}. Error: '{1}'\".format(optplayer, e))\n return None",
"def team_add(token_user):\n if not json_param_exists('name') or \\\n not json_param_exists('type'):\n abort(400, \"one or more required parameter is missing\")\n name = request.json['name']\n team_type = TeamType.query.filter_by(name=request.json['type']).first()\n if not team_type:\n abort(400, \"invalid team type\")\n\n if team_type.name == 'other_team':\n if not token_user.has_permission('team.create') and \\\n not token_user.has_permission('team.create.elevated'):\n abort(403, 'team creation is not permitted')\n else: # creating any team other than 'other_team' requires elevated\n if not token_user.has_permission('team.create.elevated'):\n abort(403, 'insufficient permissions to create a team of this type')\n\n team = Team(name=name)\n team.team_type = team_type\n\n try:\n get_db().add(team)\n get_db().commit()\n except IntegrityError:\n abort(409, 'team name is already in use')\n\n return '', 201",
"def addbot(self, bot):\n\n if bot:\n for i in range(len(self.bots)-1, -1, -1):\n if self.bots[i].name == bot.name:\n del self.bots[i]\n self.bots.append(bot)",
"def _add_new_word(self, word):\n if word not in self.word_to_id:\n word_id = len(self.word_to_id)\n self.word_to_id[word] = word_id\n self.id_to_word[word_id] = word",
"def load_database(self):\n # If there is already data, do not load\n if self:\n raise DatabaseError('Data already loaded!')\n\n # Gather all data from the table\n data = self.cursor.execute(\n 'SELECT unique_id, name, wins, time_stamp, '\n 'last_win FROM gungame_winners'\n )\n data = data.fetchall()\n\n # Are there no winners to add?\n if not data:\n return\n\n # Loop through all the past winners and their data\n for unique_id, name, wins, time_stamp, last_win in data:\n\n # Add the current winner to the database\n instance = self[unique_id]\n instance.name = name\n instance.wins = int(wins)\n instance.time_stamp = float(time_stamp)\n instance.last_win = float(last_win)",
"def addTeam(request):\n registered = False\n if request.method == 'POST':\n team_form = TeamForm(data=request.POST)\n if team_form.is_valid():\n team = team_form.save()\n registered = True\n else:\n print(team_form.errors)\n else:\n team_form = TeamForm()\n return render(request,'footBallApp/team.html',\n {'team_form':team_form,\n 'registered':registered})",
"def new_game(blank_game, user_id=None):\n if user_id:\n g.db.remove({'_id': user_id}, justOne=True)\n new_id = g.db.insert({'game': blank_game.serialise()})\n flash('New user successfully created')\n return new_id",
"def insert_champion_match_data(champion_match_data):\n conn = get_connect()\n cursor = conn.execute(\"SELECT * FROM championMatchData where matchId = ? AND championId = ?\",\n [champion_match_data[0], champion_match_data[1]])\n result_list = cursor.fetchall()\n if len(result_list) == 0:\n conn.execute(\"INSERT INTO championMatchData \\\n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\", champion_match_data)\n print(\"champion_match_data (\" + str(champion_match_data[0]) + \",\" + str(champion_match_data[1]) + \") is inserted\")\n else:\n print(\"champion_match_data \" + str(champion_match_data[0]) + \",\" + str(champion_match_data[1]) + \" already exists!\")\n conn.commit()\n conn.close()\n return",
"def insert_match(self, gameid):\n if Match.query.filter(Match.gameid == gameid).first():\n self.logger.info(\"Match {} already exists in the DB\".format(gameid))\n return True\n match_json = self.rc.get_match(gameid)\n if not match_json:\n self.logger.warning(\"API did not return data for this gameid: {}\".format(gameid))\n return False\n match_json = self.lower_keys(match_json)\n # Get column names\n match_columns = Match.__table__.columns.keys()\n # Remove all k:v pairs that do not match column names\n to_del = []\n for k, v in match_json.items():\n if k not in match_columns:\n to_del.append(k)\n # del match_json[k]\n for k in to_del:\n del match_json[k]\n match = Match(**match_json)\n match.gamecreation = datetime.utcfromtimestamp(match.gamecreation // 1000)\n self.db.session.add(match)\n self.db.session.commit()\n return True",
"async def add_to_team(self, player : Player, team):\r\n if player in self.remaining:\r\n self.teams[team].append(player)\r\n self.remaining.remove(player)\r\n return discord.Embed(title=\"Valorant 10 Man Bot\",\r\n description=\"{} has been drafted to team {}\".format(get_member_name(player,lower=False), \":a:\" if team == \"A\" else \":b:\"))\r\n else:\r\n return discord.Embed(title=\"Valorant 10 Man Bot\",description=\"Sorry, {} is already drafted\".format(get_member_name(player)))",
"def create_games(self, league, round, match_list):\n try:\n cursor = self.conn.cursor()\n for match in match_list:\n command = '''\n INSERT INTO Match (League, Round, P1, P2)\n VALUES (?, ?, ?, ?)\n '''\n cursor.execute(command, (league, round, match[0], match[1]))\n self.conn.commit()\n except BaseException as e:\n self.log.log_error('Fehler beim erstellen der Spiele', e)\n raise e",
"def update_leagues(self, summonerid: int):\n self.logger.info(\"Update leagues info for summonerid: {}\".format(summonerid))\n # Get leagues data from API\n leagues_json = self.rc.get_summoner_leagues_by_summoner_id(summonerid)\n if not leagues_json:\n self.logger.warning(\"Leagues JSON not returned by RiotConnector for summonerid: {}\".format(summonerid))\n return\n # leagues_json = RequestHandler.lower_keys(leagues_json)\n self.logger.debug(\"Leagues json for summonerid {}: {}\".format(summonerid, leagues_json))\n for league_dict in leagues_json:\n league_dict['summonerid'] = summonerid\n # If an entry exists for this summonerid and queuetype, then update it, otherwise add new entry\n existing = UserLeague.query.filter_by(summonerid=summonerid, queuetype=league_dict['queueType']).first()\n if existing:\n self.logger.info(\"Existing league info found for summoner id: {}, queue: {}\".format(summonerid,\n league_dict[\n 'queueType']))\n self.logger.debug(\"Updating with values: {}\".format(RequestHandler.lower_keys(league_dict)))\n for k, v in RequestHandler.lower_keys(league_dict).items():\n setattr(existing, k, v)\n else:\n self.db.session.add(UserLeague(**RequestHandler.lower_keys(league_dict)))\n self.db.session.commit()"
]
| [
"0.61241",
"0.5901289",
"0.58466893",
"0.5826113",
"0.5742285",
"0.5730009",
"0.5710978",
"0.5699382",
"0.5605475",
"0.5599539",
"0.5587349",
"0.55752534",
"0.55563235",
"0.554479",
"0.5541232",
"0.55322415",
"0.54096913",
"0.538382",
"0.5365119",
"0.53349566",
"0.5314468",
"0.5288812",
"0.52520245",
"0.525097",
"0.5247018",
"0.5211577",
"0.52054614",
"0.52032244",
"0.5193611",
"0.5171632"
]
| 0.6966496 | 0 |
Get the HTML of the first relevant result on soccerway.com. | def get_first_search_result(searching_site, player=None):
search_soup = BeautifulSoup(requests.get(searching_site).text, 'html.parser')
first_result = None
result_soup = None
result_site = None
if player:
search_result = search_soup.find('table', class_="playerstats table")
if search_result:
first_result = search_soup.find('table', class_="playerstats table").\
find('td', class_="player").a["href"]
else:
search_result = search_soup.find('ul', class_="tree search-results")
if search_result:
first_result = search_soup.find('ul', class_="tree search-results").li.a.attrs['href']
if first_result:
result_site = SOCCER_URL + first_result
result_soup = BeautifulSoup(requests.get(result_site).text, 'html.parser')
return result_soup, result_site | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def html(self, first_date = \"\"):\n\n link = \"https://en.wikipedia.org/w/index.php?title=\" + self.title + \"&offset=\" + first_date + \"&limit=500&action=history\"\n return bs(requests.get(link).text, 'lxml')",
"def get_html(query, search_type):\n params = {\n '_nkw': query,\n '_stpos': 60640, # My zip code\n '_sop': {'new': 10, 'soon': 1, 'cheap': 2}[search_type],\n '_ipg': 200, # Results per page (up to 200)\n '_udhi': 100 # Max price\n }\n \n # Make request and turn into soup\n response = requests.get(BASE_URL, params=params)\n soup = BeautifulSoup(response.text, 'html.parser')\n \n # Isolate listings (first div is not a listing)\n listings = soup.find_all('div', class_='s-item__info clearfix')\n if len(listings) > 1:\n return listings[1:]\n else:\n return []",
"def get_result_office(soup):\n return soup.find('h2').text",
"def get_certain_joke(html):\n soup = BeautifulSoup(html, 'lxml')\n\n # for i in len(soup.select('div.content')):\n # print(soup.select('div.content')[i].get_text())\n\n joke_content = soup.select('div.content')[0].get_text()\n\n return joke_content",
"def fetch_conversation_starter():\n\n url = 'https://www.conversationstarters.com/generator.php'\n\n try:\n response = requests.get(url)\n html_content = response.text\n soup = BeautifulSoup(html_content, 'html.parser')\n conv_starter = soup.find_all(text=True)[22].strip()\n return conv_starter\n except Exception as e:\n print(\"Error occurred fetching conversation starter:\\n\", e)",
"def get_results():\n # store info in a dictionary {name -> shortname}\n res = {}\n session = requests.Session()\n handle_url('http://www.creators.com/comics/cat-seeall.html', session, res)\n save_result(res, json_file)",
"def find_rc(soup):\n lst = soup.find_all('div', attrs={\"class\":\"rc\"})\n if len(lst)==0:\n return None\n\n sites = []\n for elt in lst:\n try:\n #class r is just url and title\n url = elt.find(\"h3\", attrs={\"class\":\"r\"}).find(\"a\").get(\"href\")\n title = elt.find(\"h3\", attrs={\"class\":\"r\"}).get_text()\n\n except:\n url = elt.find(\"div\", attrs={\"class\":\"r\"}).find(\"a\").get(\"href\")\n title = elt.find(\"div\", attrs={\"class\":\"r\"}).find(\"h3\", attrs={\"class\":\"LC20lb\"}).get_text()\n\n\n snippet1 = elt.find(\"div\", attrs={\"class\":\"s\"}) #text from page\n snippet2 = elt.find(\"div\", attrs={\"class\":\"P1usbc\"}) #extra text\n\n #we're grabbing one or the other or showing missing message\n if snippet1 != None and snippet1.find(\"span\", attrs={\"class\":\"st\"}) != None:\n snippet = snippet1.find(\"span\", attrs={\"class\":\"st\"}).get_text()\n elif snippet2 != None:\n snippet = snippet2.get_text()\n else:\n snippet = \"\"\n # print(\"------------\")\n # print(\"MISSING snippet -\", url, title)\n # print(\"------------\")\n\n sites.append({'url':url, 'title': title, 'snippet': snippet})\n\n return sites",
"def scrape_google(html_content):\n soup = BeautifulSoup(html_content)\n tag = soup.a\n company_name = tag.get_text()\n url = tag['href']\n #print(company_name,url)\n #return company_name, url\n return url",
"def get_clubs_html():\n url = 'https://ocwp.apps.pennlabs.org'\n return get_html(url)",
"def retrieve_offers_html(search_string):\n url = generate_request_url(search_string)\n return requests.get(url).text",
"def get_html_content():\n url = \"https://www.worldometers.info/coronavirus/\"\n req_data = requests.get(url).text\n soup = BeautifulSoup(req_data, 'html.parser')\n html_data = soup.select(\"#main_table_countries_today > tbody:nth-child(2) > tr[style='']\")\n return html_data",
"def scrape(self):\n\n self.url = self.headline.url\n\n # Should raise exception...\n if not self.parsing_template:\n return None, None, None, None, None\n\n try:\n response = self.download()\n self.source = response.text\n except:\n return None, None, None, None, None\n\n soup = BeautifulSoup(response.content, \"html.parser\")\n\n if soup:\n return self.parse(soup)\n else:\n return None, None, None, None, None",
"def get_result_ht(wd):\n try:\n result = wd.find_element_by_id(\"js-partial\").text\n ht = clean_goals(result)\n ht = ht.split(\",\")\n return ht[0]\n except:\n return \"N/A HT Result\"",
"def get_doctor_html(url):\n headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'Accept-Encoding':'gzip, deflate, br',\n 'Accept-Language':'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',\n 'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'}\n \n response = requests.get(url, headers=headers)\n html = response.text\n status_code = response.status_code\n \n # If the status code is not 200, execute the following section\n # This section repeats the visit, increasing the time to sleep each time\n # Repeat 16 times at most\n count = 0\n while status_code != 200 and count < 16:\n response = requests.get(url=url,headers={'User-Agent': user_agent})\n response.encoding = 'cp936'\n\n html = response.text\n status_code = response.status_code\n print(status_code)\n \n count += 1\n time.sleep(count*0.1)\n \n #print(html)\n #print(count)\n #print(response.status_code)\n return html",
"def get_results():\n # store info in a dictionary {name -> shortname}\n res = {}\n session = requests.Session()\n handle_url('http://www.gocomics.com/features', session, res)\n handle_url('http://www.gocomics.com/explore/editorial_list', session, res)\n handle_url('http://www.gocomics.com/explore/sherpa_list', session, res)\n save_result(res, json_file)",
"def get_search_results(text, out_file=None, num_res=3):\n # specify the source website\n text += ' site:tableau.com'\n text = urllib.parse.quote_plus(text)\n\n url = 'https://google.com/search?q=' + text\n USER_AGENT = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}\n \n # TODO: add delay here?\n response = requests.get(url,headers=USER_AGENT)\n\n soup = BeautifulSoup(response.text, 'html.parser')\n result_block = soup.find_all('div', attrs={'class': 'g'})\n\n final_result = []\n for rb_ind in range(len(result_block)):\n if len(final_result)==num_res:\n # done sraping\n break\n \n rb = result_block[rb_ind]\n # print(rb_ind)\n if rb.find('h3'):\n title = rb.find('h3').text\n link = rb.find('a', href=True)['href']\n\n desc = rb.find(class_='IsZvec').text\n \n if not desc:\n # print(rb_ind)\n # print(\"got here\")\n desc = rb.find(class_='ILfuVd')\n if desc:\n desc = desc.text\n else:\n desc = ''\n final_result.append([title,link,desc])\n print('\\n'.join([title,link,desc]))\n\n if out_file is not None:\n with open(out_file,\"a+\",encoding='utf8') as f:\n f.writelines([r + '\\n' for r in final_result])\n \n return final_result",
"def most_recent_html(results):\n\n if len(results) == 0:\n return no_results_html()\n else:\n common_label_classname = \"has-margin-top-10 has-margin-bottom-10 is-size-3 has-text-weight-semibold\"\n\n overhead_label = html.Div(\n f\"Recently Published\",\n className=common_label_classname\n )\n\n formatted_results = [format_result_html(\n result) for result in results]\n paper_table = html.Table(\n formatted_results,\n className=\"table is-fullwidth is-bordered is-hoverable is-narrow is-striped\",\n )\n results_elements = [paper_table]\n\n return html.Div(\n [\n overhead_label,\n html.Div(\n results_elements\n ),\n ],\n className=\"has-margin-top-20 has-margin-bottom-20 msweb-fade-in\"\n )",
"def get_top_story_bbc():\n\ttopStory = []\n\tresponse = requests.get('https://www.bbc.co.uk/news/england/coventry_and_warwickshire')\n\tbbcContent = BeautifulSoup(response.content, 'html.parser')\n\ttopStory.append(bbcContent.findAll('span', {'class' : 'title-link__title-text'})[0].text) \n\ttopStory.append(bbcContent.findAll('p', {'class' : 'skylark__summary'})[0].text)\n\treturn topStory",
"def scrape(self):\n pass",
"def _scrape(self):",
"def get_search_results_html(search_query=None):\n\n if search_query:\n # Insert the search term into the database\n insert_query(search_query)\n\n # Format results as HTML\n search_results = f'<p>You searched for: {search_query}</p>'\n search_results += '<p>No results found.</p>'\n return search_results\n\n # No search term specified, so return an empty string\n return \"\"",
"def extractSearchResults(self, html):\n results = list()\n soup = BeautifulSoup(html, 'html.parser')\n div = soup.find('div', id='main')\n if (type(div) == types.NoneType):\n div = soup.find('div', id='center_col')\n if (type(div) == types.NoneType):\n div = soup.find('body')\n if (type(div) != types.NoneType):\n lis = div.findAll('a')\n if(len(lis) > 0):\n for link in lis:\n if (type(link) == types.NoneType):\n continue\n \n url = link['href']\n if url.find(\".google\") > 6:\n continue\n \n url = self.extractUrl(url)\n if(cmp(url, '') == 0):\n continue\n title = link.renderContents()\n title = re.sub(r'<.+?>', '', title)\n result = SearchResult()\n result.setURL(url)\n print '### URL: ' + url\n result.setTitle(title)\n span = link.find('div')\n if (type(span) != types.NoneType):\n content = span.renderContents()\n content = re.sub(r'<.+?>', '', content)\n result.setContent(content)\n results.append(result)\n return results",
"def get_first_item(self):\n params = urllib.parse.urlencode({'o':'1', 'q':self.query})\n url = 'https://www.leboncoin.fr/annonces/offres/ile_de_france/?{:s}'.format(params) # Cree l'url de recherche en get\n html = urllib.request.urlopen(url)\n if url != html.geturl():\n return None\n soup = BeautifulSoup.BeautifulSoup(html, 'html5lib')\n try:\n products = soup.section.find_all('a', 'list_item clearfix trackable')\n except Exception as e:\n print('Nothing found on leboncoin')\n return None\n for product in products: # recupere les differentes informations de chaque produit\n if str(product.section.h2).strip() == 'None':\n continue\n name = product.section.h2.contents[0].strip()\n price = self.__get_price(product)\n link = 'http:' + product['href']\n return (name, price, link)\n return None",
"def _extract_html(self, url):\n self.response = requests.get(url, timeout=5)\n self.html = BeautifulSoup(self.response.content, \"lxml\") if self.response.ok else None\n # return self.html",
"async def _fetch(self, session, url, proxy=None, raw=False, which_site=False):\n print(url)\n result = None\n site = None\n if 'hare' in url: # {'Unknown': -1, 'Pixnet': 0, 'Hares': 1}\n site = self._websites['Hares']\n elif 'pixnet' in url:\n site = self._websites['Pixnet']\n else:\n site = self._websites['Unknown']\n\n count = 1\n while count <= 2:\n soup = ''\n status = 0\n try:\n async with session.get(url, proxy=proxy) as response:\n source_code = await response.text('utf-8')\n status = response.status\n soup = source_code if raw else BeautifulSoup(source_code, 'lxml')\n except Exception as e:\n print('Connection error: ' + str(e))\n soup = None\n finally:\n result = (url, soup, status, site) if which_site else (url, soup, status)\n if status != 0:\n return result\n if 'searcharticle' not in url:\n count += 1\n result = (url, soup, status, site) if which_site else (url, soup, status)\n return result",
"def scrape(self):\n try:\n self.result = urlfetch.fetch(self.url)\n except DownloadError:\n self.result = urlfetch.fetch(self.url) \n if ((self.result.status_code == 200) and\n (self.result.content_was_truncated == 0)):\n self.soup = BeautifulSoup(self.result.content)\n else:\n logging.critical(\"Bad Status Code: \", self.result.status_code, self.url)\n sys.exit(1)",
"def get_search_result_data_from_soup(soup, debug=False, show_soup=False):\n\n # extract code snippet with search title\n search_results_num_soup = soup.find(\"h2\", {\"class\": \"searchresults-title\"}) \n if show_soup is True:\n print(\"*** SOUP FOR SEARCH RESULT HEADER *****************\")\n print(search_results_num_soup)\n print(\"*******************************************\")\n search_results = {}\n\n if search_results_num_soup is not None:\n search_result_text = search_results_num_soup.text \n try:\n search_url = search_results_num_soup.find(\"a\").attrs['href']\n except:\n print(traceback.format_exc())\n search_result_text = None\n search_url = None\n else:\n search_result_text = None\n search_url = None\n search_results['search_url'] = search_url\n\n # get the search term (everything behind \"Treffer: \")\n try:\n search_text = re.search('Treffer: (.*)', search_result_text).group(1)\n search_text = search_text.strip('\\\"')\n except:\n print(traceback.format_exc())\n search_text = None\n search_results['search_text'] = search_text\n\n # get the number of hits\n try:\n search_hits_reg = re.search(r'\\d+', search_result_text)\n search_hits = int(search_hits_reg.group(0))\n except:\n print(traceback.format_exc())\n search_hits = 0\n\n search_results['search_hits'] = search_hits\n if debug is True:\n print(f\"Search url {search_results['search_url']}\" +\n f\"\\nText:{search_results['search_text']}, number hits:{search_results['search_hits']}\")\n return search_results",
"def parseSearchHtml(self):\n pass",
"def parseSearchHtml(self):\n pass",
"def get_city_job(html):\n soup = BeautifulSoup(html, 'html.parser')\n city = soup.find(class_=\"subtle loc\").get_text()\n if city:\n return city\n return None"
]
| [
"0.6606933",
"0.63835824",
"0.63004875",
"0.626508",
"0.61528665",
"0.6132819",
"0.6128713",
"0.61236644",
"0.6098358",
"0.60884625",
"0.6087206",
"0.60443574",
"0.60235274",
"0.59097105",
"0.5905273",
"0.58991724",
"0.58679825",
"0.5866748",
"0.5862152",
"0.5857842",
"0.5853376",
"0.5812768",
"0.5806469",
"0.5797459",
"0.5778126",
"0.57749707",
"0.5767244",
"0.5754958",
"0.5754958",
"0.5751723"
]
| 0.6680494 | 0 |
Get the countries that exist in soccerway.com and the midterm URL to their first leagues | def get_countries_dict():
competitions_page_soup = BeautifulSoup(requests.get(
SOCCER_URL + "/competitions/").text, 'html.parser')
competitions_page_soup = competitions_page_soup.find('ul', class_='areas')
countries_soup = competitions_page_soup.find_all('div', class_="row")
countries_dict = {}
for country_soup in countries_soup:
countries_dict[country_soup.a.text.strip().title()] = SOCCER_URL + country_soup.a["href"]
return countries_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_leagues_and_countries(source=utils.get_native_source):\n if not isinstance(source, games.models.Source):\n # If I used source=native_source() or if native_source was a global variable then\n # during db initialization (running command initialize) you would get an error since\n # it gets its value when the database is empty.\n source = source()\n logger.info(\"getting leagues and countries from source %s...\", source)\n if not source:\n return [], []\n data, meta, status_code = sportmonks.countries.all(include='leagues.seasons')\n if not data:\n # if the status code is not 200 data and meta are None\n return [], []\n # with open('sportmonks/response_texts/aws_01.txt', 'w') as outfile:\n # json.dump(meta, outfile, indent=4)\n # json.dump(data, outfile, indent=4)\n\n pre_countries, pre_competitions = [], []\n\n try:\n # Notice that only the first supported sport will be processed (currently this is is acceptable since we only\n # support football and so the first supported sport will always be football)\n sport_sids = parse_sport(meta)\n sports = []\n for sport_sid in sport_sids:\n sport = games.models.Sport.by_sid(sid=sport_sid, source=source)\n if not sport:\n logger.info(\"Sport contained in the response with sid {} is not supported\".format(sport_sid))\n continue\n sports.append(sport)\n if not sports:\n logger.error(\"No supported sport in the response\")\n return [], []\n football_gname = games.naming.sport_names.get('football', None)\n football = games.models.Sport.objects.get(name=football_gname)\n if football not in sports:\n logger.info(\"Football is not in response\")\n return [], []\n # logger.debug(\"Trying to get sport from source: %s and sid: %s\", source, sport_sid)\n sport_gname = football_gname\n for item in data:\n try:\n country_sid = item.get('id')\n # logger.debug('country_sid: %s', country_sid)\n country_sname = item.get('name')\n # logger.debug('country_sname: %s', country_sname)\n extra = item.get('extra')\n # logger.debug('extra: %s', extra)\n leagues = item.get('leagues').get('data')\n # logger.debug('leagues: %s', leagues)\n try:\n fifa_code = extra.get('fifa') # some countries might lack extra information\n except AttributeError:\n fifa_code = None\n except Exception as e:\n logger.data_error('%s', e)\n continue\n pre_country = pre_models.PreCountry(source=source, sname=country_sname, sid=country_sid, fifa_code=fifa_code)\n pre_countries.append(pre_country)\n for league in leagues:\n try:\n # sportmonks uses sgname for leagues. 
I use this sgname as an sname (comp_season_specific name)\n competition_sname = league.get('name')\n # logger.debug('competition_sname: %s', competition_sname)\n sid = league.get('id')\n # logger.debug('sid: %s', sid)\n seasons = league.get('seasons').get('data')\n # logger.debug('seasons: %s', seasons)\n except Exception as e:\n logger.data_error('%s', e)\n continue\n competition_season_utils = []\n # comp_seas_sids = []\n for season in seasons:\n try:\n season_name = season.get('name')\n # logger.debug('season_name: %s', season_name)\n # season_name = seasons_special_treatment(season_name)\n competition_season_sid = season.get('id')\n # logger.debug('competition_season_sid: %s', competition_season_sid)\n is_current_season = season.get('is_current_season', False)\n # logger.debug('is_current_season: %s', is_current_season)\n except Exception as e:\n logger.data_error('%s', e)\n continue\n # comp_seas_sids.append(competition_season_sid)\n zak_season_name = games.models.Season.zakandify_season_string(season_name)\n season = zakanda.utils.season_from_season_name(zak_season_name)\n competition_season_type = get_competition_season_type(season)\n competition_season_util = pre_models.CompetitionSeasonUtil(season, competition_season_sid, competition_sname, competition_season_type)\n competition_season_utils.append(competition_season_util)\n # logger.debug(\"competition season sids: %s\", comp_seas_sids)\n pre_competition = pre_models.PreCompetition(\n source=source, sname=competition_sname, sid=sid, sport_name=sport_gname,\n competition_season_utils=competition_season_utils, pre_country=pre_country)\n pre_competitions.append(pre_competition)\n\n except Exception as e:\n logger.error('%s Unexpected problem with sportmonks.countries.all from source %s', e, source)\n logger.info(\"%s pre countries and %s pre competitions were created\", len(pre_countries), len(pre_competitions))\n return pre_countries, pre_competitions",
"def get_countries():\n call = build_call('attr', 'country')\n return request_data(call)",
"def _get_countries():\n print('-c, -C [country]\\\n \\n [country]=\\\n \\n AR\\t: Argentina\\\n \\n AT\\t: Austria\\\n \\n BR\\t: Brazil\\\n \\n BY\\t: Belarus\\\n \\n CA\\t: Canda\\\n \\n DE\\t: Germany\\\n \\n FR\\t: France\\\n \\n GB\\t: Great Britain\\\n \\n GH\\t: Ghana\\\n \\n HU\\t: Hungary\\\n \\n ID\\t: Indonesia\\\n \\n IL\\t: Israel\\\n \\n JP\\t: Japan\\\n \\n KR\\t: Korea\\\n \\n MA\\t: Morocco\\\n \\n MY\\t: Malaysia\\\n \\n NL\\t: Netherlands\\\n \\n NO\\t: Norway\\\n \\n OM\\t: Oman\\\n \\n PK\\t: Pakistan\\\n \\n RU\\t: Russia\\\n \\n SA\\t: Saudi Arabia\\\n \\n TH\\t: Thailand\\\n \\n TW\\t: Taiwan\\\n \\n UA\\t: Ukraine\\\n \\n US\\t: United States\\\n \\n UY\\t: Uruguay\\\n \\n VE\\t: Venezuela\\\n \\n VN\\t: Vietnam\\\n \\n .....\\n common usage: opengate -c JP')",
"def getCountry(soup):\n title_details = self.getAdditionalDetails(soup)\n pattern = r'country_of_origin.*?>(.*?)<'\n country = re.findall(pattern, str(title_details))\n return country",
"def scrapeCountries(conn):\n query = \"SELECT * FROM country\"\n c = conn.cursor()\n c.execute(query)\n results = []\n for row in c:\n result={}\n country_fields = {}\n\n result['pk'] = country_codes[row[0]]\n result['model'] = \"infohub.country\"\n result['fields'] = country_fields\n country_fields['name'] = row[1]\n\n results.append(result)\n return results",
"def test_get_countries(self):\n pass",
"def all_matched_searches(affiliations, de_facto_affiliations):\n geolocator = Nominatim()\n backup_geolocator = Google(\"AIzaSyCc3U_YDbluAh_Eja8Zc4e4PX04ndyDXgE\")\n iso_3166_1 = pd.read_csv(os.path.abspath(os.path.join(__file__, os.pardir,\n \"ISO_3166_1.csv\")), na_filter=False)\n iso_3166_2_us = pd.read_csv(os.path.abspath(os.path.join(__file__,\n os.pardir, \"ISO_3166_2_US.csv\")), na_filter=False)\n iso_dict = {**{country['Alpha-2 code']: [country[\n 'English short name (upper/lower case)'], country[\n 'Alpha-2 code'], country['Alpha-3 code']] for country in\n iso_3166_1.to_dict(orient='records')}, **{state['Code']: [\n state[\"Subdivision name\"], state['Code'], state['Code']] for\n state in iso_3166_2_us.to_dict(orient='records')}, 'unknown': [\n 'unknown'] * 3}\n countries = {**{country['Alpha-2 code']: country['Alpha-2 code'] for\n country in iso_3166_1.to_dict(orient='records')}, **{country[\n 'Alpha-3 code']: country['Alpha-2 code'] for country in\n iso_3166_1.to_dict(orient='records')}, **{country[\n 'English short name (upper/lower case)']: country[\n 'Alpha-2 code'] for country in iso_3166_1.to_dict(orient=\n 'records')}, **{state['Code']: state['Code'] for state in\n iso_3166_2_us.to_dict(orient='records')}, **{state[\n 'Subdivision name']: state['Code'] for state in\n iso_3166_2_us.to_dict(orient='records')}, 'unknown': 'unknown',\n '?': 'unknown', 'Taiwan': 'TW', \"PRC\": \"CN\", \"PR China\": \"CN\",\n \"UK\": \"GB\", \"United Kingdom\": \"GB\", \"Vietnam\": \"VN\",\n \"South Korea\": \"KR\", \"Macedonia\": \"MK\",\n \"Macedonia (FYROM)\": \"MK\", \"Iran (Islamic Republic of)\": \"IR\"}\n us = {'US', 'USA', 'United States', 'U.S.A', \"United States of America\"}\n us_states = {state['Subdivision name']: state['Code'] for state in\n iso_3166_2_us.to_dict(orient='records')}\n usa_states = dict()\n for state in us_states:\n usa_states[countries[state]] = countries[state]\n usa_states[countries[state][-2:]] = countries[state]\n if state not in countries:\n countries[state] = us_states[state]\n us_states = {**us_states, **usa_states}\n del usa_states\n country_count = {country: 0 for country in iso_dict}\n for k, v in affiliations.items():\n time.sleep(1)\n if \"country\" not in affiliations[k]:\n address_components = None\n while not address_components:\n time.sleep(1)\n try:\n address_components = [x.strip() for x in\n geolocator.reverse(k, language=\n 'en').address.split(',')]\n except GeocoderServiceError as g:\n try:\n address_components = list({com_g.strip() for com_g in [\n com_i for com_h in [com[0].split(\n ',') for com in\n backup_geolocator.reverse(k,\n language='en')] for com_i in com_h\n ]})\n except:\n print(colored(g, 'yellow'))\n next\n if bool([u for u in us if u in address_components]):\n local_states = [state for state in us_states if state in\n address_components]\n if bool(local_states):\n for state in local_states :\n affiliations[k][\"country\"] = us_states[state]\n country_count[affiliations[k][\"country\"]] = \\\n country_count[\n affiliations[\n k][\n \"country\"]\n ] + 1\n else:\n for country in countries:\n if \"country\" not in affiliations[k]:\n if country != 'United States of America' and country \\\n in address_components:\n affiliations[k][\"country\"] = countries[country]\n country_count[affiliations[k][\"country\"]] = \\\n country_count[\n affiliations[\n k][\n \"country\"]\n ] + 1\n if \"country\" not in affiliations[k]:\n country = input(colored(\"{}\\n{}? 
\".format(str(\n address_components), str(affiliations[k][\n \"affiliations\"])), 'magenta'))\n if len(country):\n affiliations[k][\"country\"] = countries[country]\n country_count[affiliations[k][\"country\"]] = country_count[\n affiliations[\n k][\"country\"]]\\\n + 1\n if \"country\" in affiliations[k]:\n print(\"{}: {}\".format(iso_dict[affiliations[k][\"country\"]][0], str(\n address_components)))\n save_heatmap_data(affiliations)\n return(affiliations, country_count)",
"def get_all_country_urls(self, proxies=None):\n html_rsp = self._get_url_wrapper('https://socialblade.com/youtube/top/100', proxies=proxies)\n if not html_rsp:\n return False\n country_id_list = self._extract_country_ids(html_rsp)\n url_list = list()\n for country_id in country_id_list:\n url_list.append('https://socialblade.com/youtube/top/country/' + country_id)\n return url_list",
"def countries(self, unit=None, units=None, timezone=None,\r\n rollup=None, limit=None, unit_reference_ts=None):\r\n params = base.get_params(None, locals())\r\n return self._get('countries', params)",
"def get_all_jh_countries():\n download_jh_data()\n file_path = os.path.join(JH_DATA_DIR, \"covid_confirmed.csv\")\n data = pd.read_csv(file_path)\n countries = data[\"Country/Region\"].to_list()\n countries = list(dict.fromkeys(countries))\n return countries",
"def countries(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"countries\")",
"def get_countriesdata(url, downloader, with_world=True):\n headers, iterator = downloader.get_tabular_rows(url, dict_form=True)\n countriesdata = dict()\n for row in iterator:\n countryiso3 = row[\"REF_AREA\"]\n countriesdata[countryiso3] = countriesdata.get(countryiso3, []) + [row]\n if with_world:\n countriesdata[WORLD] = countriesdata.get(WORLD, []) + [row]\n\n return countriesdata, headers",
"def get_countries():\n\n cnx,cur = connect_to_db() #get connection with db\n cur.execute(\"SELECT DISTINCT country FROM locations order by country\")\n lst = cur.fetchall()\n cur.close()\n cnx.close()\n return lst",
"def countries(cls) -> list[str]:\n\n with cls.cursor() as cur:\n cur.execute(\"SELECT country_code FROM countries;\")\n return list(row[0] for row in cur.fetchall())",
"def countries(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"countries\")",
"def countries(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"countries\")",
"def get_country(self, data: dict):\n country_entries = data.get(\"P27\")\n if country_entries is None or len(country_entries) == 0:\n country_entries = data.get(\"P19\")\n if country_entries is None or len(country_entries) == 0:\n return [{\"country\": \"Unknown\", \"region\": \"Unknown\"}]\n countries = []\n for entry in country_entries:\n country = entry.get(\"mainsnak\").get(\"datavalue\").get(\"value\").get(\"id\")\n countries.append(self._reference.get_country(country))\n return countries",
"def test_get_country_by_geo_location(self):\n pass",
"async def get_country_tour(self, flights_params, country_name,\n lang='en', limit=1) -> tuple:\n excursions = await self.excursion_by_country_search(country_name, limit=limit)\n flights = await flights_instance.get_flights(flights_params)\n hotels = await self.get_hotels(query=country_name, limit=limit, lang=lang)\n return excursions, flights, hotels",
"def get_country(self, country):\n if country == \"United Kingdom\": return \"en\"\n if country == \"Portugal\": return \"pt\"\n\n result = self.session.get(\"https://en.ogame.gameforge.com\")\n soup = BeautifulSoup(result.content, \"html.parser\")\n\n code_list = soup.find(\"ul\", {\"id\": \"mmoList1\"})\n countries = {}\n for tag in code_list.find_all(\"li\"):\n link = tag.find(\"a\")[\"href\"]\n name = tag.string.strip() # name of the country\n code = link.split(\".\")[0].replace(\"//\", \"\")\n countries[name] = code # save to the dict\n\n # check if input was ok\n if not country in countries.keys():\n self.crash(\"Country\", country, \"was not found on the list.\")\n if len(countries[country]) != 2:\n self.crash(\"Can't fetch code for country\", country)\n\n return countries[country]",
"def get_country(user, session, flag_pattern):\r\n page = \"https://www.fanfiction.net/u/\" + str(user)\r\n country = \"\"\r\n with closing(session.get(page, timeout=10.0, stream=True)) as r:\r\n lines = 0;\r\n for rline in r.iter_lines(chunk_size=10):\r\n lines += 1\r\n rstr = repr(rline)\r\n if rstr.find('Joined <sp') > 0:\r\n match = re.search(flag_pattern, rstr)\r\n if match:\r\n country = match.group(1)\r\n break\r\n if lines > 600:\r\n break\r\n return country",
"def country() -> str:",
"def scrape_all_world_cup_goals():\n def scrape_goals_year(year):\n urls = scrape_world_cup_scoreboard(year)\n goals = []\n for url in urls:\n goals.extend(scrape_fifa_goals(url, 'FIFA World Cup'))\n return goals\n\n l = []\n for year in sorted(world_cup_mapping.keys()):\n l.extend(scrape_goals_year(year))\n return l",
"def get_school_name_urls():\n\tschools_tree = get_tree(\"http://www.gla.ac.uk/schools/\")\n\tns = 'http://exslt.org/regular-expressions'\n\tpath = '//div[@class=\"row standardContent\"]//a[re:match(@href, \"schools/[A-Za-z]+/\")]'\n\t# Get all the <a> elements on the page which link to a school page\n\ta_elems = schools_tree.xpath(path, namespaces={'re':ns})\n\tbase_url = \"http://www.gla.ac.uk\"\n\turls = []\n\tnames = []\n\n\tfor a in a_elems:\n\t\t# make school staff page url\n\t\tstaff_page_url = base_url + a.get(\"href\") + \"staff/\"\n\t\turls.append(staff_page_url)\n\t\t# get name of school\n\t\tschool_name = a.text\n\t\tnames.append(school_name)\n\n\t# create list of tuples\n\tschool_names_urls = zip(names, urls)\n\treturn school_names_urls",
"def get_channels_by_country(self, url, proxies=None):\n country_id = url.split('/')[-1] # The country id iso code is always last on sb country urls.\n html_rsp = self._get_url_wrapper(url, proxies=proxies)\n if not html_rsp:\n return False\n channel_list = self._extract_channels_from_sb_country(html_rsp)\n return {country_id: channel_list}",
"def countries(self, unit=None, units=None, timezone=None,\r\n limit=None, unit_reference_ts=None):\r\n params = base.get_params(None, locals())\r\n return self._get('countries', params)",
"def country_list(cls):\n return jsonify(result=[\n {'key': c.id, 'value': c.name}\n for c in current_website.countries\n ])",
"def get_country_code(country_name):\n # worldmap_chart = pygal.maps.world.World()\n # for code, name in worldmap_chart:\n\n for code, name in i18n.COUNTRIES:\n\n # for code, name in COUNTRIES.items():\n if name == country_name:\n print(code)\n return code\n # If the country wasn't found, return None.\n return None",
"def country(name):\n return location_db().find(name=name)[\"country\"]",
"def get_country_names(data, world=True):\n if world:\n return ['World'] + data['country_long'].unique().tolist()\n else:\n return data['country_long'].unique().tolist()"
]
| [
"0.70956886",
"0.6197914",
"0.607584",
"0.59360814",
"0.5923666",
"0.58099824",
"0.5723477",
"0.57135236",
"0.5656138",
"0.56534845",
"0.5647318",
"0.5647285",
"0.56464356",
"0.56439775",
"0.5614668",
"0.5614668",
"0.55881184",
"0.55697626",
"0.5568917",
"0.5506007",
"0.54929125",
"0.5474704",
"0.5440106",
"0.5428226",
"0.54137754",
"0.54095954",
"0.53971034",
"0.53947693",
"0.53936243",
"0.53868216"
]
| 0.6228564 | 1 |
Add all teams in a league which doesn't exist in the DB to it. | def add_league_teams(league_diction, team_count, host, root, password):
teams_diction = scrape_teams(league_diction, team_count)
create_teams(host, root, password, dict_to_read=teams_diction)
return teams_diction | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_teams(self, data):\n for k, v in data.items():\n try:\n self._db_cur.execute(\"insert or ignore into team_data \\\n (team_id, team_name) values (?, ?)\", (v, k))\n self._db_conn.commit()\n except sqlite3.Error as er:\n print er",
"def add_all_teams_and_players_in_league(league_dict, con, host, root, password):\r\n with con.cursor() as cur:\r\n cur.execute(\"\"\"SELECT MAX(id) FROM teams\"\"\")\r\n team_counter = cur.fetchall()[0][0]\r\n\r\n cur.execute(\"\"\"SELECT MAX(id) FROM players\"\"\")\r\n player_count = cur.fetchall()[0][0]\r\n\r\n cur.execute(\"\"\"SELECT MAX(id) FROM injuries\"\"\")\r\n injury_count = cur.fetchall()[0][0]\r\n\r\n cur.execute(\"\"\"SELECT MAX(id) FROM player_season\"\"\")\r\n player_season_count = cur.fetchall()[0][0]\r\n\r\n cur.execute(\"\"\"SELECT MAX(id) FROM player_team\"\"\")\r\n player_team_count = cur.fetchall()[0][0]\r\n\r\n teams_dict = add_league_teams(league_dict, team_counter, host, root, password)\r\n\r\n add_teams_players(teams_dict, player_count, injury_count, player_season_count,\r\n player_team_count, host, root, password)",
"def check_existing_teams(user, teams_from_lms):\n teams = user.teams.all()\n for team in teams:\n if team not in teams_from_lms:\n user.teams.remove(team)",
"def creat_team(self):\n te = Teams()\n per = Persons()\n teamlist = []\n for one in per.find({'role':'leader'},{'team_name'}):\n if one['team_name'] not in teamlist:\n teamlist.append(one['team_name'])\n # print len(teamlist)\n for team in teamlist:\n tmp = {'name': '', 'leader_email': '', 'person_emails': []}\n tmp['name'] = team\n tmp['leader_email'] = per.get_one({'team_name':team,'role':'leader'})['email']\n for one in per.find({'team_name':team},{'email'}):\n tmp['person_emails'].append(one['email'])\n print tmp\n search_t = te.get_one({'name':team})\n if search_t is None:\n te.insert_one(tmp)\n else:\n te.update_one({'name':team,'leader_email':'','person_emails':''},tmp,cover=True)",
"def add_users_to_team(team, users):\n assignment = team.assignment_fk\n if len(TeamMember.objects.filter(team_fk=team)) + len(users) > assignment.max_num_team_members:\n raise Exception('Maximum number of team members exceeds')\n\n with transaction.atomic():\n for user in users:\n if TeamMember.objects.filter(team_fk=team, user_fk=user):\n raise Exception('Some users have had belonged team')\n TeamMember.objects.create(team_fk=team, user_fk=user,\n assignment_fk=assignment, is_leader=False)\n\n return True",
"def teams_to_SQL (self, cursor, gid, games):\n\n\t\tvalues = []\n\t\tquery_with = \"WITH teams AS (\"\n\t\tfor game in games.findall('.'):\n\t\t\tquery_with += \"SELECT ? AS id, ? AS abbreviation, ? AS file_code, ? AS city, ? AS name UNION SELECT ? AS id, ? AS abbreviation, ? AS file_code, ? AS city, ? AS name UNION \"\n\t\t\tvalues.append(game.attrib['away_team_id'])\n\t\t\tvalues.append(game.attrib['away_name_abbrev'])\n\t\t\tvalues.append(game.attrib['away_file_code'])\n\t\t\tvalues.append(game.attrib['away_team_city'])\n\t\t\tvalues.append(game.attrib['away_team_name'])\n\t\t\tvalues.append(game.attrib['home_team_id'])\n\t\t\tvalues.append(game.attrib['home_name_abbrev'])\n\t\t\tvalues.append(game.attrib['home_file_code'])\n\t\t\tvalues.append(game.attrib['home_team_city'])\n\t\t\tvalues.append(game.attrib['home_team_name'])\n\t\tquery_with = query_with[:-7] + \")\"\n\n\t\tmerge = query_with + \"MERGE team AS t USING teams AS s ON t.id = s.id \"\n\t\tmerge += \"WHEN NOT MATCHED BY TARGET THEN INSERT (id, abbreviation, file_code, city, name) VALUES (s.id, s.abbreviation, s.file_code, s.city, s.name) \"\n\t\tmerge += \"WHEN MATCHED AND ((t.abbreviation IS NULL AND s.abbreviation IS NOT NULL) OR (t.file_code IS NULL AND s.file_code IS NOT NULL) OR (t.city IS NULL AND s.city IS NOT NULL) OR (t.name IS NULL AND s.name IS NOT NULL)) THEN UPDATE SET t.abbreviation = s.abbreviation, t.file_code = s.file_code, t.city = s.city, t.name = s.name;\"\n\t\t\n\t\tcursor.execute(merge, values)",
"def add_team(self, team):\n return self \\\n .team_identifier(team.identifier) \\\n .fold() \\\n .coalesce(\n # The team exists.\n __.unfold()\n .project('vertex', 'exists')\n .by(__.identity().elementMap())\n .by(__.constant(True)),\n # The team does not exist.\n __.addV('Team')\n .property(T.id, str(uuid.uuid4()))\n .property(Cardinality.single, 'identifier', team.identifier)\n .property(Cardinality.single, 'name', team.name)\n .project('vertex', 'exists')\n .by(__.identity().elementMap())\n .by(__.constant(False)),\n )",
"def _register_teams(self):\n # loop through all agents\n for agent_id, agent_body in self.registered_agents.items():\n # find their team name\n team = agent_body.properties['team']\n\n # register the team (if not already done) and the agent in it\n if team not in self.__teams:\n self.__teams[team] = []\n self.__teams[team].append(agent_id)",
"def set_league(teams, l):\n\t\n\t#~ print teams[0].name\n\t\n\tif l>0 and l<10:\n\t\tfor team in teams:\n\t\t\tteam.league = l\n\t\t\n\telse:\n\t\tprint(\"Error! Only leagues between 1 and 9 can be set.\")\n\t\n\treturn teams",
"def add_players(game: LolGame, players: List[dict], add_page_id: bool = False) -> LolGame:\n\n for team_side in game[\"teams\"]:\n team_side_leaguepedia = \"1\" if team_side == \"BLUE\" else \"2\"\n\n for idx, game_player in enumerate(game[\"teams\"][team_side][\"players\"]):\n try:\n # We get the player object from the Leaguepedia players list\n player_latest_data = next(\n p\n for p in players\n if p[\"Side\"] == team_side_leaguepedia\n and lit.get_id(p[\"Champion\"], object_type=\"champion\") == game_player[\"championId\"]\n )\n\n game_player[\"role\"] = role_translation[player_latest_data[\"gameRoleNumber\"]]\n\n unique_identifiers = LeaguepediaPlayerIdentifier(\n name=player_latest_data.get(\"currentGameName\"),\n irlName=player_latest_data.get(\"irlName\"),\n country=player_latest_data.get(\"Country\"),\n residency=player_latest_data.get(\"Residency\"),\n age=player_latest_data.get(\"Age\"),\n role=player_latest_data.get(\"Role\"),\n team=player_latest_data.get(\"Team\"),\n kills=player_latest_data.get(\"Kills\"),\n deaths=player_latest_data.get(\"Deaths\"),\n assists=player_latest_data.get(\"Assists\"),\n ss=player_latest_data.get(\"SummonerSpells\"),\n gold=player_latest_data.get(\"Gold\"),\n cs=player_latest_data.get(\"CS\"),\n items=player_latest_data.get(\"Items\"),\n trinket=player_latest_data.get(\"Trinket\"),\n keystoneMastery=player_latest_data.get(\"KeystoneMastery\"),\n keystoneRune=player_latest_data.get(\"KeystoneRune\"),\n runes=player_latest_data.get(\"Runes\"),\n )\n\n if add_page_id:\n unique_identifiers[\"pageId\"] = int(player_latest_data[\"pageId\"])\n\n game_player[\"uniqueIdentifiers\"] = {\"leaguepedia\": unique_identifiers}\n\n except StopIteration:\n # Since we cannot get the role properly, we try to infer it\n game_player[\"role\"] = list(role_translation.values())[idx]\n\n return game",
"def fillTeamsInPopulation(self):\n for team in self.team_pop:\n while len(team.learners) < team.max_size:\n learners = [l for l in self.learner_pop if l not in team.learners]\n if len(learners) == 0:\n break\n else:\n learner = choice(learners)\n team.addLearner(learner)",
"def create_teams(a_season):\n\n team_a = Team(name=\"Team A\", season=a_season)\n team_a.save()\n for p in create_players(7, 0):\n team_a.players.add(p)\n team_a.save()\n team_b = Team(name=\"Team B\", season=a_season)\n team_b.save()\n for p in create_players(7, 16):\n team_b.players.add(p)\n team_b.save()",
"def delete_teams_all(self, team_name):\n self.execute(TABELLE['teams']['delete']['all'])",
"def add_games(self, games):\n for game in games:\n # TODO(kevin): Use Benchapp opponent ID rather than creating a new\n # opponent for every game.\n response = self._session.post(self._add_game_url,\n data={\n 'type': 'GAME',\n 'subType': 'PLAYOFF' if game.playoffs else 'REGULAR',\n 'opponentID': 0,\n 'newTeamName': game.opponent,\n 'homeAway': 'Home' if game.is_home else 'Away',\n 'dateValue': str(game.time.date()),\n 'hr': game.time.strftime('%I'),\n 'min': game.time.strftime('%M'),\n 'am-pm': game.time.strftime('%p'),\n 'duration_hrs': \"1\",\n 'location': game.location\n })\n response.raise_for_status()",
"def _validteams(self):\n db_filename = self.registryValue('dbLocation')\n with sqlite3.connect(db_filename) as conn:\n cursor = conn.cursor()\n query = \"select team from mlb\"\n cursor.execute(query)\n teamlist = []\n for row in cursor.fetchall():\n teamlist.append(str(row[0]))\n\n return teamlist",
"def test_handle_list_no_teams(self):\n self.db.query.return_value = []\n self.assertTupleEqual(self.testcommand.handle(\"team list\", user),\n (\"No Teams Exist!\", 200))",
"def _load_team_map(self) -> None:\n self._cursor.execute(\"select id, franchid, teamid, lgid from teams where yearid = %s;\", (self._yearid,))\n all_teams = self._cursor.fetchall()\n for team in all_teams:\n r = {'id': team[0], 'franchid': team[1], 'teamid': team[2], 'lgid': team[3]}\n self._team_map[team[1]] = r",
"def get_all_teams(self):\n return self._db.Teams.find({})",
"def merge_team(self, team):\n for m in team.members:\n self.members.append(m)\n for n in team.neighbors:\n self.neighbors.append(n)\n self.calculate_a()\n return self",
"def write_team_to_db(cls, file_name, teams_list):\n conn = sqlite3.connect('database.db')\n c = conn.cursor()\n query = \"DELETE FROM `teams_list`;\"\n c.execute(query)\n\n for team in teams_list:\n c.execute(\"INSERT INTO teams_list (name) VALUES (?)\", [team])\n conn.commit()\n conn.close()",
"def add_league(inp_to_add, type_to_add, con, host, root, password):\r\n with con.cursor() as cur:\r\n if type_to_add == \"url\":\r\n league_soup = BeautifulSoup(requests.get(inp_to_add).text, 'html.parser')\r\n league_site = inp_to_add\r\n elif type_to_add == \"country\":\r\n midterm_url = get_countries_dict()[inp_to_add]\r\n league_soup = BeautifulSoup(requests.get(midterm_url).text, 'html.parser')\r\n league_site = SOCCER_URL + league_soup.find('ul', class_=\"left-tree\").li.a[\"href\"]\r\n else:\r\n league_soup, league_site = get_first_search_result(\r\n SOCCER_URL + \"/search/competitions/?q=\" + inp_to_add)\r\n\r\n if league_soup:\r\n cur.execute(\"SELECT MAX(id) FROM leagues\")\r\n league_id = cur.fetchall()[0][0]\r\n\r\n addition = (league_soup.body.h1.text, league_soup.body.h2.text, league_site)\r\n cur.execute(\"\"\"INSERT INTO leagues (name, country, url) VALUES (%s, %s, %s)\"\"\", addition)\r\n con.commit()\r\n\r\n league_dict = {league_id: {'name': addition[0], 'url': addition[2]}}\r\n add_all_teams_and_players_in_league(league_dict, con, host, root, password)",
"def collect_teams(year):\n\n team_list = Team.objects.filter(year=year).order_by('location')\n teams = []\n for t in team_list:\n team = {\n 'id': t.abbreviation,\n 'team': t,\n }\n teams.append(team)\n return teams",
"def add_team(inp_to_add, type_to_add, host, root, password):\r\n team_name = \"\"\r\n\r\n if type_to_add == \"url\":\r\n team_soup = BeautifulSoup(requests.get(inp_to_add).text, 'html.parser')\r\n team_site = inp_to_add\r\n else:\r\n team_soup, team_site = get_first_search_result(\r\n SOCCER_URL + \"/search/teams/?q=\" + inp_to_add)\r\n\r\n if team_soup:\r\n # Need to examine if league already exists, if not - add it. Then, get its LEAGUE_ID\r\n league_url = SOCCER_URL + team_soup.find('div', id=\"page_team_1_block_team_table_9-wrapper\").h2.a[\"href\"]\r\n find_league({league_url}, \"url\", host, root, password)\r\n\r\n team_name = team_soup.find(\"table\", class_=\"leaguetable sortable table\").tbody.find_all(\r\n 'tr', class_=[\"odd highlight team_rank\", \"even highlight team_rank\"])[0].find(\r\n 'td', class_=\"text team large-link\").a.text\r\n\r\n return team_name",
"def remove_unknown_teams(all_data):\n\t# Mappings\n\ttry:\n\t\tteam_id_map = load_json('team_id_mapping.json', fdir=os.path.join('data', 'archived_data'))\n\t\tconf_id_map = load_json('conf_id_mapping.json', fdir=os.path.join('data', 'archived_data'))\n\texcept:\n\t\tprint \"Need to generate team or conference mapping first, returning None\"\n\t\treturn None\n\t# Reverse mappings\n\tteam_id_map_rev = dict([(int(new),int(old)) for old,new in team_id_map.iteritems()])\n\tconf_id_map_rev = dict([(int(new),int(old)) for old,new in conf_id_map.iteritems()])\n\t# Determine games with unknown teams/conferences\n\tis_unknown_team = lambda tid0, tid1: tid0 not in team_id_map_rev or tid1 not in team_id_map_rev\n\tis_unknown_conf = lambda cid0, cid1: cid0 not in conf_id_map_rev or cid1 not in conf_id_map_rev\n\tzip_tids = zip(all_data['this_TeamId'].values, all_data['other_TeamId'].values)\n\tzip_cids = zip(all_data['this_conferenceId'].values, all_data['other_conferenceId'].values)\n\tixUnknown_team = [is_unknown_team(ttid, otid) for ttid, otid in zip_tids]\n\tixUnknown_conf = [is_unknown_conf(tcid, ocid) for tcid, ocid in zip_cids]\n\tixKeep = np.logical_not(np.logical_or(ixUnknown_team, ixUnknown_conf))\n\treturn all_data[ixKeep]",
"def check_and_add_default_teams(user):\n for course in user.course.all():\n user_courses_without_teams = course.teams.exclude(\n team_id__icontains='default_team_'\n )\n team_id = f'default_team_for_{course.frontend_course_id}'\n default_team = list(course.teams.filter(team_id=team_id))\n\n if user_courses_without_teams.exists():\n # delete this student from default team\n user.teams.remove(*default_team)\n else:\n # add this student to default team\n user.teams.add(*default_team)\n\n user.save()",
"def update_member_teams(request, team_name):\n if request.method == 'GET':\n email = request.session.get('email', None)\n member = Member.objects.get(email=email)\n all_teams = Team.objects.all()\n\n for team in all_teams:\n if team.name == team_name:\n member.teams.add(team)\n break\n\n message = 'Member teams updated succesffully'\n messages.add_message(request, messages.INFO, message)\n return redirect('teamsapp:teams')\n else:\n raise Http404('Not allowed')",
"def add_teams_players(teams_dictionary, player_counter, injury_counter, player_season_counter,\r\n player_team_counter, host, root, password):\r\n players, injuries, players_seasons, players_teams = \\\r\n scrape_players(teams_dictionary, player_counter, injury_counter, player_season_counter,\r\n player_team_counter)\r\n if players:\r\n create_players(host, root, password, dict_to_read=players)\r\n create_injuries(host, root, password, dict_to_read=injuries)\r\n create_players_by_team(host, root, password, dict_to_read=players_teams)\r\n create_players_by_season(host, root, password, dict_to_read=players_seasons)",
"def get_teams_from_league(teams, league):\n\t\n\tteams_1 = []\n\tfor i in range(0,len(teams)):\n\t\tif teams[i].league == 1:\n\t\t\tteams_1.append(teams[i])\n\n\treturn teams_1",
"def get_all_fb_teams(self):\n\n all_teams = ()\n self._logger.debug(\"Getting all fb teams from database\")\n try:\n self.check_if_db_connected()\n cursor = self._db_conn.cursor()\n cursor.execute(\"SELECT team_id, team_name, time FROM team ORDER BY \\\ntime DESC\")\n teams = cursor.fetchall()\n\n for team_id, name, timestamp in teams:\n intermediate_teams = ()\n intermediate_teams = intermediate_teams + (name,)\n cursor.execute(\"SELECT player FROM player_team_xref WHERE \\\nteam = {0}\".format(team_id))\n players = cursor.fetchall()\n for player in players:\n cursor.execute(\"SELECT first_name, last_name, nickname \\\nFROM player WHERE player_id = {0}\".format(player[0]))\n first_name, last_name, nickname = cursor.fetchall()[0]\n\n intermediate_teams = intermediate_teams + (first_name,\n last_name, nickname)\n\n intermediate_teams = intermediate_teams + (timestamp.strftime('%Y-%m-%d'),)\n all_teams = all_teams + (intermediate_teams,)\n del intermediate_teams\n\n except MySQLdb.OperationalError:\n self._logger.error(\"MySQL operational error occured\")\n traceback.print_exc()\n raise exceptions.DBConnectionError(\"Cannot connect to MySQL server\")\n\n except MySQLdb.ProgrammingError:\n self._logger.error(\"MySQL programming error\")\n traceback.print_exc()\n raise exceptions.DBSyntaxError(\"MySQL syntax error\")\n\n else:\n return all_teams",
"def create_table_per_team(self) -> None:\n for team_id in self.teams:\n home = self.file.loc[(self.file[\"IdHomeTeam\"] == team_id)].reset_index(drop=True)\n home = home.rename(columns={\"FTHG\": \"FTG_asH\", \"FTR\": \"FT_RESULT\", \"HTHG\": \"HTG_asH\",\n \"HTR\": \"HT_RESULT\", \"HS\": \"Shoot_asH\", \"HST\": \"ShootTarget_asH\",\n \"HF\": \"Fouls_asH\", \"HC\": \"Corner_asH\", \"HY\": \"YellowC_asH\", \"HR\": \"RedC_asH\",\n \"FTAG\": \"FT_against_H\", \"HTAG\": \"HT_against_H\",\n \"AS\": \"Shoot_against_H\", \"AST\": \"ShootTarget_against_H\",\n \"AF\": \"Fouls_against_H\",\n \"AC\": \"Corner_against_H\", \"AR\": \"RedC_againts_H\", \"AY\": \"YellowC_against_H\"})\n home[\"nWeekHome\"] = home.index + 1\n home.drop([\"nWeekAway\", \"IdAwayTeam\"], axis=1,\n inplace=True)\n home.loc[(home[\"FT_RESULT\"] == \"H\"), \"FT_RESULT\"] = \"Winn\"\n home.loc[(home[\"FT_RESULT\"] == \"D\"), \"FT_RESULT\"] = \"Draw\"\n home.loc[(home[\"FT_RESULT\"] == \"A\"), \"FT_RESULT\"] = \"Lost\"\n home.loc[(home[\"HT_RESULT\"] == \"H\"), \"HT_RESULT\"] = \"Winn\"\n home.loc[(home[\"HT_RESULT\"] == \"D\"), \"HT_RESULT\"] = \"Draw\"\n home.loc[(home[\"HT_RESULT\"] == \"A\"), \"HT_RESULT\"] = \"Lost\"\n\n away = self.file.loc[(self.file[\"IdAwayTeam\"] == team_id)].reset_index(drop=True)\n away = away.rename(columns={\"FTAG\": \"FTG_asA\", \"FTR\": \"FT_RESULT\", \"HTAG\": \"HTG_asA\",\n \"HTR\": \"HT_RESULT\", \"AS\": \"Shoot_asA\", \"AST\": \"ShootTarget_asA\",\n \"AF\": \"Fouls_asA\", \"AC\": \"Corner_asA\", \"AY\": \"YellowC_asA\", \"AR\": \"RedC_asA\",\n \"FTHG\": \"FT_against_A\", \"HTHG\": \"HT_against_A\",\n \"HS\": \"Shoot_against_A\", \"HST\": \"ShootTarget_against_A\",\n \"HF\": \"Fouls_against_A\",\n \"HC\": \"Corner_against_A\", \"HR\": \"RedC_againts_A\", \"HY\": \"YellowC_against_A\"})\n away[\"nWeekAway\"] = away.index + 1\n away.drop([\"nWeekHome\", \"IdHomeTeam\"], axis=1,\n inplace=True)\n away.loc[(away[\"FT_RESULT\"] == \"H\"), \"FT_RESULT\"] = \"Lost\"\n away.loc[(away[\"FT_RESULT\"] == \"D\"), \"FT_RESULT\"] = \"Draw\"\n away.loc[(away[\"FT_RESULT\"] == \"A\"), \"FT_RESULT\"] = \"Winn\"\n away.loc[(away[\"HT_RESULT\"] == \"H\"), \"HT_RESULT\"] = \"Lost\"\n away.loc[(away[\"HT_RESULT\"] == \"D\"), \"HT_RESULT\"] = \"Draw\"\n away.loc[(away[\"HT_RESULT\"] == \"A\"), \"HT_RESULT\"] = \"Winn\"\n\n self.home = self.home.append(home, ignore_index=True)\n self.away = self.away.append(away, ignore_index=True)\n\n self.home[\"Date\"] = pd.to_datetime(self.home[\"Date\"], format=\"%Y-%m-%d\")\n self.away[\"Date\"] = pd.to_datetime(self.away[\"Date\"], format=\"%Y-%m-%d\")\n self.home.reset_index(drop=True)\n self.away.reset_index(drop=True)"
]
| [
"0.6818083",
"0.6252091",
"0.6219382",
"0.589217",
"0.58345926",
"0.58333796",
"0.58290184",
"0.5785899",
"0.57341504",
"0.57333916",
"0.5699606",
"0.56956434",
"0.56849265",
"0.5651164",
"0.5634886",
"0.56297773",
"0.5575925",
"0.55408275",
"0.55124295",
"0.5452181",
"0.5450211",
"0.54212576",
"0.5419695",
"0.5406048",
"0.5390301",
"0.53749067",
"0.5369157",
"0.5362793",
"0.53538185",
"0.53514194"
]
| 0.64764833 | 1 |
Initialize a dictionary that will contain the metadata. | def init_meta():
meta = {}
meta["title"] = None
meta["authors"] = []
meta["date"] = None
meta["abstract"] = None
meta["notes"] = []
return meta | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_metadata(self):\n return {}",
"def initMetadata(self):\n\n if not 'flags' in self.metadata:\n\n self.metadata['flags'] = {}\n\n if not 'uidvalidity' in self.metadata:\n\n\n self.metadata['uidvalidity'] = random.randint(1000000, 9999999)\n\n if not 'uids' in self.metadata:\n\n self.metadata['uids'] = {}\n\n if not 'uidnext' in self.metadata:\n\n self.metadata['uidnext'] = 1",
"def dict_initialise(metadata, analysistype):\n for sample in metadata:\n sample[analysistype].dnaseq = dict()\n sample[analysistype].protseq = dict()\n sample[analysistype].ntindex = dict()\n sample[analysistype].aaindex = dict()\n sample[analysistype].ntalign = dict()\n sample[analysistype].aaalign = dict()\n sample[analysistype].aaidentity = dict()\n return metadata",
"def meta_data(self) -> Dict:\n pass",
"def init_meta(self):\n meta = {}\n # Required (core)\n meta['ra'] = dict(ext=1, card='RA')\n meta['dec'] = dict(ext=1, card='DEC')\n meta['target'] = dict(ext=1, card='OBJECT')\n meta['decker'] = dict(ext=1, card='APERTURE')\n meta['dichroic'] = dict(ext=1, card='FILTER')\n meta['binning'] = dict(ext=1, card=None, default='1,1')\n\n meta['mjd'] = dict(ext=0, card=None, compound=True)\n meta['exptime'] = dict(ext=1, card='EXPTIME')\n meta['airmass'] = dict(ext=1, card='AIRMASS')\n # Extras for config and frametyping\n meta['dispname'] = dict(ext=1, card='DISPERSE')\n meta['idname'] = dict(ext=1, card='IMAGETYP')\n\n # Ingest\n self.meta = meta",
"def init_meta(self):\n meta = {}\n # Required (core)\n meta['ra'] = dict(ext=0, card='OBJRA')\n meta['dec'] = dict(ext=0, card='OBJDEC')\n meta['target'] = dict(ext=0, card='OBJECT')\n meta['decker'] = dict(ext=0, card='ALAPRTNM')\n meta['binning'] = dict(card=None, compound=True)\n\n meta['mjd'] = dict(ext=0, card=None, compound=True)\n meta['exptime'] = dict(ext=0, card='EXPTIME')\n meta['airmass'] = dict(ext=0, card='AIRMASS')\n # Extras for config and frametyping\n meta['dispname'] = dict(ext=0, card='ALGRNM')\n meta['idname'] = dict(ext=0, card='IMAGETYP')\n # Lamps\n # Use Keck/LRIS approach\n\n # Ingest\n self.meta = meta",
"def metadata(self) -> dict:\n meta = {}\n meta['filename'] = self.filename\n meta['label'] = self.label\n meta['url'] = self.url\n\n return meta",
"def _initialize_metadata(self):\n\n survey_metadata = metadata.Survey(id=\"0\")\n survey_metadata.stations.append(metadata.Station(id=\"0\"))\n survey_metadata.stations[0].runs.append(metadata.Run(id=\"0\"))\n\n return survey_metadata",
"def init_meta(self):\n self.meta = {}\n # Required (core)\n self.meta['ra'] = dict(ext=0, card='RA')\n self.meta['dec'] = dict(ext=0, card='DEC')\n self.meta['target'] = dict(ext=0, card='object')\n self.meta['idname'] = dict(ext=0, card='obsmode')\n self.meta['decker'] = dict(ext=0, card='MASKNAME')\n self.meta['binning'] = dict(card=None, compound=True)\n self.meta['detector'] = dict(ext=0, card='detector')\n self.meta['mjd'] = dict(ext=0, card='MJD-OBS')\n self.meta['exptime'] = dict(ext=0, card='EXPTIME')\n self.meta['airmass'] = dict(ext=0, card='AIRMASS')\n self.meta['dispname'] = dict(ext=0, card='GRISM')\n self.meta['datasec'] = dict(ext=0, card='DETSIZE')\n self.meta['dichroic'] = dict(ext=0, card='FILTER1')\n self.meta['instrument'] = dict(ext=0, card='INSTRUME')\n self.meta['slitwid'] = dict(card=None, compound=True)",
"def init_meta(self):\n self.meta = {}\n # Required (core)\n self.meta['ra'] = dict(ext=0, card='RA')\n self.meta['dec'] = dict(ext=0, card='DEC')\n self.meta['target'] = dict(ext=0, card='OBJECT')\n self.meta['decker'] = dict(ext=0, card=None, default='default')\n self.meta['dichroic'] = dict(ext=0, card=None, default='default')\n self.meta['binning'] = dict(ext=0, card=None, default='1,1')\n\n self.meta['mjd'] = dict(ext=0, card='ACQTIME')\n self.meta['exptime'] = dict(ext=0, card='EXPTIME')\n self.meta['airmass'] = dict(ext=0, card='AIRMASS')\n # Extras for config and frametyping\n self.meta['dispname'] = dict(ext=0, card='GRISM')\n self.meta['idname'] = dict(ext=0, card='OBSTYPE')\n self.meta['instrument'] = dict(ext=0, card='INSTRUME')",
"def get_metadata(self):\n meta_data = {}\n if self.beam_energy is not None:\n meta_data['beam_energy'] = self.beam_energy\n if self.collection_angle is not None:\n meta_data['collection_angle'] = self.collection_angle\n return meta_data",
"def init_meta(self):\n self.meta = {}\n # Required (core)\n self.meta['ra'] = dict(ext=0, card='RA')\n self.meta['dec'] = dict(ext=0, card='DEC')\n self.meta['target'] = dict(ext=0, card='object')\n self.meta['idname'] = dict(ext=0, card='obsmode')\n self.meta['decker'] = dict(ext=0, card='MASKNAME')\n self.meta['binning'] = dict(card=None, compound=True) # Uses CCDSUM\n self.meta['detector']=dict(ext=0,card='detector')\n self.meta['mjd'] = dict(ext=0, card='MJD-OBS')\n self.meta['exptime'] = dict(ext=0, card='EXPTIME')\n self.meta['airmass'] = dict(ext=0, card='AIRMASS')\n self.meta['dispname'] = dict(ext=0, card='GRISM')\n self.meta['datasec'] = dict(ext=1, card='DATASEC')\n self.meta['dichroic'] = dict(ext=0, card='FILTER1')\n self.meta['instrument'] = dict(ext=0, card='INSTRUME')",
"def _metadata(self) -> Dict[str, Any]:\n return self.__metadata",
"def metadata(self) -> dict:\n meta = {}\n meta['name'] = self.name\n meta['id'] = self.id\n meta['family'] = self.family\n \n meta['ptd_type'] = []\n meta['pos'] = []\n meta['atype'] = []\n meta['db_vect'] = []\n meta['scale'] = []\n for cp in self.parameters:\n meta['ptd_type'].append(cp.get('ptd_type', None))\n meta['pos'].append(cp.get('pos', None))\n meta['atype'].append(cp.get('atype', None))\n meta['db_vect'].append(cp.get('db_vect', None))\n meta['scale'].append(cp.get('scale', None))\n \n return meta",
"def metadata(self):\n metadata = {}\n metadata['successful'] = True\n metadata['time_information'] = {'begin': self.begin.isoformat(),\n 'end': self.end.isoformat(),\n 'elapsed': self.elapsed,\n }\n metadata['user'] = self.user\n metadata['database'] = {'name': settings.DATABASES['default']['NAME'],\n 'host': settings.DATABASES['default']['HOST'],\n }\n metadata['input_arguments'] = self.input_arguments\n center_ids = [center.center_id for center in self.centers]\n metadata['registration_centers_processed'] = sorted(center_ids)\n metadata['total_pdf_file_count'] = self.n_total_files\n metadata['total_pdf_page_count'] = self.n_total_pages\n metadata['total_pdf_byte_count'] = self.n_total_bytes\n metadata['files'] = self.fileinfo\n metadata['offices'] = [model_to_dict(office) for office in self.offices.values()]\n\n return metadata",
"def extract_metadata(self):\n if self.is_generatable_file:\n logger.debug(\"Converting collected details to dict..\")\n if self.metadata_collector:\n self.metadata = MetadataToDict(\n metadata_collector=self.metadata_collector,\n file_import=self.file_import,\n )\n self.metadata.build_integration_dict()",
"def initial_metadata(self):\n raise NotImplementedError()",
"def init_meta(self):\n self.meta = {}\n # Required (core)\n self.meta['ra'] = dict(ext=0, card='RA', required_ftypes=['science', 'standard'])\n self.meta['dec'] = dict(ext=0, card='DEC', required_ftypes=['science', 'standard'])\n self.meta['target'] = dict(ext=0, card='OBJECT')\n self.meta['decker'] = dict(ext=0, card='DECKNAME')\n self.meta['binning'] = dict(card=None, compound=True)\n self.meta['mjd'] = dict(ext=0, card='MJD')\n # This may depend on the old/new detector\n self.meta['exptime'] = dict(ext=0, card='ELAPTIME')\n self.meta['airmass'] = dict(ext=0, card='AIRMASS')\n #self.meta['dispname'] = dict(ext=0, card='ECHNAME')\n # Extras for config and frametyping\n self.meta['hatch'] = dict(ext=0, card='HATOPEN')\n self.meta['dispname'] = dict(ext=0, card='XDISPERS')\n self.meta['filter1'] = dict(ext=0, card='FIL1NAME')\n self.meta['echangle'] = dict(ext=0, card='ECHANGL', rtol=1e-3)\n self.meta['xdangle'] = dict(ext=0, card='XDANGL', rtol=1e-3)\n# self.meta['idname'] = dict(ext=0, card='IMAGETYP')\n # NOTE: This is the native keyword. IMAGETYP is from KOA.\n self.meta['idname'] = dict(ext=0, card='OBSTYPE')\n self.meta['frameno'] = dict(ext=0, card='FRAMENO')\n self.meta['instrument'] = dict(ext=0, card='INSTRUME')",
"def mof_metadata(self):\n\n arg_dict = collections.defaultdict(dict)\n\n arg_dict[self.name]['type'] = self.arg_type\n arg_dict[self.name]['qualifiers'] = self.qualifiers\n arg_dict[self.name]['valuemap'] = self.valuemap\n\n return dict(arg_dict)",
"def get_metadata(self):\n metadata = {}\n for k in self.metadata_keys:\n metadata[k] = copy.copy(getattr(self, k))\n return metadata",
"def _meta_dict(self, node):\n meta = {n: self._text(node, n) for n in ('source', 'date', 'key')}\n meta.update(self.infon_dict(node))\n return meta",
"def __metadata__(self) -> defaultdict:\n\n metadata:defaultdict = defaultdict(lambda:False)\n for key in self.md.Meta: # Create defaultdict out of metadata\n if type(self.md.Meta[key]) == list:\n metadata[key] = self.md.Meta[key][0]\n else:\n metadata[key] = self.md.Meta[key]\n return metadata",
"def MakeMetadataDict(self):\n sub_dict = {}\n for attr_name in Subscription._JSON_ATTRIBUTES:\n util.SetIfNotNone(sub_dict, attr_name, getattr(self, attr_name, None))\n if self.extra_info:\n sub_dict['extra_info'] = deepcopy(self.extra_info)\n return sub_dict",
"def init_metadata(self, parent):\n parent_metadata = parent.get('metadata', {})\n return {\n 'started': utcnow(),\n 'dependencies_met': True,\n 'engine': self.ident,\n 'is_broadcast': parent_metadata.get('is_broadcast', False),\n 'is_coalescing': parent_metadata.get('is_coalescing', False),\n 'original_msg_id': parent_metadata.get('original_msg_id', ''),\n }",
"def __init__(self):\n self.metadata = {}\n self.geometry = {'array': None, \n 'geom': None, \n 'wkt': None}",
"def METADATA(self) -> Dict[str, Any]:\n return self._metadata",
"def _metadata_map():\n return {\n 'date_added': 'dateAdded',\n 'dns_active': 'dnsActive',\n 'last_modified': 'lastModified',\n 'private_flag': 'privateFlag',\n 'whois_active': 'whoisActive',\n 'key_name': 'Key Name',\n 'value_type': 'Value Type',\n 'value_name': 'Value Name',\n 'block': 'Block',\n 'mutex': 'Mutex',\n 'as_number': 'AS Number',\n 'hostname': 'hostName',\n }",
"def metadata(self, metadata):\n return Metadata(metadata)",
"def get_data_to_create_object(self):\n return {}",
"def _metadata(self):\n meta = super()._metadata\n meta.update({\n \"name\": self.name,\n \"lead_in_time\": self.lead_in_time,\n \"amplification\": self.amplification,\n \"amplifier_clipping\": self.amplifier_clipping,\n \"power_threshold\": self.power_threshold,\n })\n return meta"
]
| [
"0.75047845",
"0.7474656",
"0.7414356",
"0.7339264",
"0.7337838",
"0.73160756",
"0.7288747",
"0.71932274",
"0.7176863",
"0.715695",
"0.71429217",
"0.7088457",
"0.7064578",
"0.7053004",
"0.6986673",
"0.6980293",
"0.6911412",
"0.6873568",
"0.6837764",
"0.682899",
"0.671761",
"0.67158175",
"0.67061675",
"0.6705935",
"0.6675388",
"0.66559803",
"0.664584",
"0.6606048",
"0.6563929",
"0.6556934"
]
| 0.7806081 | 0 |
Match URL against known URL formats for this repository of papers (e.g. URL for papers and/or landing pages). Return match object. | def match_url(self, url):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _match_url(self, _url):\n\n regex = re.compile(\n r'^(?:http|ftp)s?://' # http:// or https://\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|' # domain\n r'localhost|' # localhost\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})' # ...or ip\n r'(?::\\d+)?' # optional port\n r'(?:/?|[/?]\\S+)$', re.IGNORECASE)\n\n if re.match(regex, _url):\n return True\n else:\n return False",
"def validate_url(cls, url: str) -> Optional[Match[str]]:\n match = re.match(cls._VALID_URL, url)\n return match",
"def _match_url(parsed_url: ParsedUrl, rules: Sequence[str]) -> Mapping[str, Any]:\n # We use the routing capabilities of werkzeug to match the URL path\n urls = werkzeug.routing.Map(\n [werkzeug.routing.Rule(rule) for rule in rules],\n strict_slashes=False,\n ).bind(parsed_url.hostname)\n try:\n _, components = urls.match(parsed_url.path)\n except werkzeug.exceptions.HTTPException:\n raise ValueError(f\"The provided {parsed_url.hostname} URL is not valid\")\n return components",
"def from_url(self, url: str) -> Optional[str]:\n parsed = urlparse.urlparse(url)\n if parsed.scheme not in {'http', 'https', ''}:\n return None\n\n path = parsed.path\n if parsed.query:\n path += '?' + parsed.query\n\n # Discard $1 and everything after it\n path, *_ = path.partition('$1')\n\n for domain in self.domains:\n if domain in parsed.netloc:\n break\n else:\n return None\n\n matched_sites = set()\n for code in chain(self.codes,\n getattr(self, 'test_codes', ()),\n getattr(self, 'closed_wikis', ()),\n ):\n if self._hostname(code)[1] == parsed.netloc:\n # Use the code and family instead of the url\n # This is only creating a Site instance if domain matches\n site = pywikibot.Site(code, self.name)\n pywikibot.log(f'Found candidate {site}')\n\n for iw_url in site._interwiki_urls():\n iw_url, *_ = iw_url.partition('{}')\n if path.startswith(iw_url):\n matched_sites.add(site)\n break\n\n if len(matched_sites) == 1:\n return matched_sites.pop().code\n\n if not matched_sites:\n return None\n\n raise RuntimeError(\n 'Found multiple matches for URL \"{}\": {}'\n .format(url, ', '.join(str(s) for s in matched_sites)))",
"def _parse_url(url: str) -> Optional[str]:\n match = re.search(r\"pastecord.com(?:/raw|/documents)?/(\\w+)(?:\\.\\w+)?\", url)\n if match is None:\n return None\n return match.group(1)",
"def _match(cls, url, **kwargs):\n return url.scheme.startswith('http')",
"def callback_from_url(self, url):\n if re.search(\"https?://mebook.cc/page/.*\", url):\n return self.parse_list_page\n\n if re.search(\"https?://mebook.cc/date/.*\", url):\n return self.parse_archive_page\n\n if re.search(\"https?://mebook.cc/category/.*$\", url):\n return self.parse_category_page\n\n if re.search(\"https?://mebook.cc/[^/]+.html$\", url):\n return self.parse_book_page\n\n if re.search(\"https?://mebook.cc/download.php?id=.*$\", url):\n return self.parse_download_page",
"def validaURL(url: AnyStr) -> bool:\n\n return re.compile(patternURL).search(url) != None # Linea 1",
"def url_validator(arg):\n #ะฟะธัะตะผ ะบะพัััะปั, ะฝะฐ ัะปััะฐะน ะตัะปะธ ะธะผะตะฝะฝะฐั ัััะปะบะฐ ัะพะดะตัะถะธั ะฝะฐัะฐะปะพ ะฒะธะดะฐ club_\n if arg.find('https://vk.com/club_') != -1 or arg.find('https://vk.com/club-') != -1:\n return {\"type\": 'named-link', \"id\": arg.split('/')[-1]}\n else:\n arg = arg.lower()\n\n # If url looks like http(s)://vk.com/named-link\n symbolic_id = TXT_ID_REGEXP.match(arg)\n if symbolic_id:\n url = symbolic_id.groupdict()\n url[\"type\"] = 'named-link'\n return url\n\n # If url looks like http[s]://vk.com/id123456\n numeric_id = NUM_ID_REGEXP.match(arg)\n if numeric_id:\n url = numeric_id.groupdict()\n return url\n\n #raise argparse.ArgumentTypeError(\"{} - invalid url address\".format(arg))",
"def url(regex, view):\n return RegexPattern(regex, view)",
"def match_url(url, domainlist):\n if not url:\n return False\n return match_host(url_split(url)[1], domainlist)",
"def posse_post_discovery(original, regex):\n if not hasattr(regex, 'match'):\n regex = re.compile(regex)\n\n if regex.match(original):\n return original\n\n try:\n d = mf2py.parse(url=original)\n urls = d['rels'].get('syndication', [])\n for item in d['items']:\n if 'h-entry' in item['type']:\n urls += item['properties'].get('syndication', [])\n for url in urls:\n if regex.match(url):\n return url\n except HTTPError:\n current_app.logger.exception('Could not fetch original')\n except SSLError:\n current_app.logger.exception('SSL Error')\n except Exception as e:\n current_app.logger.exception('MF2 Parser error: %s', e)",
"def build_url_db(self, url, pattern, ext, match_type='href',\n rename=None, date_type=\"Monthly\"):\n\n self.get_links(url)\n if match_type == 'href':\n self.pattern_files = [x for x in self.all_links if pattern in x['href']]\n elif match_type == 'text':\n self.pattern_files = [x for x in self.all_links if pattern in x.text]\n\n self.build_url_dates(pattern, ext, rename=rename, date_type=date_type)\n\n return self",
"def format_link(self):\n self.url = sys.argv[1]\n video_link_regex = re.compile(\n r'(https?://)?(www\\.)?youtube\\.(com|nl)/watch\\?v=([\\w-]+)')\n playlist_link_regex = re.compile(\n r'(https?://)?(www\\.)?youtube\\.(com|nl)/playlist\\?list=([\\w-]+)')\n # check if it's a single video link\n if video_link_regex.search(self.url):\n result_regex = video_link_regex.search(self.url)\n self. url = result_regex.group().split('&')[0]\n self.show_formats()\n # check if it's a playlist link\n elif playlist_link_regex.search(self.url):\n logging. debug('Yes it a playlist')\n result_regex = playlist_link_regex.search(self.url)\n playlist_link = result_regex.group().split('&')[0]\n self. get_videos_in_playlist()\n # check if link is not a youtube link\n else:\n logging.debug('Not even a yt link')\n sys. exit()",
"def _HandleUrl(self, input_line, match, output_stream):\n self._formatting_handler.HandleLink(input_line, output_stream, match, None)",
"def ignores(self, url, **kwargs):\n\t\tpu = re.escape(kwargs.get('primary_url', ''))\n\t\tph = re.escape(kwargs.get('primary_netloc', ''))\n\n\t\tfor pattern in self.patterns:\n\t\t\tregexp = pattern\n\t\t\tif '{' in regexp:\n\t\t\t\tregexp = regexp.replace('{primary_url}', pu).replace('{primary_netloc}', ph)\n\t\t\ttry:\n\t\t\t\tif re.search(regexp, url):\n\t\t\t\t\treturn pattern\n\t\t\texcept re.error as error:\n\t\t\t\tprint('Pattern %s is invalid (error: %s). Ignored.' % (pattern, str(error)), file=sys.stderr)\n\n\t\treturn False",
"def contains_url(self, string):\n return re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', string)",
"def match_url(self, url, options=None):\n options = options or {}\n for optname in self.options:\n if optname == 'match-case': # TODO\n continue\n\n if optname not in options:\n raise ValueError(\"Rule requires option %s\" % optname)\n\n if optname == 'domain':\n if not self._domain_matches(options['domain']):\n return False\n continue\n\n if options[optname] != self.options[optname]:\n return False\n\n return self._url_matches(url)",
"def check_url_format(self):\n\n m = re.match(r\"^http://www\\.flipkart\\.com/.*/p/.*$\", self.product_page_url)\n\n return not not m",
"def target_url(self, url):\n url_parse = urlparse.urlparse(url)\n patten = re.compile(self.url_patten)\n if patten.match(url_parse.path):\n return True\n else:\n return False",
"def get_abs_pdf_urls(self, url):\n if re.match(self.re_abs, url):\n abs_url = url\n doi = self._get_doi(abs_url)\n pdf_url = \"http://citeseerx.ist.psu.edu/viewdoc/download?doi={doi}&rep=rep1&type=pdf\".format(\n doi=doi\n )\n elif re.match(self.re_pdf, url):\n pdf_url = url\n doi = self._get_doi(pdf_url)\n abs_url = \"http://citeseerx.ist.psu.edu/viewdoc/summary?doi={doi}\".format(\n doi=doi\n )\n else:\n raise URLResolutionError(\"CiteSeerX\", url)\n return abs_url, pdf_url",
"def filter_url_parse_full_links(match):\n url = html.unescape(match.group(1))\n url = html.escape(url)\n punctuation = match.group(2)\n caption = filter_url_trim(url, filter_url_length)\n return '<a href=\"' + url + '\">' + caption + '</a>' + punctuation",
"def url_validator(arg):\n arg = arg.lower()\n\n # If url looks like http[s]://vk.com/domain\n symbolic_id = constants.TXT_ID_REGEXP.match(arg)\n if symbolic_id:\n url = symbolic_id.groupdict()\n url[\"type\"] = \"domain\"\n return url\n\n # If url looks like http[s]://vk.com/id123456\n numeric_id = constants.NUM_ID_REGEXP.match(arg)\n if numeric_id:\n url = numeric_id.groupdict()\n return url\n\n raise argparse.ArgumentTypeError(\"{} - invalid url address\".format(arg))",
"def search_against_url(request, url):\n\n (scheme, _1, _2, _3, _4, _5) = urlparse(url)\n if scheme not in ('http', 'https'):\n return search_page(request, error='The URL must begin with either http or https.')\n\n sfm = from_django_conf('sidebyside')\n try:\n (title, text) = fetch_and_clean(url)\n except requests.exceptions.Timeout:\n return search_page(request, error=\"Sorry, that news article couldn't be retrieved.\")\n\n try:\n sfm_results = sfm.search(text=text, title=title, url=url)\n drop_silly_results(sfm_results)\n sort_by_coverage(sfm_results)\n\n\n #if they submit a url, don't return the exact same url in the results\n for r in sfm_results['documents']['rows']:\n if r.get('url') == url:\n sfm_results['documents']['rows'].remove(r)\n\n if sfm_results.has_key('text'): text = sfm_results['text']\n else: text = ''\n\n if sfm_results.has_key('title'): title = sfm_results['title']\n else: title='No Title'\n\n return search_result_page(request, sfm_results, text,\n source_title=title, source_url=url)\n except superfastmatch.SuperFastMatchError, e:\n if e.status == httplib.NOT_FOUND:\n raise HttpResponse('No such article {0}'.format(url))\n elif settings.DEBUG == True:\n return HttpResponse(e.response[1], status=e.response[0])\n else:\n raise",
"def handle_url(url, session, res):\n print(\"Parsing\", url, file=sys.stderr)\n try:\n data, baseUrl = getPageContent(url, session)\n except IOError as msg:\n print(\"ERROR:\", msg, file=sys.stderr)\n return\n for match in url_matcher.finditer(data):\n url = match.group(1)\n name = unescape(match.group(2))\n name = asciify(name.replace('&', 'And').replace('@', 'At'))\n name = capfirst(name)\n if name in exclude_comics:\n continue\n if contains_case_insensitive(res, name):\n # we cannot handle two comics that only differ in case\n print(\"INFO: skipping possible duplicate\", repr(name), file=sys.stderr)\n continue\n res[name] = url",
"def test_regex_advanced_url_regex(self):\n val = DwcaValidator(yaml.load(self.yaml_regex, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'issue_url': \"https://github.com/inbo/whip/issues/4\"}\n self.assertTrue(val.validate(document))\n document = {'issue_url': r\"https:\\\\github.com\\inbo\\whip\\issues\\4\"}\n self.assertFalse(val.validate(document))",
"def scan_line(line):\n URL = None\n type = None\n # Check for direct URL first\n match = re.search(MATCH_URL, line)\n if match:\n return match.group(0)\n # Check for reference URL \n match = re.match(MATCH_REF_URL, line)\n if not match:\n return None\n # So, it's a reference, scan doc for corresponding URL\n return expand_reference(match.group(1))",
"def to_p(url):\n if type(url) != str:\n return\n match = config.p_re.match(url.strip())\n if match:\n return match.group(config.p_re_group_id)",
"def file_url(self, url):\n return self.is_regex_url(url, self.is_file_regex)",
"def parsed_url(self):\n if isinstance(self.url_or_urllib_parseresult, urllib.parse.ParseResult):\n parsed_url = self.url_or_urllib_parseresult\n else:\n parsed_url = urllib.parse.urlparse(self.url_or_urllib_parseresult)\n return urllib.parse.ParseResult(\n scheme=parsed_url.scheme,\n netloc=parsed_url.netloc,\n path='', params='', query='', fragment='')"
]
| [
"0.6157265",
"0.594759",
"0.5917867",
"0.58787566",
"0.58424425",
"0.5786022",
"0.56746024",
"0.56278706",
"0.5603191",
"0.5554448",
"0.5544937",
"0.5531292",
"0.54624885",
"0.5449005",
"0.5389851",
"0.5389752",
"0.53753614",
"0.5351463",
"0.5350919",
"0.53164864",
"0.5315264",
"0.5264032",
"0.525178",
"0.523936",
"0.5237044",
"0.5232619",
"0.5229115",
"0.5224958",
"0.5202457",
"0.5194556"
]
| 0.6763665 | 0 |
Return normalized URL, assuming it matches a known URL format for this repository of papers. Return normalized URL. | def normalize_url(self, url):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_normalized_url(url):\r\n scheme, netloc, path, params, query, fragment = urlparse(url)\r\n\r\n # Exclude default port numbers.\r\n if scheme == 'http' and netloc[-3:] == ':80':\r\n netloc = netloc[:-3]\r\n elif scheme == 'https' and netloc[-4:] == ':443':\r\n netloc = netloc[:-4]\r\n if scheme not in ('http', 'https'):\r\n raise ValueError(\"Unsupported URL %s (%s).\" % (url, scheme))\r\n\r\n # Normalized URL excludes params, query, and fragment.\r\n return urlunparse((scheme, netloc, path, None, None, None))",
"def _proper_url(self, url):\n if self.base_url not in url:\n url = self.base_url + url\n url = re.sub(r'(?<!https:)//', '/', url)\n if not url.endswith('/') and '?' not in url:\n url = url + '/'\n if url.endswith('?'):\n url = url[:-1]\n return url",
"def normalize_url(url):\n if not url.startswith((\"git+\", \"hg+\")):\n return url\n return url[4:]",
"def normalize_url(url):\n # print(url)\n if not url.startswith('http://') and not url.startswith('https://'):\n return 'https://{}/{}'.format(zone_name, url.replace('//', '/'))\n return url",
"def urlpath(self, url):\n\t\t# remove schema + hostname\n\t\turl = re.sub('^[^:]*://[^/]+', '/', url)\n\n\t\treturn self.canonicalize(url)",
"def _transform_github_url(self):\n self.url = (\n self.url\n .replace('/blob/', '/')\n .replace(self.GITHUB_NETLOC, self.GITHUB_RAW_NETLOC)\n )",
"def normalize(seed_url, link):\n link, _ = urldefrag(link) # remove hash to avoid duplicates\n return urljoin(seed_url, link)",
"def normalize_url(url: str) -> str:\n parts = urlparse(url)\n\n path = quote(parts.path)\n while '//' in path:\n path = path.replace(\"//\", \"/\")\n\n return urlunparse(parts._replace(path=path))",
"def norm(url):\n url = _unicode(url) # operate on unicode strings\n url_tuple = urlparse(url)\n normalized_tuple = norm_tuple(*url_tuple)\n return urlunparse(normalized_tuple).replace(' ','%20')",
"def convert_single_relation_url_to_simplified_format(relation_url):\n relation_url = relation_url.strip()\n prefix = 'www.freebase.com/'\n if not relation_url.startswith(prefix):\n raise Exception(\"Invalid format of relation '{}', expected prefix '{}'\".format(relation_url, prefix))\n return relation_url[len(prefix):].replace('/', '.').strip()",
"def clean_url_part(self):\n complete_url = \"{url_prefix}{url_part}\".format(\n url_prefix=self.URL_PREFIX,\n url_part=self.cleaned_data['url_part']\n )\n URLValidator(complete_url)\n self.cleaned_data['repo_url'] = complete_url\n return self.cleaned_data['url_part']",
"def get_scraper_url(self):\r\n \r\n return self.reformat_scraper_url()",
"def normalize_url(url):\n parse = urlparse(url)\n\n # netloc should be lowercase\n netloc = parse.netloc.lower()\n if parse.scheme == \"http\":\n if netloc.endswith(\":80\"):\n netloc = netloc[:-3]\n\n elif parse.scheme == \"https\" and netloc.endswith(\":443\"):\n netloc = netloc[:-4]\n\n # add a '/' at the end of the netloc if there in no path\n if not parse.path:\n netloc = netloc + \"/\"\n\n return \"{}://{}{}\".format(parse.scheme, netloc, parse.path)",
"def sanitize_url(self, url):\r\n if not self.markdown.safeMode:\r\n # Return immediately bipassing parsing.\r\n return url\r\n \r\n try:\r\n scheme, netloc, path, params, query, fragment = url = urlparse(url)\r\n except ValueError:\r\n # Bad url - so bad it couldn't be parsed.\r\n return ''\r\n \r\n locless_schemes = ['', 'mailto', 'news']\r\n if netloc == '' and scheme not in locless_schemes:\r\n # This fails regardless of anything else. \r\n # Return immediately to save additional proccessing\r\n return ''\r\n\r\n for part in url[2:]:\r\n if \":\" in part:\r\n # Not a safe url\r\n return ''\r\n\r\n # Url passes all tests. Return url as-is.\r\n return urlunparse(url)",
"def resolved_url(self):\n # '{year}/{release}-Year/csv_{record_type}(state}.zip'\n us = self.url_proto.format(year=self._year, release=self._release,\n record_type=self.record_type.lower(), state = self._state.lower())\n\n return parse_app_url(us)",
"def getFilteredUrl(self, url):\n url = url.split('#')[0]\n if url.startswith('/wiki'):\n return ('https://en.wikipedia.org' + url)\n if 'en.wikipedia.org/wiki/' not in url:\n return ('https://en.wikipedia.org/wiki' + url)\n return url",
"def CanonicalUrl(self, u:str)->str:\n return u",
"def format_internal_url(url):\n\n url = url.split('\"')[-2]\n\n if not url.startswith('https:'):\n url = (\n 'https://medium.com{}'.format(url) if not url.startswith('//medium.com')\n else 'https:{}'.format(url))\n\n return url",
"def build_url(self):\n url = requests.utils.requote_uri(\n self.torrent_page + self.string_search)\n if self.page == '1337x':\n return(url + '/1/')\n elif self.page == 'limetorrents':\n return(url + '/')\n else:\n return(url)",
"def normalize_cdmi_url(self, path):\n # Turn URL path into OS path for manipulation\n mypath = url2pathname(path)\n if not os.path.isabs(mypath):\n mypath = os.path.join(url2pathname(self.pwd()), mypath)\n # normalize path\n mypath = os.path.normpath(mypath)\n if path.endswith(\"/\") and not mypath.endswith(\"/\"):\n mypath += \"/\"\n url = self.cdmi_url + pathname2url(mypath)\n return url",
"def formatURL(self, url):\n pattern = r'(imdb\\.com\\/title\\/(.*/))'\n urls = re.findall(pattern, url)\n urls = urls[0]\n new_url = urls[0]\n new_url = \"https://www.\" + new_url\n title_code = urls[1].replace(\"/\", \"\")\n return new_url",
"def handle_url(self, url):\n parse = urlparse.urlparse(url, \"http\")\n # relative url path\n if not parse.netloc:\n parse = urlparse.urlparse(\n urlparse.urljoin(\n self.source_url,\n parse.path))\n return urlparse.urlunparse(parse)",
"def normalize_url(node):\n if not node:\n node = DEFAULT_NODE\n elif '://' not in node:\n node = '//{}'.format(node)\n parts = urlparse(node, scheme='http', allow_fragments=False)\n port = parts.port if parts.port else _get_default_port(parts.scheme)\n netloc = '{}:{}'.format(parts.hostname, port)\n return urlunparse((parts.scheme, netloc, parts.path, '', '', ''))",
"def url_norm(url, encoding=None, strip=False, lowercase_path=False, remove_fragment=False):\n\n if strip:\n url = url.strip()\n\n if isinstance(url, unicode):\n # try to decode the URL to ascii since urllib.unquote()\n # handles non-unicode strings differently\n try:\n url = url.encode('ascii')\n except UnicodeEncodeError:\n pass\n encode_unicode = True\n else:\n encode_unicode = False\n urlparts = list(urlparse.urlsplit(url))\n\n #fix missing scheme\n if not urlparts[0] or not urlparts[1]:\n urlparts = list(fix_missing_scheme(url, urlparts))\n elif urlparts[0] not in default_scheme_for_port:\n # Todo: find the scheme with the min edit distance\n pass\n\n # scheme\n if not http_scheme_pattern.match(urlparts[0]):\n raise InvalidUrl(url)\n\n urlparts[0] = urllib.unquote(urlparts[0]).lower()\n # host (with path or query side effects)\n is_idn = url_fix_host(urlparts)\n # query\n urlparts[3] = url_parse_query(urlparts[3], encoding=encoding)\n if urlparts[0] in urlparse.uses_relative:\n # URL has a hierarchical path we should norm\n if not urlparts[2]:\n # Empty path is allowed if both query and fragment are also empty.\n # Note that in relative links, urlparts[0] might be empty.\n # In this case, do not make any assumptions.\n if urlparts[0] and (urlparts[3] or urlparts[4]):\n urlparts[2] = '/'\n else:\n # fix redundant path parts\n urlparts[2] = collapse_segments(urlparts[2])\n if not remove_fragment:\n # anchor\n urlparts[4] = urllib.unquote(urlparts[4])\n # quote parts again\n urlparts[0] = url_quote_part(urlparts[0], encoding=encoding) # scheme\n urlparts[1] = url_quote_part(urlparts[1], safechars='@:', encoding=encoding) # host\n urlparts[2] = url_quote_part(urlparts[2], safechars=_nopathquote_chars, encoding=encoding) # path\n\n if lowercase_path:\n urlparts[2] = urlparts[2].lower()\n\n if remove_fragment:\n urlparts[4] = ''\n else:\n urlparts[4] = url_quote_part(urlparts[4], encoding=encoding) # anchor\n\n if not urlparts[2]:\n urlparts[2] = '/'\n\n res = urlunsplit(urlparts)\n\n if encode_unicode:\n res = unicode(res)\n return res, is_idn",
"def normalize_url(link_url, page_url):\n # Strip off the file name from the current page's URL.\n page_path = os.path.dirname(page_url)\n # Join (concatenate) the current page's URL path to the new link.\n joined_url = os.path.join(page_path, link_url)\n # Normalize the resulting path (deal with relative folder references).\n normalized_url = os.path.normpath(joined_url)\n # Return the result, replacing backslashes with slashes.\n return normalized_url.replace(\"\\\\\", \"/\")",
"def url(self):\n scheme, netloc, path, query, fragment = six.moves.urllib.parse.urlsplit(self.baseurl)\n url = six.moves.urllib.parse.urlunsplit((\n scheme, netloc, path + '.dods',\n self.id + hyperslab(self.slice) + '&' +\n '&'.join(self.selection), fragment)).rstrip('&')\n\n return url",
"def sanitize_url(url, require_scheme = False):\r\n if not url or ' ' in url:\r\n return\r\n\r\n url = url.strip()\r\n if url.lower() == 'self':\r\n return url\r\n\r\n u = urlparse(url)\r\n # first pass: make sure a scheme has been specified\r\n if not require_scheme and not u.scheme:\r\n url = 'http://' + url\r\n u = urlparse(url)\r\n\r\n if (u.scheme and u.scheme in valid_schemes\r\n and u.hostname and len(u.hostname) < 255\r\n and '%' not in u.netloc):\r\n return url",
"def cleanmatomo_url(self):\n self.matomo_url = re.sub(r\"/\\/$/\", \"\", self.matomo_url) # Cuts \"/\"\n\n if re.match(r\"^http://\", self.matomo_url): # replace it to \"https://\"\n self.matomo_url = re.sub(\"^http://\", \"\", self.matomo_url)\n self.matomo_url = self.protocol + self.matomo_url\n elif not bool(re.match(\"^https://\", self.matomo_url)): # check for \"https://\" and set it\n self.matomo_url = self.protocol + self.matomo_url",
"def _format_url(s):\n return u'%s%s\\n' % (BASE_URL, s.get_absolute_url())",
"def format_ha_url(self, url):\n is_relative = not url.startswith(\"http\")\n if not is_relative:\n return url\n elif is_relative and self.ha_url is None:\n raise ValueError(\"ha_url must be specified when using relative url for photo_attribute.\") \n else:\n return urljoin(self.ha_url, url)"
]
| [
"0.6855667",
"0.68432397",
"0.6735049",
"0.66331047",
"0.6608784",
"0.6575851",
"0.65584445",
"0.6552749",
"0.6549481",
"0.6537265",
"0.64769214",
"0.6430751",
"0.63981295",
"0.6336849",
"0.63184595",
"0.62799007",
"0.62568545",
"0.6245693",
"0.6244802",
"0.6235218",
"0.6233857",
"0.6229935",
"0.6216807",
"0.62116903",
"0.62001675",
"0.61969936",
"0.6190313",
"0.6184358",
"0.6171073",
"0.6147686"
]
| 0.7057887 | 0 |
Entry point for starting an HTTP git server. | def main(config=None):
init = InitRepoPath(config)
listen_address, port = init.get_listen_address()
backend = DictBackend(init.get_backends())
app = make_wsgi_chain(backend)
server = make_server(listen_address, port, app,
handler_class=WSGIRequestHandlerLogger,
server_class=WSGIServerLogger)
logger.info('Listening for HTTP connections on %s:%d',
listen_address, port)
server.serve_forever() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n host = ''\n port = 8088\n HTTPServer((host, port), HandleRequests).serve_forever()",
"def git_server():\n log('Instalando git', yellow)\n sudo('apt-get -y install git')",
"def main():\r\n LOG.info('Starting server build.')\r\n web.run_app(init_app(),\r\n host=os.environ.get('APP_HOST', CONFIG.registry.get('app_host', '0.0.0.0')),\r\n port=int(os.environ.get('APP_PORT', CONFIG.registry.get('app_port', 8080))),\r\n shutdown_timeout=0,\r\n ssl_context=application_security())",
"def main():\n tornado.options.parse_command_line()\n ioloop = tornado.ioloop.IOLoop.instance()\n http_server = tornado.httpserver.HTTPServer(App())\n http_server.listen(options.port)\n tornado.autoreload.start()\n ioloop.start()",
"def main():\n signal(SIGINT, shutdown_handler)\n parser = ArgumentParser()\n parser.add_argument(\"-p\", \"--port\", help=\"The port to run the server on (Default: 8080)\",\n type=int, default=8080, required=False)\n parser.add_argument(\"-d\", \"--dir\", help=\"The directory to host (Default: current directory)\",\n type=str, default=os.getcwd(), required=False)\n parser.add_argument(\n \"-6\", \"--ipv6\", help=\"Use IPv6 instead of IPv4\", action='store_true')\n args = parser.parse_args()\n\n httpd = get_threaded_server(\n port=args.port, serve_path=args.dir, ipv6=args.ipv6)\n\n logging.info(\"Serving %s at localhost:%d via IPv%d...\" %\n (args.dir, args.port, 6 if args.ipv6 else 4))\n Thread(target=run_server, name=\"threaded_http_server\", kwargs={\n \"server\": httpd, \"sema\": runSema}).start()\n while not runSema.acquire(True, 0.25):\n pass\n logging.info(\"Shutting down\")\n httpd.server_close()",
"def main():\n try:\n http_server = WSGIServer(('0.0.0.0', 8080),\n app,\n log=logging,\n error_log=logging)\n\n http_server.serve_forever()\n except Exception as exc:\n logger.error(exc.message)\n logger.exception(traceback.format_exc())\n finally:\n # get last entry and insert build appended if not completed\n # Do something here\n pass",
"def main():\n parser = argparse.ArgumentParser(description=DESCRIPTION)\n parser.add_argument(\n \"--port\", type=int, default=9090, help=\"The port number to listen to.\",\n )\n parser.add_argument(\"--settings\", help=\"Full path to settings file.\", required=True)\n parser.add_argument(\n \"--ssl_context\",\n help=\"A key and certificate file pair to run the server in HTTPS mode.\",\n nargs=2,\n )\n\n args = parser.parse_args()\n\n keyfile = None\n certfile = None\n if args.ssl_context:\n keyfile, certfile = args.ssl_context\n\n run_server(\n port=args.port, settings=args.settings, keyfile=keyfile, certfile=certfile,\n )",
"def main():\n try:\n port = 8080\n ip = '0.0.0.0'\n http_server = WSGIServer((ip, port),\n app,\n log=logging,\n error_log=logging,\n )\n print(\"Server started at: {0}:{1}\".format(ip, port))\n http_server.serve_forever()\n except Exception as exc:\n logger.error(exc.message)\n logger.exception(traceback.format_exc())\n finally:\n # Do something here\n pass",
"def webserver_start():\n run(_webserver_command())",
"def run():\n app = Application()\n #app.sentry_client = AsyncSentryClient(app.settings['sentry_url'])\n http_server = HTTPServer(app, xheaders=True)\n http_server.listen(options.port)\n print('Running on port %d' % options.port)",
"def main():\n print(\"\\n\\n\")\n print(\"*\" * 50)\n print(f\"[LOG] Printing command line arguments [{', '.join(sys.argv)}]\")\n check_file_name()\n print(\"*\" * 50)\n #http_request_pipeline(123,\"HEAD / HTTP/1.0\\r\\nHost: www.google.com\\r\\n\\r\\n\")\n # This argument is optional, defaults to 18888\n proxy_port_number = get_arg(1, 18888)\n entry_point(proxy_port_number)",
"def http(c, path=local.http_path, port=local.http_port):\r\n c = conn(c)\r\n print(\"make http repo on {}, path [{}]\".format(c.host, path))\r\n\r\n \"\"\" ๅๅค\r\n \"\"\"\r\n system.install(c, 'httpd createrepo')\r\n c.run('mkdir -p {path}'.format(path=path))\r\n\r\n \"\"\" ้
็ฝฎ\r\n \"\"\"\r\n c.run('''\r\n cd {home}; mkdir -p save\r\n cp -f conf/httpd.conf save\r\n mv conf.d/welcome.conf save\r\n rm conf.d/local.conf -rf'''.format(home=local.http_home))\r\n\r\n c.run('''cat << EOF > {host}\r\n<VirtualHost *:{port}>\r\n DocumentRoot \"{path}\"\r\n <Directory \"{path}\">\r\n Options Indexes FollowSymLinks\r\n AllowOverride None\r\n Require all granted\r\n </Directory>\r\n</VirtualHost>\r\nEOF'''.format(host=local.http_host, path=path, port=port))\r\n\r\n if port != local.http_port:\r\n sed.append(hosts.conn(2), 'Listen {port}'.format(port=port), 'Listen 80', local.http_conf)\r\n print(\"set http port [{}]\".format(port))\r\n\r\n \"\"\" ้
็ฝฎ๏ผ\r\n root pathไธ่ฆ้
็ฝฎๅจ /tmpไธ๏ผๆ ๆณ่ฏๅซ\r\n httpd -t \r\n \"\"\"\r\n\r\n if globing.invoke:\r\n c.run('''cat << EOF > /start.sh\r\n#!/bin/bash\r\necho \"start httpd ... [`date`]\"\r\n\r\n#mkdir -p /run/httpd\r\nfor count in {1..5} \r\ndo \r\n echo \"start $count\"\r\n httpd -DFOREGROUND\r\n sleep 1\r\ndone\r\nEOF''')\r\n else:\r\n c.run('systemctl restart httpd')",
"def main():\n return run_server(**parse_server_args())",
"def start_cherrypy(server, cp_root_config, host, port):\n LOGGER.debug(\"Starting BibleMunger's CherryPy HTTP server\")\n cherrypy.config.update({\n 'server.socket_host': host,\n 'server.socket_port': port,\n })\n cherrypy.tree.mount(server, '/', cp_root_config)\n cherrypy.engine.signals.subscribe()\n cherrypy.engine.start()\n cherrypy.engine.block()",
"def run_server(argv=None, microscope_factory=None):\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-p\", \"--port\", type=int, default=8080, help=\"Specify port on which the server is listening\")\n parser.add_argument(\"--host\", type=str, default='', help=\"Specify host address on which the the server is listening\")\n args = parser.parse_args(argv)\n\n try:\n # Create a web server and define the handler to manage the incoming request\n server = MicroscopeServer((args.host, args.port), MicroscopeHandler, microscope_factory=microscope_factory)\n print(\"Started httpserver on host '%s' port %d.\" % (args.host, args.port))\n print(\"Press Ctrl+C to stop server.\")\n\n # Wait forever for incoming htto requests\n server.serve_forever()\n\n except KeyboardInterrupt:\n print('Ctrl+C received, shutting down the web server')\n server.socket.close()\n\n return 0",
"def server(args):\n GISHTTPRequestHandler.protocol_version = args.protocol\n httpd = HTTPServer((args.bind, args.port), GISHTTPRequestHandler)\n\n if args.verbose:\n sa = httpd.socket.getsockname()\n print(\"Serving GIS on\", sa[0], \"port\", sa[1], \"...\")\n\n try:\n httpd.serve_forever()\n except KeyboardInterrupt:\n print(\"\\nKeyboard interrupt received, exiting.\")\n httpd.server_close()\n sys.exit(0)",
"def start():\n from paste.deploy import loadapp, loadserver\n from moksha.config.environment import load_environment\n from moksha.config.middleware import make_app\n ini = 'config:' + path('development.ini').abspath()\n wsgi_app = loadapp(ini)\n serve = loadserver(ini)\n serve(wsgi_app)",
"def main():\n # Initialize logging to the terminal.\n coloredlogs.install()\n # Parse the command line arguments.\n try:\n options, arguments = getopt.getopt(sys.argv[1:], 'vqh', [\n 'verbose', 'quiet', 'help'\n ])\n for option, value in options:\n if option in ('-v', '--verbose'):\n coloredlogs.increase_verbosity()\n elif option in ('-q', '--quiet'):\n coloredlogs.decrease_verbosity()\n elif option in ('-h', '--help'):\n usage()\n return\n else:\n assert False, \"Unhandled option!\"\n if not arguments:\n arguments = ['.']\n elif len(arguments) > 1:\n raise Exception(\"Only one positional argument may be given!\")\n except Exception as e:\n print(\"Failed to parse command line arguments! (%s)\" % e)\n print(\"\")\n usage()\n sys.exit(1)\n try:\n if os.path.isdir(arguments[0]):\n start_webserver(find_readme_file(arguments[0]))\n elif os.path.isfile(arguments[0]):\n start_webserver(arguments[0])\n else:\n raise Exception(\"Input doesn't exist!\")\n except KeyboardInterrupt:\n sys.stderr.write('\\r')\n logger.error(\"Interrupted by Control-C, terminating ..\")\n sys.exit(1)\n except Exception:\n logger.exception(\"Encountered an unhandled exception, terminating!\")\n sys.exit(1)",
"def main():\n\n # TODO: more advanced argument processing\n\n # Handle port\n port = None\n if len(sys.argv) > 1:\n port_arg = sys.argv[1]\n try:\n port = int(port_arg[1:] if port_arg.startswith(':') else port_arg)\n except:\n pass\n\n try:\n serve(port=port)\n except ValueError, ex:\n # Show input error\n print 'Error:', ex",
"def server():\n package('apache2')\n require_started('apache2')",
"def main():\n print(\"Starting python server...\")\n\n # Set address to localhost\n address = \"tcp://127.0.0.1:\" + parse_port()\n\n # Start server with class API as \n server = zerorpc.Server(API.API())\n server.bind(address)\n\n print(\"Server started running on {}\".format(address))\n\n # Blocking command. Keeps server running\n server.run()",
"def main():\n if len(sys.argv) != 2:\n sys.exit(\"Usage: python server-python.py [Server Port]\")\n server_port = int(sys.argv[1])\n server(server_port)",
"def main():\n if len(sys.argv) != 2:\n sys.exit(\"Usage: python server-python.py [Server Port]\")\n server_port = int(sys.argv[1])\n server(server_port)",
"def start(self):\n self.serve_forever()",
"def start(self):\n self.serve_forever()",
"def git():\n pass",
"def run(self):\n self._server = self._get_server()\n self._server.serve_forever()",
"def main():\n server = ThreadedServer(MasterControllerService, port=5000)\n server.start()",
"def run_server():\n app = init_app()\n app.run(host=app.config['HOST'], port=app.config['PORT'])",
"def main():\n\n apps = [\n 'fires', 'hw6',\n 'imageapp',\n 'quixote_demo',\n 'quotes',\n 'chat',\n 'cookie'\n ]\n parser = argparse.ArgumentParser(\n description='A WSGI Server implemented for CSE491-001.',\n epilog='Please check the non-existent documentation for more info.',\n formatter_class=argparse.RawTextHelpFormatter\n )\n # Add the '-?' alias for '--help', which I prefer to use:\n parser.add_argument('-?',\n action='help',\n help='Alias for --help')\n # Add the application argument:\n parser.add_argument('--app',\n nargs='?',\n dest='app',\n default='fires',\n choices=apps,\n help='\\n'.join([\n 'Which WSGI application to run.',\n '(default: \"%(default)s\" - my homework 6)',\n 'Alias: -A'\n ]))\n parser.add_argument('-A',\n nargs='?',\n dest='app',\n default='fires',\n choices=apps,\n help=argparse.SUPPRESS)\n # Add the port argument:\n parser.add_argument('--port',\n nargs='?',\n default=random.randint(8000, 9999),\n type=int,\n help='\\n'.join([\n 'Which port to start the server on.',\n '(default: random integer between 8000 and 9999)',\n 'Alias: -p'\n ]))\n # After that, parse the command-line arguments.\n args = parser.parse_args()\n\n # Create a socket object\n sock = socket.socket()\n # Get local machine name\n host = socket.getfqdn()\n\n if host in ('magrathea', 'Thoth'):\n # For testing, I don't want to have to change my url all the damn time.\n port = 8080\n else:\n port = args.port\n # Bind to the port\n # TODO figure out how to immediately unbind when I'm done\n sock.bind((host, port))\n print 'Starting server at http://%s:%d/' % (host, port)\n # Now wait for client connection.\n sock.listen(5)\n\n # get this from commandline\n app_to_run = args.app\n if app_to_run == 'quixote_demo':\n # quixote stuff for testing with that\n p = create_publisher()\n # p.is_thread_safe = True # hack...\n wsgi_app = quixote.get_wsgi_app()\n elif app_to_run == 'imageapp':\n imageapp.setup()\n p = imageapp.create_publisher()\n wsgi_app = quixote.get_wsgi_app()\n elif app_to_run == 'quotes':\n wsgi_app = QuotesApp('./quotes/quotes.txt', './quotes/html')\n elif app_to_run == 'chat':\n wsgi_app = ChatApp('./chat/html')\n elif app_to_run == 'cookie':\n wsgi_app = cookieapp.wsgi_app\n else: #if app_to_run == 'fires': # default\n wsgi_app = app.make_app()\n\n\n print 'Entering infinite loop; hit CTRL-C to exit'\n try:\n while True:\n # Establish connection with client.\n conn, (client_host, client_port) = sock.accept()\n print 'Got connection from', client_host, client_port\n handle_connection(conn, wsgi_app)\n finally:\n # teardown stuffs\n if app_to_run == 'imageapp':\n imageapp.teardown()\n sock.shutdown(2)\n sock.close()"
]
| [
"0.7091706",
"0.70480895",
"0.66758436",
"0.66428643",
"0.65184426",
"0.65044636",
"0.64539856",
"0.64152414",
"0.631969",
"0.6310795",
"0.6310067",
"0.6309897",
"0.6301403",
"0.6270714",
"0.61036736",
"0.608633",
"0.60519946",
"0.60306233",
"0.60127413",
"0.59982204",
"0.59931165",
"0.59882957",
"0.59882957",
"0.595768",
"0.595768",
"0.59492177",
"0.5946968",
"0.59456235",
"0.5931742",
"0.59232813"
]
| 0.75933325 | 0 |
The main point is to find a good threshold, that is, the optimal split value. A good threshold is the one that minimizes misclassification error. | def _find_threshold(self, feature, y_train, num_class):
assert len(num_class) == 2, "This function assumes binary classification."
best_threshold = 0.0
max_exact_classification = 0.0
is_positive_negative = False
sorted_feature = sorted(np.unique(feature))
for i in range(len(sorted_feature)-1):
# assume the value less than threshold is negative (0), greater than threshold is positive (1)
threshold = (sorted_feature[i] + sorted_feature[i+1]) / 2
left_partition = y_train[feature < threshold]
right_partition = y_train[feature > threshold]
negative_positive = ((len(left_partition[left_partition == 0]) + len(right_partition[right_partition == 1]))
/ len(feature))
# assume the value less than threshold is positive (1), greater than threshold is negative. (0)
positive_negative = ((len(left_partition[left_partition == 1]) + len(right_partition[right_partition == 0]))
/ len(feature))
# make decision here
current_split_is_positive = positive_negative > negative_positive
choose = positive_negative if current_split_is_positive else negative_positive
if max_exact_classification < choose:
    max_exact_classification = choose
    best_threshold = threshold
    is_positive_negative = current_split_is_positive
return best_threshold, is_positive_negative | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_metric_threshold(self):\n logger.info(\"compute metric threshold\")\n\n ### Beaucoup trop lent quand on a beaucoup de models ###\n\n df_results_not_aggregated = self.result_reader.load_all_results(aggregate=False)\n\n if len(df_results_not_aggregated) == 0:\n logger.info(\"threshold = None\")\n return None\n\n main_scorer = \"test_%s\" % self.job_config.main_scorer\n (df_results_not_aggregated[main_scorer].fillna(df_results_not_aggregated[main_scorer].min(), inplace=True))\n min_cv = df_results_not_aggregated.groupby(\"job_id\")[main_scorer].min().values\n delta_min_max_cv = np.median(\n df_results_not_aggregated.groupby(\"job_id\")[main_scorer].apply(lambda x: x.max() - x.min())\n )\n\n if len(min_cv) <= self.min_nb_of_models:\n logger.info(\"threshold = None\")\n return None\n\n min_cv = -np.sort(-min_cv)\n result = min_cv[self.min_nb_of_models] - delta_min_max_cv\n\n # result = np.percentile( min_cv, self._get_quantile(len(min_cv)) * 100)\n # TODO : ici peut etre faire une estimation parametric du quantile avec un Kernel, plus smooth et moins sensible quand peu de valeurs\n\n logger.info(\"threshold : %2.2f\" % result)\n return result",
"def choose_best_split(self, X_subset, y_subset):\n # YOUR CODE HERE\n feature_index = None\n threshold = None\n best_G = np.inf\n N = len(X_subset)\n \n for current_feature in range(X_subset.shape[1]):\n thresholds = np.unique(X_subset[:, current_feature])\n \n for t in thresholds:\n y_left, y_right = self.make_split_only_y(current_feature, t, X_subset, y_subset)\n H_L = self.H(y_left)\n H_R = self.H(y_right)\n \n G = (len(y_left) / N) * H_L + (len(y_right) / N) * H_R\n \n if G < best_G:\n best_G = G\n feature_index = current_feature\n threshold = t\n \n return feature_index, threshold",
"def gp_optimize_threshold(gp_model, X_val, y_val, X_scaler, y_scaler, optimize_for=\"profits\"): \n y_hat, conf = gp_model.predict(X_val)\n regressed_payment = y_scaler.inverse_transform(y_hat).reshape(-1)\n loan_amt = X_scaler.inverse_transform(X_val)[:,0]\n\n # This ratio is a guage of how likely a person will pay back.\n # It is compared with a threshold to determine whether or not to loan.\n payment_to_loan_ratio = regressed_payment / loan_amt\n\n # Sort in descending order\n sorted_ind = np.argsort(-payment_to_loan_ratio)\n sorted_payment_to_loan_ratio = payment_to_loan_ratio[sorted_ind]\n X_sorted, y_sorted = X_val[sorted_ind,:], y_val[sorted_ind]\n\n threshold, highest_opt_val = 0, 0\n for i, thresh in enumerate(sorted_payment_to_loan_ratio): \n X_loanee = X_sorted[:i+1,:]\n y_loanee = y_sorted[:i+1]\n \n loan_amt_loanee = np.sum(X_scaler.inverse_transform(X_loanee)[:,0])\n payments_loanee = np.sum(y_loanee)\n\n # Optimize for different values\n if optimize_for == \"profits\":\n opt_val = payments_loanee - loan_amt_loanee\n elif optimize_for == \"profit_percentage\":\n opt_val = (payments_loanee - loan_amt_loanee) / loan_amt_loanee\n else:\n raise Exception(\"Illegal optimize_for value: %s\" % optimize_for)\n\n # Keep track of highest value (that is being optimized for)\n if opt_val > highest_opt_val:\n threshold = thresh\n highest_opt_val = opt_val\n return threshold",
"def get_split(data):\n \"\"\" gets the best feature, and best value \"\"\"\n\n best_feature = None\n best_value = 0.0\n columns = data.columns\n gini_base = gini_impurity(data)\n n_rows = len(data.index) # total number of rows of data before split\n\n # Fininding which split yields the best gini gain\n max_gain = 0\n\n for i in range(len(columns)-1): # -1 b.c. class is final column\n xs = data[columns[i]].unique() # get values to test\n for x in xs: # test values\n # split dataset\n df_left = data[data[columns[i]] < x]\n df_right = data[data[columns[i]] >= x]\n\n # get gini impurities\n gini_left = gini_impurity(df_left)\n gini_right = gini_impurity(df_right)\n \n\n # Calculated weighted gini impurity\n w_left = len(df_left.index) / n_rows\n w_right = len(df_right.index) / n_rows\n\n w_gini = gini_left * w_left + gini_right * w_right\n \n\n # Calculate gini gain (we want to minimize w_gini for the smallest impurity. Ideal split is perfect Left=c1, Right=c2)\n # why not just find min w_gin instead of uding gini_gain and gini_base vaiables?\n gini_gain = gini_base - w_gini\n\n # check if this is the best split so far, store values, update max_gini\n if gini_gain > max_gain:\n best_feature = columns[i]\n best_value = x\n max_gain = gini_gain\n\n df_left = data.loc[data[best_feature] < best_value]\n df_right = data.loc[data[best_feature] >= best_value]\n \n\n return best_feature, best_value, df_left, df_right",
"def get_optimal_threshhold(true_label, prediction, iterations=100, size=17):\n best_threshhold = [0.2]*size\n for t in range(size):\n best_fbeta = 0\n temp_threshhold = [0.2]*size\n for i in range(iterations):\n temp_value = i / float(iterations)\n temp_threshhold[t] = temp_value\n temp_fbeta = fbeta(true_label, prediction > temp_threshhold)\n if temp_fbeta > best_fbeta:\n best_fbeta = temp_fbeta\n best_threshhold[t] = temp_value\n return best_threshhold",
"def _find_best_threshold(self, num_of_steps=20, verbose=False):\n xmin = self.x.min()\n xmax = self.x.max()\n step = (xmax - xmin)/num_of_steps\n \n lower_th = None\n lower_IR = 1\n\n # for each potential threshold\n for threshold in np.arange(xmin+step, xmax, step):\n IR = self._compute_isometric_ratio(threshold)\n \n if IR < lower_IR:\n lower_IR = IR\n lower_th = threshold\n \n self.threshold = lower_th\n if verbose:\n print(f'\\tThreshold:\\t\\t{lower_th}\\n\\tIsometric Ratio:\\t{lower_IR}')",
"def find_best_threshold(y, y_hat, step_size, score_func, maximize=True):\n best_thres, best_score = 0.0, 0.0 if maximize else 1.0\n for thres in np.arange(0, 1, step_size):\n score = score_for_threshold(y, y_hat, score_func, thres)\n if (maximize and (score > best_score)) or (not maximize and (score < best_score)):\n best_score = score\n best_thres = thres\n\n return best_thres, best_score",
"def determine_best_split(data, potential_splits, mltask):\n\n first_iteration = True\n for column_index in potential_splits:\n for value in potential_splits[column_index]:\n data_below,data_above = split_data(data, column_index, value)\n \n if mltask == 'regression':\n current_overall_metric = calculate_overall_metric(data_below, data_above, metric_function = calculate_mse)\n \n # classification\n else:\n current_overall_metric = calculate_overall_metric(data_below, data_above, metric_function = calculate_entropy)\n \n \n if first_iteration or current_overall_metric <= best_overall_metric:\n first_iteration = False\n \n best_overall_metric = current_overall_metric\n best_split_column = column_index\n best_split_value = value\n \n \n return best_split_column,best_split_value",
"def get_best_thres(self, data, label, score_func = f1_score):\n pred_prob = self.model.predict(data)\n best_score = 0\n for i_thres in range(0, 100):\n pred_label = [int(i > (i_thres / 100.0)) for i in pred_prob]\n fs = score_func(label, pred_label)\n if best_score < fs:\n best_score = fs\n self.config.thres = i_thres / 100.0\n print ('best score: %0.2f best_thres: %0.2f' % (best_score, self.config.thres))",
"def _split_threshold(self, node):\n\n # define the score to improve upon\n if self.n_clusters >= self.min_leaves and node.size <= self.max_leaf_size:\n # split only if min(children scores) > node.score\n force_split = False\n best_score = node.score\n else:\n # force split: just take the best (even if children are worse)\n force_split = True\n best_score = None\n\n left, right = None, None\n\n # iterate over embedding dimensions (first ones are more reliable)\n # up to max_n_vec (included), until we found an improving split\n for _vec in range(self.n_vec):\n\n # get the candidate thresholds along this dimension\n threshs = self._get_candidate_thresholds(node, _vec)\n\n # look for an improving best split along this eigenvector\n for _t in threshs:\n # compute the split\n below_thresh = self.E[node.ids, _vec] < _t\n _lids = node.ids[below_thresh]\n _rids = node.ids[np.logical_not(below_thresh)]\n # check if the tubes are not too small\n _nl, _nr = len(_lids), len(_rids)\n is_valid = _nl >= self.min_leaf_size and _nr >= self.min_leaf_size\n if is_valid:\n # compute the score of the new tubes only\n _sl = self.get_tube_score(_lids)\n _sr = self.get_tube_score(_rids)\n # get the score of this split\n split_score = min(_sl, _sr)\n if best_score is None or split_score > best_score:\n # better split\n best_score = split_score\n node.has_children = True\n node.thresh = _t\n left = SpectralNode(\n _lids, _vec, score=_sl, name=node.name + \"0\")\n right = SpectralNode(\n _rids, _vec, score=_sr, name=node.name + \"1\")\n\n # check stopping criterion\n if node.has_children:\n # we found an improving split\n if _vec > 0 or not force_split:\n # found an improving non-forced split: stop here\n break\n\n return left, right",
"def find_stage2_threshold(model_path, u_mode):\n u_modes = ['sub_u', 'unlabeled']\n\n assert u_mode in u_modes\n\n if u_mode == 'sub_u':\n negative = np.load(\"./processed_data/train/sub_u_negative.npy\")\n else:\n negative = np.load(\"./processed_data/train/unlabeled_negative.npy\")\n\n positive = np.load(\"./processed_data/train/raw/train_p.npy\")\n\n\n # begin with sub-u classifier test\n classifier = joblib.load(model_path)\n u_bst_th = 0.0\n u_th = 0.0\n u_index = 0.0\n u_tp = 0.0\n u_fp = 0.0\n while u_th < 1:\n p_result = np.array(classifier.predict_proba(positive[:, :-1])[:, 1])\n sub_u_result = np.array(classifier.predict_proba(negative[:, :-1])[:, 1])\n tp_rate = np.where(p_result >= u_th, 1, 0).sum() / p_result.shape[0]\n fp_rate = np.where(sub_u_result >= u_th, 1, 0).sum() / sub_u_result.shape[0]\n index = math.sqrt(tp_rate * (1 - fp_rate))\n if index >= u_index:\n u_index = index\n u_tp = tp_rate\n u_fp = fp_rate\n u_bst_th = u_th\n print(\"threshold: \" + str(u_th) + \" TP: \"\n + str(tp_rate) + \" FP: \" + str(fp_rate) + \" GMean: \" + str(index) + \"\\n\\n\")\n u_th += _threshold_2_step\n\n\n\n print(model_path + \"\\n \" +\n \"threshold: \" + str(u_bst_th) + \" TP: \" + str(u_tp) + \" FP: \" + str(u_fp) + \" GMean: \" + str(u_index))",
"def decision_threshold(x, y):\n \n model = DecisionTreeClassifier(max_depth=1, criterion='entropy')\n model.fit(x,y)\n print (\"-- Uncertainty Threshold: \", model.tree_.threshold[0])\n return model.tree_.threshold[0]",
"def split_cost(label_count_list):\n return -split_information_gain(label_count_list)\n #this cost value is the misclassification error.\n return split_misclassification_error(label_count_list)",
"def decision_tree(original_training_data,call_depth):\n\n ''' Checking the stopping criterion. If yes then it returns the majority class (Muffin or CupCake) '''\n if check_if_stopping_criterion_is_met(original_training_data.values) or call_depth > 10:\n majority = classification(original_training_data)\n return majority\n\n else:\n ''' Each time we split the data and go deeper, we increment the depth of the tree '''\n call_depth += 1\n\n ''' Finding the best attribute, best threshold to split data, best minimum entropy '''\n best_split_index, best_attribute, best_threshold, best_minimum_entropy = find_best_attribute_threshold_entropy(original_training_data)\n original_training_data_values = original_training_data.values\n\n best_split_values = original_training_data_values[:,best_split_index]\n\n less_than_threshold = original_training_data[best_split_values <= best_threshold]\n more_than_threshold = original_training_data[best_split_values > best_threshold]\n\n ''' Initializing a variable called as condition which stores the format of the key for the resulting decision tree dictionary '''\n condition = original_training_data.columns[best_split_index] + \" <= \" + str(best_threshold)\n\n ''' Initializing a dictionary where key is condition and value is a list. This is the basic data structure in which the\n resulting decision tree is stored '''\n sub_tree = {condition: []}\n\n ''' Calling the decision tree recursively '''\n left_tree = decision_tree(less_than_threshold, call_depth)\n right_tree = decision_tree(more_than_threshold, call_depth)\n\n ''' For removing edge cases where on either split, the resulting decision tree gives the same result '''\n if left_tree == right_tree:\n sub_tree = left_tree\n else:\n ''' Appending the smaller trees in the final decision tree '''\n sub_tree[condition].append(left_tree)\n sub_tree[condition].append(right_tree)\n\n return sub_tree",
"def best_threshold_from_folds(y_tuples, scoring=f1_score, step_size=0.01, maximize=True):\n thresholds, scores = [], []\n for _, y_true, y_pred in y_tuples:\n t, s = find_best_threshold(y_true, y_pred, step_size, scoring, maximize=maximize)\n thresholds.append(t)\n scores.append(s)\n\n mean_threshold = np.mean(thresholds)\n mean_score = np.mean([score_for_threshold(y, y_hat, scoring, mean_threshold) for _, y, y_hat in y_tuples])\n return mean_threshold, mean_score",
"def best_cutoff(self,\n split_label):\n split_args = self.sub_split_args[split_label]\n split_data = self.sub_split_data[split_label]\n # This criterion for the use_scipy flag is arbitrary and needs\n # further testing\n n_unique = len(np.unique(split_data[~np.isnan(split_data)]))\n use_scipy = True\n if n_unique > len(split_data)/1000:\n use_scipy = False\n idxcut_below, effects_below, rstats_below, ndata_below =\\\n self.u_data(split_label, use_scipy=use_scipy)\n idxcut_above, effects_above, rstats_above, ndata_above =\\\n self.u_data(split_label, above=True, use_scipy=use_scipy)\n\n # Default cutoff is min(split_data) - 1\n cutoff = split_data[split_args[0]] - 1\n value = 0\n # If no cutoff was possible\n if len(idxcut_below) == 0 or len(idxcut_above) == 0:\n return cutoff, value\n\n # All idx_cutoffs and values for cutoffs, for debugging\n for idx in range(len(idxcut_above)):\n idxcut = idxcut_above[idx]\n if idxcut != idxcut_below[idx]:\n raise NameError('Code error, invalid split')\n value_temp = (abs(effects_above[idx] -\n effects_below[idx]) *\n rstats_above[idx] *\n rstats_below[idx] *\n min(ndata_above[idx]) *\n min(ndata_below[idx]))\n if value_temp > value:\n cutoff = (split_data[split_args[int(idxcut)]] +\n split_data[split_args[int(idxcut)+1]])/2\n value = value_temp\n return cutoff, value",
"def decision_bound(self, test_data):\n pred = self.w * test_data[:,0] - self.intercept\n self.ret_label = np.zeros((test_data.shape[0], 1))\n self.ret_label[test_data[:,1] > pred] = 1\n self.ret_label[test_data[:,1] <= pred] = 0\n return (self.ret_label)",
"def best_split(self, X, y, attributes):\n if (self.criterion==\"information_gain\"):\n global_if = float('-inf') # the highest value of information gain/gini gain seen so far\n attr = None\n for attribute in attributes:\n attr_val = X[attribute].copy()\n cur_if = information_gain(y,attr_val,self.type)\n if (cur_if>global_if):\n # Update when a better split is receieved\n global_if = cur_if\n attr = attribute\n return attr\n else:\n global_if = float('inf')\n attr = None\n for attribute in attributes:\n attr_val = X[attribute].copy()\n cur_if = gini_gain(y,attr_val)\n if (global_if>cur_if):\n # Update when a better split is receieved\n global_if = cur_if\n attr = attribute\n return attr",
"def cal_thresh(pred_prob,labels):\n mu_stds = []\n for i in range(19):\n pos_mu, pos_std = fit(pred_prob[labels==i, i])\n mu_stds.append([pos_mu, pos_std])\n return mu_stds",
"def compute_splits(feature_df, target_col, max_num_splits):\n tree_estimator = DecisionTreeClassifier(max_leaf_nodes=max_num_splits+1,\n class_weight='balanced',\n random_state=1407)\n\n tree_estimator.fit(feature_df, target_col)\n thresholds = tree_estimator.tree_.threshold[tree_estimator.tree_.children_left != _tree.TREE_LEAF]\n return sorted(thresholds)",
"def calibrate_threshold(test_graphs):\r\n best_threshold = None\r\n best_result = None\r\n for threhold in range(1, 50):\r\n cur_res = evaluate_argument_mention(test_graphs, threhold)\r\n if (best_result is None) or (cur_res > best_result):\r\n best_result = cur_res\r\n best_threshold = threhold\r\n return (best_threshold, best_result)",
"def lgb_hyperopt(data, labels, num_evals=1000, n_folds=5, diagnostic=False):\r\n LGBM_MAX_LEAVES = 2**11 #maximum number of leaves per tree for LightGBM\r\n LGBM_MAX_DEPTH = 25 #maximum tree depth for LightGBM \r\n EVAL_METRIC_LGBM_CLASS = 'f1'\r\n\r\n def lgb_f1_score(y_hat, data):\r\n y_true = data.get_label()\r\n y_hat = np.round(y_hat)\r\n return 'f1', f1_score(y_true, y_hat), True\r\n\r\n print('Running {} rounds of LightGBM parameter optimisation:'.format(num_evals))\r\n #clear space\r\n \r\n integer_params = ['max_depth',\r\n 'num_leaves',\r\n 'max_bin',\r\n 'min_data_in_leaf',\r\n 'min_data_in_bin']\r\n \r\n def objective(space_params):\r\n \r\n #cast integer params from float to int\r\n for param in integer_params:\r\n space_params[param] = int(space_params[param])\r\n \r\n #extract nested conditional parameters\r\n if space_params['boosting']['boosting'] == 'goss':\r\n top_rate = space_params['boosting'].get('top_rate')\r\n other_rate = space_params['boosting'].get('other_rate')\r\n #0 <= top_rate + other_rate <= 1\r\n top_rate = max(top_rate, 0)\r\n top_rate = min(top_rate, 0.5)\r\n other_rate = max(other_rate, 0)\r\n other_rate = min(other_rate, 0.5)\r\n space_params['top_rate'] = top_rate\r\n space_params['other_rate'] = other_rate\r\n \r\n subsample = space_params['boosting'].get('subsample', 1.0)\r\n space_params['boosting'] = space_params['boosting']['boosting']\r\n space_params['subsample'] = subsample\r\n \r\n cv_results = lgb.cv(space_params, train, nfold = n_folds, stratified=True,\r\n early_stopping_rounds=100, seed=42, feval=lgb_f1_score)\r\n \r\n best_loss = -cv_results['f1-mean'][-1]\r\n\r\n return{'loss':best_loss, 'status': STATUS_OK }\r\n \r\n train = lgb.Dataset(data, labels)\r\n \r\n #integer and string parameters, used with hp.choice()\r\n boosting_list = [{'boosting': 'gbdt',\r\n 'subsample': hp.uniform('subsample', 0.5, 1)},\r\n {'boosting': 'goss',\r\n 'subsample': 1.0,\r\n 'top_rate': hp.uniform('top_rate', 0, 0.5),\r\n 'other_rate': hp.uniform('other_rate', 0, 0.5)}] #if including 'dart', make sure to set 'n_estimators'\r\n\r\n objective_list_reg = ['huber', 'gamma', 'fair', 'tweedie']\r\n objective_list_class = ['binary', 'cross_entropy']\r\n objective_list = objective_list_class\r\n is_unbalance_list = [True]\r\n\r\n space ={'boosting' : hp.choice('boosting', boosting_list),\r\n 'num_leaves' : hp.quniform('num_leaves', 2, LGBM_MAX_LEAVES, 1),\r\n 'max_depth': hp.quniform('max_depth', 2, LGBM_MAX_DEPTH, 1),\r\n 'max_bin': hp.quniform('max_bin', 32, 255, 1),\r\n 'min_data_in_leaf': hp.quniform('min_data_in_leaf', 1, 256, 1),\r\n 'min_data_in_bin': hp.quniform('min_data_in_bin', 1, 256, 1),\r\n 'min_gain_to_split' : hp.quniform('min_gain_to_split', 0.1, 5, 0.01),\r\n 'lambda_l1' : hp.uniform('lambda_l1', 0, 5),\r\n 'lambda_l2' : hp.uniform('lambda_l2', 0, 5),\r\n 'learning_rate' : hp.loguniform('learning_rate', np.log(0.005), np.log(0.2)),\r\n 'metric' : None, \r\n 'objective' : hp.choice('objective', objective_list),\r\n 'feature_fraction' : hp.quniform('feature_fraction', 0.5, 1, 0.01),\r\n 'bagging_fraction' : hp.quniform('bagging_fraction', 0.5, 1, 0.01),\r\n 'is_unbalance' : hp.choice('is_unbalance', is_unbalance_list)\r\n }\r\n\r\n trials = Trials()\r\n best = fmin(fn=objective,\r\n space=space,\r\n algo=tpe.suggest,\r\n max_evals=num_evals, \r\n trials=trials)\r\n \r\n #fmin() will return the index of values chosen from the lists/arrays in 'space'\r\n #to obtain actual values, index values are used to subset the original lists/arrays\r\n #extract 
nested conditional parameters\r\n try:\r\n if best['boosting']['boosting'] == 'goss':\r\n top_rate = best['boosting'].get('top_rate')\r\n other_rate = best['boosting'].get('other_rate')\r\n #0 <= top_rate + other_rate <= 1\r\n top_rate = max(top_rate, 0)\r\n top_rate = min(top_rate, 0.5)\r\n other_rate = max(other_rate, 0)\r\n other_rate = min(other_rate, 0.5)\r\n best['top_rate'] = top_rate\r\n best['other_rate'] = other_rate\r\n except:\r\n if boosting_list[best['boosting']]['boosting'] == 'goss':\r\n top_rate = best['top_rate']\r\n other_rate = best['other_rate']\r\n #0 <= top_rate + other_rate <= 1\r\n top_rate = max(top_rate, 0)\r\n top_rate = min(top_rate, 0.5)\r\n other_rate = max(other_rate, 0)\r\n other_rate = min(other_rate, 0.5)\r\n best['top_rate'] = top_rate\r\n best['other_rate'] = other_rate\r\n best['boosting'] = boosting_list[best['boosting']]['boosting']#nested dict, index twice\r\n best['metric'] = metric_list[best['metric']]\r\n best['objective'] = objective_list[best['objective']]\r\n best['is_unbalance'] = is_unbalance_list[best['is_unbalance']]\r\n \r\n #cast floats of integer params to int\r\n for param in integer_params:\r\n best[param] = int(best[param])\r\n \r\n print('{' + '\\n'.join('{}: {}'.format(k, v) for k, v in best.items()) + '}')\r\n if diagnostic:\r\n return(best, trials)\r\n else:\r\n return(best)",
"def fit(self, data, targets):\n # update these three\n self.idx = 0\n self.val = None\n self.left = None\n self.right = None\n ### YOUR CODE HERE\n # i have added a slow and a fast version\n \n num_points, num_features = data.shape\n # print('num points, num_features', num_points, num_features)\n \n def feat_score(feat_idx):\n feat = data[:, feat_idx].copy()\n perm = np.argsort(feat)\n s_feat = feat[perm]\n s_targets = targets[perm]\n target_var = ((s_targets - s_targets.mean())**2).sum()\n s_left, s_right = sum_squares(s_targets)\n def score(idx, _vals):\n ## slow version\n #left = _vals[0:idx]\n #right = _vals[idx:]\n #assert len(left) + len(right) == len(_vals), (len(left), len(right), len(_vals))\n #left_mean = np.mean(left)\n #right_mean = np.mean(right)\n #left_error = np.sum((left-left_mean)**2)\n #assert np.allclose(left_error, s_left[idx]) \n #right_error = np.sum((right-right_mean)**2)\n #assert np.allclose(right_error, s_right[idx])\n # return left_error+right_error\n # fast version\n return s_left[idx] + s_right[idx]\n # score for every split\n scores = np.array([score(x, s_targets) for x in range(0, num_points)])\n assert scores.min() <= target_var, target_var\n best_score_idx = np.argmin(scores)\n best_score = scores[best_score_idx]\n val = s_feat[best_score_idx]\n # print('best score', feat_idx, best_score, best_score_idx, val, s_feat[best_score_idx+1])\n \n return best_score, {'val': val, \n 'left': np.mean(s_targets[:best_score_idx]), \n 'right': np.mean(s_targets[best_score_idx:])\n } \n\n split_scores = []\n for f in range(0, num_features):\n total_score, _params = feat_score(f)\n split_scores.append(total_score)\n # print('score of {0} - {1}'.format(feat_names[f], total_score))\n # print('feature scores:', np.array(split_scores))\n best_feat = np.argmin(split_scores)\n best_score = split_scores[best_feat]\n # print('Best Feature idx: {0} - Best Cost: {1}'.format(best_feat, best_score))\n score_again, params = feat_score(best_feat)\n # print('double check score', score_again, best_score)\n self.idx = best_feat\n self.val = params['val']\n self.left = params['left']\n self.right = params['right']\n print(\"idx={}, val={}, left={}, right={}\".format(self.idx, self.val, self.left, self.right))\n assert not np.isnan(self.left)\n assert not np.isnan(self.right)\n ### END CODE",
"def thresholding(pred,label,thres):\n \n conf =[]\n \n for i in thres:\n \n pr_th,lab_th = (pred>i),(label>i)\n conf += confusion(pr_th,lab_th)\n \n return np.array(conf).reshape(-1,4)",
"def fit(self, X, y, label_maj=0, label_min=1):\n self.estimators_ = []\n if self._label_binarier is True:\n label_binarier=LabelBinarizer()\n label_binarier.fit(np.arange(500000))\n X_id=label_binarier.transform(X['model_serial'])\n X.drop(['model_serial'],axis=1,inplace=True)\n X=np.concatenate((X.to_numpy(),X_id),axis=1)\n # Initialize by spliting majority / minority set\n X_maj = X[y == label_maj]\n y_maj = y[y == label_maj]\n X_min = X[y == label_min]\n y_min = y[y == label_min]\n\n # Random under-sampling in the 1st round (cold start)\n X_train, y_train = self._random_under_sampling(\n X_maj, y_maj, X_min, y_min)\n self.estimators_.append(\n self._fit_base_estimator(\n X_train, y_train))\n self._y_pred_maj = self.predict_proba(X_maj)[:, 1]\n\n # Loop start\n for i_estimator in range(1, self._n_estimators):\n X_train, y_train = self._self_paced_under_sampling(\n X_maj, y_maj, X_min, y_min, i_estimator, )\n self.estimators_.append(\n self._fit_base_estimator(\n X_train, y_train))\n # update predicted probability\n n_clf = len(self.estimators_)\n y_pred_maj_last_clf = self.estimators_[-1].predict_proba(X_maj)[:, 1]\n self._y_pred_maj = (self._y_pred_maj * (n_clf - 1) + y_pred_maj_last_clf) / n_clf\n\n return self",
"def test_build_stump(self):\n D = np.mat(np.ones((5, 1)) / 5)\n best, min_err, best_estimate =\\\n ada_boost.build_stump(self.larger_matrix,\n self.larger_class_labels,\n D)\n expected = {'threshold': 1.3, 'dim': 0, 'inequal': 'lt'}\n self.assertEqual(best, expected)",
"def is_better(self, curr, best, **kwargs):\r\n score_threshold = kwargs.pop('score_threshold', 1e-3)\r\n relative_eps = 1.0 + score_threshold\r\n return curr >= best*relative_eps",
"def test_decision_tree_min_samples_split_parameter(params, X_train, X_test, y_train, y_test):",
"def get_threshold(ckt_path, threshold_nums, percentage):\n aug_classes = 5\n num_classes = 8\n torch.set_printoptions(precision=2, threshold=100000, linewidth=10000)\n\n # get dataloader\n mean_std_path = './data/mean_std.json'\n data_root = './data/'\n # loader dict:'train','valid', 'test'\n loader = get_dataloader(mean_std_path, data_root)\n\n copy_resnet18 = deepcopy(resnet18(pretrained=False))\n # model = Net1FC(copy_resnet18, all_classes).cuda()\n model = Net8FC(copy_resnet18, num_classes, aug_classes).cuda()\n\n # ckt_path = './backup/models/resnet180.09625'\n ckt = torch.load(ckt_path)\n model.load_state_dict(ckt['model'])\n\n model.eval()\n\n loss_list = [[] for i in range(8)]\n # _pred_list = []\n # _label_list = []\n with torch.no_grad():\n for index, (data, label) in tqdm(enumerate(loader['threshold'])):\n # _label_list.append(int(label))\n labels = torch.tensor([label for i in range(4)]).cuda()\n data = data.squeeze(1).cuda()\n data = torch.stack([data.clone(),\n data.clone().rot90(1, [1, 2]),\n data.clone().rot90(2, [1, 2]),\n data.clone().rot90(3, [1, 2])])\n\n output = model(data, labels, \"valid\")\n\n targets = torch.tensor([0, 1, 2, 3]).cuda()\n loss_list[label].append(cross_entropy(output[label], targets).item() / 4.0)\n\n # pred_label = np.argmin(val_loss)\n # _pred_list.append(int(pred_label))\n\n # val_conf_mat = conf_matrix(_pred_list, _label_list, 8, True, [i for i in range(8)])\n # cal_recall_precision(val_conf_mat, True, [i for i in range(8)])\n\n print(loss_list)\n\n threshold = []\n if threshold_nums == 'multi':\n # ่ฅๅๅ็ฑปๅจๆฑไธไธช้ๅผ\n for i in range(8):\n length = len(loss_list[i])\n threshold.append(np.mean(loss_list[i].sort()[:length * percentage]))\n\n elif threshold_nums == 1:\n # ่ฅๆๆๅ็ฑปๅจๆฑไธไธช้ๅผ\n loss_list_in_one = []\n for loss in loss_list:\n loss_list_in_one.extend(loss)\n length = len(loss_list_in_one)\n threshold = np.mean(loss_list_in_one.sort()[:length * percentage])\n\n print(\"The threshold is:\", threshold)\n\n return threshold",
"def selectThreshold(yval, pval):\n bestEpsilon = 0\n bestF1 = 0\n F1 = 0\n\n stepsize = (pval.max()-pval.min())/1000\n for epsilon in np.arange(pval.min(), pval.max()+stepsize/2, stepsize):\n predictions = (pval < epsilon)\n tp = ((predictions == 1) & (yval == 1)).sum()\n fp = ((predictions == 1) & (yval == 0)).sum()\n fn = ((predictions == 0) & (yval == 1)).sum()\n prec = tp/(tp+fp)\n rec = tp/(tp+fn)\n F1 = 2*prec*rec/(prec+rec)\n\n if F1 > bestF1:\n bestF1 = F1\n bestEpsilon = epsilon\n\n return bestEpsilon, bestF1"
]
| [
"0.67801523",
"0.6688665",
"0.6677787",
"0.65061504",
"0.645018",
"0.64142174",
"0.6409299",
"0.6338233",
"0.63270754",
"0.6305354",
"0.6278283",
"0.62284493",
"0.6212606",
"0.6212504",
"0.6205091",
"0.61867553",
"0.6158211",
"0.6140681",
"0.6089691",
"0.6088866",
"0.6082587",
"0.60790956",
"0.60561943",
"0.6003598",
"0.5988981",
"0.5973413",
"0.5954534",
"0.59487003",
"0.5940657",
"0.59403163"
]
| 0.68700206 | 0 |
Compute the entropy of each partition of a specific feature in a given node. | def _entropy(self, feature, node):
entropy = 0
categories = np.unique(feature)
num_point = len(feature)
for category in categories:
# for each category in that feature
num_category = len(feature[feature == category])
for c in self.num_class:
# count the number of each class
num_category_class = len(feature[np.logical_and(feature == category, node.y == c)])
if num_category_class == 0:
continue
# compute entropy/information gain or classification error
entropy += num_category / num_point * (
-num_category_class / num_category * log2(num_category_class / num_category))
return entropy | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_entropy_feature(self, feature, df=None):\n if df is None:\n df = self.df\n target = self.target\n\n target_variables = df[target].unique()\n variables = df[feature].unique()\n entropy = 0\n\n # Aggregate entropy for each unique value in 'feature' feature on each unique value in target feature\n for variable in variables:\n entropy_inner = 0\n for target_variable in target_variables:\n # Number of values of 'variable' in 'feature' feature that matches current target value\n num = len(df[feature][df[feature] == variable][df[target] == target_variable])\n # Number of values of 'variable' in 'feature' feature\n den = len(df[feature][df[feature] == variable])\n # Machine epsilon\n eps = np.finfo(np.float).eps\n fraction_inner = num/(den+eps)\n entropy_inner += -fraction_inner*np.log(fraction_inner+eps)\n fraction = den/len(df)\n entropy += -fraction*entropy_inner\n\n return abs(entropy)",
"def partition_entropy_by(inputs, attribute):\n partitions = partition_by(inputs, attribute)\n return partition_entropy(partitions.values())",
"def partition_entropy_by(inputs, attribute):\n partitions = partition_by(inputs, attribute)\n return partition_entropy(partitions.values())",
"def __entropy(self, data_set, target_feature):\n frequencies = self.__calculate_frequency(data_set, target_feature)\n feature_entropy = 0.0\n number_of_values = len(data_set)\n\n # Add entropy for each value in frequencies.\n for frequency in frequencies:\n probability = frequencies[frequency] / number_of_values\n feature_entropy += (probability * math.log(probability, 2))\n\n return feature_entropy * -1",
"def entropy(self, **kwargs) -> TensorType:",
"def entropy(self, **kwargs) -> TensorType:",
"def entropy(self):\n raise NotImplementedError",
"def entropy_gain(node,attribute):\n data_subset1 = filter_data(node.data,node.ancestors)\n data_counts = list(Counter(data_subset1['Class']).values())\n base_entropy = entropy(data_counts,base=2)\n num_values = len(data_subset1)\n entropy_sum = 0\n \n for value in [0,1]:\n data_subset2 = filter_data(node.data, node.ancestors + [(attribute,value)])\n subset_counts = list(Counter(data_subset2['Class']).values())\n entropy_sum += (len(data_subset2)/num_values) * entropy(subset_counts,base=2)\n \n return base_entropy - entropy_sum",
"def entropy(data):\n e = 0\n\n counter = collections.Counter(data)\n l = len(data)\n for count in counter.values():\n p_x = count / l\n e += - p_x * math.log2(p_x)\n\n return e",
"def get_entropy(self, x_dict={}, sum_features=True, feature_dims=None):\n raise NotImplementedError()",
"def entropy(x):\n nz = np.nonzero(x)[0]\n return -np.sum(x[nz]*np.log2(x[nz]))",
"def entropy(out, dim=1, reduce='mean'):\n log_prob = F.log_softmax(out, dim=dim)\n h = -torch.sum(log_prob.exp() * log_prob, dim=dim)\n if reduce == 'none':\n return h\n if reduce == 'mean':\n return h.mean()\n if reduce == 'sum':\n return h.sum()",
"def single_entropy(df, var):\n\n entropy_ = df.groupBy(var).agg(count(\"*\").alias('num_entries')) \\\n .withColumn('all', lit('all')) \\\n .withColumn('total_num_entries', sql_sum('num_entries').over(Window.partitionBy('all'))) \\\n .withColumn('pcg', col('num_entries') / col('total_num_entries')) \\\n .select(var, 'pcg') \\\n .withColumn('entropy_term', -col('pcg') * log('pcg')) \\\n .select(sql_sum('entropy_term').alias('entropy')).first()['entropy']\n\n return entropy_",
"def entropy( freq ):\n N = 0.0\n entropy = 0.0\n for x, v in freq.items( ):\n N += v\n entropy -= v * math.log( v, 2 )\n return (N * math.log( N, 2 ) + entropy) / N",
"def entropyDistributed(distribution):\n return -sum(map(lambda p : p * log(p, 2), distribution))",
"def entropy(group_counts):\n total = sum(group_counts)\n entro = 0\n for item_count in group_counts:\n entro += item_entropy(item_count, total)\n return entro",
"def entropy(p):\n ent = tf.where(p > np.finfo(np.float32).eps, -p * tf.log(p), tf.zeros_like(p))\n ent = tf.reduce_sum(ent, axis=1)\n return ent",
"def entropy(self, f):\n f_log = -torch.log(self.einsum(\"q,q->q\", [f, 1 / self.w]))\n return self.einsum(\"q,q->\", [f, f_log])",
"def entropy(self):\n Z = self.sum()\n assert (Z > 0), 'Non-normalizable factor (perhaps log factor?)' # also check for positivity?\n H = 0.0\n for x in np.nditer(self.t, op_flags=['readonly']):\n p = x/Z\n H += 0.0 if p==0 else -p*np.log(p)\n return H",
"def entropy(img):\n # by calculating\n histogram = img.histogram()\n histogram_size = sum(histogram)\n histogram = [float(h) / histogram_size for h in histogram]\n\n return -sum([p * math.log(p, 2) for p in histogram if p != 0])",
"def entropy(self, priors=None):\n def entropy_f(x):\n x[x != 0] *= np.log(x[x != 0])\n return -x.sum(axis=0)\n return self.utility(entropy_f, priors)",
"def get_entropy(distribution, samples):\n entropy = -tf.reduce_sum(distribution.log_prob(samples), axis=1)\n return entropy",
"def partition_entropy(subsets):\n total_count = sum(len(subset) for subset in subsets)\n return sum(data_entropy(subset) * len(subset) / total_count\n for subset in subsets)",
"def partition_entropy(subsets):\n\n total_count = sum(len(subset) for subset in subsets)\n\n return sum(data_entropy(subset) * len(subset) / total_count for subset in subsets)",
"def entropy(y):\n EPS = 0.0005\n\n # YOUR CODE HERE\n if len(y) == 0:\n return 0.\n \n pk = np.mean(y, axis=0)\n \n return - np.sum(pk * np.log(pk + EPS))",
"def _graph_fn_entropy(distribution):\n return distribution.entropy()",
"def entropy_fn(args: StepFunctionArgs) -> SingleScorePerStepTensor:\n logits = args.attribution_model.output2logits(args.forward_output)\n out = torch.distributions.Categorical(logits=logits).entropy()\n if out.ndim > 1:\n out = out.squeeze(-1)\n return out",
"def get_entropy(*labels):\n entropies = [] #list of entropy values from each subset\n total = 0 #total number of datapoints\n for subset in labels:\n n = len(subset)\n total += n\n counts = np.unique(subset, return_counts=True)[1] #frequency of unique values\n entropy = np.sum([-(i/n) * np.log2(i/n) for i in counts]) #subset entropy calcuation\n entropies.append((entropy, n))\n return np.sum([(n/total) * ent for n, ent in iter(entropies)])",
"def calculate_entropy(dataset) :\n\n num_entries = len(dataset)\n label_counts = {}\n for vector in dataset :\n # the label is at the last index of the data set\n current_label = vector[-1]\n if current_label not in label_counts :\n label_counts[current_label] = 0\n label_counts[current_label] += 1\n # Calculate the entropy\n entropy = 0.0\n for label in label_counts :\n # Calculate probability of each label within the dataset\n prob_of_label = label_counts[label]/num_entries\n # Since the entropy is the negative of the sum of all probability,\n # simply subtract it\n entropy -= prob_of_label * log(prob_of_label, 2)\n return entropy",
"def entropy(rows, resCol = None):\n from math import log\n if not resCol: #create the dictionary of counts for each class using pure python\n total = len(rows)\n counts = __uniqueCounts(rows)\n else: #Create the dictionary of counts for each class using pandas.\n assert 'index' in dir(rows)\n total = len(rows.index)\n counts = __uniqueCountsPandas(rows, resCol)\n log2 = lambda x:log(x)/log(2) #Create a function to take log-base 2 of a number\n ent = 0 #Initialize the entropy at zero\n #Implement the formula for entropy, using log-base2\n fracs = [float(x)/total for x in counts.values()]\n for x in fracs:\n ent += -x*log2(x)\n return ent"
]
| [
"0.6836324",
"0.6772727",
"0.6772727",
"0.66967833",
"0.6569912",
"0.6569912",
"0.65580887",
"0.6530346",
"0.6457403",
"0.6431625",
"0.6368553",
"0.63630193",
"0.6301875",
"0.6297013",
"0.62775517",
"0.62715507",
"0.62592775",
"0.62461853",
"0.6226261",
"0.6225333",
"0.6182547",
"0.6180702",
"0.61524254",
"0.6145003",
"0.61390024",
"0.611722",
"0.6115556",
"0.61061287",
"0.6097117",
"0.60946643"
]
| 0.78461814 | 0 |
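For orientation, the positive document above computes the weighted (conditional) entropy of the class labels within each category of a feature. A minimal standalone sketch of that idea, assuming NumPy arrays for the feature column and the labels; the function and variable names here are illustrative, not taken from the record:

import numpy as np
from math import log2

def partition_entropy(feature, labels):
    # Weighted entropy of the label distribution within each category of `feature`.
    total = len(feature)
    entropy = 0.0
    for category in np.unique(feature):
        mask = feature == category
        n_cat = int(mask.sum())
        for c in np.unique(labels):
            n_cat_class = int(np.logical_and(mask, labels == c).sum())
            if n_cat_class == 0:
                continue
            p = n_cat_class / n_cat
            entropy += (n_cat / total) * (-p * log2(p))
    return entropy

# A binary feature that perfectly separates the two classes leaves zero entropy.
print(partition_entropy(np.array([0, 0, 1, 1]), np.array(["a", "a", "b", "b"])))  # 0.0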
Compute the information gain from splitting a node on a given feature. | def _information_gain(self, feature, node):
return node.entropy() - self._entropy(feature, node) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def entropy_gain(node,attribute):\n data_subset1 = filter_data(node.data,node.ancestors)\n data_counts = list(Counter(data_subset1['Class']).values())\n base_entropy = entropy(data_counts,base=2)\n num_values = len(data_subset1)\n entropy_sum = 0\n \n for value in [0,1]:\n data_subset2 = filter_data(node.data, node.ancestors + [(attribute,value)])\n subset_counts = list(Counter(data_subset2['Class']).values())\n entropy_sum += (len(data_subset2)/num_values) * entropy(subset_counts,base=2)\n \n return base_entropy - entropy_sum",
"def gain(self, target_attr, attr, debug=False):\n current_entropy = self.entropy(target_attr)[0]\n # print\n # print attr\n\n gain = current_entropy - self.remainder(target_attr=target_attr, attr=attr)\n if debug is True:\n print attr, \": \", gain\n return gain",
"def information_gain(features, attribute_index, targets):\r\n\r\n possible_feature_values = [0,1]\r\n \r\n possible_classifications = [0,1]\r\n \r\n feature = features[:,attribute_index]\r\n \r\n \r\n number_of_samples = len(feature)\r\n \r\n import math\r\n \r\n \r\n #current_entropy = np.sum([-(len(targets[targets==possible_classification])/number_of_samples)*math.log(len(targets[targets==possible_classification])/number_of_samples, 2) for possible_classification in possible_classifications])\r\n \r\n terms_to_be_summed_for_current_entropy = []\r\n \r\n for classification in possible_classifications:\r\n \r\n number_of_elements_with_this_classification = len(targets[targets==classification])\r\n \r\n p_for_this_classification = number_of_elements_with_this_classification/len(targets)\r\n \r\n if p_for_this_classification != 0:\r\n terms_to_be_summed_for_current_entropy.append(-p_for_this_classification*math.log(p_for_this_classification,2))\r\n else:\r\n terms_to_be_summed_for_current_entropy.append(0)\r\n \r\n current_entropy = np.sum(terms_to_be_summed_for_current_entropy)\r\n \r\n \r\n \r\n terms_to_be_summed_for_weighted_entropy = []\r\n \r\n for possible_value in possible_feature_values:\r\n \r\n targets_split_by_feature_value = targets[feature.flatten() == possible_value]\r\n \r\n if len(targets_split_by_feature_value) != 0:\r\n \r\n \r\n weight_of_feature_value = len(targets_split_by_feature_value)/len(targets)\r\n \r\n terms_for_entropy_within_subset = []\r\n \r\n for classification in possible_classifications:\r\n \r\n number_of_subset_elements_with_this_classification = len(targets_split_by_feature_value[targets_split_by_feature_value==classification])\r\n \r\n p_in_subset_for_this_classification = number_of_subset_elements_with_this_classification/len(targets_split_by_feature_value)\r\n \r\n if p_in_subset_for_this_classification != 0:\r\n terms_for_entropy_within_subset.append(-p_in_subset_for_this_classification*math.log(p_in_subset_for_this_classification,2))\r\n else:\r\n terms_for_entropy_within_subset.append(0)\r\n \r\n entropy_within_subset = np.sum(terms_for_entropy_within_subset)\r\n \r\n terms_to_be_summed_for_weighted_entropy.append(weight_of_feature_value*entropy_within_subset)\r\n \r\n weighted_entropy = np.sum(terms_to_be_summed_for_weighted_entropy)\r\n \r\n \r\n #current_entropy = np.sum(terms_to_be_summed_for_current_entropy)\r\n \r\n #weighted_entropy = np.sum([(len(feature[feature==possible_value])/number_of_samples)*(len(targets[feature==possible_value][targets[feature==possible_value]==possible_classification])/len(targets[feature==possible_value]))*math.log((len(targets[feature==possible_value][targets[feature==possible_value]==possible_classification])/len(targets[feature==possible_value])), 2) for possible_classification in possible_classifications for possible_value in possible_feature_values])\r\n\r\n information_gain = current_entropy - weighted_entropy \r\n \r\n return information_gain",
"def information_gain(f1, f2):\n\n ig = ee.entropyd(f1) - conditional_entropy(f1, f2)\n return ig",
"def gain(Y, X):\n return entropy(Y) - cEntropy(Y,X)",
"def get_information_gain(self, word, documents):\n gain = self.get_entropy(documents)\n with_word, without_word = self.get_split_data(word, documents)\n gain -= self.get_entropy(with_word) * len(with_word) / len(documents)\n gain -= self.get_entropy(without_word) * len(without_word) / len(documents)\n return gain",
"def gain(self):\r\n \r\n for node in self.G.nodes():\r\n # Get number of nodes connected on same and other partition\r\n movForce, retForce = self.nodeForces(node)\r\n nodeGain = movForce-retForce\r\n\r\n #Fill list of Nodes with gains\r\n self.gainOrder.append((nodeGain,node))\r\n \r\n self.gainOrder.sort(key=lambda r: r[0])\r\n self.keys = [r[1] for r in self.gainOrder]",
"def infoGain(self,attr, data, target_attr):\n remainder = 0\n p = 0\n ent = 0\n for ele in target_attr:\n if ele == 1:\n p +=1\n \n q = p / (len(target_attr)) \n if 0 < q < 1:\n ent = -((q * math.log2(q)) + ((1-q) * math.log2(1-q))) \n \n unique = list(pd.unique(self.data_set[attr])) \n l = self.data_set[attr]\n for ele in unique:\n pk =0\n nk=0\n j=0\n for i in range (0, len(data)): #len (l) changed to len(data)\n j = j+1\n ele1 = l[i]\n if ele1 == ele:\n out = target_attr[i]\n if out == 1:\n pk += 1\n else:\n nk += 1\n if (pk+nk) != 0:\n q1 = pk / (pk +nk)\n if 0 < q1 < 1:\n e = -((q1 * math.log2(q1)) + ((1-q1) * math.log2(1-q1)))\n remainder += (pk + nk)/(len(target_attr)) * e\n \n return (ent - remainder, attr)",
"def front_column_model_p_gain():",
"def __gain(self, data_set, split_feature, target_feature):\n frequencies = self.__calculate_frequency(data_set, split_feature)\n data_entropy = 0.0\n\n # Calculate the entropy of the data.\n for value, frequency in frequencies.items():\n probability = frequency / sum(frequencies.values())\n data_subset = data_set[data_set[split_feature] == value]\n data_entropy += probability * self.__entropy(data_subset, target_feature)\n\n return self.__entropy(data_set, target_feature) - data_entropy",
"def info_gain(self, left, right, current_uncertainty):\n p = float(len(left)) / (len(left) + len(right))\n return current_uncertainty - p * self.gini(left) - (1 - p) * self.gini(right)",
"def gain(self):\n return self[1]",
"def informationGain(data, class_label, attribute, indices=None):\n\tsubset = data[:] if indices == None else data.loc[indices]\n\t\n\tsublist = subset[attribute].tolist()\n\tvalues = list(set(sublist))\n\tinfoGain = entropyOnSubset(subset, class_label)\n\t\n\t#print (sublist)\n\t\n\tfor i in values:\n\t\tindex = list(subset.index[subset[attribute] == i])\n\t\tinfoGain -= sublist.count(i)/len(sublist) * entropyOnSubset(subset, class_label, index)\n\n\t\n\treturn infoGain",
"def impurity_gain(node, attribute):\n data_subset1 = filter_data(node.data,node.ancestors)\n data_counts = list(Counter(data_subset1['Class']).values())\n base_impurity = impurity(data_counts)\n num_values = len(data_subset1)\n impurity_sum = 0\n \n for value in [0,1]:\n data_subset2 = filter_data(node.data, node.ancestors + [(attribute,value)])\n subset_counts = list(Counter(data_subset2['Class']).values())\n impurity_sum += (len(data_subset2)/num_values) * impurity(subset_counts)\n \n return base_impurity - impurity_sum",
"def getInfoGain(self, data, index):\n # count for True Positive, True Negitive, False Positive and False Negitive\n TP = 0\n TN = 0\n FP = 0\n FN = 0\n\n for record in data:\n attrValue = record[index]\n clsValue = record[-1]\n\n if attrValue == 'True':\n if clsValue == 'en':\n TP += 1\n else:\n TN += 1\n elif attrValue == 'False':\n if clsValue == 'en':\n FP += 1\n else:\n FN += 1\n\n # calculate class entropy and entropy of each value of an attribute\n E_init = self.getEntropy(self.enClass, self.nlClass)\n E_attr = self.getAttrEntropy(TP, TN, FP, FN)\n\n infoGain = E_init - E_attr\n\n self.entropy = E_init\n return infoGain",
"def info_gain(left, right, current_uncertainty):\n p = float(len(left)) / (len(left) + len(right))\n return current_uncertainty - p * gini(left) - (1 - p) * gini(right)",
"def informationGain2(data, attribute):\n \n split_data = splitBy(data, attribute) \n weighted_entropies = 0\n \n for set in split_data:\n weighted_entropies += len(set) / len(data) * entropy2(set) \n \n columnIG = entropy2(data) - weighted_entropies\n \n return columnIG",
"def get_gain(self, examples, tags, feature):\n initial_entropy = self.calculate_entropy(tags)\n relative_entropy_per_feature = []\n feature_index = self.get_feature_index(feature)\n for possible_value in self.feature_domain_dict[feature]:\n examples_and_tags_vi = [(example, tag) for example, tag in zip(examples, tags)\n if example[feature_index] == possible_value]\n tags_vi = [tag for example, tag in examples_and_tags_vi]\n entropy_vi = self.calculate_entropy(tags_vi)\n if not examples:\n pass\n relative_entropy = (float(len(examples_and_tags_vi)) / len(examples)) * entropy_vi\n relative_entropy_per_feature.append(relative_entropy)\n\n return initial_entropy - sum(relative_entropy_per_feature)",
"def calc_information_gain(data, split_name, target_name):\r\n # Calculate the original entropy\r\n original_entropy = calc_entropy(data[target_name])\r\n \r\n # Find the median of the column we're splitting\r\n column = data[split_name]\r\n median = column.median()\r\n \r\n # Make two subsets of the data, based on the median\r\n left_split = data[column <= median]\r\n right_split = data[column > median]\r\n \r\n # Loop through the splits and calculate the subset entropies\r\n to_subtract = 0\r\n for subset in [left_split, right_split]:\r\n prob = (subset.shape[0] / data.shape[0]) \r\n to_subtract += prob * calc_entropy(subset[target_name])\r\n \r\n # Return information gain\r\n return original_entropy - to_subtract",
"def calc_information_gain(data, split_name, target_name):\n # Calculate the original entropy\n original_entropy = calc_entropy(data[target_name])\n \n # Find the median of the column we're splitting\n column = data[split_name]\n median = column.median()\n \n # Make two subsets of the data, based on the median\n left_split = data[column <= median]\n right_split = data[column > median]\n \n # Loop through the splits and calculate the subset entropies\n to_subtract = 0\n for subset in [left_split, right_split]:\n prob = (subset.shape[0] / data.shape[0]) \n to_subtract += prob * calc_entropy(subset[target_name])\n \n # Return information gain\n return original_entropy - to_subtract",
"def dbgain(self, pt_1, pt_2):\n raise NotImplementedError",
"def return_infogain(instances, labels):\n # some initial calculations\n infogain = dict.fromkeys(range(instances.shape[1]), 0)\n cnt = Counts(instances, labels)\n len_instances = instances.shape[0]\n feature_frequency = cnt.count_document_frequency()\n label_frequency = cnt.count_label_frequency()\n label_feature_frequency = cnt.count_label_feature_frequency()\n label_probability = [(label_frequency[label] / len_instances) for label in label_frequency.keys()]\n initial_entropy = -sum([prob * math.log(prob, 2) for prob in label_probability if prob != 0])\n # assign infogain values to each feature\n for feature in feature_frequency.keys():\n # calculate positive entropy\n frequency = feature_frequency[feature]\n if frequency > 0:\n feature_probability = frequency / len_instances\n positive_label_probabilities = []\n for label in labels:\n if label_feature_frequency[label][feature] > 0:\n positive_label_probabilities.append(label_feature_frequency[label][feature] / frequency)\n else:\n positive_label_probabilities.append(0)\n positive_entropy = -sum([prob * math.log(prob, 2) for prob in positive_label_probabilities if prob != 0])\n else:\n positive_entropy = 0\n # calculate negative entropy\n inverse_frequency = len_instances - feature_frequency[feature]\n negative_probability = inverse_frequency / len_instances\n negative_label_probabilities = [((label_frequency[label] - label_feature_frequency[label][feature]) / inverse_frequency) for label in labels]\n negative_entropy = -sum([prob * math.log(prob, 2) for prob in negative_label_probabilities if prob != 0])\n # based on positive and negative entropy, calculate final entropy\n final_entropy = positive_entropy - negative_entropy\n infogain[feature] = initial_entropy - final_entropy\n return infogain",
"def test_information_gain(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 1)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1]), set([2])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1, 2:2})\n self.assertAlmostEqual(self.decision_tree.get_root_node().node_split.criterion_value,\n 2. * -0.3 * math.log2(0.3) - 0.4 * math.log2(0.4))",
"def _gain(self):\n return None",
"def gain(self) -> int:\n return self._gain",
"def marginalGain(self, X, element):\n\n\t\tpass",
"def __calc_info_gain(self, parent, left_child, right_child):\n parent_entropy = self.__entropy(parent[:, -1])\n\n num_rows_left = left_child.shape[0]\n num_rows_right = right_child.shape[0]\n num_rows_total = num_rows_left + num_rows_right\n\n # don't calculate if any of the children rows are empty\n if num_rows_left == 0 or num_rows_right == 0:\n return 0\n\n # calculate entropy of the children data\n left_child_entropy = self.__entropy(left_child[:, -1])\n right_child_entropy = self.__entropy(right_child[:, -1])\n left_child_contribution = (num_rows_left/num_rows_total)*left_child_entropy\n right_child_contribution = (num_rows_right/num_rows_total)*right_child_entropy\n new_entropy = left_child_contribution + right_child_contribution\n\n info_gain = parent_entropy - new_entropy\n return info_gain",
"def _information_gain(self, y, X_column, split_thersh):\n # parent E\n parent_entropy = entropy(y)\n # generate split\n left_idxs, right_idxs = self._split(X_column, split_thersh)\n\n if len(left_idxs) == 0 or len(right_idxs) == 0:\n return 0\n # weighted avg child E\n n = len(y)\n n_left_samples, n_right_samples = len(left_idxs), len(right_idxs)\n entropy_left, entropy_right = entropy(y[left_idxs]), entropy(y[right_idxs])\n child_entropy = (n_left_samples/n) * entropy_left + (n_right_samples/n) * entropy_right\n\n # return IG\n ig = parent_entropy - child_entropy\n return ig",
"def gradient_descent_gain(self):\n return self._gradient_descent_gain",
"def update(self, state, action, nextState, reward):\n \"\"\"Description:\n Use second equation in slide 71 of MDP\n Adjest weight of active features depend on tranistion \n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n feat = self.featExtractor.getFeatures(state, action)\n\n # if weight is empty, then weight will need to initial to 1 for all features\n # According to which Extractor user choose, weight counter will have equal number of keys.\n if len(self.weight) == 0:\n feat = self.featExtractor.getFeatures(state, action)\n self.weight.incrementAll(feat.keys(), 1)\n \n maxQns = self.getValue(nextState)\n if maxQns == None:\n maxQns = 0\n Qsa = self.getQValue(state, action)\n difference = ( reward + self.discountRate * maxQns ) - Qsa\n \n for key in self.weight.keys():\n self.weight[key] += (self.alpha * difference * feat[key])\n \n \n \"\"\" END CODE \"\"\""
]
| [
"0.6878586",
"0.6690411",
"0.65747595",
"0.65485966",
"0.64689696",
"0.6455205",
"0.6325612",
"0.62913805",
"0.62239647",
"0.61959743",
"0.61940885",
"0.61387944",
"0.6121174",
"0.61054665",
"0.60826224",
"0.6063609",
"0.5996675",
"0.59405375",
"0.59325457",
"0.59104127",
"0.5890621",
"0.5888433",
"0.58683544",
"0.58670557",
"0.5820494",
"0.5788794",
"0.57831883",
"0.5735222",
"0.5724387",
"0.57196456"
]
| 0.84394205 | 0 |
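The one-line document above expresses information gain as the node's entropy minus the conditional entropy of the labels given the feature. A self-contained, hedged sketch of that relationship on a toy example; the names and sample data are illustrative only:

import numpy as np

def label_entropy(labels):
    # Shannon entropy (in bits) of a label vector.
    _, counts = np.unique(labels, return_counts=True)
    p = counts / counts.sum()
    return float(-(p * np.log2(p)).sum())

def information_gain(feature, labels):
    # Parent entropy minus the weighted entropy of the labels inside each feature category.
    total = len(labels)
    weighted = 0.0
    for category in np.unique(feature):
        mask = feature == category
        weighted += (mask.sum() / total) * label_entropy(labels[mask])
    return label_entropy(labels) - weighted

# A feature that separates the two classes perfectly recovers the full 1 bit of parent entropy.
print(information_gain(np.array([0, 0, 1, 1]), np.array(["a", "a", "b", "b"])))  # 1.0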
get the mean, std and dominant frequency for a single time series | def get_time_series_stats(time_series):
return pd.Series([np.mean(time_series), np.std(time_series), get_frequency(time_series)]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_analyze_time_series_std():\n\n statistics = analyze_time_series(np.ones(10))\n\n assert statistics.n_total_points == 10\n assert statistics.n_uncorrelated_points == 1\n assert np.isclose(statistics.statistical_inefficiency, 10.0)\n assert statistics.equilibration_index == 0",
"def SeriesStandard(series):\n mean = np.mean(series)\n variance = np.var(series)\n series = (series-mean)/variance\n return series",
"def mean_STD(self,counter):\n \n \n pass",
"def get_frequencies(self):\n num_seconds = float(self.timestamps[-2] - self.timestamps[0]) / float(1000)\n samples_per_second = len(self.data) / num_seconds\n num_samples = len(self.data)\n oscilations_per_sample = [float(oscilations) / num_samples for oscilations in range(0, num_samples)]\n return [ops * samples_per_second for ops in oscilations_per_sample]",
"def get_series(self,year):\n year_dates, year_dc = self.year_data(year)\n mean_dc = []\n std_dc = []\n for date in year_dates:\n day = date.day\n month = date.month\n idx = [i for i in range(self.dates.shape[0]) \\\n if (self.dates[i].month == month and \\\n self.dates[i].day == day)]\n mean_dc.append(np.ma.mean(self.dc[idx]))\n std_dc.append(np.ma.std(self.dc[idx]))\n\n return np.array(mean_dc), np.array(std_dc)",
"def std(self, data):\n ts_ = self.ts(data)\n if 'year' not in ts_.coords:\n return ts_\n return ts_.std('year')",
"def freq():",
"def freqvals(t):\n N = len(t)\n T = t[-1] - t[0]\n dt = T/N\n nyquist = 1/(2.0*dt)\n lowfreq = 1/T\n return (N, T, dt, nyquist, lowfreq)",
"def gen_freqs(ndata, dt):\n dn = 2 # if you like the central frequency to be negative, change dn to 1\n return 1/(ndata*dt) * np.hstack((np.arange(0, (ndata+dn)//2),\n np.arange(-(ndata+dn)//2+dn, 0)))",
"def compute_mean_std(x):\n x = np.hstack(x)\n return (np.mean(x).astype(np.float32),\n np.std(x).astype(np.float32))",
"def get_stats(arr, str=None):\n mean = np.mean(arr)\n std = np.std(arr)\n if str:\n print 'Measuring', str\n print 'STATS: mean=', mean, ' stdev=', std\n return mean, std",
"def mean(series):\n return fsum(series) / len(series)",
"def base_summary(series: pd.Series) -> dict:\n summary = {\n \"frequencies\": series.value_counts().to_dict(),\n \"n_records\": series.shape[0],\n \"memory_size\": series.memory_usage(index=True, deep=True),\n \"dtype\": series.dtype,\n \"types\": series.map(lambda x: type(x).__name__).value_counts().to_dict(),\n }\n\n return summary",
"def ts(self):\n idx = np.mean(np.diff(self.index)) if len(self.index) > 1 else 0\n col = np.mean(np.diff(self.columns)) if len(self.columns) > 1 else 0\n return idx, col",
"def Std(data):\n return data.std()",
"def summaryone(x):\n print 'mean and std are ',np.mean(x), np.std(x)\n print 'max and min are ',np.max(x), np.min(x)\n print 'the range is ',np.max(x)-np.min(x)",
"def get_frequency(time_series):\n if len(time_series.index) == 0:\n return 0\n ft = np.fft.rfft(time_series)\n return np.fft.fftfreq(len(time_series))[np.argmax(abs(ft))]",
"def get_mean_and_std(arr):\r\n return np.round(np.mean(arr), 3), np.round(np.std(arr), 3)",
"def stdev(items):\n return Series.std(Series(items))",
"def frequencies(self):\r\n\r\n # Get the sampling rate from the seed time-series:\r\n self.method['Fs'] = self.method.get('Fs', self.seed.sampling_rate)\r\n NFFT = self.method.get('NFFT', 64)\r\n Fs = self.method.get('Fs')\r\n freqs = tsu.get_freqs(Fs, NFFT)\r\n lb_idx, ub_idx = tsu.get_bounds(freqs, self.lb, self.ub)\r\n\r\n return freqs[lb_idx:ub_idx]",
"def frequencies(self):\r\n\r\n #XXX Use NFFT in the method in order to calculate these, without having\r\n #to calculate the spectrum:\r\n f, spectrum = tsa.get_spectra(self.input.data, method=self.method)\r\n return f",
"def measure_rms(signal, verbose=True, *args, **kwargs):\n rms = np.std(signal.td)\n if verbose:\n print(\"\\nSignal RMS = %f\"%rms)\n print(\"\\tSignal name = %s\"%signal.name)\n return rms",
"def MeanAndStandardDeviation(data):\n n = len(data)\n if n == 0:\n return 0.0, 0.0\n mean = float(sum(data)) / n\n variance = sum([(element - mean)**2 for element in data]) / n\n return mean, math.sqrt(variance)",
"def statistics_from_array(x: numpy.ndarray):\n try:\n return x.mean(), x.std(), x.max(), x.min()\n except AttributeError:\n return numpy.nan, numpy.nan, numpy.nan, numpy.nan",
"def seasonal_means(t, y, edges=None, hard=False):\n ts, ys = seasonal_series(t, y, edges=edges, hard=hard)\n t_means = [t.jyear.mean() for t in ts]\n t_means = astropy.time.Time(t_means, format='jyear', scale=t.scale)\n y_means = np.array([y.mean() for y in ys])\n y_std = np.array([y.std() for y in ys])\n y_N = np.array([y.size for y in ys])\n return t_means, y_means, y_std, y_N",
"def annualized_std(self, series: pd.Series = None):\n\n if series is None:\n series = self.trading_history[\"total_assets\"]\n\n log_differential = np.log(series / series.shift(-1))\n hourly_std = np.std(log_differential)\n annualized_std = hourly_std * np.sqrt(365)\n return annualized_std",
"def mean_function(x):\n return np.sin(12*x) + 0.66*np.cos(25*x) # original frequencies: 12, 25",
"def _get_mean_and_log_std(self, x):\n mean = self._mean_module(x)\n return mean, self._log_std",
"def mean_std_calc(dataloader):\n mean = 0\n std = 0\n samples = 0\n for data, _, _ in dataloader:\n batch_samples = data.size(0)\n data = data.view(batch_samples, data.size(1), -1)\n mean += data.mean(2).sum(0)\n std += data.std(2).sum(0)\n samples += batch_samples\n\n return (mean / samples),(std / samples)",
"def meanstd(self):\n\t\tmean = [125.3, 123.0, 113.9] # R,G,B\n\t\tstddev = [63.0, 62.1, 66.7] # R,G,B\n\t\treturn [mean, stddev]"
]
| [
"0.63931805",
"0.6212582",
"0.61890626",
"0.61378825",
"0.612101",
"0.60711044",
"0.6019158",
"0.58699524",
"0.58329415",
"0.58328384",
"0.58283913",
"0.58263576",
"0.58220524",
"0.58168924",
"0.58132654",
"0.5803837",
"0.5793796",
"0.5787034",
"0.57626736",
"0.5731403",
"0.5707523",
"0.5703039",
"0.56858927",
"0.5684329",
"0.56631905",
"0.5653285",
"0.5648981",
"0.56397575",
"0.56379557",
"0.55908084"
]
| 0.72206175 | 0 |
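The positive document above bundles the mean, the standard deviation, and a frequency estimate taken from the peak of the FFT magnitude into a pandas Series. A hedged, self-contained sketch of that kind of helper, using np.fft.rfftfreq for the frequency axis; the function name and the sample signal are assumptions for illustration, not the record's own implementation:

import numpy as np
import pandas as pd

def time_series_stats(series):
    # Mean, standard deviation, and dominant frequency (cycles per sample) of a 1-D series.
    values = np.asarray(series, dtype=float)
    spectrum = np.fft.rfft(values - values.mean())  # drop the DC component before peak-picking
    freqs = np.fft.rfftfreq(len(values))            # frequency axis in cycles per sample
    dominant = freqs[np.argmax(np.abs(spectrum))]
    return pd.Series([values.mean(), values.std(), dominant],
                     index=["mean", "std", "dominant_freq"])

# A sine wave with a 20-sample period should report a dominant frequency of 0.05 cycles/sample.
t = np.arange(200)
print(time_series_stats(np.sin(2 * np.pi * t / 20)))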
break a time series into chunks and compute time series stats for each chunk | def break_and_get_stats(full_series, breaks):
n = len(breaks)
return pd.concat([get_time_series_stats(full_series[breaks[i]:breaks[i+1]]) for i in range(n-1)], axis=1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_chunk_stats(self, args):\n iStart = args[0]\n iEnd = args[1]\n [start_date, end_date] = parse_start_end_date(None, None)\n print('Chunk %d - %d' %(iStart, iEnd)) # FIXME: TEST ONLY\n chunk_stats = self.components[iStart:iEnd]\n sym_list = self.components[iStart:iEnd].index.tolist()\n\n try:\n pquotes = web.DataReader(sym_list, 'yahoo', start_date, end_date)\n except:\n print('Chunk(%d - %d): failed to download history quotes from Yahoo Finance, try Google Finance now...' %(iStart, iEnd))\n try:\n pquotes = web.DataReader(sym_list, 'google', start_date, end_date)\n except:\n print('!!!Error: chunk(%d - %d): failed to download history quotes!!!' %(iStart, iEnd))\n return DataFrame()\n\n # items - symbols; major_axis - time; minor_axis - Open to Adj Close\n pquotes = pquotes.transpose(2,1,0)\n if len(pquotes.items) == 0:\n print('!!!Error: invalid history quotes for chunk %d - %d.' %(iStart, iEnd))\n return DataFrame()\n\n print('Total # of symbols in this chunk: %d' %len(pquotes.items)) # FIXME: TEST ONLY\n add_stats = self._get_compo_stats(pquotes)\n chunk_stats = chunk_stats.join(add_stats)\n return chunk_stats",
"def time_stats(df):",
"def get_timeseries_data(self, table, datetime_start, datetime_end, timechunk=datetime.timedelta(hours=1)):\n table_schema = LMTDB_TABLES.get(table.upper())\n if table_schema is None:\n raise KeyError(\"Table '%s' is not valid\" % table)\n else:\n result_columns = ['TIMESTAMP'] + table_schema['columns']\n format_dict = {\n 'schema': ', '.join(result_columns).replace(\"TS_ID,\", \"TIMESTAMP_INFO.TS_ID,\"),\n 'table': table,\n }\n\n index0 = len(self.saved_results.get(table, {'rows': []})['rows'])\n chunk_start = datetime_start\n while chunk_start < datetime_end:\n if timechunk is None:\n chunk_end = datetime_end\n else:\n chunk_end = chunk_start + timechunk\n if chunk_end > datetime_end:\n chunk_end = datetime_end\n start_stamp = chunk_start.strftime(\"%Y-%m-%d %H:%M:%S\")\n end_stamp = chunk_end.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n query_str = \"\"\"SELECT\n %(schema)s\n FROM\n %(table)s\n INNER JOIN TIMESTAMP_INFO ON TIMESTAMP_INFO.TS_ID = %(table)s.TS_ID\n WHERE\n TIMESTAMP_INFO.TIMESTAMP >= %%(ps)s\n AND TIMESTAMP_INFO.TIMESTAMP < %%(ps)s\n \"\"\" % format_dict\n self.query(query_str, (start_stamp, end_stamp), table=table, table_schema=table_schema)\n if timechunk is not None:\n chunk_start += timechunk\n\n return self.saved_results[table]['rows'][index0:], result_columns",
"def _extract_mql_timeseries_data(response):\n lkeys = response['timeSeriesDescriptor'].get('labelDescriptors', [])\n # (fixme): Is there a better way to fetch and extract this data?\n for result in response.get('timeSeriesData', []):\n data = {}\n lvalues = result.get('labelValues', [])\n data = {\n key['key']: val.get('stringValue', '')\n for key, val in zip(lkeys, lvalues)\n }\n point_data = result.get('pointData', [])\n if not point_data:\n continue\n\n # Returns all points.\n for point in point_data:\n values = point.get('values', [])\n if not values:\n continue\n\n data.update(point['timeInterval'])\n value_types = []\n value_type_values = []\n for value in values:\n for key, val in value.items():\n value_types.append(key)\n value_type_values.append(val)\n data['metric_value_types'] = value_types\n data['metric_values'] = value_type_values\n yield data",
"def get_time_series_stats(time_series):\n return pd.Series([np.mean(time_series), np.std(time_series), get_frequency(time_series)])",
"def get_stats(self, save=True, chunk=100):\n [start_date, end_date] = parse_start_end_date(None, None)\n self.components = DataFrame() # reset data\n self.get_compo_list()\n if self.sym.quotes.empty:\n self.sym.get_quotes()\n\n if len(self.components) <= chunk:\n args = (0,len(self.components))\n self.components = self._get_chunk_stats(args)\n return self.components\n\n # multiprocessing - process stocks chunk-by-chunk\n num_chunks = int(np.ceil(len(self.components)/chunk))\n num_procs = min(mp.cpu_count(), num_chunks)\n pool = mp.Pool(processes=num_procs)\n steps = np.round(np.linspace(0, len(self.components), num_chunks)).astype(int)\n args = [(steps[i-1], steps[i]-1) for i in range(1,len(steps))]\n stats = pool.map(self._get_chunk_stats, args)\n\n chunk_stats = DataFrame()\n for s in stats:\n chunk_stats = chunk_stats.append(s)\n self.components = chunk_stats\n\n # Replace inf by NaN\n self.components.replace([np.inf, -np.inf], np.nan, inplace=True)\n\n if save and not self.components.empty:\n self.save_data()\n return self.components",
"def process_chunk(self, time_chunk, df_data):\n # get relevant subset of df_data\n df_data_sub = df_data[(df_data['time']>=time_chunk[0]) & (df_data['time']<time_chunk[-1])]\n if df_data_sub.shape[0] < 1:\n print(f\"no data within {time_chunk[0]} and {time_chunk[-1]}\")\n return pd.DataFrame()\n # get the latest cumsum for each grid cell before the start of the time_chunk\n starting_counts = df_data[(df_data['time']<time_chunk[0])].reset_index().groupby('grid_coord').tail(1).set_index('grid_coord')[['cum_value']]\n assert len(starting_counts.index.values) == len(set(starting_counts.index.values))\n # generate empty df_result to populate\n df_result = self.create_empty_df_result(time_chunk[0], time_chunk[-1])\n # update the counts for all the grid cells in starting_counts\n for grid_coord in self.cells:\n self.cells[grid_coord].set_all_counts_zero()\n for grid_coord in starting_counts.index:\n count = starting_counts.loc[grid_coord, 'cum_value']\n neighbors = self.find_neighbors(eval(grid_coord))\n for dist in neighbors:\n for coord in neighbors[dist]:\n self.cells[coord].set_count_at_dist(count, dist)\n # set current_time to start of time chunk\n beginning = str(iso8601.parse_date(min(df_data_sub['time'])).replace(hour=0, minute=0, second=0))\n # print(f\"beginning = {beginning}\")\n for coord in self.cells:\n self.cells[coord].set_current_time(beginning)\n # start processing df_data!!\n current_date = self.get_date_from_string(min(df_data_sub['time']))\n current_time = iso8601.parse_date(min(df_data_sub['time'])).replace(hour=23, minute=59, second=59)\n updated_grid_coords = set()\n # iterate through df_data\n # for index, row in tqdm(df_data.iterrows(), total=df_data.shape[0]):\n for index, row in df_data_sub.iterrows():\n # if new day, then fill in df_result for previous date\n if current_date != self.get_date_from_string(row['time']):\n # print(f\"at least one day passed from {current_date} to {self.get_date_from_string(row['time'])}\")\n # if multiple days passed from the current_date, then need\n # to process each grid cell for each date\n # first get the dates between the two dates\n end_time = iso8601.parse_date(row['time']).replace(hour=23, minute=59, second=59)\n dates = pd.date_range(current_time, end_time, freq='d')\n dates = [e.isoformat() for e in dates]\n # iterate through dates before the last one and process all the\n # grid cells, keep track of onces that don't have all zero values\n for date in dates[:-1]:\n for coord in self.cells:\n min_dist = self.cells[coord].get_min_dist()\n if min_dist is not None:\n self.cells[coord].process_interval('none', date, min_dist)\n if self.cells[coord].values_not_zero():\n updated_grid_coords.add(coord)\n # processing end of day so set current time to be start of new day\n self.cells[coord].set_current_time(str((iso8601.parse_date(date) + timedelta(days=1)).replace(hour=0, minute=0, second=0)))\n # write in data for that day\n for coord in updated_grid_coords:\n # get dictionary of values from grid cell\n cell_data = self.cells[coord].get_data()\n df_result.loc[pd.IndexSlice[(self.get_date_from_string(date), str(coord))]] = pd.Series(cell_data)\n updated_grid_coords = set()\n # set current_date and current_time\n current_date = str(end_time.date())\n current_time = end_time\n # find out which grid cell event took place in\n grid_coord = self.locate_point((row['lat'], row['lng']))\n # get coords of neighborhood cells\n neighbors = self.find_neighbors(grid_coord)\n # do different processing depending on time_type\n time_type = 
row['time_type']\n time = row['time']\n if time_type == 'trip':\n self.cells[grid_coord].increment_num_trips()\n # a trip occurred at grid_coord, we need to estimate where the\n # demand originated from. For each neighbor, get the probability\n # a user would choose a scooter where the trip happened. Prob\n # would be 0 if there is a closer scooter (greedy perspective)\n probs = {}\n prob_sum = 0\n for dist in neighbors:\n for coord in neighbors[dist]:\n prob = self.cells[coord].get_trip_prob(dist)\n prob_sum += prob\n probs[coord] = prob\n # normalize probs\n if prob_sum > 0:\n norm_probs = {k: v / prob_sum for k, v in probs.items()}\n assert int(round(sum(norm_probs.values(), 0.0))) == 1\n else:\n norm_probs = probs\n # updates self.demand_probs for each grid cell\n for coord in norm_probs:\n self.cells[coord].add_to_demand_prob(norm_probs[coord])\n else: # if time_type is \"start_time\" or \"end_time\", then availability changed\n for dist in neighbors:\n for coord in neighbors[dist]:\n self.cells[coord].process_interval(time_type, time, dist)\n\n # write in leftover data\n updated_grid_coords = set()\n final_time = (iso8601.parse_date(max(df_data_sub['time'])) + timedelta(days=1)).replace(hour=23, minute=59, second=59)\n dates = pd.date_range(current_time, final_time, freq='d')\n dates = [e.isoformat() for e in dates]\n # iterate through dates before the last one and process all the\n # grid cells, keep track of onces that don't have all zero values\n for date in dates[:-1]:\n # print(f\"writing leftover data at end for {date}\")\n for coord in self.cells:\n min_dist = self.cells[coord].get_min_dist()\n if min_dist is not None:\n self.cells[coord].process_interval('none', date, min_dist)\n if self.cells[coord].values_not_zero():\n updated_grid_coords.add(coord)\n # write in data for that day\n for coord in updated_grid_coords:\n # get dictionary of values from grid cell\n cell_data = self.cells[coord].get_data()\n df_result.loc[pd.IndexSlice[(self.get_date_from_string(date), str(coord))]] = pd.Series(cell_data)\n return df_result.dropna(axis=0, how='all')",
"def _chunk_time(x, samp_buffer=0):\n if samp_buffer < 0:\n raise ValueError(\n 'Buffer between signal peaks must be a positive number')\n if samp_buffer != int(samp_buffer):\n raise ValueError('Number of samples must be an integer')\n\n if type(x[0]) == np.bool_:\n Xs = np.arange(len(x))\n x = Xs[x]\n X = len(x)\n\n cur_start = x[0]\n cur_samp = x[0]\n Nchunk = 0\n chunks = []\n for i in range(1, X):\n if x[i] > (cur_samp + samp_buffer + 1):\n if Nchunk == 0:\n chunks = [cur_start, cur_samp]\n else:\n chunks = np.vstack([chunks, [cur_start, cur_samp]])\n\n Nchunk = Nchunk + 1\n cur_start = x[i]\n\n cur_samp = x[i]\n\n # Add final row to chunk\n if Nchunk == 0:\n chunks = [[cur_start, cur_samp]]\n else:\n chunks = np.vstack([chunks, [cur_start, cur_samp]])\n\n return chunks",
"def get_time_series(this_lat, this_lon, case, varnames):\n\n cesmdir = '/gpfs/fs1/collections/cdg/data/cesmLE/CESM-CAM5-BGC-LE/atm/proc/tseries/monthly'\n\n if 'LE' in case:\n\n from observational_large_ensemble.params import karen_params_cesm\n\n mode_lag = karen_params_cesm.mode_lag\n cvdp_loc = karen_params_cesm.cvdp_loc\n AMO_cutoff_freq = karen_params_cesm.AMO_cutoff_freq\n\n name_conversion = {'tas': 'TREFHT', 'pr': 'PRECC', 'slp': 'PSL'}\n cesm_names = [name_conversion[v] for v in varnames]\n this_member = int((case).split('-')[-1])\n cvdp_file = '%s/CESM1-CAM5-BGC-LE_#%i.cvdp_data.1920-2018.nc' % (cvdp_loc, this_member)\n\n # Historical filenames for CESM. Will need to append part of RCP8.5 to get full period\n filenames = []\n for var in cesm_names:\n file_str = '%s/%s/b.e11.B20TRC5CNBDRD.f09_g16.%03d.cam.h0.%s.??????-200512.nc' % (cesmdir, var,\n this_member, var)\n this_file = glob(file_str)[0]\n filenames.append(this_file)\n\n daX, df_shifted, _ = get_obs(case, varnames[0], filenames,\n karen_params_cesm.valid_years, mode_lag,\n cvdp_file, AMO_cutoff_freq, name_conversion)\n\n this_ts = daX.sel({'lat': this_lat, 'lon': this_lon}, method='nearest')\n\n else:\n\n from observational_large_ensemble.params import karen_params_obs\n\n mode_lag = karen_params_obs.mode_lag\n cvdp_loc = karen_params_obs.cvdp_loc\n AMO_cutoff_freq = karen_params_obs.AMO_cutoff_freq\n\n tas_dir = karen_params_obs.tas_dir\n pr_dir = karen_params_obs.pr_dir\n slp_dir = karen_params_obs.slp_dir\n cvdp_file = '%s/HadISST.cvdp_data.1920-2018.nc' % cvdp_loc\n file_dict = {'tas': '%s/Complete_TAVG_LatLong1.nc' % tas_dir,\n 'pr': '%s/full_data_monthly_v2020.nc' % pr_dir,\n 'slp': '%s/prmsl.mon.mean.nc' % slp_dir}\n\n filenames = []\n for var in varnames:\n filenames.append(file_dict[var])\n\n name_conversion = {'tas': 'temperature', 'pr': 'precip', 'slp': 'prmsl'}\n\n daX, df_shifted, _ = get_obs(case, varnames[0], filenames[0],\n karen_params_obs.valid_years, mode_lag,\n cvdp_file, AMO_cutoff_freq, name_conversion)\n\n this_ts = daX.sel({'lat': this_lat, 'lon': this_lon}, method='nearest')\n\n return this_ts, df_shifted",
"def retrieve_multiple_time_series(self,run='latest',run_data=None,criteria={},timestep='daily',name_fn=name_element_variable):\n if timestep==\"daily\":\n suffix = \"\"\n else:\n suffix = \"/aggregated/%s\"%timestep\n\n if run_data is None:\n run_data = self.retrieve_run(run)\n\n retrieved={}\n def name_column(result):\n col_name = name_fn(result)\n if col_name in retrieved:\n i = 1\n alt_col_name = '%s %d'%(col_name,i)\n while alt_col_name in retrieved:\n i += 1\n alt_col_name = '%s %d'%(col_name,i)\n col_name = alt_col_name\n return col_name\n\n units_store = {}\n for result in run_data['Results']:\n if self.result_matches_criteria(result,criteria):\n d = self.retrieve_json(result['TimeSeriesUrl']+suffix)\n result.update(d)\n col_name = name_column(result)\n# raise Exception(\"Duplicate column name: %s\"%col_name)\n if 'Events' in d:\n retrieved[col_name] = d['Events']\n units_store[col_name] = result['Units']\n else:\n all_ts = d['TimeSeries']\n for ts in all_ts:\n col_name = name_column(ts)\n units_store[col_name] = ts['Units']\n\n vals = ts['Values']\n s = self.parse_veneer_date(ts['StartDate'])\n e = self.parse_veneer_date(ts['EndDate'])\n if ts['TimeStep']=='Daily':\n f='D'\n elif ts['TimeStep']=='Monthly':\n f='M'\n elif ts['TimeStep']=='Annual':\n f='A'\n dates = pd.date_range(s,e,freq=f)\n retrieved[col_name] = [{'Date':d,'Value':v} for d,v in zip(dates,vals)]\n # Multi Time Series!\n\n result = self._create_timeseries_dataframe(retrieved)\n for k,u in units_store.items():\n result[k].units = u\n\n return result",
"def get_timeseries_on_points(self, varname, points):\n res = np.zeros((len(points), self.ntimestep), dtype=np.float64)\n for record in range(self.ntimestep):\n res[:, record] = self.get_data_on_points(varname, record, points)\n return res",
"def reduce_data():\n snapshots = Snapshot.objects.all()\n locations = Location.objects.all()\n lst = []\n for snapshot in snapshots:\n lst.append([snapshot.location.name, snapshot.avail_bikes,\n snapshot.free_stands, snapshot.timestamp])\n cols = ['location', 'avail_bikes', 'free_stands', 'timestamp']\n df = pd.DataFrame(lst, columns=cols)\n df['time'] = df['timestamp'].dt.round('30min').dt.strftime('%H:%M')\n\n group = df.groupby(['location', 'time'])\n means = group.mean()\n sd = group.std()\n today = date.today()\n first = today.replace(day=1)\n last_month = first - timedelta(days=1)\n\n for name, time in means.index:\n subset_mean = means.xs((name, time), level=(0, 1), axis=0)\n subset_sd = sd.xs((name, time), level=(0, 1), axis=0)\n m = Stat.objects.get_or_create(\n location=locations.get(name=name),\n avail_bikes_mean=subset_mean['avail_bikes'],\n free_stands_mean=subset_mean['free_stands'],\n avail_bikes_sd=subset_sd['avail_bikes'],\n free_stands_sd=subset_sd['free_stands'],\n time=time,\n month=last_month\n )\n\n # snaps = Snapshot.objects.all()\n # i = 0\n # length = len(snaps)\n # for s in snaps:\n # i += 1\n # print(i)\n # if i > 35000:\n # s.save()\n # reduce_data()",
"def gather_data(self, *args, **kwargs):\n instrument_arg = kwargs.get('instrument', 'EUR_USD')\n granularity_arg = kwargs.get('granularity', 'M1')\n candle_format = kwargs.get('candleFormat', 'bidask')\n start_time = kwargs.get('start', '2014-10-01T00:00:00.000000Z')\n count_arg = kwargs.get('count', 5000)\n out_data = []\n data_complete = False\n while(not data_complete):\n response = self.oanda.get_history(instrument=instrument_arg,\n granularity=granularity_arg,\n candleFormat=candle_format,\n start=start_time,\n count=count_arg)\n raw_data = response['candles']\n if (len(out_data) == 0):\n out_data = out_data + raw_data\n elif (len(out_data) > 1):\n # raw_data[0] is already in out_data as raw_data[-1] from last\n # iteration\n out_data = out_data + raw_data[1:]\n start_time = raw_data[-1]['time']\n if (len(raw_data) < 5000):\n data_complete = True\n\n out_data = self._list_to_df(out_data)\n return out_data",
"def _read_events(fname, t_cols, chunksize):\n n_rows = config['n_rows'][fname]\n with tqdm(desc=fname, total=(n_rows//chunksize+1)) as pbar:\n for df in pd.read_csv(eicu_path + '{}.csv'.format(fname), parse_dates=t_cols, chunksize=chunksize):\n pbar.update()\n yield df",
"def read_in_chunks(self):\n chunksize = 10 ** 3\n lines_number = sum(1 for line in open(self.filepath))\n self.progressMaximum.emit(lines_number // chunksize)\n dfList = []\n\n # self.df = traja.read_file(\n # str(filepath),\n # index_col=\"time_stamps_vec\",\n # parse_dates=[\"time_stamps_vec\"],\n # )\n\n TextFileReader = pd.read_csv(\n self.filepath,\n index_col=\"time_stamps_vec\",\n parse_dates=[\"time_stamps_vec\"],\n chunksize=chunksize,\n )\n for idx, df in enumerate(TextFileReader):\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d %H:%M:%S:%f\")\n dfList.append(df)\n self.intReady.emit(idx)\n self.completed.emit(dfList)\n self.finished.emit()",
"def iter_time_series(ts, in_len, out_len, step):\n\n for i in reversed(range(len(ts) - out_len, in_len - 1, -step)):\n x = ts[i - in_len:i]\n y = ts[i:i + out_len]\n yield x, y",
"def process_data(self, df_data, breakdown='weekly'):\n df_data['time'] = df_data['time'].apply(self.remove_time_zone)\n df_data['grid_coord'] = df_data['grid_coord'].astype(str)\n # return self.process_chunk((self.remove_time_zone('2019-04-15T00:00:00-04:00'), self.remove_time_zone('2019-04-16T00:00:00-04:00')), df_data)\n # get weekly/daily time chunks within cleanedInputData\n week_days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']\n start = min(df_data['time']) #str\n end = max(df_data['time']) #str\n start_date = iso8601.parse_date(start).replace(hour=0, minute=0, second=0)\n end_date = (iso8601.parse_date(end) + timedelta(days=1)).replace(hour=0, minute=0, second=0)\n if breakdown == \"weekly\":\n dates = pd.date_range(start_date, end_date, freq='W-'+week_days[start_date.weekday()])\n dates = [e.isoformat() for e in dates] + [end_date.isoformat()]\n else: # breakdown == \"daily\"\n dates = pd.date_range(start_date, end_date, freq='d')\n dates = [e.isoformat() for e in dates]\n time_chunks = []\n for left, right in zip(dates, dates[1:]):\n time_chunks.append((left, right))\n # return self.process_chunk(time_chunks[0], df_data)\n # parallelize processing between time chunks\n with Pool(cpu_count()) as p:\n ret_list = p.map(partial(self.process_chunk, df_data=df_data), time_chunks)\n return pd.concat(ret_list)",
"def resample_timeseries_data(data, frequency, datetime_field, decimal_places):\r\n if not data:\r\n return []\r\n else:\r\n df = pd.DataFrame(data)\r\n df[datetime_field] = pd.to_datetime(df[datetime_field])\r\n time_indexed_data = df.set_index(datetime_field)\r\n resampled_average_concentrations = time_indexed_data.resample(frequency).mean().round(decimal_places) \r\n resampled_timeseries = [{'pollutant_value':row.pollutant_value,\r\n 'time':datetime.strftime(index,'%b,%Y')}\r\n for index, row in resampled_average_concentrations.iterrows() ] \r\n return resampled_timeseries",
"def feature_engineer_ts(self, month=12):\n st_data_dt = self.get_st_data_dt()\n end_data_dt = self.get_end_data_dt()\n date_list = pd.date_range(*(pd.to_datetime([st_data_dt, end_data_dt]) + pd.offsets.MonthEnd()), freq='M').to_list()\n population = self.get_population()\n is_raw_partition = self.get_is_raw_partition()\n# Lag 2 months\n all_data = []\n# join past is_raw columns\n for d in date_list:\n \n population_partition = population[population['ft_data_dt'] == d] \n old_date = d - relativedelta(months=month)\n y = old_date.year\n m = old_date.month\n day = calendar.monthrange(y, m)[1]\n old_date = date(y, m, day)\n old_date = max(old_date, st_data_dt)\n date_list_join = pd.date_range(*(pd.to_datetime([old_date, d]) + pd.offsets.MonthEnd()), freq='M').to_list()\n date_list_join.reverse()\n for index, date_join in enumerate(date_list_join):\n if date_join.strftime(\"%Y-%m-%d\") not in is_raw_partition.keys():\n continue\n \n tmp_is_raw_partition = is_raw_partition[date_join.strftime(\"%Y-%m-%d\")]\n \n rename_col = [c for c in list(tmp_is_raw_partition.columns) if c not in ['idd', 'ft_data_dt']]\n new_col = [c+'_'+str(index+1) for c in rename_col]\n name_dict = dict(list(zip(rename_col, new_col)))\n tmp_is_raw_partition = tmp_is_raw_partition.rename(columns = name_dict)\n population_partition = population_partition.merge(tmp_is_raw_partition.drop(columns=['ft_data_dt']), on=['idd'], how='left')\n all_data.append(population_partition)\n ts_df = pd.concat(all_data)\n threshold_null = len(ts_df.columns) - 4\n ts_df = ts_df[ts_df.isnull().sum(axis=1) < threshold_null]\n \n def sum_ts(self, df, col_list, feature, duration):\n ft_name = feature+ '_sum_'+str(duration)+'mth'\n tmp_df = df[col_list].sum(axis = 1).to_frame(name=ft_name)\n return tmp_df\n \n def mean_ts(self, df, col_list, feature, duration):\n ft_name = feature+ '_avg_'+str(duration)+'mth'\n tmp_df = df[col_list].mean(axis = 1).to_frame(name=ft_name)\n return tmp_df\n \n def std_ts(self, df, col_list, feature, duration):\n ft_name = feature+ '_std_'+str(duration)+'mth'\n tmp_df = df[col_list].std(axis = 1).to_frame(name=ft_name)\n return tmp_df\n \n def med_ts(self, df, col_list, feature, duration):\n ft_name = feature+ '_med_'+str(duration)+'mth'\n tmp_df = df[col_list].std(axis = 1).to_frame(name=ft_name)\n return tmp_df\n \n def min_ts(self, df, col_list, feature, duration):\n ft_name = feature+ '_min_'+str(duration)+'mth'\n tmp_df = df[col_list].min(axis = 1).to_frame(name=ft_name)\n return tmp_df\n \n def max_ts(self, df, col_list, feature, duration):\n ft_name = feature+ '_max_'+str(duration)+'mth'\n tmp_df = df[col_list].max(axis = 1).to_frame(name=ft_name)\n return tmp_df\n \n def q1_ts(self, df, col_list, feature, duration):\n ft_name = feature+ '_q1_'+str(duration)+'mth'\n tmp_df = df[col_list].quantile(q=0.25, axis = 1).to_frame(name=ft_name)\n return tmp_df\n \n def q3_ts(self, df, col_list, feature, duration):\n ft_name = feature+ '_q3_'+str(duration)+'mth'\n tmp_df = df[col_list].quantile(q=0.75, axis = 1).to_frame(name=ft_name)\n return tmp_df\n \n def last_ts(self, df, feature):\n ft_name = feature+ '_last'\n tmp_df = df[feature+'_'+str(1)].to_frame(name=ft_name)\n return tmp_df\n \n ts_duration = [1, 3, 6, 9, 12]\n feature_list = self.get_is_raw_col()\n df = ts_df[['idd', 'ft_data_dt']]\n# Time Series Features\n for duration in ts_duration:\n for col in feature_list:\n col_list = [col+'_'+str(i) for i in range(1, duration+1)]\n df = pd.concat([df\\\n , sum_ts(self, ts_df, col_list, col, duration)\\\n , 
mean_ts(self, ts_df, col_list, col, duration)\\\n , med_ts(self, ts_df, col_list, col, duration)\\\n , q1_ts(self, ts_df, col_list, col, duration)\\\n , q3_ts(self, ts_df, col_list, col, duration)\\\n , min_ts(self, ts_df, col_list, col, duration)\\\n , max_ts(self, ts_df, col_list, col, duration)]\n , axis=1)\n self.set_all_data(df)",
"def chunk_periods(start, end):\n\n logging.debug(f'chunking {start} to {end}')\n # convert the strings to datetime objects\n #start = dt.datetime.strptime(''.join(start.rsplit(':', 1)), '%Y-%m-%dT%H:%M:%S-%z')\n start = dt.datetime.strptime(start, '%Y-%m-%dT%H:%M:%S-%z')\n logging.debug(f'start: {start}')\n periods = []\n\n # if the year and month of the period are the same, just return the dates as we got them\n\n\n\n return periods",
"def slice_timeseries(n_slices,dataset):\n\n n,l=np.shape(dataset)\n\n X = np.reshape(dataset,(n*n_slices,l//n_slices))\n\n print('sliced data shape (nr. of slices, slice length):',np.shape(X))\n print('#####################################')\n \n return X",
"def get_meter_data_for_time_slice(apt_no, start_time, end_time):\n if apt_no in ['102A', 102]:\n apt_no = '102A'\n\n logger.debug(\"sMap: Getting meter data for %s between %s and %s\", apt_no, start_time, end_time)\n\n query = (\"select data in ('\" + str(start_time) + \"','\" + str(end_time) + \"') \"\n \"limit 200000 \"\n \"where Metadata/LoadLocation/FlatNumber ='\" + str(apt_no) + \"' and \"\n \"Metadata/Extra/PhysicalParameter='Power'\")\n\n r = requests.post(url, data=query)\n # logger.debug (\"%s\",r)\n payload = r.json()\n # logger.debug(\"Payload:%s\", payload)\n\n if apt_no in ['102A', 102]:\n apt_no = 102\n meters = retrieve_meter_info(apt_no)\n logger.debug(\"Meters: %s\", meters)\n\n streams = []\n meter_type = []\n l_meters = range(0, len(meters))\n for i in l_meters:\n uuid = payload[i]['uuid']\n\n # Get meter type based on uuid\n for meter in meters:\n if meter['uuid'] == uuid:\n m_type = meter['type']\n # logger.debug (uuid, m_type)\n\n meter_type.append(m_type)\n streams.append(np.array(payload[i]['Readings']))\n # logger.debug(\"Streams: %s\", streams)\n\n if len(streams[0]) > 0:\n\n df = [pd.DataFrame({'time': readings[:, 0] / 1000, 'power': readings[:, 1],\n 'type': [meter_type[i]] * len(readings)},\n columns=['time', 'power', 'type']) for i, readings in enumerate(streams)]\n else:\n df = []\n\n return df",
"def power_timeline():\n\n return [\n {\n \"timestamp\": \"2021-09-14T12:37:37.168817\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:37.669237\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:38.170142\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:38.670338\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:39.171321\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:39.671572\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:40.172503\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:40.672693\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:41.173552\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:41.673815\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:42.174560\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:42.674690\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:43.175441\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:43.675743\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:44.176551\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:44.677307\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:45.178049\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:45.678310\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:46.179120\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:46.679308\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:47.180223\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:47.680468\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:48.181316\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:48.681683\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:49.182522\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:49.682731\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:50.183680\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n 
\"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:50.683812\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:51.184792\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:51.685027\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:52.185709\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:52.686065\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:53.186929\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:53.687190\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:54.188031\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:54.688674\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:55.189489\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:55.690299\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:56.191124\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n\n ]",
"def split_time_series_by_time_steps_index(df, n_time_steps=None):\n\t\n\ttime_steps = df.index.get_level_values(1).unique()\n\tsplit_time_step = time_steps[-n_time_steps]\n\ttrain = df[df.index.get_level_values(1) < split_time_step]\n\ttest = df[df.index.get_level_values(1) >= split_time_step]\n\t\n\treturn train, test",
"def _calc_line_dt(srs: Srs, x: str, unit: str) -> Tuple[List[pd.DataFrame], str]:\n # pylint: disable=too-many-locals\n unit_range = dask.compute(*(srs.apply(\"min\").data, srs.apply(\"max\").data))\n unit = _get_timeunit(min(unit_range[0]), min(unit_range[1]), 100) if unit == \"auto\" else unit\n dfs = Dfs(srs.apply(\"to_frame\").self_map(drop_null))\n if unit not in DTMAP.keys():\n raise ValueError\n dfr = dfs.self_map(_dask_group_by_time_series, key=x, freq=DTMAP[unit][0])\n for df in dfr:\n df.columns = [x, \"freq\"]\n df[\"pct\"] = df[\"freq\"] / len(df) * 100\n\n df[x] = df[x] - pd.to_timedelta(6, unit=\"d\") if unit == \"week\" else df[x]\n df[\"lbl\"] = df[x].dt.to_period(\"S\").dt.strftime(DTMAP[unit][1])\n\n return (dfr, DTMAP[unit][3])",
"def _chunk_data(self):\n for n in range(0, len(self.data) + 1, len(self.data) //\n self.num_of_chunks):\n yield self.data[0 + n:len(self.data) // self.num_of_chunks + n]",
"def series_to_supervised(data, n_in=1, n_out=1, dropnan=True, stride=None, dates=False, leaks=True):\n df = pd.DataFrame(data)\n \n time = None\n if 'date' in df.columns:\n time = 'date'\n elif 'time' in df.columns:\n time = 'time'\n if time != None:\n df = df.drop([time], axis=1)\n \n if 'leak' in df.columns:\n df = df.drop(['leak'], axis=1) \n n_vars = df.shape[1]\n times_column = list()\n if dates and time != None:\n times_column = data[time]\n del data\n \n cols, names, pivots = list(), list(), list()\n \n # input sequence (t-n, ... t-1)\n for i in range(n_in, 0, -1):\n cols.append(df.shift(i))\n names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]\n\t# forecast sequence (t, t+1, ... t+n)\n for i in range(0, n_out):\n cols.append(df.shift(-i))\n if i == 0:\n names += [('var%d(t)' % (j+1)) for j in range(n_vars)]\n else:\n names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]\n\t# put it all together\n agg = pd.concat(cols, axis=1)\n \n agg.columns = names\n\n #stride - delete windows\n if stride != None:\n indexes_to_drop = list()\n for i in range(stride, agg.shape[0], stride):\n print(\"index\", i)\n pivots += [i]\n \n onset = 0\n offset = pivots[0]\n for i in range(0, len(pivots)):\n print(\"onset\", onset)\n print(\"offset\", offset)\n to_drop = [ x for x in range(onset,offset)]\n indexes_to_drop += to_drop\n try:\n onset = pivots[i] + 1\n offset = pivots[i+1]\n \n except IndexError:\n onset = pivots[i] + 1\n offset = agg.shape[0]\n to_drop = [ x for x in range(onset,offset)]\n indexes_to_drop += to_drop\n \n \n \n print(\"indexes_to_drop\", indexes_to_drop)\n \n agg.drop(df.index[indexes_to_drop], inplace=True)\n \"\"\"\n if dates and time!=None:\n agg[time] = times_column\n \"\"\" \n # drop rows with NaN values \n if dropnan:\n agg.dropna(inplace=True)\n \n\n return agg",
"def _chunk_data(X, slices):\n\n # from object array to list\n slices = [sl for sl in slices if len(sl)]\n selected_times = np.hstack([np.ravel(sl) for sl in slices])\n start = np.min(selected_times)\n stop = np.max(selected_times) + 1\n slices_chunk = [sl - start for sl in slices]\n X_chunk = X[:, :, start:stop]\n return X_chunk, slices_chunk",
"def _analyze_series(self, series):\n # bin series by analysis time\n # only analyze the last bin\n ts = array([si['timestamp'] for si in series])\n ds = diff(ts)\n\n # tolerance_seconds = 60 * 60 * self._bin_hours\n # ds = diff(ts) > tolerance_seconds\n # bounds = where(ds)[0]\n # itemidx = bounds[-1] if bounds else 0\n # series = series[itemidx:]\n\n for ci in self._conditionals:\n ret = self._execute_conditional(ci, series, ds)\n if ret:\n return ret",
"def build_timeseries(self):\n timeseries = {\n \"metricKind\": \"DELTA\", \n \"metric\": {\n \"labels\": {\n \"response_code\": \"0\"}, \n \"type\": \"agent.googleapis.com/agent/request_count\"\n }, \n \"points\": [\n {\n \"interval\": {\n \"endTime\": \"2019-02-18T22:09:53.939194Z\", \n \"startTime\": \"2019-02-18T21:09:53.939194Z\"\n }, \n \"value\": {\n \"int64Value\": \"62\"\n }\n }, \n {\n \"interval\": {\n \"endTime\": \"2019-02-18T21:09:53.939194Z\", \n \"startTime\": \"2019-02-18T20:09:53.939194Z\"\n }, \n \"value\": {\n \"int64Value\": \"61\"\n }\n }\n ], \n \"resource\": {\n \"labels\": {\n \"instance_id\": \"9113659852587170607\", \n \"project_id\": \"YOUR_PROJECT_ID\", \n \"zone\": \"us-east4-a\"\n }, \n \"type\": \"gce_instance\"\n }, \n \"valueType\": \"INT64\"\n }\n\n return timeseries"
]
| [
"0.62124497",
"0.62107456",
"0.6032165",
"0.6029467",
"0.60025483",
"0.5973273",
"0.58360463",
"0.5779444",
"0.573043",
"0.56823605",
"0.5632646",
"0.5631443",
"0.56259614",
"0.56224453",
"0.5608115",
"0.5569298",
"0.55545145",
"0.55337185",
"0.5533596",
"0.5523319",
"0.5420249",
"0.54147923",
"0.540628",
"0.5394024",
"0.53745705",
"0.5370346",
"0.535937",
"0.53504485",
"0.5342833",
"0.5342287"
]
| 0.6821645 | 0 |
break up time series into chunks and get mean for each | def break_and_get_mean(full_series, breaks):
n = len(breaks)
return pd.Series([np.mean(full_series[breaks[i]:breaks[i+1]]) for i in range(n-1)]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def chunk_means(arr, n):\n means = []\n for i in range(0, len(arr), n):\n means.append(np.mean(arr[i : i + n]))\n return means",
"def mean(series):\n return fsum(series) / len(series)",
"def daymean(x):\n return nstepf(x, 24, func='mean')",
"def split_by_interval(array, interval: int):\n\n prev_timestamp = array[0][\"timestamp\"]\n values = 0\n count = 0\n averages = []\n\n for el in array:\n if el[\"timestamp\"] - prev_timestamp >= timedelta(interval):\n prev_timestamp = el[\"timestamp\"]\n average = values / count\n averages.append(average)\n values = count = 0\n else:\n values += el[\"value\"]\n count += 1",
"def compute_MA(self, series, long_term=True):\n temp = series.copy().reset_index(drop=True) # DO NOT MODIFY THE ORIGINAL DATAFRAME!\n if long_term:\n lag = 200\n else:\n lag = 50\n assert len(temp)>lag, 'Not enough data points in this timeseries!'\n for idx in range(lag, len(temp)):\n temp[idx] = series[idx-lag:idx].mean()\n temp[:lag] = None\n return temp",
"def time_mean(self, width):\n import math\n\n for i in range(len(self.data)):\n for j in range(len(self.chans)):\n self.data[i,:,j,:] = self.data[i - width[j]/2 : i + int(math.ceil(width[j]/2.)), :, j, :].mean(axis=0)",
"def obtain_monthly_mean(data=pd.DataFrame()):\n return data.resample(\"MS\").mean()",
"def mean(xs):\n ave = 0\n for xs_split in xs:\n num = float(xs_split)\n print(xs_split)\n ave = ave+num\n average = ave/len(xs)\n return average",
"def calculateAverage(self): \n if not self.lastTransferAverage: \n size=[0,0,0,0]\n for i in range(0,4):\n if self.lastTransferredChannel & (1 << i):\n size[i]=self.lastNbrSamplesPerSeg\n self.lastAverageArray = [zeros(size[0]),zeros(size[1]),zeros(size[2]),zeros(size[3])]\n nbrSamp=self.lastNbrSamplesPerSeg\n for i in range(0,4):\n if self.lastTransferredChannel & (1 << i):\n nbrSeg=self.lastNbrSegmentsArray[i]\n for j in range (0,nbrSamp):\n for k in range(0,nbrSeg): \n self.lastAverageArray[i][j]+=self.lastWaveformArray[i][k*nbrSamp+j]\n self.lastAverageArray[i][j]/=nbrSeg\n self.lastAverageCalculated=True\n else: print \"NOn averaged data are not available\"",
"def time_segments_average(X, interval, time_column):\n warnings.warn(_TIME_SEGMENTS_AVERAGE_DEPRECATION_WARNING, DeprecationWarning)\n\n if isinstance(X, np.ndarray):\n X = pd.DataFrame(X)\n\n X = X.sort_values(time_column).set_index(time_column)\n\n start_ts = X.index.values[0]\n max_ts = X.index.values[-1]\n\n values = list()\n index = list()\n while start_ts <= max_ts:\n end_ts = start_ts + interval\n subset = X.loc[start_ts:end_ts - 1]\n means = subset.mean(skipna=True).values\n values.append(means)\n index.append(start_ts)\n start_ts = end_ts\n\n return np.asarray(values), np.asarray(index)",
"def moving_average_forecast(series, window_size):\n\tforecast= []\n\tfor time in range(len(series)- window_size):\n\t\tforecast.append(series[time:time + window_size].mean())\n\treturn np.array(forecast)",
"def resample_timeseries_data(data, frequency, datetime_field, decimal_places):\r\n if not data:\r\n return []\r\n else:\r\n df = pd.DataFrame(data)\r\n df[datetime_field] = pd.to_datetime(df[datetime_field])\r\n time_indexed_data = df.set_index(datetime_field)\r\n resampled_average_concentrations = time_indexed_data.resample(frequency).mean().round(decimal_places) \r\n resampled_timeseries = [{'pollutant_value':row.pollutant_value,\r\n 'time':datetime.strftime(index,'%b,%Y')}\r\n for index, row in resampled_average_concentrations.iterrows() ] \r\n return resampled_timeseries",
"def getAggregate(df, step):\n #df = mig.getAggregate(df, 2)\n #df = df.resample('2t').mean() :alternate resampling method?\n\n idx, res, flag = [], [], []\n\n for (start, end) in getWindows(df.value, step, step):\n idx.append(df.index[end])\n res.append(df.value.iloc[start:end].mean())\n flag.append(df.flag.iloc[start] & df.flag.iloc[end])\n\n return pd.DataFrame.from_records({'value':res, 'flag':flag}, index=idx,\n columns=['value', 'flag'])",
"def _mean_over_ts(self, l_ts):\n return sum([numpy.sum(ts) for ts in l_ts]) / len(l_ts)",
"def _get_mean(self, sums, step):\n\n return sums/step",
"def running_mean(x, N):\n cumsum = np.cumsum(np.insert(x, 0, 0)) \n return (cumsum[N:] - cumsum[:-N]) / float(N)",
"def average_by_time(Xs):\n X_merged = {}\n\n for X in Xs:\n for (t,x) in X:\n if t not in X_merged: X_merged[t] = []\n X_merged[t].append(x)\n\n X_avg = {}\n\n for t in X_merged.keys():\n X_avg[t] = sum(X_merged[t]) / len(X_merged[t])\n\n return list(X_avg.items())",
"def get_before_after_mean(x, person_transaction):\n\n person_id = x.person\n \n def split_before_after(time, columns):\n \"\"\"This function will split a dataframe\n into 3 parts, before, on and after\n\n Arguments:\n time -- time in hours\n columns -- series with current persons transaction\n\n Returns:\n before -- mean daily spending before event\n current -- mean daily spending on event day\n after -- mean daily spending after event\n \"\"\" \n if time is None or np.isnan(time):\n return [], [], []\n \n currentday = int(time)//24\n before = columns[:currentday]\n current = columns[currentday]\n after = columns[currentday+1:]\n return (before, current, after)\n \n def split_between(time_view,time_complete, duration, columns):\n if time_view is None or np.isnan(time_view):\n return []\n start = int(time_view)//24\n if time_complete is None or np.isnan(time_complete):\n end = start + duration\n else:\n end = int(time_complete)//24\n return columns[start:end+1]\n \n def mean_weighted(items, reverse=False):\n '''Returns the mean, if empty return 0\n It gives weighted version, the second is worth\n 0.75 of the first, third 0.75 of second etc etc\n This will make sure that items bought near the complete\n date is more importand than a week or so later\n '''\n if reverse:\n items = reversed(items)\n f,div,result=1,0,0\n for x in items:\n result+=(x*f)\n div+=f\n f*=0.75\n if div:\n result/=div\n return result\n \n def mean0(items):\n '''Returns the mean, if empty return 0'''\n if len(items) > 0:\n return items.mean()\n else:\n return 0\n \n #when is an offer?\n # recived time is actually pretty useless, other than calculating the end time\n # so, an offers time, where we coumt the revenue, is all the days from viewed,\n # until completed, either by complete, or by expired\n # if we then take the mean from these days, \n # and after and before as weighted, that should be rather fair, no?\n try:\n person_row = person_transaction.loc[person_id]\n col = person_transaction.columns\n before_start, current_start, after_start = split_before_after(x.start, col)\n before_view, current_view, after_view = split_before_after(x.viewed_time, col)\n before_complete, current_complete, after_complete = split_before_after(x.completed_time, col)\n\n x.before_start = mean0(person_row[before_start])\n x.same_day_start = person_row[current_start].sum()\n x.after_start = mean0(person_row[after_start])\n \n x.before_view = mean0(person_row[before_view])\n x.same_day_view = person_row[current_view].sum()\n x.after_view = mean0(person_row[after_view])\n \n x.before_complete = mean0(person_row[before_complete])\n x.same_day_complete = person_row[current_complete].sum()\n x.after_complete = mean0(person_row[after_complete])\n \n between = split_between(x.viewed_time,x.completed_time, x.duration, col)\n x.w_before = mean_weighted(person_row[before_view],reverse=True)\n x.sum_during = person_row[between].sum()\n x.mean_during = mean0(person_row[between])\n x.w_after = mean_weighted(person_row[after_complete])\n except KeyError as e:\n pass\n \n return x",
"def running_mean(x, N):\n cumsum = np.cumsum(np.insert(x, 0, 0))\n return (cumsum[N:] - cumsum[:-N]) / float(N)",
"def avg_ttm(df, years):\n\n # Start with the non-shifted data.\n df_result = df.copy()\n\n # Add shifted data for each year.\n for i in range(1, years):\n df_result += df.shift(4 * i)\n\n # Take the average.\n df_result /= years\n\n return df_result",
"def mean_period(data):\n peaks = len(find_peaks(data, height=0)[0])\n return len(data) / peaks if peaks > 0 else len(data)",
"def running_mean(data, n):\n\treturn np.convolve(data, np.ones((n, ))/n)[(n-1):-(n-1)]",
"def get_rolling_mean(values, window):\r\n return pd.rolling_mean(values, window=window)",
"def _trajectory_mean(trajectories_path, chunk, top, atom_subset, first_frame, verbose):\n # initiating some variables...\n \n traj_sum_list=[]\n number_of_frames = 0\n \n # just a quick check to verify if path exists\n try:\n os.path.exists(trajectories_path)\n except:\n sys.exit('Make sure you have provided a string for a valid path to a trajectory file!')\n else:\n if verbose > 0:\n print 'Loading trajectories...'\n \n try:\n # now let's calculate the native conformation which\n # is just the mean position of each atom in the \n # whole trajectory file\n for chunk_i in md.iterload(trajectories_path, chunk, top=top, atom_indices = atom_subset):\n \n # just adding the time length of chunk\n # to calculate the total simulation time\n # (not needed in calculation, just for display)\n \n \n if verbose > 1:\n print 'Successfully loaded trajectory: \\n %s' %(chunk_i)\n\n # will use lists in this part because we don't know size\n # of matrices at this point, room for future optimization\n # first we get the sum of all atom coordinates\n # this will be achieved by column wise summation of a coordinate\n # matrix called by xyz trajectory attribute\n \n all_atom_sum =[]\n for atom in range(len(atom_subset)):\n all_atom_sum.append(chunk_i.xyz[:,atom,:].sum(axis=0))\n\n traj_sum_list.append(all_atom_sum)\n\n number_of_frames += chunk_i.xyz.shape[0]\n \n except:\n sys.exit('Error while loading trajectories! Make sure you provided a valid trajectory file!')\n \n else:\n print '\\nSuccessfully loaded trajectory file!'\n if verbose > 0:\n print '\\nTotal number of frames loaded: %s \\n' %(number_of_frames)\n \n traj_sum_all = np.concatenate(traj_sum_list)\n \n # then we need to sum all the coordinates of all chunks together\n # we want the result to be a matrix of shape = (len(atom_subset), 3)\n all_atoms_sum_list=[]\n for atom in range(len(atom_subset)):\n all_atom_sum = traj_sum_all[atom::len(atom_subset), :].sum(axis=0)\n all_atoms_sum_list.append(all_atom_sum)\n \n # we just have to put all together\n reference_conformation_array = np.concatenate(all_atoms_sum_list)\n reference_conformation = np.reshape(reference_conformation_array, (len(atom_subset), 3))\n \n # and now we can calculate the average outside of the loop\n reference_conformation = (reference_conformation / number_of_frames) * 10\n \n # the function returns the numpy array with all coordinates\n # and the trajectory time contains the simulation time length\n return reference_conformation",
"def runavg(ts, w):\n # Original length of ts\n N = len(ts)\n # make ts three-fold periodic\n ts = np.append(ts, np.append(ts, ts))\n # smooth by convolution with a window of equal weights\n ts_smooth = np.convolve(ts, np.ones(w) / w, mode=\"same\")\n # Only output central section, of length equal to the original length of ts\n ts = ts_smooth[N : 2 * N]\n\n return ts",
"def ar1_moving_average_time_series(series, length=1):\n\n # just in case the index isn't already datetime type\n series.index = pd.to_datetime(series.index)\n\n ar1 = []\n ar1_se = []\n index = []\n\n for i in range(len(series) - length ):\n #print(series[i:(length + i)])\n param, se = get_AR1_parameter_estimate(series[i:(length + i)])\n ar1.append(param)\n ar1_se.append(se)\n index.append(series.index[length + i])\n\n ar1_name = series.name+\"_ar1\"\n ar1_se_name = series.name+\"_ar1_se\"\n\n ar1_df = pd.DataFrame()\n ar1_df[ar1_name] = pd.Series(ar1)\n ar1_df[ar1_se_name] = pd.Series(ar1_se)\n ar1_df.index = index\n\n return ar1_df",
"def load_mean(df, begin=None, end=None):\n load_df = df.reset_index()\n max_to = max(load_df.time)\n if end is None:\n end = max_to\n elif end > max_to:\n raise ValueError(\"computing mean load after the \"\n \"last event ({}) is NOT IMPLEMENTED\".format(max_to))\n min_to = load_df.time.iloc[0]\n if begin is None:\n begin = min_to\n elif begin < min_to:\n raise ValueError(\"computing mean load befor the \"\n \"first event ({}) is NOT IMPLEMENTED\".format(min_to))\n\n load_df = _load_insert_element_if_necessary(load_df, begin)\n load_df = _load_insert_element_if_necessary(load_df, end)\n\n u = load_df[(load_df.time < end) & (begin <= load_df.time)]\n\n return u.area.sum()/(end - begin)",
"def reduce_data():\n snapshots = Snapshot.objects.all()\n locations = Location.objects.all()\n lst = []\n for snapshot in snapshots:\n lst.append([snapshot.location.name, snapshot.avail_bikes,\n snapshot.free_stands, snapshot.timestamp])\n cols = ['location', 'avail_bikes', 'free_stands', 'timestamp']\n df = pd.DataFrame(lst, columns=cols)\n df['time'] = df['timestamp'].dt.round('30min').dt.strftime('%H:%M')\n\n group = df.groupby(['location', 'time'])\n means = group.mean()\n sd = group.std()\n today = date.today()\n first = today.replace(day=1)\n last_month = first - timedelta(days=1)\n\n for name, time in means.index:\n subset_mean = means.xs((name, time), level=(0, 1), axis=0)\n subset_sd = sd.xs((name, time), level=(0, 1), axis=0)\n m = Stat.objects.get_or_create(\n location=locations.get(name=name),\n avail_bikes_mean=subset_mean['avail_bikes'],\n free_stands_mean=subset_mean['free_stands'],\n avail_bikes_sd=subset_sd['avail_bikes'],\n free_stands_sd=subset_sd['free_stands'],\n time=time,\n month=last_month\n )\n\n # snaps = Snapshot.objects.all()\n # i = 0\n # length = len(snaps)\n # for s in snaps:\n # i += 1\n # print(i)\n # if i > 35000:\n # s.save()\n # reduce_data()",
"def unweighted_daily_mean(real_df, base=12):\n s = None\n bar = None\n tomorrow = (real_df.index[0] + pd.DateOffset(1)).date()\n today = real_df.index[0].date()\n for (d, h), df in real_df.groupby((real_df.index.date,real_df.index.hour)):\n if d==tomorrow and h<base:\n bar = np.concatenate((bar,df.values.reshape(-1)))\n elif h == base:\n if bar is not None:\n val = np.nanmean(bar)\n s = pd.concat((s, pd.Series({d : val})))\n bar = df.values.reshape(-1)\n today = d\n tomorrow = (d + pd.DateOffset(1)).date()\n elif d==today and h>base:\n bar = np.concatenate((bar, df.values.reshape(-1)))\n else:\n continue\n return s",
"def movingAverage(requestContext, seriesList, windowSize):\n if not seriesList:\n return []\n windowInterval = None\n if isinstance(windowSize, basestring):\n delta = parseTimeOffset(windowSize)\n windowInterval = abs(delta.seconds + (delta.days * 86400))\n\n if windowInterval:\n bootstrapSeconds = windowInterval\n else:\n bootstrapSeconds = max([s.step for s in seriesList]) * int(windowSize)\n\n bootstrapList = _fetchWithBootstrap(requestContext, seriesList, seconds=bootstrapSeconds)\n result = []\n\n for bootstrap, series in zip(bootstrapList, seriesList):\n if windowInterval:\n windowPoints = windowInterval / series.step\n else:\n windowPoints = int(windowSize)\n\n if isinstance(windowSize, basestring):\n newName = 'movingAverage(%s,\"%s\")' % (series.name, windowSize)\n else:\n newName = \"movingAverage(%s,%s)\" % (series.name, windowSize)\n newSeries = TimeSeries(newName, series.start, series.end, series.step, [])\n newSeries.pathExpression = newName\n\n offset = len(bootstrap) - len(series)\n for i in range(len(series)):\n window = bootstrap[i + offset - windowPoints:i + offset]\n newSeries.append(safeAvg(window))\n\n result.append(newSeries)\n\n return result"
]
| [
"0.6471262",
"0.62352437",
"0.6152543",
"0.6098485",
"0.6035611",
"0.59925014",
"0.5950393",
"0.59090394",
"0.5896033",
"0.5873036",
"0.58688504",
"0.5853194",
"0.5843164",
"0.58135355",
"0.5811944",
"0.5798107",
"0.5787635",
"0.5768644",
"0.57661927",
"0.5737312",
"0.5733862",
"0.5732738",
"0.572703",
"0.57259077",
"0.57255864",
"0.5723864",
"0.5712715",
"0.5711794",
"0.5706579",
"0.5702616"
]
| 0.6760139 | 0 |
Scales outputs to be from action min to action max | def scale_actions(self, actions): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scale_action(self, action: np.ndarray) -> np.ndarray:\n low, high = self.action_space.low, self.action_space.high\n return 2.0 * ((action - low) / (high - low)) - 1.0",
"def scale(self, state, action):\n control_action = action[..., : self._true_dim_action[0]]\n scale = super().scale(state, control_action)\n\n return scale",
"def rescale_action(self, action: np.ndarray) -> np.ndarray:\n action_rescaled = (\n action * (self.action_max - self.action_min) / 2.0\n + (self.action_max + self.action_min) / 2.0\n )\n return action_rescaled",
"def scale(self):",
"def scale(input):\n return (input - np.min(input)) / ((np.max(input) - np.min(input)))",
"def transform_action(self, action):\n action = np.clip(action, self.input_min, self.input_max)\n transformed_action = (action - self.action_input_transform) * self.action_scale + self.action_output_transform\n\n return transformed_action",
"def action_scaling(env, action_scaler):\n try:\n state_dim = len(env.observation_space.low)\n except AttributeError:\n print(\"Using dm_control so need to get state_dim differently\")\n state_dim = len(env.observation_space['observations'].low)\n\n action_dim = len(env.action_space.low)\n\n # state_scaling = float(state_scaling)\n action_scaler = float(action_scaler)\n\n state_scaler_array = np.ones((state_dim,), dtype=np.float32)\n action_scaler_array = np.ones((action_dim,), dtype=np.float32) * action_scaler\n\n return np.concatenate([state_scaler_array, action_scaler_array], axis=0)",
"def unscale_action(self, scaled_action: np.ndarray) -> np.ndarray:\n low, high = self.action_space.low, self.action_space.high\n return low + (0.5 * (scaled_action + 1.0) * (high - low))",
"def scale_out(self, *args, **kwargs):\n pass",
"def scale_rewards(reward,min_reward,max_reward,factor=10):\n span = (max_reward - min_reward) / 2\n sub = (max_reward+min_reward) / 2\n return ((reward-sub) / span) * factor",
"def scale(self, value):\n\t\toldscale = self.oldmax - self.oldmin\n\t\tnewscale = self.newmax - self.newmin\n\t\treturn (newscale * (value - self.oldmin) / oldscale) + self.newmin",
"def set_output_limits(self, min_value, max_value):\n self.out_min = min_value\n self.out_max = max_value\n if self.out_min > self.out_max:\n print(\"set_output_limits(): min must be smaller than max.\")\n self.iterm = self.clip_to_output_limits(self.iterm)\n self.output = self.clip_to_output_limits(self.output)",
"def scale(self, value):\r\n return (float(value)-float(self.minimum))/float(self.maximum-self.minimum)*2.0 - 1.0",
"def get_scale():\r\n\r\n \r\n return 0.5",
"def action(self, action):\n low = self.action_space.low\n high = self.action_space.high\n\n scale_factor = (high - low) / 2\n reloc_factor = high - scale_factor\n\n action = action * scale_factor + reloc_factor\n action = np.clip(action, low, high)\n\n return action",
"def scaled(values, output_min, output_max, input_min=0, input_max=1):\n values = _normalize(values)\n if input_min >= input_max:\n raise ValueError('input_min must be smaller than input_max')\n input_size = input_max - input_min\n output_size = output_max - output_min\n for v in values:\n yield (((v - input_min) / input_size) * output_size) + output_min",
"def autoscale(self, A):\n self.vmin = ma.min(A)\n self.vmax = ma.max(A)",
"def scale(self, value):\n return (float(value) - float(self.minimum)) / \\\n float(self.maximum - self.minimum) * 2.0 - 1.0",
"def scale(self, scale):\n\t\tself._current_score *= scale",
"def _adjust_scale(self, value):\n if self._min_val <= value <= self._max_val:\n self._scale_var.set(value)\n self.update_label_text()",
"def rescale(num, old_min, old_max, new_min, new_max):\n old_range = old_max - old_min\n new_range = new_max - new_min\n new_val = new_min + (((num - old_min) * new_range)/old_range)\n\n return new_val",
"def scale(x, minimum, maximum):\n return (x - minimum) / (maximum - minimum)",
"def scale(x, feature_range=(-1,1)):\r\n x = x * 2 - 1\r\n return x",
"def rescaled(M,newmin,newmax):\n mmin,mmax = M.min(),M.max()\n M2 = M.copy()\n M2 -= mmin\n M2 *= (newmax-newmin) / (mmax-mmin)\n M2 += newmin\n return M2",
"def rescale(self):\n low = self.datasource.data[\"values\"].min()\n high = self.datasource.data[\"values\"].max()\n\n # force color to be at lower end of the colormap if\n # data is all equal\n if low == high:\n high += 1\n\n self.set_limits_minmax(low, high)",
"def scale(inp, ab):\n\n return inp * ab[0] + ab[1]\n # pass",
"def scale(self, from_min, from_max, to_min, to_max):\n for i in range(len(self.poses)):\n self.poses[i].position.scale(from_min[:3], from_max[:3], to_min[:3], to_max[:3])\n self.wrenches[i].scale(from_min[3:], from_max[3:], to_min[3:], to_max[3:])",
"def scale_range(x, input_range, target_range):\n\n range = [np.amin(x), np.amax(x)]\n x_std = (x - input_range[0]) / (1.0*(input_range[1] - input_range[0]))\n x_scaled = x_std * (1.0*(target_range[1] - target_range[0])) + target_range[0]\n return x_scaled, range",
"def scale(inp, ab):\n\n return inp * ab[0] + ab[1]",
"def scale(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"scale\")"
]
| [
"0.73590297",
"0.72091806",
"0.71684563",
"0.7003353",
"0.6765136",
"0.6575955",
"0.65628123",
"0.6436179",
"0.6347377",
"0.6303791",
"0.6247077",
"0.62256503",
"0.61939955",
"0.6152221",
"0.6135645",
"0.6094537",
"0.6066447",
"0.60465604",
"0.6009634",
"0.60020316",
"0.5991795",
"0.59798384",
"0.59457535",
"0.59426177",
"0.5934268",
"0.59231174",
"0.59218794",
"0.5916188",
"0.59000534",
"0.58760893"
]
| 0.75005263 | 0 |
Takes in a series of means and variances for the logged data of the state space. Need this so the different states can be normalized to unit normal before passing into the net | def set_statespace_normal(self, means, variances):
if means == [] and variances == []:
print("loading data distribution from the dynamics model")
self.state_means = self.dynam_model.scalarX_tensors_mean
self.state_vars = self.dynam_model.scalarX_tensors_var
else:
self.state_means = torch.Tensor(means)
self.state_vars = torch.Tensor(variances) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def updateMeanAndVar(X, log_gamma, varianceFloor=5.0):",
"def reparameterize(self, mean, logvar):\n if self.training:\n std = torch.exp(0.5 * logvar)\n # std = torch.zeros_like(mean) + 0.25\n eps = torch.randn_like(std)\n return mean + std * eps\n else:\n return mean",
"def sample_mean_var_ml(x):\n n = len(x)\n assert(n > 0)\n if n == 1:\n return x[0], 0\n s = 0.0\n ss = 0.0\n for i in x:\n s += i\n ss += i*i\n mu = s/n\n var = (ss/n) - mu*mu\n return mu, var",
"def _get_normalisation_stats(self):\n p_net_datasets = [self.pdf_dataset] + [self.PDE_dataset] + [self.BC_dataset]\n p_net_means, p_net_stds = get_mean_std_from_datasets(p_net_datasets)\n\n D_net_datasets = [self.PDE_dataset]\n D_net_means, D_net_stds = get_mean_std_from_datasets(D_net_datasets)\n\n U_net_datasets = [self.PDE_dataset]\n U_net_means, U_net_stds = get_mean_std_from_datasets(U_net_datasets)\n\n return p_net_means, p_net_stds, D_net_means, D_net_stds, U_net_means, U_net_stds",
"def updateMeanAndVar(X, log_gamma):\n\n gamma_shape = log_gamma.shape\n x_shape = X.shape\n gamma = np.exp(log_gamma);\n\n mu = np.zeros([gamma_shape[1],x_shape[1]])\n covar = np.zeros([gamma_shape[1],x_shape[1]])\n\n for j in range(gamma_shape[1]): #state\n temp = 0;\n for n in range(x_shape[0]): #dim\n temp = temp + gamma[n,j]*X[n,:]\n\n mu[j,:] = temp/np.sum(gamma[:,j])\n #print(gamma_shape[1])\n #print(x_shape[0])\n u=0\n if (u == 0):\n for j in range(gamma_shape[1]): #state\n temp = 0;\n for n in range(x_shape[0]): #dim\n sig= np.dot(gamma[n,j]*X[n,:]-mu[j,:],X[n,:]-mu[j,:])\n print(sig.shape)\n temp=temp+sig\n #temp = temp + gamma[n,j]*(X[n,:] - mu[j,:])*(X[n,:] - mu[j,:])\n covar[j,:] = np.diagonal(temp)/np.sum(gamma[:,j])\n #print(covar[j,:])\n #print(mu[1,:])\n\n return mu, covar",
"def NLL(sample, params):\n mu = params[:,:,0]\n logsigma = params[:,:,1]\n \n c = normalization.to(mu.device)\n inv_sigma = torch.exp(-logsigma)\n tmp = (sample - mu) * inv_sigma\n return torch.mean(0.5 * (tmp * tmp + 2 * logsigma + c))",
"def std(state, avg=None, keepdims=True, is_log=False):\n return std_raw(state.particles, state.log_weights, avg, keepdims, is_log)",
"def _get_mean_and_log_std(self, *inputs):\n return self._shared_mean_log_std_network(*inputs)",
"def _compute_mean_std(self, sum_, ssum, size):\n assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'\n mean = sum_ / size\n sumvar = ssum - sum_ * mean\n unbias_var = sumvar / (size - 1)\n bias_var = sumvar / size\n\n self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data\n self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data\n\n return mean, bias_var.clamp(self.eps) ** -0.5",
"def batch_stat(x):\n\tmean = torch.mean(x, dim=[0, 2, 3], keepdim=True)\n\tvar = torch.mean((x-mean)**2, dim=[0, 2, 3], keepdim=True)\n\treturn mean, var",
"def forward(self, state):\n x = F.relu(self.linear1(state))\n x = F.relu(self.linear2(x))\n\n mean = self.mean_linear(x)\n log_std = self.log_std_linear(x)\n # clamp the log std to avoid numerical instability\n log_std = torch.clamp(log_std, MIN_LOG_STD, MAX_LOG_STD)\n\n return mean, log_std",
"def sample(self, mean, std, n_samples, sample_seed):\n self.seed_samples(sample_seed)\n self.Y_mean = mean\n self.Y_std = std\n eps = torch.randn(self.batch_size, n_samples, self.Y_dim)\n samples = eps*torch.exp(0.5*self.logvar.unsqueeze(1)) + self.mu.unsqueeze(1)\n samples = self.unwhiten_back(samples)\n samples = samples.data.cpu().numpy()\n return samples",
"def normalization_stats(completeData):\n data_mean = np.mean(completeData, axis=0)\n data_std = np.std(completeData, axis=0)\n\n dimensions_to_ignore = []\n dimensions_to_use = []\n\n dimensions_to_ignore.extend(list(np.where(data_std < 1e-4)[0]))\n dimensions_to_use.extend(list(np.where(data_std >= 1e-4)[0]))\n\n data_std[dimensions_to_ignore] = 1.0\n\n return data_mean, data_std, dimensions_to_ignore, dimensions_to_use",
"def sample_mean_var_unbiased(x):\n n = len(x)\n assert(n > 0)\n if n == 1:\n return x[0], float('Inf')\n mean, v = sample_mean_var_ml(x)\n var = v*n/(n-1)\n return mean, var",
"def normalise(self,data,take_logs:bool=False):\n\n # Normalise vector to sum up to 1\n normalised_vector = data/np.sum(data)\n\n # If take logs is selected, take logs\n if take_logs:\n return np.log(normalised_vector)\n else:\n return normalised_vector",
"def updateMeanAndVar(X, log_gamma, varianceFloor=5.0):\n N = X.shape[0]#71*13\n D = X.shape[1]\n M = log_gamma.shape[1]\n means = np.zeros((M, D))\n covars = np.zeros((M, D))\n #means = np.mean(X, axis=0)\n #covars = np.cov(X.T)\n for k in range(M):\n for n in range(N):\n means[k, :] += np.exp(log_gamma[n, k]) * X[n,:]\n means[k, :] /= np.sum(np.exp(log_gamma[:, k]))\n for k in range(M):\n for n in range(N):\n covars[k, :] += np.exp(log_gamma[n, k]) * (X[n, :] - means[k, :])**2\n covars[k, :] /= np.sum(np.exp(log_gamma[:, k]))\n covars = np.maximum(covars, varianceFloor)\n\n return means, covars",
"def _compute_layer_moments(x):\n return torch.mean(x, dim=(1, 2, 3), keepdim=True), torch.var(x, dim=(1, 2, 3), keepdim=True)",
"def updateMeanAndVar(X, log_gamma, varianceFloor=5.0):\n N,D=X.shape\n M=log_gamma.shape[1]\n means=np.zeros((M,D))\n covars=np.zeros((M,D))\n gamma=np.exp(log_gamma)\n for m in range(M):\n means[m]=np.sum(gamma[:,m].reshape(-1,1)*X,axis=0)/np.sum(gamma[:,m])\n covars[m]=np.sum(gamma[:,m].reshape(-1,1)*np.power((X-means[m]),2),axis=0)/np.sum(gamma[:,m])\n covars[covars<varianceFloor]=varianceFloor\n return means,covars",
"def normalizeData(meanAndStd, dataset):\n\n for i in range(len(dataset)):\n for j in range(len(dataset[i])-1):\n mean = meanAndStd[j][\"mean\"]\n std = meanAndStd[j][\"std\"]\n dataset[i][j] = (dataset[i][j] - mean)/std",
"def transform(self, sess, xs):\n return sess.run( [self.z_mean, self.z_log_sigma_sq],\n feed_dict={self.x: xs} )",
"def unstandardize(da: xr.DataArray, mean: xr.DataArray, std: xr.DataArray):\n return (std * da) + mean",
"def calculate_ll_normal_simple(data, variances):\n\n n_cells = data.shape[0]\n n_segments = data.shape[1]\n n_states = variances.shape[1]\n\n # Create states as (n_segments, n_cells, n_states) array\n states = np.tile(np.arange(0, n_states, 1), (n_cells, n_segments, 1))\n\n # Calculate mean\n mean = states\n \n # Normal dist log likelihood\n ll = (\n -0.5 * np.log(2. * np.pi)\n -0.5 * np.log(variances[:, np.newaxis, :])\n -1. * (np.square(data[:, :, np.newaxis] - mean) /\n (2. * variances[:, np.newaxis, :])))\n ll[np.isnan(data)] = 0.\n\n return ll",
"def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec",
"def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec",
"def normalize(values):\n return (values - np.mean(values)) / np.std(values)",
"def mean_var_sd(x):\n n = x.size\n assert 2 <= n\n mean = x.sum() / n\n diff = x - mean\n var = np.vdot(diff, diff) / (n - 1)\n sd = var ** 0.5\n return {\n 'mean': mean,\n 'var': var,\n 'sd': sd,\n }",
"def generate_initial_state(nm, ns):\n mue = 15 * np.random.rand(nm) # mean\n sge = 3 * (np.random.rand(nm) + 0.1)# std\n lme = (np.random.rand(nm) + 0.1) # mixture ratio (lambda)\n lme /= lme.sum()\n return mue, sge, lme",
"def normalize_data(self, data):\n self.find_mean_std(data)\n return (data - self._data_mean) / self._data_std",
"def reparameterize(self, mean, logvar):\n if self.training:\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n return mean + std * eps\n else:\n # Reconstruction mode\n return mean",
"def meanstd(self):\n\t\tmean = [125.3, 123.0, 113.9] # R,G,B\n\t\tstddev = [63.0, 62.1, 66.7] # R,G,B\n\t\treturn [mean, stddev]"
]
| [
"0.633177",
"0.6291005",
"0.61961645",
"0.61756563",
"0.6090529",
"0.6065415",
"0.6040498",
"0.59970474",
"0.5996735",
"0.5990036",
"0.59628403",
"0.5940341",
"0.5930685",
"0.5929473",
"0.58963656",
"0.58864814",
"0.5882486",
"0.5876815",
"0.5867442",
"0.58599293",
"0.58509624",
"0.5819985",
"0.581712",
"0.581712",
"0.57913023",
"0.57821816",
"0.57798845",
"0.57727975",
"0.5759265",
"0.574658"
]
| 0.758573 | 0 |
merge git and bit repos and extract count information | def getMergedObj(self, git_team, git_repos, bit_repos):
code = 200
err = ""
merged = {}
languages = {}
public_count = git_team.get('public_repos', 0)
followers_count = git_team.get('followers', 0)
fork_count = 0
git_keys = ['fork', 'forks_count', 'watchers_count', 'language', 'description']
bit_keys = ['is_private', 'language', 'description']
for repo in git_repos:
repo_data = {}
for key in git_keys:
current_val = repo.get(key)
if not current_val:
continue # skip if not found
if key == 'fork':
fork_count += 1
elif key == 'language':
lang = current_val.lower()
languages[lang] = languages.get(lang, 0) + 1
merged[repo['name']] = repo_data
for repo in bit_repos:
repo_data = dict()
if repo['name'] not in merged:
for key in bit_keys:
current_val = repo.get(key)
if not current_val:
continue # skip if not found
if key == 'is_private':
public_count += 1
else:
if key == 'language':
lang = current_val.lower()
languages[lang] = languages.get(lang, 0) + 1
try:
watchers_href = repo['links']['watchers']['href']
repo_data['watchers_count'] = repo_data.get('watchers_count', 0) +\
self._json_request(watchers_href).get('size',0)
except KeyError:
pass
merged[repo['name']] = repo_data
data = {
'public_repos_count': public_count,
'followers_count': followers_count,
'forked_repos_count': fork_count,
'non_forked_repos_count': public_count - fork_count,
'list_languages': languages,
'repos': merged
}
merged_data = {
'data': data,
'code': code,
'err': ""
}
return merged_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __gitStatistics(self):\n self.vcs.gitStatistics(self.project.getProjectPath())",
"def get_git_commiter_count(path):\n process = subprocess.Popen(['git', 'shortlog', '-sn'], cwd=path, stdout=subprocess.PIPE)\n stdout, _ = process.communicate()\n committers = stdout.decode(\"ISO-8859-1\")\n return len(committers.split('\\n'))",
"def git_status(c):\n c.run(\"git submodule foreach git status\")",
"def test_repositories(self):\n\t\ttot_repos = total_repos(self)\n\t\t#self.assertEqual(tot_repos, \"6052353)",
"def display_repos_and_commits(github_id):\r\n\r\n repo_list = get_repos(github_id)\r\n\r\n for repo in repo_list:\r\n commits_count = get_commits(github_id, repo)\r\n print('Repo: {} Number of commits: {}'.format(repo, commits_count))",
"def get_git_commit_count(path):\n process = subprocess.Popen(['git', 'rev-list', 'HEAD', '--count', '--no-merges'], cwd=path, stdout=subprocess.PIPE)\n stdout, _ = process.communicate()\n number = stdout.decode().strip(\"\\n\")\n return int(number)",
"def test_repo_commit_count():\n\n commit_count = BehavioralUtils.count_commits('drupal', 'builds')\n assert commit_count == 4",
"def __gitSubmodulesSummary(self):\n self.vcs.gitSubmoduleSummary(self.project.getProjectPath())",
"def commit_count(commit_info_dict):\n commit_counts = {}\n for release, commit_dict in commit_info_dict.items():\n commit_counts_per_release = {}\n for user_id, commit_list in commit_dict.items():\n commit_counts_per_release[user_id] = len(commit_list)\n commit_counts[release] = commit_counts_per_release\n return commit_counts",
"def test_get_github_repos_count_positive(self):\n self.assertIsNotNone(app.get_github_repos_count(\"dhh\")[\"count\"])",
"def add_git_info(run, scriptpath):\n try:\n repo = Repo(scriptpath, search_parent_directories=True)\n run[\"gitrepo\"] = repo.working_dir\n run[\"gitcommit\"] = repo.head.commit.hexsha\n run[\"gitorigin\"] = get_origin(repo)\n\n if not option_set('ignored metadata', 'diff'):\n whole_diff = ''\n diffs = repo.index.diff(None, create_patch=True)\n for diff in diffs:\n whole_diff += \"\\n\\n\\n\" + \"--- {}\\n+++ {}\\n\".format(\n diff.a_path, diff.b_path) + diff.diff.decode(\"utf-8\")\n\n run['diff'] = whole_diff\n except (InvalidGitRepositoryError, ValueError):\n # We can't store git info for some reason, so just skip it\n pass",
"def unmerged_total(self):\n return int(self.git.rev_list('--count', '{}..{}'.format(self.base_branch, self.topic_branch)))",
"def git_status():\n\tl = []\n\tdebug(\"Not implemented\",1)\n\n\treturn l",
"def GetNumberOfRepoMetas(language: scrape_repos_pb2.LanguageToClone) -> int:\n path = pathlib.Path(language.destination_directory)\n if path.is_dir():\n return len([x for x in path.iterdir() if x.suffix == '.pbtxt'])\n else:\n return 0",
"def __cross_wiki_counts(self):\n\n print(\"Updating counts by merging with CrossWiki\")\n\n cnt = 0\n crosswiki_path = os.path.join(\n self.base_url, \"generic/p_e_m_data/crosswikis_p_e_m.txt\"\n )\n\n with open(crosswiki_path, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n parts = line.split(\"\\t\")\n mention = unquote(parts[0])\n\n if (\"Wikipedia\" not in mention) and (\"wikipedia\" not in mention):\n if mention not in self.wiki_freq:\n self.wiki_freq[mention] = {}\n\n num_ents = len(parts)\n for i in range(2, num_ents):\n ent_str = parts[i].split(\",\")\n ent_wiki_id = int(ent_str[0])\n freq_ent = int(ent_str[1])\n\n if (\n ent_wiki_id\n not in self.wikipedia.wiki_id_name_map[\"ent_id_to_name\"]\n ):\n ent_name_re = self.wikipedia.wiki_redirect_id(ent_wiki_id)\n if (\n ent_name_re\n in self.wikipedia.wiki_id_name_map[\"ent_name_to_id\"]\n ):\n ent_wiki_id = self.wikipedia.wiki_id_name_map[\n \"ent_name_to_id\"\n ][ent_name_re]\n\n cnt += 1\n if (\n ent_wiki_id\n in self.wikipedia.wiki_id_name_map[\"ent_id_to_name\"]\n ):\n if mention not in self.mention_freq:\n self.mention_freq[mention] = 0\n self.mention_freq[mention] += freq_ent\n\n ent_name = self.wikipedia.wiki_id_name_map[\n \"ent_id_to_name\"\n ][ent_wiki_id].replace(\" \", \"_\")\n if ent_name not in self.wiki_freq[mention]:\n self.wiki_freq[mention][ent_name] = 0\n self.wiki_freq[mention][ent_name] += freq_ent",
"def test_repo_built():\n\n count = BehavioralUtils.count_repos_updated('builds')\n # If 1 repo Siteupdates in report repo built successfully.\n assert count == 1",
"def __gitSubmodulesStatus(self):\n self.vcs.gitSubmoduleStatus(self.project.getProjectPath())",
"def get_commit_count():\n if COMMIT_COUNT is None:\n return shell_output('git rev-list {base_version}..HEAD --count'\n .format(base_version=get_base_version()))\n return COMMIT_COUNT",
"def projects_count(args):\n session = GithubSession()\n\n print(f\"counting {args.name}\")\n\n board = session.get_project(args.name)\n\n tally = []\n\n columns = session.get_columns(board)\n for column in columns:\n print(column[\"name\"], file=sys.stderr)\n\n cards = list(session.get_cards(column))\n\n total = Decimal(0)\n unpointed = 0\n num_cards = 0\n num_walk_ins = 0\n issues = []\n walk_ins = []\n walk_in_points = 0\n\n for card_data in cards:\n issue_number = utils.get_issue_number_from_card_data(card_data)\n if not issue_number: # must be a note\n continue\n\n issue_data = session.get_issue(issue_number)\n labels = issue_data[\"labels\"]\n\n num_cards += 1\n\n points = get_points(labels)\n if points:\n total += points\n else:\n unpointed += 1\n\n issue_data = {\n \"issue_number\": issue_number,\n \"points\": str(points),\n \"unpointed\": points is None,\n \"walk_in\": False,\n }\n\n if is_walk_in(labels):\n num_walk_ins += 1\n if points:\n walk_in_points += points\n\n issue_data[\"walk_in\"] = True\n\n walk_ins.append(issue_data)\n\n issues.append(issue_data)\n\n tally.append(\n {\n \"column\": column[\"name\"],\n # 'issues': issues,\n \"num_cards\": num_cards,\n \"num_walk_ins\": num_walk_ins,\n \"walk_in_points\": str(walk_in_points),\n # 'walk_ins': walk_ins,\n \"total_points\": str(total),\n \"unpointed\": unpointed,\n }\n )\n\n print(json.dumps(tally, indent=4))",
"def list_all_repos_info():\n repos = ALL_REPOS\n for repo_name, repo in zip(repos, _repos(repos)):\n repo_name = shorten_path(repo_name)\n print(repo_name)\n try:\n nbr_ahead, nbr_behind = _nbr_commits_ahead_and_behind(repo)\n except git.exc.GitCommandError:\n print(f\" {repo.active_branch.name}\")\n except DetachedHeadError:\n print(f\" HEAD --> {repo.head.commit}\")\n else:\n nb_tabul = 3 if len(repo.active_branch.name) < 6 else 2\n tabuls = \"\\t\" * nb_tabul\n print(f\" {repo.active_branch.name}{tabuls}โ {nbr_behind} โ {nbr_ahead}\")\n if repo.index.diff(None):\n print(\" !!! With unstaged changes !!!\")\n if repo.index.diff(\"HEAD\"):\n print(\" !!! With uncommited changes !!!\")",
"def get_git_info():\n\n diff = \"Could not extract diff\"\n githash = '00000'\n try:\n # Refers to the global qc_config\n PycQEDdir = pq.__path__[0]\n githash = subprocess.check_output(['git', 'rev-parse',\n '--short=10', 'HEAD'], cwd=PycQEDdir)\n diff = subprocess.run(['git', '-C', PycQEDdir, \"diff\"],\n stdout=subprocess.PIPE).stdout.decode('utf-8')\n except Exception:\n pass\n return githash, diff",
"def main():\n parser = argparse.ArgumentParser('compute git hashes')\n parser.add_argument('-d', '--debug', action='store_true')\n parser.add_argument('-k', '--keep-dot-git', action='store_true')\n parser.add_argument('path', nargs='+')\n args = parser.parse_args()\n args.depth = -1 # for debug print\n status = 0\n for path in args.path:\n try:\n try:\n mode, gitclass, size = classify(path)\n except ValueError:\n print('%s: unhashable!' % path)\n status = 1\n continue\n hasher = generic_hash(path, mode, size, args)\n result = hasher.hexdigest()\n if args.debug:\n print('%s %s %s\\t%s' % (strmode(mode), gitclass, result,\n path))\n else:\n print('%s: %s hash = %s' % (path, gitclass, result))\n except OSError as err:\n print(str(err))\n status = 1\n sys.exit(status)",
"def perform_calculations():\n # contains the result of the total repositories\n result = [] \n count_all_lines = 0 # mainatains the line that has been counted\n for_loops_list = []\n func_parameters = [] # function parameter checks list\n no_of_variables = set() # a set containing variables\n docs_comments = [] \n single_line_comments = []\n code_duplication = 0\n repo_imports_set = set() # imports for the entire repo\n current_repo = ''\n for item in traverse_repos():\n current_repo = item['repo_url']\n for path in item['files']:\n with open(path, 'r') as file_:\n lines = file_.readlines()\n # call code duplication\n code_duplication += code_duplication_check(lines)\n for line in lines:\n if re.match(r'^#.+', line.strip()):\n single_line_comments.append(line.strip())\n # this makes it possible to campare later\n # call find_repo_imports\n line_import = find_repo_imports(line)\n \n repo_imports_set.add(line_import)\n # call countlines of code function\n count_all_lines += count_lines_of_code(line.strip())\n # call find_for_loops\n for_loops = find_for_loops(line)\n if for_loops:\n for_loops_list.append(for_loops)\n function = avarage_parameters(line)\n if function:\n func_parameters.append(avarage_parameters(line))\n no_of_variables.add(avarage_variables_per_line(line))\n\n with open(path, 'r') as content_file:\n content = content_file.read()\n docs_comments.extend(find_docstrings_and_comments(content, single_line_comments))\n \n \n\n external_packages = find_external_packages(repo_imports_set)\n repo_lines_of_codes = count_all_lines - len(docs_comments)\n avarage_variables_repo = (len(no_of_variables)-1) / repo_lines_of_codes\n nesting = nesting_depth(for_loops_list) / len(for_loops_list)\n avarage_params = sum(func_parameters) / len(func_parameters)\n repo_result = {\n 'repository_url': current_repo, \n 'number of lines': repo_lines_of_codes, \n 'libraries': external_packages,\n 'nesting factor': nesting,\n 'code duplication': code_duplication,\n 'average parameters': avarage_params,\n 'average variables': avarage_variables_repo\n \n }\n result.append(repo_result)\n\n return result",
"def main(*args):\n\n # Default options\n repos = []\n user_map = NullUserMap()\n plotout = None\n printout = True\n gitlab = None\n\n if \"--help\" in args:\n print(main.__doc__)\n return 0\n\n # Parse command-line \n it = iter(args)\n for a in it:\n if a == \"--users\":\n user_map = FileUserMap(next(it))\n elif a == \"--pdf\":\n plotout = next(it)\n elif a == \"--noprint\":\n printout = False\n elif a == \"--gitlab\":\n gitlab = next(it), next(it)\n else:\n repos.append(a)\n \n # Setup backend\n if gitlab is None:\n coretype = GitCLIBackend\n coreargs = repos\n else:\n coretype = GitlabBackend\n coreargs = gitlab\n\n # Dictionary for storing the data to be presented\n commits = {}\n \n # Find the bound for searching -- the beginning of the week, one year ago\n today = datetime.now().replace(hour=0, minute=0,second=0,microsecond=0)\n year_ago = today.replace(year = today.year - 1)\n _, __, dow = year_ago.isocalendar()\n year_ago-= timedelta(days=(dow-1))\n \n # Processes the git logs and stores some intermediate results in the three\n # dictionaries instantiated above\n for email, date, stats in coretype(*coreargs, since=year_ago.strftime(\"%Y-%m-%d\")):\n \n # Trim date of commit to midnight of that day\n date = date.replace(hour=0,minute=0,second=0,microsecond=0)\n user = user_map.map(email)\n \n if not user in commits:\n commits[user] = {}\n if not date in commits[user]:\n commits[user][date] = 0\n \n commits[user][date] += 1\n \n # Print plaintext report\n if printout:\n \n for user, cal in commits.items():\n \n print(\"Annual summary for %s\" % (user))\n \n for date, count in sorted(cal.items(), key=lambda x: x[0]):\n strdate = date.strftime(\"%x\")\n print(\" %s: %2d commits\" % (strdate, count))\n \n print(\"\")\n\n # Draw plots\n if plotout is not None:\n\n with PdfPages(plotout) as pdf:\n \n labels = []\n offsets = {}\n \n cdict = ((205.,247.,237.), (15.,191.,148.))\n \n cdict = {\n 'red': (\n (0.0, cdict[0][0]/255, cdict[0][0]/255),\n (1.0, cdict[1][0]/255, cdict[1][0]/255)\n ),\n 'green':(\n (0.0, cdict[0][1]/255, cdict[0][1]/255),\n (1.0, cdict[1][1]/255, cdict[1][1]/255)\n ),\n 'blue': (\n (0.0, cdict[0][2]/255, cdict[0][2]/255),\n (1.0, cdict[1][2]/255, cdict[1][2]/255)\n )\n }\n \n plt.register_cmap(name='Sea', data=cdict)\n colormap = plt.get_cmap('Sea')\n \n min_yr, min_week, _ = year_ago.isocalendar()\n max_yr, max_week, _ = today.isocalendar()\n \n week_counts = {yr: weeks_in_year(yr) for yr in range(min_yr, max_yr+1)}\n \n # Generate labels for each week -- \n # Add year to the label of the first week of each year as well as \n # the very first week in the history\n lastmon = None\n for yr, weeks in sorted(week_counts.items(), key=lambda x: x[0]):\n cur = datetime(year=yr, month=1, day=4) # jan 4 is always in week 1 of the iso year\n for i in range(weeks):\n mon = cur.strftime(\"%b\")\n if mon != lastmon:\n labels.append(cur.strftime(\"%b\"))\n else:\n labels.append(\"\")\n offsets[(yr, i+1)] = len(offsets)\n cur += timedelta(days=7)\n lastmon = mon\n \n for user in commits:\n \n fig = plt.figure(figsize=(7.5, 1.65))\n\n gs = gridspec.GridSpec(2, 1, height_ratios=[8, 1]) \n ax, cax = plt.subplot(gs[0]), plt.subplot(gs[1])\n \n maxcommits = ceil(max(commits[user].values()) * 1.5)\n \n for date, count in commits[user].items():\n yr, wk, dow = date.isocalendar()\n offset = offsets[(yr, wk)]\n \n ax.add_patch(\n patches.Rectangle(\n (offset+0.05, dow - 1 + 0.05),\n 0.95, 0.95,\n linewidth=0,\n facecolor=colormap(1. 
* (count - 1) / (maxcommits) )\n )\n )\n \n ax.set_title(\"Commit summary for %s\" % user, y=1.28)\n \n ax.xaxis.tick_top()\n ax.set_xticks([x for x in np.arange(len(offsets)) if labels[int(x)] != \"\"])\n ax.set_xticks(np.arange(len(offsets)), minor=True)\n \n ax.set_xticklabels([x for x in labels if x != \"\"])\n ax.set_xlim(offsets[(min_yr, min_week)], offsets[(max_yr, max_week)]+1)\n\n ax.set_ylim(0, 7)\n ax.set_yticks(np.arange(7))\n ax.set_yticklabels([\"S \",\"M \",\"T \",\"W \",\"R \",\"F \",\"S \"])\n ax.invert_yaxis()\n \n if maxcommits <= 10:\n top = maxcommits\n step = 1.\n else:\n top = (maxcommits - 1) + 11 - ((maxcommits - 1) % 11)\n step = top/11.\n\n colorticks = np.arange(0., top+(step/2), step) / (top)\n colorlabels = [\"%d\" % (x*top) for x in colorticks]\n \n cbar = colorbar.ColorbarBase(\n cax, cmap=colormap,\n orientation='horizontal'\n )\n cbar.set_ticks(colorticks)\n cbar.set_ticklabels(colorlabels)\n cax.set_xlim(colorticks[0], colorticks[-1])\n \n for label in ax.get_xticklabels():\n label.set_horizontalalignment('left')\n \n for label in ax.get_yticklabels():\n label.set_horizontalalignment('center')\n label.set_verticalalignment('top')\n \n for item in (\n [ax.xaxis.label, ax.yaxis.label] +\n ax.get_xticklabels() + ax.get_yticklabels() +\n cax.get_xticklabels()\n ):\n item.set_fontsize(7)\n \n ax.title.set_fontsize(10)\n fig.subplots_adjust(top=0.7, bottom=0.15)\n \n pdf.savefig(fig)\n\n return 0",
"def refresh():\n git.fetch()\n output = str(git.merge('--ff-only')).strip()\n if output != 'Already up to date.':\n print(output)\n git.fetch('--tags')",
"def get_commits(): # pragma: no cover\n global commit_data\n all_commits = 0\n team = {\n 'stevex196x': 0,\n 'TheSchaft': 0,\n 'melxtru': 0,\n 'aylish19': 0,\n 'connormlewis': 0,\n 'tsukkisuki': 0\n }\n while all_commits == 0:\n url = 'https://api.github.com/repos/connormlewis/idb/stats/contributors'\n data = requests.get(\n url, headers={'Authorization': 'token ' + os.environ['API_TOKEN']})\n json_list = data.json()\n for entry in json_list:\n total = entry['total']\n user_name = entry['author']['login']\n if user_name in team:\n team[user_name] = total\n all_commits += total\n return team, all_commits",
"def get_count(owner, repo_slug, auth_tokens, endpoint):\n count_url = make_req_url(owner, repo_slug, endpoint, 0)\n response = send_bitbucket_request(count_url, auth_tokens)\n if response and 'count' in response:\n return response['count']-1\n return 0",
"def main():\n\t\tn = 0 \n\t\tfor page in range(pages):\n\t\t\t\tpageNumber = str(page + 1)\n\t\t\t\tprint \"Processing page number \" + pageNumber\n\t\t\t\tpageUrl = 'https://api.github.com/users/' + USER + '/gists?page=' + pageNumber + '&per_page=' + str(int(perpage))\n\t\t\t\tu = urlopen (pageUrl)\n\t\t\t\tgists = json.load(u)\n\t\t\t\t\t\t \n\t\t\t\tfor gist in gists:\n\t\t\t\t\t\tn += 1\n\t\t\t\t\t\tprint \"==== %d ====\" % n\n\t\t\t\t\t\t# print gist.keys()\n\t\t\t\t\t\tgistd = gist['id']\n\t\t\t\t\t\tgisturl = gist['html_url']\n\t\t\t\t\t\tgistdesc = gist['description'] or gistd\n\t\t\t\t\t\tgistfiles = gist['files']\n\t\t\t\t\t\tprint \"gistd: \", gistd\n\t\t\t\t\t\tprint \"gisturl: \", gisturl\n\t\t\t\t\t\tprint \"gistdesc: \", gistdesc\n\t\t\t\t\t\tprint \"gistfiles: \", len(gistfiles)\n\t\t\t\t\t\tfor f in gistfiles:\n\t\t\t\t\t\t\t\tfileurl = gistfiles[f]['raw_url']\n\t\t\t\t\t\t\t\t_filetype = gistfiles[f]['language']\n\t\t\t\t\t\t\t\tif _filetype in ALLOWED_FILE_TYPES:\n\t\t\t\t\t\t\t\t\t\tfiletype = _filetype\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tfiletype = \"None\"\n\t\t\t\t\t\t\t\tprint \"fileurl: \", fileurl \n\t\t\t\t\t\t\t\tprint \"filetype: \", filetype, \"(found='%s')\" % _filetype \n\t\t\t\t\t \n\t\t\t\t\t\t\t\tif TESTING:\n\t\t\t\t\t\t\t\t\t\t# testing\n\t\t\t\t\t\t\t\t\t\treq = urlopen(fileurl)\n\t\t\t\t\t\t\t\t\t\tbodytext = req.read()\n\t\t\t\t\t\t\t\t\t\tencoding=req.headers['content-type'].split('charset=')[-1]\n\t\t\t\t\t\t\t\t\t\tucontent = unicode(bodytext, encoding)\n\t\t\t\t\t\t\t\t\t\tbodytext = \"# \" + gisturl + \"\\n\\n\" + ucontent\n\t\t\t\t\t\t\t\t\t\t# bodytext = ucontent\n\t\t\t\t\t\t\t\t\t\timport_dash(gistdesc, bodytext, filetype)\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\t\t\t\treq = urlopen(fileurl)\n\t\t\t\t\t\t\t\t\t\t\t\tbodytext = req.read()\n\t\t\t\t\t\t\t\t\t\t\t\tencoding=req.headers['content-type'].split('charset=')[-1]\n\t\t\t\t\t\t\t\t\t\t\t\tucontent = unicode(bodytext, encoding)\n\t\t\t\t\t\t\t\t\t\t\t\tbodytext = \"# \" + gisturl + \"\\n\\n\" + ucontent\n\t\t\t\t\t\t\t\t\t\t\t\t# bodytext = ucontent\n\t\t\t\t\t\t\t\t\t\t\t\timport_dash(gistdesc, bodytext, filetype)\n\t\t\t\t\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\t\t\t\t\t\tprint e\n\t\t\t\t\t\t\t\t\t\t\t\tprint \"*** ERROR WRITING TO sqlite3 ***\"\n\t\t\t\t\t\t\t\t\t\t\t\tpass\n\n\t\t\t\tif TESTING:\n\t\t\t\t\t\t# so to avoid calling github API too much...\n\t\t\t\t\t\tbreak",
"def get_git_status(self) -> GitStatus:\n import gitdb.exc # type: ignore\n\n repo = get_git_repo()\n\n if not repo or self._base_commit is None:\n return GitStatus([], [], [], [])\n\n try:\n repo.rev_parse(self._base_commit)\n except gitdb.exc.BadName:\n raise ActionFailure(f\"Unknown git ref '{self._base_commit}'\")\n\n # Output of git command will be relative to git project root\n status_output = zsplit(\n git.diff(\n \"--cached\",\n \"--name-status\",\n \"--no-ext-diff\",\n \"-z\",\n \"--diff-filter=ACDMRTUXB\",\n \"--ignore-submodules\",\n self._base_commit,\n ).stdout.decode()\n )\n\n added = []\n modified = []\n removed = []\n unmerged = []\n while status_output:\n code = status_output[0]\n fname = status_output[1]\n trim_size = 2\n\n if not code.strip():\n continue\n if code == StatusCode.Untracked or code == StatusCode.Ignored:\n continue\n\n # The following detection for unmerged codes comes from `man git-status`\n if code == StatusCode.Unmerged:\n unmerged.append(self._fname_to_path(repo, fname))\n if (\n code[0] == StatusCode.Renamed\n ): # code is RXXX, where XXX is percent similarity\n removed.append(self._fname_to_path(repo, fname))\n fname = status_output[2]\n trim_size += 1\n added.append(self._fname_to_path(repo, fname))\n if code == StatusCode.Added:\n added.append(self._fname_to_path(repo, fname))\n if code == StatusCode.Modified:\n modified.append(self._fname_to_path(repo, fname))\n if code == StatusCode.Deleted:\n removed.append(self._fname_to_path(repo, fname))\n\n status_output = status_output[trim_size:]\n debug_echo(\n f\"Git status:\\nadded: {added}\\nmodified: {modified}\\nremoved: {removed}\\nunmerged: {unmerged}\"\n )\n\n return GitStatus(added, modified, removed, unmerged)",
"def _count_langpacks(server_config, repo_id):\n keyword = 'Package Langpacks:'\n completed_proc = cli.Client(server_config).run((\n 'pulp-admin repo list --repo-id {} '\n '--fields content_unit_counts'\n ).format(repo_id).split())\n lines = [\n line for line in completed_proc.stdout.splitlines()\n if keyword in line\n ]\n assert len(lines) in (0, 1)\n if len(lines) == 0:\n return 0\n else:\n return int(lines[0].split(keyword)[1].strip())"
]
| [
"0.62743694",
"0.59985805",
"0.5849858",
"0.5814272",
"0.5812936",
"0.57753754",
"0.5728794",
"0.5617592",
"0.5600996",
"0.55920964",
"0.5562345",
"0.54946303",
"0.5492678",
"0.5465922",
"0.54608595",
"0.5435852",
"0.5432932",
"0.5418635",
"0.5418293",
"0.53994155",
"0.5394219",
"0.5351661",
"0.5350734",
"0.53314817",
"0.5302077",
"0.5297346",
"0.52687603",
"0.5263678",
"0.52492636",
"0.5236067"
]
| 0.66621345 | 0 |
fetch team information from Bitbucket and GitHub | def getMergedTeam(self):
response_obj = dict(
data="",
err="",
code=200
)
git_repos_data = []
bit_repos_data = []
GIT_TEAM_URI = config.GITHUB_API_ENDPOINT + "/orgs/{team_name}".format(team_name=self.team_name)
git_team_info = self._json_request(GIT_TEAM_URI)
GIT_REPOS_URI = git_team_info.get('repos_url')
if GIT_REPOS_URI:
self.repo = GIT_REPOS_URI
git_repos_data = self._json_request(GIT_REPOS_URI)
else:
            print("Can't find any information for this team on GitHub")  # a logger call could go here
            """ or just return a JSON Not Found error instead:
response_obj.update({
'code': 404,
                'err': "Can't find any information for this team on GitHub"
})
return response_obj
"""
        # The Bitbucket API paginates its results, so walk every page below.
BIT_REPOS_URI = config.BITBUCKET_API_ENDPOINT + "/repositories/{team_name}".format(team_name=self.team_name)
if BIT_REPOS_URI:
bit_repos_data = self._recersive_request(BIT_REPOS_URI)
else:
            print("Can't find any information for this team on Bitbucket")  # a logger call could go here
            """ or just return a JSON Not Found error instead:
response_obj.update({
'code': 404,
                'err': "Can't find any information for this team on Bitbucket"
})
return response_obj
"""
merged_data = self.getMergedObj(git_team_info, git_repos_data, bit_repos_data)
        response_obj.update(merged_data)
return response_obj | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_commits(): # pragma: no cover\n global commit_data\n all_commits = 0\n team = {\n 'stevex196x': 0,\n 'TheSchaft': 0,\n 'melxtru': 0,\n 'aylish19': 0,\n 'connormlewis': 0,\n 'tsukkisuki': 0\n }\n while all_commits == 0:\n url = 'https://api.github.com/repos/connormlewis/idb/stats/contributors'\n data = requests.get(\n url, headers={'Authorization': 'token ' + os.environ['API_TOKEN']})\n json_list = data.json()\n for entry in json_list:\n total = entry['total']\n user_name = entry['author']['login']\n if user_name in team:\n team[user_name] = total\n all_commits += total\n return team, all_commits",
"def __gitFetch(self):\n self.vcs.gitFetch(self.project.getProjectPath())",
"def get_companies_and_people(team):",
"def get_people(team):",
"def test_retrieve_team(self):\n pass",
"def get_from_git(project, obj, params={}, verbose=0):\n\n url = \"%s%s/raw/%s\" % (GIT_URL, project, obj)\n return load_yaml(requester(url, params=params,\n headers={'Accept': 'application/json'},\n verbose=verbose).text)",
"def describe_workteam(WorkteamName=None):\n pass",
"def get_available_companies(team):",
"def get_teams():",
"def get_info_from_api(team_name):\n if \"-\" in team_name:\n team_name = team_name.replace(\"-\", \"+\")\n if \"brighton\" in team_name: # some teams has different names than in sofa-score\n team_name = \"brighton\"\n if \"leicester\" in team_name:\n team_name = \"leicester\"\n if \"norwich\" in team_name:\n team_name = \"norwich\"\n if \"mallorca\" in team_name:\n team_name = \"mallorca\"\n if \"parma\" in team_name:\n team_name = \"parma+calcio\"\n if \"bayern\" in team_name:\n team_name = \"bayern\"\n if \"koln\" in team_name:\n team_name = \"fc+koln\"\n if \"union+berlin\" in team_name:\n team_name = \"union+berlin\"\n if \"fsv+mainz\" in team_name:\n team_name = \"mainz\"\n if \"hoffenheim\" in team_name:\n team_name = \"hoffenheim\"\n if \"mgladbach\" in team_name:\n team_name = \"borussia+monchengladbach\"\n if \"schalke\" in team_name:\n team_name = \"schalke\"\n if \"leverkusen\" in team_name:\n team_name = \"leverkusen\"\n if \"paderborn\" in team_name:\n team_name = \"paderborn\"\n print(team_name)\n response = requests.get(cfg.API_URL + team_name)\n team_data = json.loads(response.text)\n return team_data['teams'][0]",
"def get_available_companies_and_people(team):",
"def test_teams_get_team_v1(self):\n pass",
"def test_basketballteams_get(self):\n pass",
"def getMergedObj(self, git_team, git_repos, bit_repos):\n code = 200\n err = \"\"\n merged = {}\n languages = {}\n public_count = git_team.get('public_repos', 0)\n followers_count = git_team.get('followers', 0)\n fork_count = 0\n git_keys = ['fork', 'forks_count', 'watchers_count', 'language', 'description']\n bit_keys = ['is_private', 'language', 'description']\n for repo in git_repos:\n repo_data = {}\n for key in git_keys:\n current_val = repo.get(key)\n\n if not current_val:\n continue # skip if not found\n if key == 'fork':\n fork_count += 1\n elif key == 'language':\n lang = current_val.lower()\n languages[lang] = languages.get(lang, 0) + 1\n\n merged[repo['name']] = repo_data\n\n for repo in bit_repos:\n repo_data = dict()\n if repo['name'] not in merged:\n for key in bit_keys:\n current_val = repo.get(key)\n if not current_val:\n continue # skip if not found\n if key == 'is_private':\n public_count += 1\n else:\n if key == 'language':\n lang = current_val.lower()\n languages[lang] = languages.get(lang, 0) + 1\n try:\n watchers_href = repo['links']['watchers']['href']\n repo_data['watchers_count'] = repo_data.get('watchers_count', 0) +\\\n self._json_request(watchers_href).get('size',0)\n except KeyError:\n pass\n merged[repo['name']] = repo_data\n data = {\n 'public_repos_count': public_count,\n 'followers_count': followers_count,\n 'forked_repos_count': fork_count,\n 'non_forked_repos_count': public_count - fork_count,\n 'list_languages': languages,\n 'repos': merged\n }\n merged_data = {\n 'data': data,\n 'code': code,\n 'err': \"\"\n }\n return merged_data",
"async def _get_team_info(self, server_id: str, team_id: str):\n params = {}\n url = self.api_url + 'teams/{}'.format(team_id)\n\n return await self._make_request(url, params, server_id)",
"def get(self, project_slug):\n project = Project.query.filter_by(slug=project_slug).first_or_404()\n\n if not (project.public or current_user.is_authenticated()):\n flask_restful.abort(404)\n\n return [\n dict(name=job.git_branch)\n for job\n in (\n project.jobs.distinct(Job.git_branch)\n .order_by(sqlalchemy.asc(Job.git_branch))\n )\n if job.git_branch is not None\n ]",
"def test_get_team_history(self):\n pass",
"def send_bitbucket_request(req_url, auth_tokens):\n # Success status 200, return JSON\n req = requests.get(req_url, auth=auth_tokens)\n if req.status_code == 200:\n return json.loads(req.content)\n return {}",
"def do_pull(self, arg):\n checkLocalGitLocation()\n teamorindividual = input(\"Is this a team or individual (t or i):\")\n if teamorindividual == 'i':\n for student in returnAllStudents():\n os.system(\"cd %s && git pull https://github.ccs.neu.edu/%s && cd ..\" %\n (localgitlocation, 'cs5500/' + student))\n else:\n for team in returnAllTeams():\n os.system(\"cd %s && git pull https://github.ccs.neu.edu/%s/%s && cd ..\" %\n (localgitlocation + '/' + team, githuborg, team))",
"def test_get_open_requests_by_team(self):\n pass",
"def get_details(self, repo=None):\n api_json = []\n\n #get all branches from this repo\n branches = self.make_branches(self.getBranch(repo))\n\n today = datetime.date.today()\n yesterday = today - datetime.timedelta(2)\n\n for branch in branches:\n args = {\"per_page\": \"100\",\n \"sha\": branch,\n \"author\": self.username,\n \"since\": yesterday.isoformat()}\n args = self.make_args(args)\n repo_url = \"/\".join([self.url, \"repos\", repo, \"commits\"])\n repo_url = repo_url + args\n\n request = urllib2.Request(repo_url, headers=self.headers)\n response = urllib2.urlopen(request)\n raw_data = response.read()\n commits_info = self.process_factory(simplejson.loads(raw_data))\n api_json = api_json + commits_info\n\n print repo_url\n\n print api_json\n return api_json",
"def get_issues(): # pragma: no cover\n global issue_data\n team = {\n 'stevex196x': 0,\n 'TheSchaft': 0,\n 'melxtru': 0,\n 'aylish19': 0,\n 'connormlewis': 0,\n 'tsukkisuki': 0\n }\n all_issues = 0\n while all_issues == 0:\n url = ('https://api.github.com/repos/connormlewis/idb/'\n 'issues?state=all&filter=all&per_page=100')\n data = requests.get(\n url, headers={'Authorization': 'token ' + os.environ['API_TOKEN']})\n link = data.headers.get('Link', None)\n for i in range(1, int(find_last_page(link)) + 1):\n url = (\n 'https://api.github.com/repos/connormlewis/idb/'\n 'issues?state=all&filter=all&per_page=100' + '&page=' + str(i))\n data = requests.get(\n url,\n headers={'Authorization': 'token ' + os.environ['API_TOKEN']})\n json_list = data.json()\n for entry in json_list:\n if 'pull_request' not in entry:\n all_issues += 1\n if entry['user']['login'] in team:\n team[entry['user']['login']] += 1\n return team, all_issues",
"def test_subworkflows_info_remote_gitlab(self):\n mods_info = nf_core.subworkflows.SubworkflowInfo(\n self.pipeline_dir, \"bam_sort_stats_samtools\", remote_url=GITLAB_URL, branch=GITLAB_SUBWORKFLOWS_BRANCH\n )\n mods_info_output = mods_info.get_component_info()\n console = Console(record=True)\n console.print(mods_info_output)\n output = console.export_text()\n\n assert \"Subworkflow: bam_sort_stats_samtools\" in output\n assert \"Inputs\" in output\n assert \"Outputs\" in output\n assert \"--git-remote\" in output",
"def fetch():\n project = get_project(require=True)\n resp = request('post', '/api/v0/projects/{id}/fetch/'.format(id=project.id))\n data = resp.json()\n commits = data.get('commits', ())\n if commits:\n for commit in commits:\n success('Fetched: {ref} ({identifier})'.format(ref=commit['ref'], identifier=commit['identifier']))\n success('{n} new commits were fetched!'.format(n=len(commits)))\n else:\n info('No new commits.')\n errors = data.get('errors', ())\n for error in errors:\n warning(error)",
"def test_projects_get(self):\n response = self.client.open('/project-tracker/projects',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def test_teams_get_teams_v2(self):\n pass",
"def org_info(self):\n\n response = self.postman.request('info')\n\n if (response.status_code == requests.codes.ok):\n data = response.json()\n\n self.repos = data['public_repos']\n self.created = data['created_at']\n self.updated = data['updated_at']\n\n self.repo_info()\n self.member_info()",
"def repo_info():\n return TEST_REPOS_INFO[0]",
"def initSWProject():\r\n \r\n components = []\r\n \r\n #get all committers\r\n committers = v.getAllContributors()\r\n #print committers[0]\r\n devs = []\r\n \r\n commits_for_devs = {}\r\n \r\n #get all commits \r\n commits = v.getCommitsForBranch() \n count = len(committers)\r\n temp = count\r\n \n \r\n coms = v.getCommitsFromNetworkData()\n print len(coms)\n coms.reverse()\n \r\n for commit in coms:\r\n \r\n author = commit[\"author\"]\r\n #for every name, insert a commit\r\n try:\r\n commits_for_devs[author]\r\n except:\r\n #print author\r\n #print commit[\"committer\"]\r\n commits_for_devs[author] = commit\n dev = DeveloperInfo(commit[\"login\"])\r\n dev.name = commit[\"author\"]\n \n \r\n dev.latestcommit = Commit(commit[\"login\"],commit[\"author\"],\"\",\"\")\n \n devs.append(dev)\n \"\"\"\n print commit[\"author\"]\r\n print commit[\"login\"]\r\n print commit[\"message\"]\r\n print commit[\"id\"]\n print commit[\"date\"]\r\n \"\"\"\n print \"_______________\"\r\n \n count -= 1\n \n for committer in committers:\r\n \n name = \"\"\n try:\n name = committer[\"name\"]\n except:\n pass\r\n \r\n if committer[\"login\"] == author or commit[\"login\"] == committer[\"login\"] or name == author:\r\n cur = committer\n print \"commit found for: \",committer[\"login\"]\r\n \"\"\" \r\n for commit in commits:\r\n \r\n author = commit[\"author\"]\n #login = commit[\"login\"]\r\n #for every name, insert a commit\r\n try:\r\n commits_for_devs[author[\"name\"]]\r\n except:\r\n #print author\r\n #print commit[\"committer\"]\r\n commits_for_devs[author[\"name\"]] = commit\r\n \r\n count -= 1\r\n \r\n if count < 1:\r\n break # every one has commit\r\n \r\n #print commits_for_devs\r\n \"\"\"\r\n #now we have commit ids... fetch the data for all committers\r\n for keys,values in commits_for_devs.iteritems():\r\n id = values[\"id\"]\r\n ci = v.getCommitInformation(id)\r\n c = ci[\"commit\"]\r\n \r\n files,folders = resolveFilesAndFolders(c)\r\n \r\n cur = None\r\n #locate correct committer\r\n commitcount = 0\r\n \r\n for committer in committers:\r\n author = values[\"author\"]\n \n name = \"\"\n try:\n name = committer[\"name\"]\n except:\n pass\r\n\r\n if committer[\"login\"] == author[\"login\"] or author[\"name\"] == committer[\"login\"] or name == keys:\r\n cur = committer\n print \"commit found for: \",committer[\"login\"]\r\n if cur:\r\n commitcount = cur[\"contributions\"]\r\n \r\n print \"number of commits: %d for developer: %s\"%(commitcount,keys)\r\n #myCommit = commitdispatcher.Commit(values[\"author\"][\"name\"],values[\"message\"],folders,files)\r\n #swdeveloper.SWDeveloper(self.scene,keys,commitcount,myCommit,False)\r\n #init every developer so that each has latest commits, commit count and names in place\r\n \r\n \r\n #project = swproject.SWProject(self.scene,\"naali\",components)\r\n return \"\"",
"def __gitBundleFetch(self):\n self.vcs.gitBundleFetch(self.project.getProjectPath())"
]
| [
"0.6490177",
"0.62863183",
"0.6233867",
"0.6118521",
"0.6044377",
"0.5923517",
"0.5899961",
"0.5893558",
"0.58813363",
"0.5877972",
"0.5859925",
"0.58368886",
"0.5832686",
"0.5759416",
"0.5752883",
"0.5711394",
"0.56964076",
"0.5665525",
"0.566056",
"0.56461406",
"0.56074584",
"0.56073344",
"0.5605583",
"0.56040466",
"0.5590818",
"0.55869955",
"0.55850756",
"0.5561617",
"0.55586034",
"0.5557134"
]
| 0.719228 | 0 |
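The getMergedTeam document above leans on a `_recersive_request` helper (not shown) to walk Bitbucket's paginated `/repositories/{team}` endpoint. A minimal sketch of what that helper could look like, assuming the same `_json_request` wrapper and a plain `requests` client; Bitbucket Cloud 2.0 pages list their items under `values` and the following page under `next`:

```python
import requests


class TeamInfoSketch:
    """Illustrative stand-in for the class that owns getMergedTeam."""

    def _json_request(self, uri):
        # Plain unauthenticated GET that returns the decoded JSON body.
        response = requests.get(uri, timeout=10)
        response.raise_for_status()
        return response.json()

    def _recersive_request(self, uri):
        # Accumulate repositories across all pages until no "next" link remains.
        repos = []
        while uri:
            page = self._json_request(uri)
            repos.extend(page.get("values", []))
            uri = page.get("next")
        return repos
```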
This is a higher-level function than send(). It sends a Msg to the specified user. If no address is specified, the user's last used address (user.current_address) is used. | async def send_to_user(self, user: User, msg: Msg, address: str = None):
if address is None:
address = user.current_address
await self.send(msg, address) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def send_as_user(self, command, msg, user=None):\n user = self if user is None else user\n self._write(f':{user.ident} {command} {msg}')",
"def do_send_touser(self,customer_id,currency,amount):\n \"\"\"usage: send_touser customerID currency amount\"\"\"\n try:\n bitstamp.send_touser(customer_id,currency,amount)\n except:\n print \"Incorrect Usage. Invalid args given.\"\n self.onecmd('help send_touser')",
"def sendPrivateMessage(self, user, message):\n if user is None:\n # This happens e.g. when a command comes from the web\n print \"Message not delivered, user was None:\"\n print message\n return\n self.connection.sendDirectMessage(self, user, message.encode('ascii', 'ignore'))",
"def sendto(self, name, msg):\n self.send(\"send/{}/{}:{}\".format(self.msg_id, name, msg))\n self.msg_id += 1",
"def send_direct_message(self, user, message, client):\n if self.verify_user_existance(user):\n destination = self.get_user_socket(user)\n self.send_message(message, destination)\n else:\n self.send_message('Usuario no encontrado.', client.get_socket())",
"def send_message(stdscr, username=None):\n # Show the cursor and echo output.\n curses.curs_set(1)\n curses.echo()\n stdscr.clear()\n stdscr.refresh()\n if username is None:\n safe_put(stdscr, \"Recipient username: \", (0, 0))\n username = stdscr.getstr(0, 20)\n stdscr.clear()\n stdscr.refresh()\n tnu = taunet.users.by_name(username)\n if tnu == None:\n print(\"No such user. Known users: \" + \", \".join(sorted([u.name for u in taunet.users.all()])))\n return\n if not is_online(tnu):\n print(\"Couldn't connect to that user's host.\")\n return\n safe_put(stdscr, \"Message:\", (0, 0))\n message = stdscr.getstr(0, 9)\n stdscr.clear()\n stdscr.refresh()\n ship_tnm(tnu, taunet.TauNetMessage().outgoing(tnu.name, message))",
"def _send(message, user):\n # Send a message to the device corresponding to the provided\n # registration token.\n try:\n response = messaging.send(message)\n except messaging.ApiCallError:\n print(f\"ApiCallError. the following user's token is invalid: {user.id} - {user.username}\")",
"def sendto(self,msg,address):\n\n address = self.pubIPToMorse(address);\n \n if not self.validIPAndPort:\n print(\"Error: Invalid IP and port or socket has not been bound with an IP and port: message not sent!\");\n return;\n\n to_ip_addr = address[0];\n to_port = address[1];\n msg = msg.decode(\"utf-8\"); #Convert from bytearray to a string for ease of operation\n\n # Assemble UDP package\n udp_package = to_port + self.my_port + msg;\n\n # Assemble IP package\n ip_header = to_ip_addr + self.my_ip_addr + self.protocol_identifier + t.base36encode(len(udp_package));\n ip_package = ip_header + udp_package;\n\n # Assemble MAC package\n # First check to see if the MAC of the recieving IP is known, if not address message to router\n if to_ip_addr in self.macDict.keys(): mac_to = self.macDict[to_ip_addr];\n else: mac_to = self.macDict['router_mac']; # This only works if you're not the router...\n # Then assemble the remainder of the MAC package\n mac_from = self.my_mac;\n # Send the message\n print(mac_to+mac_from+ip_package)\n t.sendMessage(mac_to,mac_from,ip_package);",
"def send_msg_to_user(client: WebClient, request_form: dict, msg: str, blocks=None) -> dict:\n return client.chat_postEphemeral(\n channel=request_form.get('channel_id'),\n user=request_form.get('user_id'),\n text=msg,\n blocks=blocks\n )",
"def send(self, msg, label=\"\"):\n self.remoter.tx(msg) # send to remote\n log.debug(\"%s sent %s:\\n%s\\n\\n\", self.remoter, label, bytes(msg))",
"def send(self, msg):\n self.message('Me', msg)",
"def sendto(self, data: bytes, address: Tuple) -> int:\n ...",
"def _broadcast_user(cls, sender, sender_sid, recipient, text, chat_id=None):\n # todo make this method async\n recipient_sid = cls.get_user_sid(recipient.id)\n if not recipient_sid:\n cls._cache_msg(sender.id, recipient.id, text, chat_id)\n return\n data = {'sender_id': sender.id, 'recipient_id': recipient.id,\n 'text': text, 'chat_id': chat_id or 'private', 'time': time()}\n app.socketio.emit('message', data, room=recipient_sid)",
"def handle(message, pseudo, to_address):\n contact = model.get(\n model.Contact,\n pseudonym=pseudo,\n mask=to_address.contact\n )\n \n # Invalid contact mask\n if not contact:\n # TODO: Should we warn user that they have sent invalid contact mask?\n logging.info(\"MAIL: Invalid Reply contact: %s+%s -> ?\" % (\n pseudo.mask,\n to_address.contact,\n ))\n return \n\n logging.info(\"MAIL: Reply: '%s' -> '%s'\" % (pseudo.email, contact.email))\n \n # Send message\n sanitize_message(message, pseudo, to_address, contact)\n\n message.sender = pseudo.email\n message.to = contact.email\n\n message.send()",
"def send_msg(self, payload, to_addr, reply_to_addr):\n self._client.send_msg(payload, to_addr, reply_to_addr)",
"def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])",
"def send_message(self, message, user, msg_type=MSG_TYPE_MESSAGE):\n final_msg = {'room': str(self.id), 'message': message, 'username': user.username, 'msg_type': msg_type}\n\n # Send out the message to everyone in the room\n self.websocket_group.send(\n {\"text\": json.dumps(final_msg)}\n )",
"def send(self, user_connection: Tuple[Tuple[str, int], Any], message: str):\n user_connection[1].sendall(message.encode())",
"async def send_message(user, message):\n try:\n return await user.send(message)\n except ConnectionClosed:\n pass",
"async def send(self, msg: Msg, address: str):\n ident, interface = address.split(address_split_char)\n\n try:\n inbox = self.plugin_inboxes[interface]\n except AttributeError:\n raise AzuraBotError(f\"There is no inbox registered for \"\n f\"'{interface} (address {address})\")\n\n print(f\"[bot] Private message: AzuraBot->{address}: {msg.text}\")\n await inbox.put(msg)",
"def _send(self, what, value, address, **kwargs):\n\n print('_send: please override me.')",
"def send_message(user_id, name, user_info, subject, body):\n send_mail(subject, body, settings.SERVER_EMAIL, [\"%s <%s>\" % (name, user_id)],\n fail_silently=False, html_message=body)",
"def send_user_email(user, subject, template_name, context=None):\n\n if context is None:\n context = {}\n\n context['user'] = user\n\n to = (user.email,)\n\n send(subject, to, template_name, context)",
"def send(msg, dest=None):",
"def send_message(user_id, message):\r\n try:\r\n service = get_service('token.pickle')\r\n message = (service.users().messages().send(userId=user_id, body=message).execute())\r\n print('Message Id: %s' % message['id'])\r\n return message\r\n\r\n except errors.HttpError as error:\r\n print('An error occurred: %s' % error)",
"def send_to(self, target, msg):\n\t\tif self.cid is None:\n\t\t\traise UsageError(\"Not in a group!\")\n\t\tidb, payload = msg[0], msg[1:]\n\t\tself.sendMessage(idb + chr(target) + payload, True)",
"def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)",
"def _send_msg(self, contact):\n msg_content = input('{} :'.format(self._user.username))\n if msg_content == '0': \n return self.homepage()\n self._user.send_msg(contact, msg_content)\n\n return self._send_msg(contact)",
"async def message(self, ctx:utils.Context, user:discord.User, *, content:str):\n\n await user.send(content)",
"def send(User,data):\n BufferManagement.append_to_buffer(data,User['ID'],User['GameID'],\"OUT\")"
]
| [
"0.6948198",
"0.6436093",
"0.63397884",
"0.6205436",
"0.6199159",
"0.61925495",
"0.6088808",
"0.6087555",
"0.6080063",
"0.6040955",
"0.5998877",
"0.5942789",
"0.59125847",
"0.5907251",
"0.58854675",
"0.5878623",
"0.58739597",
"0.58509094",
"0.58420265",
"0.58088267",
"0.5808288",
"0.58047956",
"0.58011883",
"0.5783414",
"0.5745025",
"0.57331836",
"0.57319784",
"0.57039577",
"0.5702677",
"0.5696693"
]
| 0.86238986 | 0 |
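A short asyncio sketch of calling the `send_to_user` coroutine above; the `Msg` keyword arguments mirror the ones used by `reply()` further down, while the import path and the bot/user objects are assumptions about the surrounding azurabot-style framework:

```python
import azurabot.msg
from azurabot.msg import Msg  # assumed import path for the Msg class used above


async def greet(bot, user):
    # Build an outgoing message; FROM_BOT marks bot -> user traffic.
    msg = Msg(direction=azurabot.msg.FROM_BOT,
              user=user,
              reply_to=None,
              text="Welcome back!")
    # With no address argument, send_to_user falls back to user.current_address.
    await bot.send_to_user(user, msg)
    # From synchronous code this would be driven with asyncio.run(greet(bot, user)).
```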
Given a previous message old_msg, this function replies to it with the text in the text argument. | async def reply(self, old_msg: Msg, text: str):
if old_msg.direction == azurabot.msg.TO_BOT:
direction = azurabot.msg.FROM_BOT
else:
direction = azurabot.msg.TO_BOT
reply_msg = Msg(direction=direction,
user=old_msg.user,
reply_to=old_msg.reply_to,
text=text)
await old_msg.put(reply_msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def do_something(incoming_msg):\n return \"i did what you said - {}\".format(incoming_msg.text)",
"def text_reply(msg):\n if msg['Type'] != TEXT:\n # sanitize the text field so that we can assume it always contains string.\n # and this is also to avoid infinite loop during serialization in the persist function\n msg['Text'] = msg['Type']\n\n to_user_id_name = msg['ToUserName']\n from_user_id_name = msg['FromUserName']\n\n if is_my_outgoing_msg(msg):\n handle_outgoing_msg(msg, to_user_id_name)\n else: # this is an incoming message from my friend\n handle_incoming_msg(msg, from_user_id_name)",
"def add_message_text(self,text,sender):\n\t\tnew_message = Message(text=text,sender=sender, thread=self)\n\t\tnew_message.save()\n\t\tself.last_message = new_message\n\t\tself.save()\n\t\tfor c in self.clients.exclude(id=sender.id):\n\t\t\tUnreadReceipt.objects.create(recipient=c,thread=self,message=new_message)\n\t\treturn new_message",
"def text_message(update: Update, _: CallbackContext) -> None:\n update.message.reply_text(\n f\"Thank you for sending: {update.message.text},\\n\" +\n f\"but I am waiting only for images...\")",
"def processFaxbotMessage(self, txt):\r\n with self.__lock:\r\n if \"I do not understand your request\" in txt:\r\n replyTxt = (\"FaxBot does not have the requested monster '{}'. \"\r\n \"(Check the list at {} )\"\r\n .format(self._lastRequest, self.fax_list_url)) \r\n self._lastRequest = None\r\n self._lastRequestTime = None\r\n return replyTxt\r\n if \"just delivered a fax\" in txt:\r\n self._lastRequest = None\r\n self._lastRequestTime = None\r\n return (\"FaxBot received the request too early. \"\r\n \"Please try again.\")\r\n if \"try again tomorrow\" in txt:\r\n self._noMoreFaxesTime = utcTime()\r\n txt = (\"I'm not allowed to request any more faxes today. \"\r\n \"Request manually with /w FaxBot {}\"\r\n .format(self._lastRequest))\r\n self._lastRequest = None\r\n self._lastRequestTime = utcTime()\r\n return txt\r\n m = re.search(r'has copied', txt)\r\n if m is not None:\r\n self._lastRequest = None\r\n self._lastRequestTime = None\r\n self._lastFaxBotTime = utcTime()\r\n # suppress output from checkForNewFax since we are returning\r\n # the text, to be output later\r\n return self.checkForNewFax(False)\r\n self._lastRequest = None\r\n self._lastRequestTime = None\r\n return \"Received message from FaxBot: {}\".format(txt)",
"def on_text_message(self, update, context):\n chat_id = update.effective_chat.id\n log.info(\"Msg from:%s `%s`\", chat_id, update.effective_message.text)\n\n if context.user_data[\"state\"] == c.State.EXPECTING_AMOUNT:\n log.info(\"Vol:%s spent %s MDL on this request\", chat_id, update.effective_message.text)\n # TODO validate the message and make sure it is a number, discuss whether this is necessary at all\n # TODO send this to the server, we need to define an API for that\n request_id = context.user_data[\"current_request\"]\n\n # Write this amount to the persistent state, so we can rely on it later\n context.bot_data[request_id][\"amount\"] = update.effective_message.text\n\n # Then we have to ask them to send a receipt.\n self.send_message_ex(update.message.chat_id, c.MSG_FEEDBACK_RECEIPT)\n context.user_data[\"state\"] = c.State.EXPECTING_RECEIPT\n return\n\n if context.user_data[\"state\"] == c.State.EXPECTING_FURTHER_COMMENTS:\n log.info(\"Vol:%s has further comments: %s\", chat_id, update.effective_message.text)\n request_id = context.user_data[\"current_request\"]\n context.bot_data[request_id][\"further_comments\"] = update.effective_message.text\n self.finalize_request(update, context, request_id)\n return\n\n if context.user_data[\"state\"] == c.State.EXPECTING_PROFILE_DETAILS:\n self.build_profile(update, context, raw_text=update.effective_message.text)\n return\n\n # if we got this far it means it is some sort of an arbitrary message that we weren't yet expecting\n log.warning(\"unexpected message ..........\")",
"def OnChanTextMessage(self, msg):\n self.handle_inbound_irc_msg(\"OnChanTextMessage\", msg)\n return znc.CONTINUE",
"def handle_text_message(event: MessageEvent):\n\n line_bot_api = LineBotApi(LINE_CHANNEL_TOKEN)\n line_bot_api.reply_message(\n event.reply_token, TextSendMessage(event.message.text)\n )",
"def usr_msg(update, msg: str = '', error: bool = True) -> None:\n if error:\n update.effective_message.reply_text(\n \"An error occured ๐, sorry.\",\n reply_markup=ReplyKeyboardRemove(),\n parse_mode=ParseMode.MARKDOWN,\n )\n if msg:\n update.effective_message.reply_text(\n msg,\n reply_markup=ReplyKeyboardRemove(),\n parse_mode=ParseMode.MARKDOWN,\n )",
"def send_message(self, text):\n self.__telegram_info.message.reply_text(text)",
"def update_message(self, text):\n self.message = text\n if self.verbose:\n print self.message",
"def message(self, text):\n for char in text:\n if char == '\\n':\n self.cmd(0xC0) # next line\n else:\n self.cmd(ord(char),True)",
"def message(self, text):\n for char in text:\n if char == '\\n':\n self.cmd(0xC0) # next line\n else:\n self.cmd(ord(char),True)",
"def message(self, text):\n for char in text:\n if char == '\\n':\n self.cmd(0xC0) # next line\n else:\n self.cmd(ord(char),True)",
"async def msg(self, text: Union[str, bytes]):\n await self.session.msg(text)",
"def __addmsg(self, msg: str) -> None:\n # region Docstring\n # endregion\n self.record += msg\n self.textbox.kill()\n self.textbox = UITextBox(\n html_text=self.record,\n relative_rect=Rect((0, 0), (self.size[0], self.size[1] - 25)),\n container=self,\n manager=self.ui_manager,\n )",
"def __msg_handler(self, bot, update):\n trigger = update.message.text\n self.__handler(bot, update, trigger)",
"def reply(self, text=None):\n self.message.click()\n self.message.send_keys(Keys.ARROW_RIGHT)\n try:\n self.message.find_element_by_xpath(\"//div[@aria-label='Reply']\").click()\n except NoSuchElementException:\n raise Exception(\"Message has been been deleted\")\n if text is not None:\n self.get_chat().send_message(text)",
"def _telegram_message_callback(self, update: Update, _: CallbackContext):\n text = update.message.text\n self._from_telegram_string_publisher.publish(String(data=text))",
"def speak(self, newtext):\n self.lock.acquire()\n try:\n if newtext:\n self.text = newtext\n finally:\n self.lock.release()",
"def handleMessage(msg):",
"def __msg_handler(self, update, bot):\n trigger = update.message.text\n self.__handler(bot, update, trigger)",
"def d_sendText(self, messageText):\n #print \"send message %s\" % messageText\n self.sendUpdate(\"sendText\", [messageText])",
"def __sendMessage(self):\n # TODO: Switch to this when implemented\n \n msg = self.ui.inputWidget.toPlainText()\n self.ui.inputWidget.clear()\n strv = StringView()\n strv.appendText(unicode(msg))\n self._amsn_conversation.sendMessage(strv)\n self.ui.textEdit.append(\"<b>/me says:</b><br>\"+unicode(msg)+\"\")",
"def handle_unknown_message(event):\n text_message = TextSendMessage(text='็กๆณ็่งฃๆญค่จๆฏ: \"{}\"'.format(event.message.text))\n line_bot_api.reply_message(event.reply_token, text_message)",
"def make_reply(msg):\n reply = ''\n if msg is not None:\n for i in range(len(messages)):\n if msg == message[i]:\n reply = m_responses[i]\n return reply",
"def msg(self, text=None, **kwargs):\n if self.db.bot:\n self.db.bot.msg(text=text, **kwargs)",
"def update_text_message(self, id_user:int, text_new:str) -> bool:\n try:\n self.cursor.execute(f\"UPDATE {table_users_settings} SET text_sending = ? WHERE id_user = ?;\", (text_new, id_user))\n self.connection.commit()\n return True\n except Exception as e:\n msg = f\"We face problems with updating the message text; Mistake: {e}\"\n self.proceed_error(msg)\n return False",
"def default_message(update: Update, _: CCT) -> None:\n cast(Message, update.effective_message).reply_text(\n \"Sorry, but I can only text messages. \" 'Send \"/help\" for more information.'\n )",
"def echo(update, context):\r\n update.message.reply_text(update.message.text)"
]
| [
"0.6766925",
"0.6720746",
"0.63862306",
"0.6342622",
"0.61958545",
"0.6135881",
"0.611654",
"0.6096974",
"0.6047219",
"0.6044003",
"0.6026358",
"0.60032403",
"0.60032403",
"0.60032403",
"0.5950733",
"0.5946362",
"0.59281623",
"0.59240496",
"0.5913224",
"0.58990586",
"0.5894044",
"0.58927333",
"0.58633786",
"0.583459",
"0.57993436",
"0.5798612",
"0.5794132",
"0.57933116",
"0.57728326",
"0.5764276"
]
| 0.789176 | 0 |
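A companion sketch of how a plugin might consume incoming messages and answer them through `reply()`; the inbox is assumed to be an `asyncio.Queue` of Msg objects, matching the `await inbox.put(...)` pattern used elsewhere in the same codebase:

```python
import azurabot.msg  # assumed package exposing the TO_BOT / FROM_BOT constants


async def echo_plugin(bot, inbox):
    # Drain an asyncio.Queue-style inbox of incoming Msg objects and answer each.
    # reply() flips TO_BOT to FROM_BOT itself, so the plugin only supplies text.
    while True:
        incoming = await inbox.get()
        if incoming.direction == azurabot.msg.TO_BOT:
            await bot.reply(incoming, f"You said: {incoming.text}")
```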
generate a translator function that will map from a 1-hot repr of a chord to a classical chord signature | def get_onehot2chordnotes_transcoder():
chord_id2sign = np.load('csv/chord-1hot-signatures-rev.npy')
def f(chord):
"""
Translate from 1-hot array of dim {DIM} back to superimposed repr of dim=12
:param chord: 1-hot representation of chords in (M, T, Dim)
:return: chord signature in (M, T, 12)
"""
m, length, dim = chord.shape
chord2 = chord.reshape([m*length, dim])
index = np.argmax(chord2, axis=1)
new_chord = chord_id2sign[index, :]
return new_chord.reshape(m, length, 12)
return f | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_onehot2weighted_chords_transcoder():\r\n chord_id2sign = np.load('csv/chord-1hot-signatures-rev.npy')\r\n\r\n def f(chord):\r\n \"\"\"\r\n Translate from 1-hot array of dim {DIM} back to superimposed repr of dim=12\r\n :param chord: 1-hot representation of chords in (M, T, Dim)\r\n :return: chord signature in (M, T, 12)\r\n \"\"\"\r\n m, length, dim = chord.shape\r\n chord2 = chord.reshape([m*length, dim])\r\n weighted_chords = np.dot(chord2, chord_id2sign)\r\n return weighted_chords.reshape(m, length, 12)\r\n return f",
"def translate(codon):\n \n table = { \n 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M', \n 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T', \n 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K', \n 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R', \n 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L', \n 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P', \n 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q', \n 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R', \n 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V', \n 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A', \n 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E', \n 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G', \n 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S', \n 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L', \n 'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*', \n 'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W', \n } \n \n assert codon in table.keys(), \"Not a valid codon sequence.\"\n \n return table[codon]",
"def back_translate(seq):\n\n base_nucleotide_list = []\n for i in seq:\n res = __get_key(i,CodonTable)\n base_nucleotide_list.append(res)\n return ''.join(base_nucleotide_list)",
"def f(chord):\r\n m, length, dim = chord.shape\r\n chord2 = chord.reshape([m*length, dim])\r\n weighted_chords = np.dot(chord2, chord_id2sign)\r\n return weighted_chords.reshape(m, length, 12)",
"def translate_sequence(sequence, genetic_code = {'GUC': 'V', 'ACC': 'T', 'GUA': 'V', 'GUG': 'V', 'ACU': 'T', 'AAC': 'N', 'CCU': 'P', 'UGG': 'W', 'AGC': 'S', 'AUC': 'I', 'CAU': 'H', 'AAU': 'N', 'AGU': 'S', 'GUU': 'V', 'CAC': 'H', 'ACG': 'T', 'CCG': 'P', 'CCA': 'P', 'ACA': 'T', 'CCC': 'P', 'UGU': 'C', 'GGU': 'G', 'UCU': 'S', 'GCG': 'A', 'UGC': 'C', 'CAG': 'Q', 'GAU': 'D', 'UAU': 'Y', 'CGG': 'R', 'UCG': 'S', 'AGG': 'R', 'GGG': 'G', 'UCC': 'S', 'UCA': 'S', 'UAA': '*', 'GGA': 'G', 'UAC': 'Y', 'GAC': 'D', 'UAG': '*', 'AUA': 'I', 'GCA': 'A', 'CUU': 'L', 'GGC': 'G', 'AUG': 'M', 'CUG': 'L', 'GAG': 'E', 'CUC': 'L', 'AGA': 'R', 'CUA': 'L', 'GCC': 'A', 'AAA': 'K', 'AAG': 'K', 'CAA': 'Q', 'UUU': 'F', 'CGU': 'R', 'CGC': 'R', 'CGA': 'R', 'GCU': 'A', 'GAA': 'E', 'AUU': 'I', 'UUG': 'L', 'UUA': 'L', 'UGA': '*', 'UUC': 'F'}, start_pos = 0):\n #find first orf\n #first_orf_seq = find_first_orf(sequence)\n\n # ensure sequence is uppercase\n seq = sequence.upper()\n\n #translate the sequence\n protein = \"\"\n for i in range(0, len(seq) - (len(seq) % 3), 3):\n codon = seq[i:i + 3]\n if genetic_code[codon] == \"*\":\n break\n protein += genetic_code[codon]\n return protein",
"def f(chord):\r\n m, length, dim = chord.shape\r\n chord2 = chord.reshape([m*length, dim])\r\n index = np.argmax(chord2, axis=1)\r\n new_chord = chord_id2sign[index, :]\r\n return new_chord.reshape(m, length, 12)",
"def transcribe(seq):\n rna = ''\n for letter in seq:\n if letter == 'A':\n rna = rna + 'U'\n elif letter == 'T':\n rna = rna + 'A'\n elif letter == 'G':\n rna = rna + 'C'\n else:\n rna = rna + 'G'\n return rna",
"def formatChordLabel(cl):\n # The only change I can think of: Cmaj -> C\n cl = cl.replace(\"maj\", \"\") if cl.endswith(\"maj\") else cl\n cl = cl.replace(\"-\", \"b\")\n return cl",
"def one_hot_encoder(self, DNA_string):\n\n if self.selex_predict_str_adaptor != 0:\n DNA_string = \"A\" * self.selex_predict_str_adaptor + DNA_string + 'A' * self.selex_predict_str_adaptor\n\n trantab = DNA_string.maketrans('ACGT', '0123')\n str_arr = [\"\" for x in range(self.num_of_str)]\n for i in range(0, self.num_of_str): ##each substring goes to different element array\n str_arr[i] = DNA_string[i: i + self.selex_str_len]\n\n # if the \"ACGT\"\n # won't be added it will be impossible to convert sequnces which miss one of the letters\n str_arr[self.num_of_str - 1] = str_arr[self.num_of_str - 1] + \"ACGT\"\n\n final_str = list(\"\")\n for i in range(0, self.num_of_str):\n final_str += list(str_arr[i].translate(trantab))\n\n return to_categorical(final_str)[0:-4] # returns the matrix without the \"ACGT\"",
"def encode_chord(self, string):\n return string",
"def encode_chord(self, string):\n return string",
"def one_hot_encoding(sequence):\n\n mydict = {\n \"A\": np.asarray([1, 0, 0, 0]),\n \"a\": np.asarray([1, 0, 0, 0]),\n \"C\": np.asarray([0, 1, 0, 0]),\n \"c\": np.asarray([0, 1, 0, 0]),\n \"G\": np.asarray([0, 0, 1, 0]),\n \"g\": np.asarray([0, 0, 1, 0]),\n \"T\": np.asarray([0, 0, 0, 1]),\n \"t\": np.asarray([0, 0, 0, 1]),\n \"Y\": np.asarray([0, 1, 0, 1]),\n \"y\": np.asarray([0, 1, 0, 1]),\n \"R\": np.asarray([1, 0, 1, 0]),\n \"r\": np.asarray([1, 0, 1, 0]),\n \"S\": np.asarray([0, 1, 1, 0]),\n \"s\": np.asarray([0, 1, 1, 0]),\n \"W\": np.asarray([1, 0, 0, 1]),\n \"w\": np.asarray([1, 0, 0, 1]),\n \"K\": np.asarray([0, 0, 1, 1]),\n \"k\": np.asarray([0, 0, 1, 1]),\n \"M\": np.asarray([1, 1, 0, 0]),\n \"m\": np.asarray([1, 1, 0, 0]),\n \"B\": np.asarray([0, 1, 1, 1]),\n \"b\": np.asarray([0, 1, 1, 1]),\n \"D\": np.asarray([1, 0, 1, 1]),\n \"d\": np.asarray([1, 0, 1, 1]),\n \"H\": np.asarray([1, 1, 0, 1]),\n \"h\": np.asarray([1, 1, 0, 1]),\n \"V\": np.asarray([1, 1, 1, 0]),\n \"v\": np.asarray([1, 1, 1, 0]),\n \"N\": np.asarray([0, 0, 0, 0]),\n \"n\": np.asarray([0, 0, 0, 0]),\n \"-\": np.asarray([0, 0, 0, 0]),\n }\n print(f\"Seq: {sequence}\")\n if len(sequence) > 0:\n nuc_list = list()\n for nuc in list(sequence):\n nuc_list.append(mydict[nuc])\n result = np.stack(np.asarray(nuc_list, dtype=\"int8\"))\n return result\n else: \n print(\"ERROR! sequence is too short\")",
"def convert_to_one_letter_code_sing(seq):\n conversion = {\n \"GLY\": \"G\", \"PRO\": \"P\", \"VAL\": \"V\", \"ALA\": \"A\", \"LEU\": \"L\",\n \"ILE\": \"I\", \"MET\": \"M\", \"CYS\": \"C\", \"PHE\": \"F\", \"TYR\": \"Y\",\n \"TRP\": \"W\", \"HIS\": \"H\", \"ARG\": \"R\", \"LYS\": \"K\", \"GLN\": \"Q\",\n \"THR\": \"T\", \"ASP\": \"D\", \"ASN\": \"N\", \"SER\": \"S\", \"GLU\": \"E\"\n }\n n_seq = conversion[seq]\n return n_seq",
"def binary_transformation(sequence:str, binary_dict:dict):\r\n binary_sequence = \"\"\r\n for letter in sequence:\r\n binary_sequence += str(binary_dict[letter])\r\n return binary_sequence",
"def map_caesar(key, plaintext):\n letters = string.ascii_lowercase\n mask = letters[key:] + letters[:key]\n transtab = str.maketrans(letters, mask)\n return plaintext.translate(transtab)",
"def matrixconverter(seqmatrix):\n\n\tdic = {0:\"A\",1:\"C\",2:\"G\",3:\"T\"} # dictionary of indexes of each nucleotide for matrices\n\ta = np.transpose(np.nonzero(np.transpose(seqmatrix))).tolist()\n\tseqstring = \"\"\n\tfor i in a:\n\t\tseqstring += dic[i[1]]\n\treturn seqstring",
"def _convert(self, message, get_leter_index):\r\n\t\tord_a = ord('a')\r\n\t\treturn \"\".join(\r\n\t\t\t_nth_letter(get_leter_index(ord(char) - ord_a, ord(key_char) - ord_a))\r\n\t\t\t\tfor char, key_char in zip(message, itertools.cycle(self.key))\r\n\t\t)",
"def Translate(self):\n dna_to_protein = {\n 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',\n 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',\n 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',\n 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',\n 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',\n 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',\n 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',\n 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',\n 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',\n 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',\n 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',\n 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',\n 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',\n 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',\n 'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*',\n 'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W',\n }\n \n length = self.length\n reading = {}\n for i in range(3):\n reading['frame_'+str(i+1)] = tuple([dna_to_protein[self.sequence[index:index+3]] for index in range(i,length-2,3)])\n reverse_strand = Analyze_DNA_Sequence.Complementary(self,'5-3')\n for i in range(3):\n reading['frame_'+str(i+4)] = tuple([dna_to_protein[reverse_strand[index:index+3]] for index in range(i,length-2,3)])\n\n return reading",
"def mgChord(value, chord):\n ret = None\n if chord == 'M':\n ret = mgChordMajor(value)\n elif chord == 'm':\n ret = mgChordMinor(value)\n elif chord == 'dim':\n ret = mgChordDiminished(value)\n elif chord == 'aug':\n ret = mgChordAugmented(value)\n\n return ret",
"def dummy_junction14():\n return \"junction:chr1:176-324:+\"",
"def dummy_junction13():\n return 'junction:chr1:176-299:+'",
"def __getSupportingChords(self, key):\n lookupTable = {\n 'C': ('F', 'Dm', 'C', 'Am', 'G', 'Em'),\n 'Am': ('F', 'Dm', 'C', 'Am', 'G', 'Em'),\n\n 'G': ('C', 'Am', 'G', 'Em', 'D', 'Bm'),\n 'Em': ('C', 'Am', 'G', 'Em', 'D', 'Bm'),\n\n 'D': ('G', 'Em', 'D', 'Bm', 'A', 'F#m'),\n 'Bm': ('G', 'Em', 'D', 'Bm', 'A', 'F#m'),\n\n 'A': ('D', 'Bm', 'A', 'F#m', 'E', 'C#m'),\n 'F#m': ('D', 'Bm', 'A', 'F#m', 'E', 'C#m'),\n\n 'E': ('A', 'F#m', 'E', 'C#m', 'B', 'G#m'),\n 'C#m': ('A', 'F#m', 'E', 'C#m', 'B', 'G#m'),\n\n 'B': ('E', 'C#m', 'B', 'G#m', 'F#', 'Gb', 'Ebm'),\n 'G#m': ('E', 'C#m', 'B', 'G#m', 'F#', 'Gb', 'Ebm'),\n\n 'F#': ('B', 'G#m', 'F#', 'Gb', 'Ebm', 'Db', 'Bbm'),\n 'Gb#': ('B', 'G#m', 'F#', 'Gb', 'Ebm', 'Db', 'Bbm'),\n 'Ebm': ('B', 'G#m', 'F#', 'Gb', 'Ebm', 'Db', 'Bbm'),\n\n 'Db': ('F#', 'Gb', 'Eb', 'Db', 'Bbm', 'Ab', 'Fm'),\n 'Bbm': ('F#', 'Gb', 'Eb', 'Db', 'Bbm', 'Ab', 'Fm'),\n\n 'Ab': ('Db', 'Bbm', 'Ab', 'Fm', 'Eb', 'Cm'),\n 'Fm': ('Db', 'Bbm', 'Ab', 'Fm', 'Eb', 'Cm'),\n\n 'Eb': ('Ab', 'Fm', 'Eb', 'Cm', 'Bb', 'Gm'),\n 'Cm': ('Ab', 'Fm', 'Eb', 'Cm', 'Bb', 'Gm'),\n\n 'Bb': ('Eb', 'Cm', 'Bb', 'Gm', 'F', 'Dm'),\n 'Gm': ('Eb', 'Cm', 'Bb', 'Gm', 'F', 'Dm'),\n\n 'F': ('Bb', 'Gm', 'F', 'Dm', 'C', 'Am'),\n 'Dm': ('Bb', 'Gm', 'F', 'Dm', 'C', 'Am')\n }",
"def one_hot_encoder(self, DNA_string, **kwargs):\n if kwargs['start_linker'] is None:\n start_linker = end_linker = \"A\" * self.linker_sequence_length\n else:\n start_linker = kwargs['start_linker'][-self.linker_sequence_length:]\n end_linker = kwargs['end_linker'][:self.linker_sequence_length]\n\n # if the \"ACGT\"\n # won't be added it will be impossible to convert sequnces which miss one of the letters\n DNA_string = start_linker + DNA_string + end_linker + \"ACGT\"\n trantab = DNA_string.maketrans('ACGT', '0123')\n data = list(DNA_string.translate(trantab))\n return to_categorical(data)[0:-4] # returns the matrix without the \"ACGT\"",
"def map2mw_D(d,k1,entry,mwverbs,cformsd):\n if k1 in map2mw_special_D:\n return map2mw_special_D[k1]\n ans = map2mw_D_1(d,k1,cformsd)\n if ans:\n return ans\n return '?'\n ans = map2mw_D_2(d,k1,entry,mwverbs)\n if ans:\n return ans\n \n k = re.sub(r'Ami$','',k1) \n if k in d:\n mwrec = d[k]\n if mwrec.cat == 'preverb':\n return k\n\n return '?'",
"def trans_chord(m):\n m = asmatrix(m)\n row_norms = sqrt(sum(square(m), axis=1))\n result = m / row_norms\n return result",
"def _create_conversion_trie(strict):\n t = pygtrie.CharTrie()\n\n for beta, uni in _map.BETACODE_MAP.items():\n if strict:\n t[beta] = uni\n else:\n # The order of accents is very strict and weak. Allow for many orders of\n # accents between asterisk and letter or after letter. This does not\n # introduce ambiguity since each betacode token only has one letter and\n # either starts with a asterisk or a letter.\n diacritics = beta[1:]\n\n perms = itertools.permutations(diacritics)\n for perm in perms:\n perm_str = beta[0] + ''.join(perm)\n t[perm_str.lower()] = uni\n t[perm_str.upper()] = uni\n\n return t",
"def mapperCRT(elt, p: int, q: int, action: bool = True, Verbose: bool = False):\n # Mapping\n if action:\n a = elt % p\n b = elt % q\n\n if Verbose and q != p:\n print(f\"Converting {elt} in Zpq to a in Zp and b in Zq.\")\n print(f\"With a = {a} mod {p} and b = {b} mod {q}\")\n\n return (a, b)\n\n x = ChineseRemainder(elt, [p, q], Verbose)\n return x",
"def _transform(self, original, coder):\n msg = list(original)\n for k in range(len(msg)):\n if 0x590 < ord(msg[k]) < 0xfb50:\n msg[k] = coder[msg[k]]\n return u\"\".join(msg)",
"def build_label_transform():\n\n return NALabelEncoder()",
"def question_new_translate():"
]
| [
"0.6614671",
"0.5769357",
"0.5698559",
"0.56645834",
"0.56529045",
"0.55155694",
"0.55034935",
"0.54679215",
"0.5425101",
"0.54120916",
"0.54120916",
"0.53971213",
"0.538774",
"0.5374609",
"0.5356483",
"0.53515774",
"0.53292423",
"0.5324584",
"0.5315523",
"0.52981585",
"0.52625",
"0.5258921",
"0.5251392",
"0.5232141",
"0.52276427",
"0.5217243",
"0.52121615",
"0.52014416",
"0.5194802",
"0.51946926"
]
| 0.6836312 | 0 |
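A self-contained toy run of the argmax decode performed by the transcoder above; a four-row lookup table stands in for the real `csv/chord-1hot-signatures-rev.npy` file, and pitch classes are numbered C=0 through B=11:

```python
import numpy as np

# Fake lookup table standing in for csv/chord-1hot-signatures-rev.npy.
chord_id2sign = np.zeros((4, 12), dtype=np.float32)
chord_id2sign[0, [0, 4, 7]] = 1.0   # C major triad signature
chord_id2sign[1, [2, 5, 9]] = 1.0   # D minor
chord_id2sign[2, [4, 7, 11]] = 1.0  # E minor
chord_id2sign[3, [0, 5, 9]] = 1.0   # F major

one_hot = np.zeros((1, 2, 4), dtype=np.float32)  # (M=1 piece, T=2 steps, dim=4 chords)
one_hot[0, 0, 0] = 1.0  # step 0 -> chord id 0 (C major)
one_hot[0, 1, 3] = 1.0  # step 1 -> chord id 3 (F major)

m, length, dim = one_hot.shape
flat = one_hot.reshape(m * length, dim)
decoded = chord_id2sign[np.argmax(flat, axis=1), :].reshape(m, length, 12)
print(decoded.shape)              # (1, 2, 12)
print(np.nonzero(decoded[0, 0]))  # (array([0, 4, 7]),) -> the C major signature
```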
generate a translator function that will map from a 1-hot repr of a chord to a classical chord signature | def get_onehot2weighted_chords_transcoder():
chord_id2sign = np.load('csv/chord-1hot-signatures-rev.npy')
def f(chord):
"""
Translate from 1-hot array of dim {DIM} back to superimposed repr of dim=12
:param chord: 1-hot representation of chords in (M, T, Dim)
:return: chord signature in (M, T, 12)
"""
m, length, dim = chord.shape
chord2 = chord.reshape([m*length, dim])
weighted_chords = np.dot(chord2, chord_id2sign)
return weighted_chords.reshape(m, length, 12)
return f | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_onehot2chordnotes_transcoder():\r\n chord_id2sign = np.load('csv/chord-1hot-signatures-rev.npy')\r\n\r\n def f(chord):\r\n \"\"\"\r\n Translate from 1-hot array of dim {DIM} back to superimposed repr of dim=12\r\n :param chord: 1-hot representation of chords in (M, T, Dim)\r\n :return: chord signature in (M, T, 12)\r\n \"\"\"\r\n m, length, dim = chord.shape\r\n chord2 = chord.reshape([m*length, dim])\r\n index = np.argmax(chord2, axis=1)\r\n new_chord = chord_id2sign[index, :]\r\n return new_chord.reshape(m, length, 12)\r\n return f",
"def translate(codon):\n \n table = { \n 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M', \n 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T', \n 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K', \n 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R', \n 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L', \n 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P', \n 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q', \n 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R', \n 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V', \n 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A', \n 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E', \n 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G', \n 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S', \n 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L', \n 'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*', \n 'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W', \n } \n \n assert codon in table.keys(), \"Not a valid codon sequence.\"\n \n return table[codon]",
"def back_translate(seq):\n\n base_nucleotide_list = []\n for i in seq:\n res = __get_key(i,CodonTable)\n base_nucleotide_list.append(res)\n return ''.join(base_nucleotide_list)",
"def f(chord):\r\n m, length, dim = chord.shape\r\n chord2 = chord.reshape([m*length, dim])\r\n weighted_chords = np.dot(chord2, chord_id2sign)\r\n return weighted_chords.reshape(m, length, 12)",
"def translate_sequence(sequence, genetic_code = {'GUC': 'V', 'ACC': 'T', 'GUA': 'V', 'GUG': 'V', 'ACU': 'T', 'AAC': 'N', 'CCU': 'P', 'UGG': 'W', 'AGC': 'S', 'AUC': 'I', 'CAU': 'H', 'AAU': 'N', 'AGU': 'S', 'GUU': 'V', 'CAC': 'H', 'ACG': 'T', 'CCG': 'P', 'CCA': 'P', 'ACA': 'T', 'CCC': 'P', 'UGU': 'C', 'GGU': 'G', 'UCU': 'S', 'GCG': 'A', 'UGC': 'C', 'CAG': 'Q', 'GAU': 'D', 'UAU': 'Y', 'CGG': 'R', 'UCG': 'S', 'AGG': 'R', 'GGG': 'G', 'UCC': 'S', 'UCA': 'S', 'UAA': '*', 'GGA': 'G', 'UAC': 'Y', 'GAC': 'D', 'UAG': '*', 'AUA': 'I', 'GCA': 'A', 'CUU': 'L', 'GGC': 'G', 'AUG': 'M', 'CUG': 'L', 'GAG': 'E', 'CUC': 'L', 'AGA': 'R', 'CUA': 'L', 'GCC': 'A', 'AAA': 'K', 'AAG': 'K', 'CAA': 'Q', 'UUU': 'F', 'CGU': 'R', 'CGC': 'R', 'CGA': 'R', 'GCU': 'A', 'GAA': 'E', 'AUU': 'I', 'UUG': 'L', 'UUA': 'L', 'UGA': '*', 'UUC': 'F'}, start_pos = 0):\n #find first orf\n #first_orf_seq = find_first_orf(sequence)\n\n # ensure sequence is uppercase\n seq = sequence.upper()\n\n #translate the sequence\n protein = \"\"\n for i in range(0, len(seq) - (len(seq) % 3), 3):\n codon = seq[i:i + 3]\n if genetic_code[codon] == \"*\":\n break\n protein += genetic_code[codon]\n return protein",
"def f(chord):\r\n m, length, dim = chord.shape\r\n chord2 = chord.reshape([m*length, dim])\r\n index = np.argmax(chord2, axis=1)\r\n new_chord = chord_id2sign[index, :]\r\n return new_chord.reshape(m, length, 12)",
"def transcribe(seq):\n rna = ''\n for letter in seq:\n if letter == 'A':\n rna = rna + 'U'\n elif letter == 'T':\n rna = rna + 'A'\n elif letter == 'G':\n rna = rna + 'C'\n else:\n rna = rna + 'G'\n return rna",
"def formatChordLabel(cl):\n # The only change I can think of: Cmaj -> C\n cl = cl.replace(\"maj\", \"\") if cl.endswith(\"maj\") else cl\n cl = cl.replace(\"-\", \"b\")\n return cl",
"def one_hot_encoder(self, DNA_string):\n\n if self.selex_predict_str_adaptor != 0:\n DNA_string = \"A\" * self.selex_predict_str_adaptor + DNA_string + 'A' * self.selex_predict_str_adaptor\n\n trantab = DNA_string.maketrans('ACGT', '0123')\n str_arr = [\"\" for x in range(self.num_of_str)]\n for i in range(0, self.num_of_str): ##each substring goes to different element array\n str_arr[i] = DNA_string[i: i + self.selex_str_len]\n\n # if the \"ACGT\"\n # won't be added it will be impossible to convert sequnces which miss one of the letters\n str_arr[self.num_of_str - 1] = str_arr[self.num_of_str - 1] + \"ACGT\"\n\n final_str = list(\"\")\n for i in range(0, self.num_of_str):\n final_str += list(str_arr[i].translate(trantab))\n\n return to_categorical(final_str)[0:-4] # returns the matrix without the \"ACGT\"",
"def encode_chord(self, string):\n return string",
"def encode_chord(self, string):\n return string",
"def one_hot_encoding(sequence):\n\n mydict = {\n \"A\": np.asarray([1, 0, 0, 0]),\n \"a\": np.asarray([1, 0, 0, 0]),\n \"C\": np.asarray([0, 1, 0, 0]),\n \"c\": np.asarray([0, 1, 0, 0]),\n \"G\": np.asarray([0, 0, 1, 0]),\n \"g\": np.asarray([0, 0, 1, 0]),\n \"T\": np.asarray([0, 0, 0, 1]),\n \"t\": np.asarray([0, 0, 0, 1]),\n \"Y\": np.asarray([0, 1, 0, 1]),\n \"y\": np.asarray([0, 1, 0, 1]),\n \"R\": np.asarray([1, 0, 1, 0]),\n \"r\": np.asarray([1, 0, 1, 0]),\n \"S\": np.asarray([0, 1, 1, 0]),\n \"s\": np.asarray([0, 1, 1, 0]),\n \"W\": np.asarray([1, 0, 0, 1]),\n \"w\": np.asarray([1, 0, 0, 1]),\n \"K\": np.asarray([0, 0, 1, 1]),\n \"k\": np.asarray([0, 0, 1, 1]),\n \"M\": np.asarray([1, 1, 0, 0]),\n \"m\": np.asarray([1, 1, 0, 0]),\n \"B\": np.asarray([0, 1, 1, 1]),\n \"b\": np.asarray([0, 1, 1, 1]),\n \"D\": np.asarray([1, 0, 1, 1]),\n \"d\": np.asarray([1, 0, 1, 1]),\n \"H\": np.asarray([1, 1, 0, 1]),\n \"h\": np.asarray([1, 1, 0, 1]),\n \"V\": np.asarray([1, 1, 1, 0]),\n \"v\": np.asarray([1, 1, 1, 0]),\n \"N\": np.asarray([0, 0, 0, 0]),\n \"n\": np.asarray([0, 0, 0, 0]),\n \"-\": np.asarray([0, 0, 0, 0]),\n }\n print(f\"Seq: {sequence}\")\n if len(sequence) > 0:\n nuc_list = list()\n for nuc in list(sequence):\n nuc_list.append(mydict[nuc])\n result = np.stack(np.asarray(nuc_list, dtype=\"int8\"))\n return result\n else: \n print(\"ERROR! sequence is too short\")",
"def convert_to_one_letter_code_sing(seq):\n conversion = {\n \"GLY\": \"G\", \"PRO\": \"P\", \"VAL\": \"V\", \"ALA\": \"A\", \"LEU\": \"L\",\n \"ILE\": \"I\", \"MET\": \"M\", \"CYS\": \"C\", \"PHE\": \"F\", \"TYR\": \"Y\",\n \"TRP\": \"W\", \"HIS\": \"H\", \"ARG\": \"R\", \"LYS\": \"K\", \"GLN\": \"Q\",\n \"THR\": \"T\", \"ASP\": \"D\", \"ASN\": \"N\", \"SER\": \"S\", \"GLU\": \"E\"\n }\n n_seq = conversion[seq]\n return n_seq",
"def binary_transformation(sequence:str, binary_dict:dict):\r\n binary_sequence = \"\"\r\n for letter in sequence:\r\n binary_sequence += str(binary_dict[letter])\r\n return binary_sequence",
"def map_caesar(key, plaintext):\n letters = string.ascii_lowercase\n mask = letters[key:] + letters[:key]\n transtab = str.maketrans(letters, mask)\n return plaintext.translate(transtab)",
"def matrixconverter(seqmatrix):\n\n\tdic = {0:\"A\",1:\"C\",2:\"G\",3:\"T\"} # dictionary of indexes of each nucleotide for matrices\n\ta = np.transpose(np.nonzero(np.transpose(seqmatrix))).tolist()\n\tseqstring = \"\"\n\tfor i in a:\n\t\tseqstring += dic[i[1]]\n\treturn seqstring",
"def _convert(self, message, get_leter_index):\r\n\t\tord_a = ord('a')\r\n\t\treturn \"\".join(\r\n\t\t\t_nth_letter(get_leter_index(ord(char) - ord_a, ord(key_char) - ord_a))\r\n\t\t\t\tfor char, key_char in zip(message, itertools.cycle(self.key))\r\n\t\t)",
"def Translate(self):\n dna_to_protein = {\n 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',\n 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',\n 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',\n 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',\n 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',\n 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',\n 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',\n 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',\n 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',\n 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',\n 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',\n 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',\n 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',\n 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',\n 'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*',\n 'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W',\n }\n \n length = self.length\n reading = {}\n for i in range(3):\n reading['frame_'+str(i+1)] = tuple([dna_to_protein[self.sequence[index:index+3]] for index in range(i,length-2,3)])\n reverse_strand = Analyze_DNA_Sequence.Complementary(self,'5-3')\n for i in range(3):\n reading['frame_'+str(i+4)] = tuple([dna_to_protein[reverse_strand[index:index+3]] for index in range(i,length-2,3)])\n\n return reading",
"def mgChord(value, chord):\n ret = None\n if chord == 'M':\n ret = mgChordMajor(value)\n elif chord == 'm':\n ret = mgChordMinor(value)\n elif chord == 'dim':\n ret = mgChordDiminished(value)\n elif chord == 'aug':\n ret = mgChordAugmented(value)\n\n return ret",
"def dummy_junction14():\n return \"junction:chr1:176-324:+\"",
"def dummy_junction13():\n return 'junction:chr1:176-299:+'",
"def __getSupportingChords(self, key):\n lookupTable = {\n 'C': ('F', 'Dm', 'C', 'Am', 'G', 'Em'),\n 'Am': ('F', 'Dm', 'C', 'Am', 'G', 'Em'),\n\n 'G': ('C', 'Am', 'G', 'Em', 'D', 'Bm'),\n 'Em': ('C', 'Am', 'G', 'Em', 'D', 'Bm'),\n\n 'D': ('G', 'Em', 'D', 'Bm', 'A', 'F#m'),\n 'Bm': ('G', 'Em', 'D', 'Bm', 'A', 'F#m'),\n\n 'A': ('D', 'Bm', 'A', 'F#m', 'E', 'C#m'),\n 'F#m': ('D', 'Bm', 'A', 'F#m', 'E', 'C#m'),\n\n 'E': ('A', 'F#m', 'E', 'C#m', 'B', 'G#m'),\n 'C#m': ('A', 'F#m', 'E', 'C#m', 'B', 'G#m'),\n\n 'B': ('E', 'C#m', 'B', 'G#m', 'F#', 'Gb', 'Ebm'),\n 'G#m': ('E', 'C#m', 'B', 'G#m', 'F#', 'Gb', 'Ebm'),\n\n 'F#': ('B', 'G#m', 'F#', 'Gb', 'Ebm', 'Db', 'Bbm'),\n 'Gb#': ('B', 'G#m', 'F#', 'Gb', 'Ebm', 'Db', 'Bbm'),\n 'Ebm': ('B', 'G#m', 'F#', 'Gb', 'Ebm', 'Db', 'Bbm'),\n\n 'Db': ('F#', 'Gb', 'Eb', 'Db', 'Bbm', 'Ab', 'Fm'),\n 'Bbm': ('F#', 'Gb', 'Eb', 'Db', 'Bbm', 'Ab', 'Fm'),\n\n 'Ab': ('Db', 'Bbm', 'Ab', 'Fm', 'Eb', 'Cm'),\n 'Fm': ('Db', 'Bbm', 'Ab', 'Fm', 'Eb', 'Cm'),\n\n 'Eb': ('Ab', 'Fm', 'Eb', 'Cm', 'Bb', 'Gm'),\n 'Cm': ('Ab', 'Fm', 'Eb', 'Cm', 'Bb', 'Gm'),\n\n 'Bb': ('Eb', 'Cm', 'Bb', 'Gm', 'F', 'Dm'),\n 'Gm': ('Eb', 'Cm', 'Bb', 'Gm', 'F', 'Dm'),\n\n 'F': ('Bb', 'Gm', 'F', 'Dm', 'C', 'Am'),\n 'Dm': ('Bb', 'Gm', 'F', 'Dm', 'C', 'Am')\n }",
"def one_hot_encoder(self, DNA_string, **kwargs):\n if kwargs['start_linker'] is None:\n start_linker = end_linker = \"A\" * self.linker_sequence_length\n else:\n start_linker = kwargs['start_linker'][-self.linker_sequence_length:]\n end_linker = kwargs['end_linker'][:self.linker_sequence_length]\n\n # if the \"ACGT\"\n # won't be added it will be impossible to convert sequnces which miss one of the letters\n DNA_string = start_linker + DNA_string + end_linker + \"ACGT\"\n trantab = DNA_string.maketrans('ACGT', '0123')\n data = list(DNA_string.translate(trantab))\n return to_categorical(data)[0:-4] # returns the matrix without the \"ACGT\"",
"def map2mw_D(d,k1,entry,mwverbs,cformsd):\n if k1 in map2mw_special_D:\n return map2mw_special_D[k1]\n ans = map2mw_D_1(d,k1,cformsd)\n if ans:\n return ans\n return '?'\n ans = map2mw_D_2(d,k1,entry,mwverbs)\n if ans:\n return ans\n \n k = re.sub(r'Ami$','',k1) \n if k in d:\n mwrec = d[k]\n if mwrec.cat == 'preverb':\n return k\n\n return '?'",
"def trans_chord(m):\n m = asmatrix(m)\n row_norms = sqrt(sum(square(m), axis=1))\n result = m / row_norms\n return result",
"def _create_conversion_trie(strict):\n t = pygtrie.CharTrie()\n\n for beta, uni in _map.BETACODE_MAP.items():\n if strict:\n t[beta] = uni\n else:\n # The order of accents is very strict and weak. Allow for many orders of\n # accents between asterisk and letter or after letter. This does not\n # introduce ambiguity since each betacode token only has one letter and\n # either starts with a asterisk or a letter.\n diacritics = beta[1:]\n\n perms = itertools.permutations(diacritics)\n for perm in perms:\n perm_str = beta[0] + ''.join(perm)\n t[perm_str.lower()] = uni\n t[perm_str.upper()] = uni\n\n return t",
"def mapperCRT(elt, p: int, q: int, action: bool = True, Verbose: bool = False):\n # Mapping\n if action:\n a = elt % p\n b = elt % q\n\n if Verbose and q != p:\n print(f\"Converting {elt} in Zpq to a in Zp and b in Zq.\")\n print(f\"With a = {a} mod {p} and b = {b} mod {q}\")\n\n return (a, b)\n\n x = ChineseRemainder(elt, [p, q], Verbose)\n return x",
"def _transform(self, original, coder):\n msg = list(original)\n for k in range(len(msg)):\n if 0x590 < ord(msg[k]) < 0xfb50:\n msg[k] = coder[msg[k]]\n return u\"\".join(msg)",
"def build_label_transform():\n\n return NALabelEncoder()",
"def question_new_translate():"
]
| [
"0.68381053",
"0.57690674",
"0.5698061",
"0.56648403",
"0.56513613",
"0.5516458",
"0.5503976",
"0.546884",
"0.5425767",
"0.5412974",
"0.5412974",
"0.5397893",
"0.5389627",
"0.5374363",
"0.5356093",
"0.5352136",
"0.53298694",
"0.53236294",
"0.5316585",
"0.5300051",
"0.5264589",
"0.5259459",
"0.52527124",
"0.5231217",
"0.5228716",
"0.5217337",
"0.52116704",
"0.5200117",
"0.51946276",
"0.5192101"
]
| 0.66158575 | 1 |
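Several of the negative snippets above implement codon-by-codon translation against a lookup table. As a point of comparison, a minimal, self-contained sketch of that pattern follows; the deliberately truncated codon table and the use of `'X'` for codons it does not cover are assumptions made for brevity, not taken from the snippets above.

```python
# Minimal codon-translation sketch in the spirit of the translation snippets above.
# CODON_TABLE is intentionally truncated; unknown codons map to 'X'.
CODON_TABLE = {'ATG': 'M', 'TGG': 'W', 'TTT': 'F', 'TTC': 'F', 'TAA': '*', 'TAG': '*', 'TGA': '*'}

def translate_frame(seq: str) -> str:
    protein = []
    # Walk the sequence three bases at a time, dropping any incomplete trailing codon.
    for i in range(0, len(seq) - len(seq) % 3, 3):
        aa = CODON_TABLE.get(seq[i:i + 3].upper(), 'X')
        if aa == '*':  # stop codon ends translation
            break
        protein.append(aa)
    return ''.join(protein)

print(translate_frame('ATGTGGTTTTAA'))  # -> 'MWF'
```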
Drop all dummy posts used during tests. | def tearDownClass(cls):
for post in cls.session.wp_post_objects:
cls.session.connection.call(DeletePost(post.id))
        # Exercise the cleanup() method, which should trash all dummy test posts
        # and drop them from the database.
cls.session.cleanup()
        # Assert that no items remain in the WordPress trash, i.e. every dummy post has been dropped from the database.
        assert not cls.session.connection.call(GetPosts({'post_status': 'trash'})), 'dummy test posts were not fully removed' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setUp(self):\n\n Post.query.delete()",
"def tearDown(self):\n db.drop_all()",
"def tearDown(self):\n db.drop_all()",
"def tearDown(self):\n\n User.query.delete()\n Blog.query.delete()",
"def tearDown(self):\n User.objects.all().delete()\n Project.objects.all().delete()\n Review.objects.all().delete()",
"def tearDown(self):\n Pics.objects.all().delete()\n Category.objects.all().delete()\n Location.objects.all().delete()",
"def tearDown(self):\n self.db.drop_all()\n pass",
"def setUp(self):\n Pet.remove_all()",
"def tearDown(self):\n\n User.objects.all().delete()\n Movie.objects.all().delete()\n Vote.objects.all().delete()",
"def wipe_test_data(alias='default'):\n import ikwen.core.models\n import ikwen.accesscontrol.models\n import permission_backend_nonrel.models\n import echo.models\n\n Group.objects.using(alias).all().delete()\n for name in ('Application', 'Service', 'Config', 'ConsoleEventType', 'ConsoleEvent', 'Country', ):\n model = getattr(ikwen.core.models, name)\n model.objects.using(alias).all().delete()\n for name in ('Member', 'AccessRequest', ):\n model = getattr(ikwen.accesscontrol.models, name)\n model.objects.using(alias).all().delete()\n for name in ('UserPermissionList', 'GroupPermissionList',):\n model = getattr(permission_backend_nonrel.models, name)\n model.objects.using(alias).all().delete()\n for name in ('SMSCampaign', 'SMSObject', 'Balance', 'Refill', 'Bundle', ):\n model = getattr(echo.models, name)\n model.objects.using(alias).all().delete()",
"def test_removePost(self):\n\t\tself.client.force_authenticate(user = User.objects.get(id=3))\n\n\t\tpost5 = Post.objects.create(author=User.objects.get(id=3), \n\t\t\ttext=\"Mahmut is best computer geek I have ever met\",\n\t\t\tgroup=Group.objects.get(id=3))\n\n\t\turl = \"/posts/4/\"\n\t\tresponse = self.client.delete(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n\t\turl = \"/posts/3/\"\n\t\tresponse = self.client.delete(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def purge_posts(app, env, docname):\n\n if hasattr(env, \"ablog_posts\"):\n env.ablog_posts.pop(docname, None)\n filename = os.path.split(docname)[1]\n env.domains[\"std\"].data[\"labels\"].pop(filename, None)\n env.domains[\"std\"].data[\"anonlabels\"].pop(filename, None)",
"def tearDown(self):\n account_models.User.objects.all().delete()\n photo_models.PhotoFeed.objects.all().delete()\n photo_models.PhotoClassification.objects.filter(name__in=[\"Rural\", \"Abstract\", \"City\"]).delete()\n test_helpers.clear_directory('backend/media/', '*.jpg')",
"def tearDown(self):\r\n testing.tearDown()\r\n empty_db()",
"def tearDown(self):\r\n testing.tearDown()\r\n empty_db()",
"def tearDown(self):\r\n testing.tearDown()\r\n empty_db()",
"def tearDown(self):\r\n testing.tearDown()\r\n empty_db()",
"def tearDown(self):\r\n testing.tearDown()\r\n empty_db()",
"def tearDown(self):\r\n testing.tearDown()\r\n empty_db()",
"def tearDown(self):\r\n testing.tearDown()\r\n empty_db()",
"def tearDown(self):\r\n testing.tearDown()\r\n empty_db()",
"def tearDown(self):\r\n testing.tearDown()\r\n empty_db()",
"def test_posts_count(self):\n tags_db = Tag.objects.all()\n for tag in tags_db:\n should_be = Post.objects.filter(tags=tag).count()\n self.assertEqual(tag.posts_count(), should_be)\n self.assertEqual(tag.total_posts, should_be)\n\n posts = Post.objects.filter(tags__title__in=['tag4', 'tag3'])\n for it in posts:\n it.delete()\n\n tags_db = Tag.objects.all()\n for tag in tags_db:\n should_be = Post.objects.filter(tags=tag).count()\n self.assertEqual(tag.posts_count(), should_be)\n self.assertEqual(tag.total_posts, should_be)\n\n self.clear_cache()\n for tag in tags_db:\n should_be = Post.objects.filter(tags=tag).count()\n self.assertEqual(tag.posts_count(), should_be)\n self.assertEqual(tag.total_posts, should_be)\n\n # class TagPinnedSearch(BaseTestCase):\n #\n # tag_count = 10\n #\n # def setUp(self):\n # super().setUp()\n #\n # tags = [Tag(title='tag{}'.format(it)) for it in range(TagPinnedSearch.tag_count)]\n # Tag.objects.bulk_create(tags)\n #\n # def test_pin_tag(self):\n # index = TagPinnedSearch.tag_count // 2\n # title = 'tag{}'.format(index)\n # url = reverse_lazy('tag-detail', kwargs={'pk': title})\n # url += 'pin/'\n #\n # response = self.put_json(url, {})\n #\n # self.assertEqual(response.status_code, status.HTTP_200_OK)\n #\n # self.user.refresh_from_db()\n #\n # self.assertEqual(self.user.pinned_tags.count(), 1)\n # self.assertTrue(self.user.pinned_tags.filter(title=title))\n #\n # def test_unpin_tag(self):\n # tag = Tag.objects.get(title='tag5')\n #\n # self.user.pinned_tags.add(tag)\n #\n # url = reverse_lazy('tag-detail', kwargs={'pk': tag.title})\n # url += 'unpin/'\n #\n # response = self.put_json(url, {})\n # self.user.refresh_from_db()\n #\n # self.assertEqual(response.status_code, status.HTTP_200_OK)\n # self.assertEqual(self.user.pinned_tags.count(), 0)\n #\n # def test_pinned_tag_list(self):\n # limit = 5\n # tags = Tag.objects.all()[:limit]\n #\n # for it in tags:\n # self.user.pinned_tags.add(it)\n #\n # url = reverse_lazy('tag-list')\n # url += 'pinned/'\n #\n # response = self.client.get(url)\n #\n # self.assertEqual(response.status_code, status.HTTP_200_OK)\n #\n # self.assertEqual(len(response.data['results']), limit)\n #\n # tags = {it.title for it in tags}\n # titles = {it['title'] for it in response.data['results']}\n #\n # self.assertEqual(tags, titles)",
"def clean_leftovers(tests):\n for test in tests:\n test.clean()",
"def clear_dummy_obj(self):\n for d in self.dummies:\n self.map.remove_node(d)\n\n self.dummies = []",
"def teardown_module():\n Tweet.delete_all()",
"def tearDown(self) -> None:\n self.things.db.session.remove() # type: ignore\n self.things.db.drop_all() # type: ignore",
"def tearDown(self) -> None:\n self.things.db.session.remove() # type: ignore\n self.things.db.drop_all() # type: ignore",
"def tearDown(self):\n Tag.objects.all().delete()\n super(TagTest, self).tearDown()",
"def tearDown(self):\r\n empty_db()"
]
| [
"0.6742712",
"0.63928",
"0.63928",
"0.63647366",
"0.63469243",
"0.62678415",
"0.6257659",
"0.6196355",
"0.6166326",
"0.61662084",
"0.6144767",
"0.6108191",
"0.6105125",
"0.6013753",
"0.6013753",
"0.6013753",
"0.6013753",
"0.6013753",
"0.6013753",
"0.6013753",
"0.6013753",
"0.6013753",
"0.598044",
"0.59653884",
"0.59635544",
"0.5962858",
"0.59442085",
"0.5934859",
"0.59174645",
"0.59120715"
]
| 0.6716512 | 1 |
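The tearDownClass above relies on a project-specific session wrapper. For context, a minimal sketch of the same cleanup pattern written directly against python-wordpress-xmlrpc is shown below; the endpoint URL, credentials, and the `created_post_ids` bookkeeping list are assumptions, not part of the record.

```python
import unittest

from wordpress_xmlrpc import Client
from wordpress_xmlrpc.methods.posts import DeletePost, GetPosts


class PostFixtureCleanupTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Hypothetical endpoint and credentials.
        cls.client = Client('https://example.com/xmlrpc.php', 'user', 'password')
        cls.created_post_ids = []  # tests append the ids of posts they create

    @classmethod
    def tearDownClass(cls):
        # First pass moves each fixture post to the trash (default WordPress behaviour).
        for post_id in cls.created_post_ids:
            cls.client.call(DeletePost(post_id))
        # Second pass permanently deletes whatever ended up in the trash.
        for post in cls.client.call(GetPosts({'post_status': 'trash'})):
            cls.client.call(DeletePost(post.id))
```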
The function rotate renders the bitmap within the full image canvas and rotates the whole canvas with a given rotator (an ImageRotator object containing the image size and the rotation angle). | def rotate(self, rotator):
full_img_data = np.zeros(rotator.src_imsize + self.data.shape[2:], dtype=self.data.dtype)
full_img_data[
self.origin.row:(self.origin.row + self.data.shape[0]),
self.origin.col:(self.origin.col + self.data.shape[1]), ...] = self.data[:, :, ...]
rotated_full_data = rotator.rotate_img(full_img_data, use_inter_nearest=True)
# Rotate the bounding box to find out the bounding box of the rotated bitmap within the full image.
rotated_bbox = self.to_bbox().rotate(rotator)
rotated_origin = PointLocation(row=rotated_bbox.top, col=rotated_bbox.left)
return MultichannelBitmap(data=rotated_bbox.get_cropped_numpy_slice(rotated_full_data), origin=rotated_origin) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rotate(self, image, angle):\n tmpImage = pygame.transform.rotate(image ,angle)\n imageCentreX = tmpImage.get_rect()[0] + tmpImage.get_rect()[2]/2\n imageCentreY = tmpImage.get_rect()[1] + tmpImage.get_rect()[3]/2\n\n targetWidth = tmpImage.get_rect()[2]\n targetHeight = tmpImage.get_rect()[3]\n\n imageOut = pygame.Surface((targetWidth, targetHeight))\n imageOut.fill((255,255,0))\n imageOut.set_colorkey((255,255,0))\n imageOut.blit(tmpImage,(0,0), pygame.Rect( imageCentreX-targetWidth/2,imageCentreY-targetHeight/2, targetWidth, targetHeight ) )\n return imageOut",
"def rotate(img, angle, resample=False, expand=False, center=None):\r\n \r\n return img.rotate(angle, resample, expand, center)",
"def rotate(self):\n\n last_center = self.rect.center\n self.image = pg.transform.rotate(self.image_copy,self.angle)\n self.rect = self.image.get_rect()\n self.rect.center = last_center\n self.angle +=self.rotate_by",
"def rotate(self, *args, **kwargs):\n return _image.image_rotate(self, *args, **kwargs)",
"def Rotate(angle):\n def rotate_img(img, angle=angle):\n img = Ft.rotate(img, angle, resample=BILINEAR)\n return img\n return rotate_img",
"def rotate(self, degrees):\n self.drawer.flush()\n self.img = self.img.rotate(degrees, PIL.Image.BICUBIC)\n self.update_drawer_img()\n return self",
"def _rotate(self, angle):\n self.rotatedImage = pygame.transform.rotozoom(self.needle, angle, 1.0)\n self.rotatedImageRectangle = self.rotatedImage.get_rect()\n\n # compensate for rotation of needle\n self.rotatedImageRectangle.center = (self.needlePos)\n self.rotatedImageRectangle.center += np.array([np.cos(math.radians(angle)) * self.needleOffset[0],\n -np.sin(math.radians(angle)) * self.needleOffset[1]])\n\n # blit images\n self._blit_images()",
"def rotate_and_wrap_image(self, image, degree_of_rotation):\n\n image_center = tuple(np.array(image.shape[1::-1]) / 2)\n rot_mat = cv2.getRotationMatrix2D(image_center, degree_of_rotation, 1.0)\n # borderMode (constant) and borderValue are important for maintaiing consistency \n ri = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR, borderMode = cv2.BORDER_CONSTANT,borderValue = (255,255,255))\n return ri",
"def rotate_no_clip(self, angle):\n # Calculate the size the expanded image needs to be to contain rotated image\n x, y = self.width, self.height\n w = abs(x*math.cos(angle)) + abs(y*math.sin(angle))\n h = abs(x*math.sin(angle)) + abs(y*math.cos(angle))\n\n # Paste the image into a larger frame and rotate\n img = Image.blank(w, h, 4, 0)\n img.paste(self, w/2-x/2, h/2-y/2)\n rotated = img.rotate(angle, (w/2, h/2))\n\n return rotated",
"def rotatedView(img, angle, enlarge=True, extend=Views.extendBorder):\n cx = img.dimension(0) / 2.0\n cy = img.dimension(1) / 2.0\n toCenter = AffineTransform2D()\n toCenter.translate(-cx, -cy)\n rotation = AffineTransform2D()\n # Step 1: place origin of rotation at the center of the image\n rotation.preConcatenate(toCenter)\n # Step 2: rotate around the Z axis\n rotation.rotate(radians(angle))\n # Step 3: undo translation to the center\n rotation.preConcatenate(toCenter.inverse())\n rotated = RV.transform(Views.interpolate(extend(img),\n NLinearInterpolatorFactory()), rotation)\n if enlarge:\n # Bounds:\n bounds = repeat((sys.maxint, 0)) # initial upper- and lower-bound values \n # for min, max to compare against \n transformed = zeros(2, 'f')\n for corner in product(*zip(repeat(0), Intervals.maxAsLongArray(img))):\n rotation.apply(corner, transformed)\n bounds = [(min(vmin, int(floor(v))), max(vmax, int(ceil(v))))\n for (vmin, vmax), v in zip(bounds, transformed)]\n minC, maxC = map(list, zip(*bounds)) # transpose list of 2 pairs\n # into 2 lists of 2 values\n imgRot = Views.zeroMin(Views.interval(rotated, minC, maxC))\n else:\n imgRot = Views.interval(rotated, img)\n return imgRot",
"def rotate(self, angle):\n self.rotatedImage = pygame.transform.rotate(self.needle, angle)\n self.rotatedImageRectangle = self.rotatedImage.get_rect()\n \n # compensate for rotation of needle\n self.rotatedImageRectangle.center = (NEEDLE_POS)\n self.rotatedImageRectangle.center += np.array([np.cos(math.radians(angle)) * OFFSET_X,\n -np.sin(math.radians(angle)) * OFFSET_Y])\n \n # blit instrument\n self.update()",
"def rotateIMG(self):\n self.blit_image = pygame.transform.rotate(self.blit_image, self.blitHeading - 45)\n self.rect = self.blit_image.get_rect()\n self.rect.center = (int(self.pos.x), int(self.pos.y))",
"def rot_image(original_image, angle):\n # Tรคmรค copypastettu jostain netistรค. Vaatii ettรค kuva on neliรถ.\n assert original_image.get_width() == original_image.get_height(), \\\n \"Can't rotate image - not square. %r\" % original_image\n orig_rect = original_image.get_rect()\n rot_image = pygame.transform.rotate(original_image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image",
"def rotate(img, angle, resample=False, expand=False, center=None):\n\n #if not _is_pil_image(img):\n # raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n return img.rotate(angle, resample, expand, center)",
"def rotated_image(image):\n orientation = parameter('Orientation',90) # in degrees counter-clockwise\n if orientation == None: orienation = 0\n return image.rotate(orientation)",
"def rotate(self, angle: NumberType, auto_checkpoint: bool = True) -> 'BaseImage':\n assert isinstance(angle, NumberInstance)\n if angle == self._angle:\n return self\n if not self._rotated and auto_checkpoint:\n self.checkpoint()\n if self._rotated:\n self.restore()\n self._rotated = True\n self._surface = pygame.transform.rotate(self._surface, angle)\n self._angle = angle % 360\n return self",
"def create_modified_image(input_image, rotation_angle, x_offset, y_offset):\n image = Image.open(input_image)\n width, height = image.size\n\n # Calculate the maximum size needed to fit the rotated image\n max_size = max(image.size)\n max_width = max_size if width == max_size else int(max_size * (width / height))\n max_height = max_size if height == max_size else int(max_size * (height / width))\n\n # Create a blank canvas with the maximum size\n modified_image = Image.new('RGB', (max_width, max_height), color=0)\n\n # Rotate the input image\n rotated_image = image.rotate(rotation_angle, expand=True)\n rotated_image = rotated_image.resize((width*5, height*2))\n\n # Calculate the new position based on the offsets\n new_x = int((max_width - rotated_image.width) / 2 + x_offset)\n new_y = int((max_height - rotated_image.height) / 2 + y_offset)\n\n # Paste the rotated image onto the blank canvas\n modified_image.paste(rotated_image, (new_x, new_y))\n\n # Resize the modified image to fit the canvas\n modified_image = modified_image.resize((width*2, height*2))\n\n return modified_image",
"def _rotate_img(self, results):\n angle = self.angle\n height, width = results['img_shape'][:2]\n center = ((width - 1) * 0.5, (height - 1) * 0.5)\n\n # Compute rotation matrix\n mat_rotation = cv2.getRotationMatrix2D(center, -angle, 1)\n\n # Compute height and width of new image after rotation\n cos = np.abs(mat_rotation[0, 0])\n sin = np.abs(mat_rotation[0, 1])\n width_new = height * sin + width * cos\n height_new = height * cos + width * sin\n mat_rotation[0, 2] += (width_new - width) * 0.5\n mat_rotation[1, 2] += (height_new - height) * 0.5\n width_new = int(np.round(width_new))\n height_new = int(np.round(height_new))\n \n # Rotate image\n img_rotation = cv2.warpAffine(results['img'], mat_rotation, (width_new, height_new),\n borderValue=self.borderValue)\n results['img'] = img_rotation\n results['img_shape'] = results['img'].shape\n\n # Rotate corresponding annotations: all boxes in bbox_fields\n for key in results.get('bbox_fields', []):\n gt_boxes_ret = []\n for poly in results[key]:\n rot_array = []\n poly_length = len(poly)\n\n if poly_length == 4:\n poly = [poly[0], poly[1], poly[2], poly[1], poly[2], poly[3], poly[0], poly[3]]\n\n # Convert to np.array of shape (:, 2)\n for i in range(0, len(poly), 2):\n rot_array.append(np.array([poly[i], poly[i + 1]]))\n\n # Rotate corresponding annotations\n rot_array = np.array([rot_array])\n rot_array = cv2.transform(rot_array, mat_rotation).squeeze().reshape(len(poly))\n\n if poly_length == 4:\n x_coords = rot_array[0::2]\n y_coords = rot_array[1::2]\n rot_array = np.array([\n np.min(x_coords),\n np.min(y_coords),\n np.max(x_coords),\n np.max(y_coords)\n ])\n\n gt_boxes_ret.append(rot_array)\n if len(results[key]) > 0:\n results[key] = gt_boxes_ret\n\n # Rotate gt_bboxes according to gt_poly_bboxes\n if 'gt_bboxes' in results and 'gt_poly_bboxes' in results:\n gt_bboxes = []\n gt_bboxes_ignore = []\n\n for poly in results['gt_poly_bboxes']:\n poly = np.array(poly, dtype=np.double)\n x_coords = poly[0::2]\n y_coords = poly[1::2]\n aligned_poly = [\n np.min(x_coords),\n np.min(y_coords),\n np.max(x_coords),\n np.max(y_coords)\n ]\n gt_bboxes.append(aligned_poly)\n\n for poly in results['gt_poly_bboxes_ignore']:\n poly = np.array(poly, dtype=np.double)\n x_coords = poly[0::2]\n y_coords = poly[1::2]\n aligned_poly = [\n np.min(x_coords),\n np.min(y_coords),\n np.max(x_coords),\n np.max(y_coords)\n ]\n gt_bboxes_ignore.append(aligned_poly)\n\n if len(gt_bboxes) == 0:\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n else:\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n\n if len(gt_bboxes_ignore) == 0:\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n else:\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n\n results['gt_bboxes'] = gt_bboxes\n results['gt_bboxes_ignore'] = gt_bboxes_ignore\n\n # Rotate corresponding annotations: all boxes in cbbox_fields\n for key in results.get('cbbox_fields', []):\n gt_cboxes_ret = []\n for instance in results[key]:\n tmp_cboxes = []\n for poly in instance:\n rot_array = []\n # Convert to np.array of shape (:, 2)\n for i in range(0, len(poly), 2):\n rot_array.append(np.array([poly[i], poly[i + 1]]))\n rot_array = np.array([rot_array])\n # Rotate corresponding annotations\n rot_array = cv2.transform(rot_array, mat_rotation).squeeze().reshape(len(poly))\n tmp_cboxes.append(rot_array)\n gt_cboxes_ret.append(tmp_cboxes)\n results[key] = gt_cboxes_ret\n\n # Rotate corresponding annotations: all masks in mask_fields\n for key in results.get('mask_fields', []):\n mask = 
results[key].masks.transpose((1, 2, 0))\n if len(results[key].masks) == 0:\n results[key] = results[key].resize((height_new, width_new))\n else:\n # Rotate mask\n mask_rotation = cv2.warpAffine(mask, mat_rotation, (width_new, height_new),\n borderValue=self.borderValue)\n if mask_rotation.ndim == 2:\n # case when only one mask, (h, w)\n mask_rotation = mask_rotation[:, :, None] # (h, w, 1)\n mask_rotation = mask_rotation.transpose((2, 0, 1))\n results[key].masks = mask_rotation\n results[key].height = height_new\n results[key].width = width_new",
"def rotate(image, rect, angle):\n new_image = pygame.transform.rotate(image, angle) # Rotate the original image without modifying it.\n rect = new_image.get_rect(center=rect.center) # Get a new rect with the center of the old rect.\n return new_image, rect",
"def rotate(self, angle):\n image_center = np.array(self.img.shape[1::-1]) / 2\n rot_mat = cv2.getRotationMatrix2D(tuple(image_center), angle, 1.0)\n\n self.img = cv2.warpAffine(\n self.img, rot_mat, self.img.shape[1::-1], flags=cv2.INTER_LINEAR\n )\n\n self.edits.append(f\"rotate:{angle}\")\n return self",
"def img_rotate(img, angle, center, fillval=0):\n rows, cols = img.shape[:2]\n M = cv2.getRotationMatrix2D(center, angle, 1)\n return cv2.warpAffine(img, M, (cols, rows), borderValue=fillval)",
"def rotateImage(self, img, angle=90):\n if (angle == 90) :\n return(cv2.flip(cv2.transpose(img),flipCode=0))\n elif (angle == -90) :\n return(cv2.flip(cv2.transpose(img),flipCode=1))\n else :\n center = (img.shape[1]/2.0,img.shape[0]/2.0)\n rotate = cv2.getRotationMatrix2D(center, angle, 1.0)\n return cv2.warpAffine(img, rotate, (img.shape[1], img.shape[0]))",
"def rotateImage(self):\n self.cnvImgOrig.rotate(\"./images/origPic.tiff\")\n self.cnvImgTest.rotate(\"./images/testPic.tiff\")",
"def rotate(self):\r\n # Rotate the image.\r\n self.image = pg.transform.rotozoom(self.orig_image, -self.angle, 1)\r\n # Rotate the offset vector.\r\n offset_rotated = self.offset.rotate(self.angle)\r\n print(\"offset_rotated:\", offset_rotated)\r\n # Create a new rect with the center of the sprite + the offset.\r\n self.rect = self.image.get_rect(center=self.pos+offset_rotated)",
"def rotate(self, radians, center):\n degrees = radians * 180 / math.pi\n matrix = opencv.getRotationMatrix2D(center.tuple(), degrees, 1.0)\n\n rotated = opencv.warpAffine(self.img, matrix, (self.width, self.height))\n return Image(rotated)",
"def _spin(self):\n center = self.rect.center\n self.dizzy += 12 # rotate 12 degree clockwise\n\n if self.dizzy >= 360:\n self.dizzy = 0\n self.image = self.original # reset the image to its original ones after rotated\n else:\n self.image = pygame.transform.rotate(self.original, self.dizzy)\n\n self.rect = self.image.get_rect()\n self.rect.center = center # make sure the image would not move when spinning",
"def rotate(self, angle):\n rotmat = rotation_matrix_2d(angle)\n rotated = np.dot(rotmat.T, [self.pix_x.value, self.pix_y.value])\n self.pix_x = rotated[0] * self.pix_x.unit\n self.pix_y = rotated[1] * self.pix_x.unit\n self.pix_rotation -= angle",
"def rot_center(self, image, angle):\n orig_rect = image.get_rect()\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image",
"def rot_center(self, image, angle):\n orig_rect = image.get_rect()\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image",
"def rotate(self, angle, resample=NEAREST, expand=0, center=None,\r\n translate=None, fillcolor=None):\r\n angle = angle % 360.0\r\n if fillcolor is None:\r\n fillcolor = (0, 0, 0)\r\n if expand == 0:\r\n # grab the dimensions of the image\r\n h, w = self.size[1], self.size[0]\r\n\r\n # if the center is None, initialize it as the center of\r\n # the image\r\n if center is None:\r\n center = (w // 2, h // 2)\r\n scale = 1.0\r\n # perform the rotation\r\n M = cv2.getRotationMatrix2D(center, angle, scale)\r\n _im = cv2.warpAffine(self._instance, M, (w, h), borderValue=fillcolor)\r\n else:\r\n _im = self.rotate_bound(angle)\r\n if translate is not None:\r\n _im = self.translated(_im, translate[0], translate[0])\r\n return Image(_im)"
]
| [
"0.7114526",
"0.6855607",
"0.685048",
"0.6798645",
"0.67613417",
"0.6659643",
"0.66030043",
"0.6512459",
"0.64869493",
"0.64115447",
"0.6395608",
"0.63924265",
"0.63760424",
"0.63394535",
"0.6326849",
"0.63236195",
"0.63056964",
"0.6284121",
"0.62591845",
"0.6254835",
"0.62527454",
"0.6240356",
"0.62353086",
"0.6188269",
"0.61368906",
"0.60942316",
"0.6089414",
"0.6077915",
"0.6077915",
"0.6076881"
]
| 0.7153731 | 0 |
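The rotate method above depends on library-specific ImageRotator and PointLocation types. Below is a minimal sketch of the same "paste onto a full canvas, rotate the canvas, re-crop the patch" idea using only NumPy and SciPy; the function name and the assumption that the patch contains at least one non-zero value are mine, not the library's.

```python
import numpy as np
from scipy import ndimage


def rotate_patch_on_canvas(patch, origin, canvas_hw, angle_deg):
    """Paste `patch` at `origin` (row, col) on a zero canvas of shape `canvas_hw`,
    rotate the whole canvas, then crop back to the rotated patch's bounding box."""
    h, w = canvas_hw
    canvas = np.zeros((h, w) + patch.shape[2:], dtype=patch.dtype)
    row, col = origin
    canvas[row:row + patch.shape[0], col:col + patch.shape[1], ...] = patch
    # order=0 is nearest-neighbour interpolation, matching use_inter_nearest=True above.
    rotated = ndimage.rotate(canvas, angle_deg, axes=(1, 0), reshape=False, order=0)
    # Bounding box of the rotated, non-empty area (assumes the patch is not all zeros).
    occupied = rotated.reshape(h, w, -1).any(axis=2)
    rows, cols = np.nonzero(occupied)
    top, left = rows.min(), cols.min()
    cropped = rotated[top:rows.max() + 1, left:cols.max() + 1]
    return cropped, (top, left)
```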
Crop the current MultichannelBitmap object with a given rectangle | def crop(self, rect):
maybe_cropped_area = self.to_bbox().crop(rect)
if len(maybe_cropped_area) == 0:
return []
else:
[cropped_area] = maybe_cropped_area
cropped_origin = PointLocation(row=cropped_area.top, col=cropped_area.left)
cropped_area_in_data = cropped_area.translate(drow=-self._origin.row, dcol=-self.origin.col)
return [MultichannelBitmap(data=cropped_area_in_data.get_cropped_numpy_slice(self._data),
origin=cropped_origin,)] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def crop_rect(self, rect: 'pygame.Rect') -> 'BaseImage':\n self._surface = self.get_crop_rect(rect)\n return self",
"def crop(\n self,\n x: NumberType,\n y: NumberType,\n width: NumberType,\n height: NumberType\n ) -> 'BaseImage':\n self._surface = self.get_crop(x, y, width, height)\n return self",
"def crop(image, dimX, dimY):\n # TODO\n return image",
"def crop_img(image, bound):\n scale = 1.01 # 1%\n return image.crop((bound.vertices[0].x // scale, bound.vertices[0].y // scale,\n int(bound.vertices[2].x * scale), int(bound.vertices[2].y) * scale))",
"def crop(self, bbox):\n if self.__huge: # image is huge and not totally in RAM\n band = bbox[3] - bbox[1] # width of the tile band\n self.__tile[1][3] = band # set the tile height\n self.__tile[2] = self.__offset + self.imwidth * bbox[1] * 3 # set offset of the band\n self.__image.close()\n self.__image = Image.open(self.path) # reopen / reset image\n self.__image.size = (self.imwidth, band) # set size of the tile band\n self.__image.tile = [self.__tile]\n return self.__image.crop((bbox[0], 0, bbox[2], band))\n else: # image is totally in RAM\n return self.__pyramid[0].crop(bbox)",
"def doCrop(image, x, y, w, h):\n\tcrop_height = int((config.FACE_HEIGHT / float(config.FACE_WIDTH)) * w)\n\tmidy = y + h/2\n\ty1 = max(0, midy-crop_height/2)\n\ty2 = min(image.shape[0]-1, midy+crop_height/2)\n\treturn image[y1:y2, x:x+w]",
"def crop(img: 'np.ndarray', x: int, y: int, width: int, height: int) -> 'np.ndarray':\n return img[y:y+height, x:x+width]",
"def faceCrop(im,x,y,w,h,m):\r\n sizeX, sizeY = im.size\r\n new_x, new_y = max(0,x-m*w), max(0,y-m*h)\r\n new_w = w + 2*m*w if sizeX > (new_x + w + 2*m*w) else sizeX - new_x\r\n new_h = h + 2*m*h if sizeY > (new_y + h + 2*m*h) else sizeY - new_y\r\n new_x,new_y,new_w,new_h = int(new_x),int(new_y),int(new_w),int(new_h)\r\n return im.crop((new_x,new_y,new_x+new_w,new_y+new_h))",
"def _generate_crop(self):\n if self.box_drawn == True:\n if (self.cd_pic_num != -1) & (self.cd_crop_num == 1):\n self.communicator.generate_crop(picture_num=self.cd_pic_num, \\\n xa=self.xa, ya=self.ya, xb=self.xb, yb=self.yb)\n else:\n print \"ERROR: can only generate a new crop from a thumbnail\"\n else:\n print \"ERROR: please select an area to generate a crop from\"",
"def crop(img, x, y, w, h):\n check_type(img)\n return img.crop((x, y, x + w, y + h))",
"def crop(self, *args, **kwargs):\n return _image.image_crop(self, *args, **kwargs)",
"def crop(self,crop_vector = [None, None, None, None]):\n xmin,xmax,ymin,ymax = crop_vector\n \n xmin = self._obj.x.min() if xmin is None else xmin\n xmax = self._obj.x.max() if xmax is None else xmax\n ymin = self._obj.y.min() if ymin is None else ymin\n ymax = self._obj.y.max() if ymax is None else ymax \n \n self._obj = self._obj.sel(x=slice(xmin, xmax),y=slice(ymin,ymax))\n\n return self._obj",
"def get_crop(\n self,\n x: NumberType,\n y: NumberType,\n width: NumberType,\n height: NumberType\n ) -> 'pygame.Surface':\n assert 0 <= x < self.get_width(), \\\n 'X position must be between 0 and the image width'\n assert 0 <= y < self.get_height(), \\\n 'Y position must be between 0 and the image width'\n assert 0 < width <= self.get_width(), \\\n 'Width must be greater than zero and less than the image width'\n assert 0 < height <= self.get_height(), \\\n 'Height must be greater than zero and less than the image height'\n assert (x + width) <= self.get_width(), \\\n 'Crop box cannot exceed image width'\n assert (y + height) <= self.get_height(), \\\n 'Crop box cannot exceed image height'\n rect = pygame.Rect(0, 0, 0, 0)\n rect.x = x\n rect.y = y\n rect.width = width\n rect.height = height\n return self.get_crop_rect(rect)",
"def crop(self, coords):\n pass",
"def get_crop_rect(self, rect: 'pygame.Rect') -> 'pygame.Surface':\n return self._surface.subsurface(rect)",
"def crop_bounding_box(im, x, y, w, h):\n return im[y:y+h, x:x+w]",
"def crop(img, boundaries):\n minx, miny, maxx, maxy = boundaries\n return img[miny:maxy, minx:maxx]",
"def __call__(self, img):\n image_width, image_height = img.size\n image_short = min(image_width, image_height)\n\n crop_size = float(self.imgsize) / (self.imgsize + 32) * image_short\n\n crop_height, crop_width = crop_size, crop_size\n crop_top = int(round((image_height - crop_height) / 2.))\n crop_left = int(round((image_width - crop_width) / 2.))\n return img.crop((crop_left, crop_top, crop_left + crop_width, crop_top + crop_height))",
"def __call__(self, img):\n image_width, image_height = img.size\n image_short = min(image_width, image_height)\n\n crop_size = float(self.imgsize) / (self.imgsize + 32) * image_short\n\n crop_height, crop_width = crop_size, crop_size\n crop_top = int(round((image_height - crop_height) / 2.))\n crop_left = int(round((image_width - crop_width) / 2.))\n return img.crop((crop_left, crop_top, crop_left + crop_width, crop_top + crop_height))",
"def crop_to_square(self, image):\n orig_height, orig_width, orig_channels = image.shape\n if orig_height > orig_width:\n return image[:orig_width, ...]\n elif orig_height < orig_width:\n return image[:, :orig_height, ...]\n return image",
"def crop_frame(frame, crop_region):\n tl_x = crop_region['top_left_x'] \n tl_y = crop_region['top_left_y']\n br_x = crop_region['bottom_right_x']\n br_y = crop_region['bottom_right_y']\n return frame[tl_y:br_y, tl_x:br_x]",
"def crop_frame(frame):\n (h,w,c) = frame.shape\n return frame[int(h/2):h, 0:w]",
"def crop(self, bbox):\n return self.__pyramid[0].crop(bbox)",
"def crop(self, image):\n\t\treturn image.copy()[self.ymin:self.ymax,self.xmin:self.xmax]",
"def crop(self,channel,center_coord,crop_size,z_coord=None,z_size=1): \n x1=center_coord[0]-int(crop_size/2)\n x2=x1+crop_size\n y1=center_coord[1]-int(crop_size/2)\n y2=y1+crop_size\n img_crop=MicImage()\n img_crop._metaData={**self._metaData}\n img_crop.xml=self.xml\n\n\n if z_coord is not None and z_size>1:\n z1=z_coord-int(z_size/2)\n if z1<0:\n z1=0\n z2=z1+z_size\n if (z_coord is not None and z_size==1):\n z1=z_coord\n z2=z1+1\n if z_coord is None:\n z1=0\n z2=-1\n\n img_crop.pixels= self.pixels[z1:z2,x1:x2,y1:y2,channel]\n \n if img_crop.pixels.shape[0]==1:\n img_crop.pixels=np.squeeze(img_crop.pixels)\n img_crop.sumprj=np.squeeze(img_crop.pixels)\n img_crop.maxprj=np.squeeze(img_crop.pixels)\n else:\n img_crop.prj(\"max\")\n img_crop.prj(\"sum\")\n img_crop._metaData.update({\"size_x\": crop_size})\n img_crop._metaData.update({\"size_x\": crop_size})\n\n return img_crop",
"def crop(img):\n new_shape = min(img.shape[0], img.shape[1])\n \n return img[0:new_shape, 0:new_shape, ...]",
"def Crop_Image(img, mask, x, y, width, height):\n img = img[y:y+height, x:x+width,:]\n mask = mask[y:y+height, x:x+width,:]\n return img, mask",
"def crop_image(image):\r\n return image[40:-20, :]",
"def crop(img, i, j, h, w):\n return img.crop((j, i, j + w, i + h))",
"def crop(img, i, j, h, w):\n return img.crop((j, i, j + w, i + h))"
]
| [
"0.70140654",
"0.69734436",
"0.6824329",
"0.6681341",
"0.6640309",
"0.6611977",
"0.656057",
"0.6558125",
"0.64772123",
"0.64610535",
"0.64189845",
"0.64062244",
"0.63840437",
"0.63834655",
"0.63748735",
"0.6367454",
"0.63590825",
"0.6340213",
"0.6340213",
"0.6328262",
"0.6312861",
"0.6309528",
"0.63065594",
"0.6302325",
"0.6286966",
"0.6284825",
"0.6278765",
"0.6253471",
"0.6242938",
"0.6242938"
]
| 0.7179569 | 0 |
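The crop above delegates rectangle intersection to the library's bounding-box types. A minimal sketch of the same intersect-then-slice logic is given below; the (top, left, bottom, right) rectangle convention with exclusive bottom/right edges is an assumption chosen for the example.

```python
def crop_patch(data, origin, rect):
    """Crop a NumPy patch placed at `origin` (row, col) with an image-space rectangle
    `rect` = (top, left, bottom, right); returns [] when they do not overlap."""
    top, left, bottom, right = rect
    p_top, p_left = origin
    p_bottom, p_right = p_top + data.shape[0], p_left + data.shape[1]
    # Intersect the rectangle with the patch's own bounding box.
    i_top, i_left = max(top, p_top), max(left, p_left)
    i_bottom, i_right = min(bottom, p_bottom), min(right, p_right)
    if i_top >= i_bottom or i_left >= i_right:
        return []
    # Translate the intersection into patch-local coordinates and slice the data.
    local = data[i_top - p_top:i_bottom - p_top, i_left - p_left:i_right - p_left]
    return [(local, (i_top, i_left))]
```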
Resize the current MultichannelBitmap to match a certain size | def resize(self, in_size, out_size):
scaled_origin, scaled_data = resize_origin_and_bitmap(self._origin, self._data, in_size, out_size)
return MultichannelBitmap(data=scaled_data, origin=scaled_origin) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Rescale(self):\r\n picWidth,picHeight = self.oldSize = self.GetSizeTuple()\r\n bitmap = self.scaled = self.bitmap\r\n if not bitmap: return\r\n imgWidth,imgHeight = bitmap.GetWidth(),bitmap.GetHeight()\r\n if self.scaling == 2 or (self.scaling == 1 and (imgWidth > picWidth or imgHeight > picHeight)):\r\n image = bitmap.ConvertToImage()\r\n factor = min(1.0*picWidth/imgWidth,1.0*picHeight/imgHeight)\r\n newWidth,newHeight = int(factor*imgWidth),int(factor*imgHeight)\r\n self.scaled = image.Scale(newWidth,newHeight).ConvertToBitmap()\r\n #self.scaled = image.Scale(newWidth,newHeight,wx.IMAGE_QUALITY_HIGH ).ConvertToBitmap()\r",
"def SetUniformBitmapSize(self, size):\r\n\r\n self._requested_bmp_size = wx.Size(*size)\r\n\r\n # if window is already initialized, recalculate the tab height\r\n if self._dummy_wnd:\r\n self.UpdateTabCtrlHeight()",
"def SetSize(*args, **kwargs):\n return _gdi_.Bitmap_SetSize(*args, **kwargs)",
"def _resize(img, max_dim=128):\n if max(img.shape[:3]) <= max_dim:\n return img\n else:\n new_size = [max_dim / s if s >= max_dim else 1.0 for s in img.shape[:3]]\n new_size.append(1.0) # for channel\n return scipy.ndimage.zoom(img, new_size, order=2)",
"def resize(self):\n pass",
"def Pane_Resized( self, new_sizes ):\r\n if(new_sizes[0] > 200 ):\r\n cb.xtotal = new_sizes[0]-100\r\n self.canvas_one.config(width = new_sizes[0])\r\n self.canvas_scale.config(width = new_sizes[0])\r\n else:\r\n cb.xtotal = 200-100\r\n self.canvas_one.config(width = 200)\r\n self.canvas_scale.config(width = 200)\r\n if (len(new_sizes) > 1 ):\r\n self.canvas_two.config(width=new_sizes[1])\r\n self.system.Draw()",
"def rescale(self, factor):\n scaled_size = (int(self.width * factor), int(self.height * factor))\n return self.resize(scaled_size)",
"def resized_map(self, new_size):\n\n new_map = cv2.resize(self.map.copy(), new_size)\n cur_count = np.sum(new_map)\n\n # Avoid dividing by zero\n if cur_count == 0:\n return new_map\n\n scale = self.count / cur_count\n new_map *= scale\n return new_map",
"def _zoomCamera(self, sizeChange):\n self.camSize -= sizeChange",
"def adjust(self, image):\n ...",
"def update_size(self):\n self.size = self.image.size\n self.width, self.height = self.size",
"def rescale(self, event: tkinter.Event) -> None:\n # the properties which are linked to the event of reconfiguration\n # contain all the new sizes of the panel :\n self.width, self.height = event.width - 4, event.height - 4\n # The subtraction of 4 pixels is here to compensate the width\n # of the 'highlight bordure' rolling the canvas)\n self.draw_board()",
"def _resize_short_within(self, img, short, max_size, mult_base=1, interp=Image.BILINEAR):\n w, h = img.size\n im_size_min, im_size_max = (h, w) if w > h else (w, h)\n scale = float(short) / float(im_size_min)\n if np.round(scale * im_size_max / mult_base) * mult_base > max_size:\n # fit in max_size\n scale = float(np.floor(max_size / mult_base) * mult_base) / float(im_size_max)\n new_w, new_h = (int(np.round(w * scale / mult_base) * mult_base),\n int(np.round(h * scale / mult_base) * mult_base))\n img = img.resize((new_w, new_h), interp)\n return img",
"def set_size(self, width, height):\r\n \r\n self.image = pygame.transform.scale(self.image, (width, height))\r\n self.rect = self.image.get_rect()",
"def resize(self, new_size):\n resized_img = opencv.resize(self.img, new_size)\n return Image(resized_img)",
"def set_size(self, size=None):\n if not size:\n size = self.output_size\n self.img = cv2.resize(self.img, size)\n self.update_image()\n self.update_size()",
"def scale_widget_to_image_size(self):\n if self._image is not None:\n im = self._image.make_image()\n self.width = im.shape[1]\n self.height = im.shape[0]",
"def resize(self, width, height):\n\t\tself._set_image(\n\t\t\tSolidColorImagePattern(\n\t\t\t\tcolor=(self._r,self._g,self._b,self._a)\n\t\t\t).create_image(width, height)\n\t\t)",
"def resize(img):\n size = (500, 500)\n img.thumbnail(size)\n return img",
"def resizeImage(self):\n ratio = float(self.qIma.width()) / float(self.qIma.height())\n if self.qIma.width() > self.qIma.height():\n maxWidth = 300\n maxHeight = int(300 / ratio)\n else:\n maxWidth = int(300 / ratio)\n maxHeight = 300\n img = self.qIma.toImage().scaled(maxWidth, maxHeight, QtCore.Qt.KeepAspectRatio)\n return img",
"def resize(self, old, new):",
"def _set_pixel_size(self) -> None:\n # Not Pansharpened images\n if self.band_combi == Sv1BandCombination.PMS:\n # TODO: manage default resolution for PAN band ?\n self.pixel_size = self._ms_res\n # Pansharpened images\n else:\n self.pixel_size = self._pan_res",
"def scaleToBBXSize(sourceImg, targetBBX):\n outImage = cv2.resize(src=sourceImg,\n dsize=(targetBBX[3], targetBBX[4]),\n interpolation=cv2.INTER_AREA)\n\n return outImage",
"def resizePreview(self):\n ratio = float(self.qIma.width()) / float(self.qIma.height())\n if self.qIma.width() > self.qIma.height():\n width = 300\n height = int(float(width) / ratio)\n else:\n height = 170\n width = int(float(height) / ratio)\n if 'prodManager' in os.path.basename(self._ima):\n width = 300\n height = 170\n self.lPreview.setMinimumSize(width, height)\n self.lPreview.setMaximumSize(width, height)",
"def resize(self, size: Tuple[int, int]):\n if self._immutable:\n raise ValueError(\"Immutable textures cannot be resized\")\n\n gl.glActiveTexture(gl.GL_TEXTURE0 + self._ctx.default_texture_unit)\n gl.glBindTexture(self._target, self._glo)\n\n self._width, self._height = size\n\n self._texture_2d(None)",
"def setPixelsPerInchShrinkToFit(self,value):\n self.PDFreactorConfiguration.in1[\"pixelsPerInchShrinkToFit\"] = value",
"def resize(img, size):\n img = cv2.resize(img, tuple(size[::-1]))\n return img",
"def _pool_and_resize(self):\n # Pool if there are enough screens to do so.\n if self.frame_skip > 1:\n np.maximum(\n self.screen_buffer[0],\n self.screen_buffer[1],\n out=self.screen_buffer[0])\n\n transformed_image = cv2.resize(\n self.screen_buffer[0], (self.screen_size, self.screen_size),\n interpolation=cv2.INTER_AREA)\n int_image = np.asarray(transformed_image, dtype=np.uint8)\n return np.expand_dims(int_image, axis=2)",
"def _scale_to_mbs_frame(self : \"animation\",\n img : \"np.ndarray\"\n ) -> \"np.ndarray\":\n xnew = img.shape[0] + self._mbs - img.shape[0]%self._mbs\n ynew = img.shape[1] + self._mbs - img.shape[1]%self._mbs\n return (255*resize(img, (xnew, ynew))).astype(np.uint8)",
"def ConvertToScaledBitmap(self, size, window=None):\n size = wx.Size(*size)\n if window:\n size.width = int(size.width * window.GetContentScaleFactor())\n size.height = int(size.height * window.GetContentScaleFactor())\n\n # We can only have one overall scale factor for both dimensions with\n # this rasterization method, so chose either the minimum of width or\n # height to help ensure it fits both ways within the specified size.\n sx = size.width / self.width\n sy = size.height / self.height\n scale = min(sx, sy)\n return self.ConvertToBitmap(scale=scale, width=size.width, height=size.height)"
]
| [
"0.6741827",
"0.62626356",
"0.60623795",
"0.60617864",
"0.6012608",
"0.59815633",
"0.5941668",
"0.59120166",
"0.5884829",
"0.58750904",
"0.5854437",
"0.5848284",
"0.5848173",
"0.5843797",
"0.5824694",
"0.5809576",
"0.58015764",
"0.5798132",
"0.5786924",
"0.57853585",
"0.5768778",
"0.57661206",
"0.5733641",
"0.5718825",
"0.5717419",
"0.57093257",
"0.5688968",
"0.56639487",
"0.56592417",
"0.56506544"
]
| 0.73829323 | 0 |
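resize_origin_and_bitmap is internal to the library, so below is a minimal sketch of the scaling it presumably performs, written with scipy.ndimage.zoom and nearest-neighbour interpolation; the rounding applied to the origin is an assumption.

```python
from scipy import ndimage


def resize_patch(data, origin, in_size, out_size):
    """Rescale a patch and its (row, col) origin when the full image is resized
    from in_size=(height, width) to out_size=(height, width)."""
    row_scale = out_size[0] / in_size[0]
    col_scale = out_size[1] / in_size[1]
    new_origin = (int(round(origin[0] * row_scale)), int(round(origin[1] * col_scale)))
    # Zoom only the two spatial axes; any trailing channel axes keep a factor of 1.0.
    factors = (row_scale, col_scale) + (1.0,) * (data.ndim - 2)
    resized = ndimage.zoom(data, factors, order=0)  # nearest-neighbour, preserves values exactly
    return resized, new_origin
```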
The function base64_2_data converts a base64-encoded string to a NumPy array. | def base64_2_data(s: str) -> np.ndarray:
saved_bytes = io.BytesIO(zlib.decompress(base64.b64decode(s)))
return np.load(saved_bytes) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def from_base64(x):\n\treturn numpy.fromstring(\n\t\tbinascii.a2b_base64(x),\n\t\tdtype=numpy.uint8\n\t)",
"def base64_decode_array(inStr, dtype):\n return np.frombuffer(base64.decodestring(inStr), dtype=dtype)",
"def base64_to_numpy_image(b64: str) -> np.ndarray:\n image = np.array(Image.open(BytesIO(base64.b64decode(b64))))\n return image",
"def base64_decode_image(inStr):\n imgDat, imgType, imgShape = json.loads(inStr)\n imgDat = bytes(imgDat, encoding=\"utf-8\")\n\n imgDat = base64_decode_array(imgDat, imgType)\n imgDat = imgDat.reshape(imgShape)\n return imgDat",
"def base_64_to_img(base_64_string):\r\n # convert image into np array\r\n return cv2.imdecode(\r\n np.frombuffer(base64.b64decode(base_64_string.split(\";base64,\").pop()), np.uint8),\r\n cv2.IMREAD_COLOR)",
"def data_2_base64(data: np.ndarray) -> str:\n bytes_io = io.BytesIO()\n np.save(bytes_io, data, allow_pickle=False)\n return base64.b64encode(zlib.compress(bytes_io.getvalue())).decode('utf-8')",
"def base64_2_mask(s: str) -> np.array:\n z = zlib.decompress(base64.b64decode(s))\n n = np.fromstring(z, np.uint8)\n \n return cv2.imdecode(n, cv2.IMREAD_UNCHANGED)[:, :, 3].astype(bool) * 1",
"def decode(base64img, size):\n # the bytes of the image ---> np array\n image_64_decode = base64.decodebytes(base64img.encode('utf-8'))\n image = Image.open(BytesIO(image_64_decode))\n\n # resize, and convert to RGB\n image = image.resize(size, Image.ANTIALIAS)\n image = image.convert('RGB')\n\n nparr = np.array(image)\n nparr = nparr[:,:,:3]\n return nparr",
"def decode_base64(in_str):\n import base64\n return base64.decodestring(in_str)",
"def base64_decode(n, encoding='ISO-8859-1'):\t\n decoded = base64.decodestring(n.encode('ascii'))\t\n return tonative(decoded, encoding)",
"def decode_base64(in_str):\n return base64.decodestring(in_str)",
"def decode_base64(data):\n\n image = None\n try:\n image = base64.decodestring(data)\n except:\n print \"Could not decode base64 image from json\"\n\n return image",
"def convert_str_to_image(image_string):\n image = image_string.partition('base64,')[2]\n img_data = base64.b64decode(image)\n return img_data",
"def decode_base64(data):\n missing_padding = 4 - len(data) % 4\n if missing_padding:\n data += b'='* missing_padding\n return base64.decodestring(data)",
"def base64(s):\n return b64encode(s,'[]').replace('=','_')",
"def base64_data(cls, val):\n return cls('base64_data', val)",
"def decode_base64(data):\n missing_padding = len(data) % 4\n if missing_padding != 0:\n data += b'='* (4 - missing_padding)\n return base64.decodebytes(data)",
"def ascii_to_numpy(ascii_diagram, as_bytes=True):\n ascii_diagram = [list(i) for i in ascii_diagram]\n ascii_diagram = np.array(ascii_diagram)\n v_to_bytes = np.vectorize(to_bytes)\n return v_to_bytes(ascii_diagram) if as_bytes else ascii_diagram",
"def convertdataTOimage(data):\n data = data.partition(\",\")[2]\n padding = len(data)%4\n data += \"=\"*padding\n image = Image.open(BytesIO(b64decode(data)))\n return image",
"def _b64decode(self, string):\n import base64\n return base64.b64decode(string)",
"def _b64decode(self, string):\n import base64\n return base64.b64decode(string)",
"def decodeBase64(data):\n missing_padding = 4 - len(data)%4\n if missing_padding:\n data += b'='* missing_padding\n\n return base64.decodestring(data)",
"def from_base64(cls, b64_str, content_type=None):\n if b64_str is None:\n raise ValueError(\"Base 64 string should not be None\")\n # The decode function does not like taking unicode strings.\n # Additionally, the encoding alphabet should not container any unicode\n # symbols, so this aught to be safe.\n b64_str = str(b64_str)\n return DataMemoryElement(base64.urlsafe_b64decode(b64_str),\n content_type)",
"def decode_distance_matrix(encoded):\n bin_data = base64.decodestring(encoded.encode()) # From ascii string to decoded Bytes object\n flat = np.load(BytesIO(bin_data), allow_pickle=False) # Load the flattened numpy array\n return unflatten_distance_matrix(flat)",
"def decode_base64(self, s):\n return self.transcode(struct.unpack('!L', base64.b64decode(s + '==', self.extra_chars))[0])",
"def data64(self, value: str) -> None:\n self.data = Image.decode64(value)",
"def de_base64(msg):\n try:\n msg_ascii = msg.encode('ascii')\n msg_bytes = base64.b64decode(msg_ascii)\n msg_decoded = msg_bytes.decode('ascii')\n return msg_decoded\n except:\n print('Invalid base64-encoded string')",
"def decode_data ( data ) :\n cipher = get_cipher( data )\n index = 0\n firstpass = []\n datalen = len( data )\n while index < datalen :\n if index % 2 == 0 :\n firstpass.append( chr( ord( data[ index ] ) - cipher ) )\n else :\n firstpass.append( chr( ord( data[ index ] ) + cipher ) )\n index += 1\n\n firstpass[ 0 ] = data[ 0 ]\n firstpass[ -1 ] = data[ -1 ]\n firstpass[ -2 ] = data[ -2 ]\n decoded_data = ''.join( firstpass )\n return base64.b64decode( decoded_data )",
"def base64_string(self) -> global___Expression:",
"def decode_data(data):\n try:\n return json_decode(base64.b64decode(data))\n except:\n return None"
]
| [
"0.8184459",
"0.79700077",
"0.7619043",
"0.754169",
"0.7244293",
"0.71252644",
"0.6947376",
"0.659718",
"0.6586803",
"0.65722215",
"0.65642273",
"0.6504433",
"0.64950395",
"0.6386613",
"0.63705456",
"0.63646716",
"0.63078606",
"0.62903976",
"0.62824816",
"0.62819695",
"0.62819695",
"0.62278956",
"0.62088114",
"0.61908686",
"0.61706984",
"0.61417174",
"0.6053029",
"0.60084605",
"0.5999597",
"0.59821004"
]
| 0.88050646 | 0 |
The function data_2_base64 converts a NumPy array to a base64-encoded string. | def data_2_base64(data: np.ndarray) -> str:
bytes_io = io.BytesIO()
np.save(bytes_io, data, allow_pickle=False)
return base64.b64encode(zlib.compress(bytes_io.getvalue())).decode('utf-8') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def base64_encode_array(inArray):\n return base64.b64encode(inArray)",
"def _encode_base64(data: str) -> str:\n ebytes = base64.b64encode(data.encode(\"utf-8\"))\n estring = str(ebytes, \"utf-8\")\n return estring",
"def _encodeArray(self, array):\n\n # Actually, we want dtype,naxis,axNlen,base64(array)\n return base64.b64encode(array.tostring())",
"def base64(s):\n return b64encode(s,'[]').replace('=','_')",
"def base64_string(self) -> global___Expression:",
"def np_to_base64(img_np):\n img = Image.fromarray(img_np.astype('uint8'), 'RGB')\n buffered = BytesIO()\n img.save(buffered, format=\"PNG\")\n return u\"data:image/png;base64,\" + base64.b64encode(buffered.getvalue()).decode(\"ascii\")",
"def base64_encode(data):\n return base64.encodestring(data);",
"def base64_encode_image(inArray):\n imgDat = [base64_encode_array(inArray).decode(\"utf-8\")]\n imgType = str(inArray.dtype)\n imgShape = inArray.shape\n return json.dumps([ imgDat, imgType, imgShape ])",
"def fn_base64(self, value):\n if isinstance(value, str):\n value = value.encode()\n return base64.b64encode(value).decode()",
"def to_base64(a):\n\treturn str(\n\t\tbinascii.b2a_base64(\n\t\t\ta.tostring()\n\t\t).strip(),\n\t\t\"UTF-8\"\n\t)",
"def getbase64(nparr,):\n if type(nparr) == type({}):\n nparr = nparr['img']\n im = Image.fromarray(nparr)\n buf = BytesIO()\n im.save(buf,format=\"JPEG\")\n return base64.b64encode(buf.getvalue()).decode('ascii')",
"def b64enc(data: bytes) -> str:\n\n return base64.standard_b64encode(data).decode(\"utf-8\")",
"def base64_2_data(s: str) -> np.ndarray:\n saved_bytes = io.BytesIO(zlib.decompress(base64.b64decode(s)))\n return np.load(saved_bytes)",
"def my_base64encode(s):\n return base64.b64encode(s).decode(\"utf-8\")",
"def base64(self):\n image = self.png.getvalue()\n return base64.encodestring(image).decode('utf-8')",
"def data64(self) -> str:\n return Image.encode64(self.data)",
"def encode_data(data):\n bytes = json.dumps(data).encode('utf-8').encode('base64').replace('\\n', '')\n assert len(bytes) < 250 * 1024\n return bytes",
"def from_base64(x):\n\treturn numpy.fromstring(\n\t\tbinascii.a2b_base64(x),\n\t\tdtype=numpy.uint8\n\t)",
"def b64_json_enc(data):\n json_str = json.dumps(data)\n return base64.b64encode(json_str.encode()).decode()",
"def base64_data(cls, val):\n return cls('base64_data', val)",
"def _b64_encode(data):\n enc = base64.b64encode(data)\n return enc.translate(B64_TO_BCRYPT, '=')",
"def _b64(b):\n return base64.urlsafe_b64encode(b).decode('utf8').replace(\"=\", \"\")",
"def btoa(a):\n return base64.b64encode(a.encode()).decode()",
"def b64_of_bytes(data: bytes) -> str:\n return base64.b64encode(data).decode()",
"def convert_to_base64(str):\n str_bytes = str.encode(\"utf-8\")\n str_bytes_base64 = base64.b64encode(str_bytes)\n str_base64 = str_bytes_base64.decode(\"utf-8\")\n return str_base64",
"def image_to_base64str(image):\n file_bytes = image.file.read()\n base64_img_str = 'data:image;base64, '\n base64_img_str += str(base64.b64encode(file_bytes), 'utf-8')\n return base64_img_str",
"def base64_encode(text):\n if not isinstance(text, (bytes, bytearray)):\n text = bytes(text.encode())\n encode = base64.b64encode(text)\n return encode.decode('ascii')",
"def base64_filter(val, indent=2):\n if isinstance(val, Undefined):\n return \"\"\n s = json.dumps(val).encode(\"utf-8\")\n return b64encode(s).decode(\"utf-8\")",
"def base64ify(bytes_or_str):\n if isinstance(bytes_or_str, str):\n input_bytes = bytes_or_str.encode(\"utf8\")\n else:\n input_bytes = bytes_or_str\n\n output_bytes = base64.urlsafe_b64encode(input_bytes)\n return output_bytes.decode(\"ascii\")",
"def save_img_base64(_preds):\n img = Image.fromarray(_preds)\n buff = BytesIO()\n img.save(buff, format=\"JPEG\")\n return base64.b64encode(buff.getvalue())"
]
| [
"0.7588745",
"0.7516981",
"0.7438014",
"0.7362127",
"0.7350758",
"0.7344294",
"0.7296927",
"0.72549736",
"0.7129535",
"0.7069589",
"0.70046437",
"0.6933189",
"0.689586",
"0.6836253",
"0.68321025",
"0.6796364",
"0.67913944",
"0.67715025",
"0.67699975",
"0.67620367",
"0.6727047",
"0.67143065",
"0.66850245",
"0.6636276",
"0.6588172",
"0.6565462",
"0.6555536",
"0.6551874",
"0.6534863",
"0.6532713"
]
| 0.87411064 | 0 |
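
A minimal round-trip sketch for the `data_2_base64` record above. It pairs the encoder with the matching decoder that appears among the negatives (`base64_2_data`), so the only assumptions are the standard-library modules and numpy.

import base64
import io
import zlib

import numpy as np

def data_2_base64(data: np.ndarray) -> str:
    # Serialize the array to .npy bytes, compress them, then base64-encode.
    bytes_io = io.BytesIO()
    np.save(bytes_io, data, allow_pickle=False)
    return base64.b64encode(zlib.compress(bytes_io.getvalue())).decode('utf-8')

def base64_2_data(s: str) -> np.ndarray:
    # Reverse the steps: base64-decode, decompress, reload the .npy bytes.
    saved_bytes = io.BytesIO(zlib.decompress(base64.b64decode(s)))
    return np.load(saved_bytes)

if __name__ == '__main__':
    original = np.arange(12, dtype=np.float32).reshape(3, 4)
    restored = base64_2_data(data_2_base64(original))
    assert np.array_equal(original, restored)
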
Returns a REST API source of ``Player`` data. It is assumed the REST API returns player data in the expected JSON format. | def get_rest_data_source(uri):
def players_from_rest():
response = requests.get(uri)
response.raise_for_status()
return parse_players_json(response.text)
return players_from_rest | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_player_info(player_name):\r\n\r\n api_url = api_url_base + player_name\r\n\r\n response = requests.get(api_url, headers=headers)\r\n\r\n if response.status_code >= 500:\r\n print('[!] [{0}] Server Error'.format(response.status_code))\r\n return None\r\n elif response.status_code == 404:\r\n print('[!] [{0}] URL not found: [{1}]'.format(response.status_code,api_url))\r\n return None\r\n elif response.status_code == 401:\r\n print('[!] [{0}] Authentication Failed'.format(response.status_code))\r\n return None\r\n elif response.status_code >= 300:\r\n print('[!] [{0}] Unexpected Redirect'.format(response.status_code))\r\n return None\r\n elif response.status_code == 200:\r\n return json.loads(response.content)\r\n else:\r\n print('[?] Unexpected Error: [HTTP {0}]: Content: {1}'.format(response.status_code,response.content))\r\n return None",
"async def get_players(self):\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/leaderboard/3v3?locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n output = {}\r\n for player in range(0, 965):\r\n output[int(player)] = data['rows'][player]\r\n with open('Pvp_Players.json', 'w') as pvp_players:\r\n json.dump(output, pvp_players)\r\n return output",
"def player_from_raw(data: Dict[str, Any]) -> andesite.Player:\n return build_from_raw(andesite.Player, data)",
"def test_retrieve_players(self):\n Player.objects.create(name='Mayita', victories=0,\n defeats=0)\n Player.objects.create(name='Moiso', victories=0,\n defeats=0)\n\n res = self.client.get(PLAYERS_URL)\n\n players = Player.objects.all().order_by('-name')\n serializer = PlayerSerializer(players, many=True)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)",
"def get(self, player_id):\n current_player = DBPlayer.query.get(player_id)\n if not current_player:\n return get_response(404, 'Not exists.')\n results_wrapper = marshal(current_player, player_fields)\n return get_response(200, 'Got.', results_wrapper)",
"def test_get_player(self):\n pass",
"def player_data(self):\n return self._player",
"def fetch_players_stats():\n players_scraper = PlayerStatsScraper(API_URL, API_HEADERS)\n result = players_scraper.save_objects()\n return result",
"def player_get_by_id(player_id):\n player = player_ctrl.get(player_id=player_id)\n\n response = {\n \"status\": 200,\n \"id\": player.id,\n \"shortname\": player.shortname,\n \"fullname\": player.fullname,\n \"backnumber\": player.backnumber,\n \"height\": player.height,\n \"weight\": player.weight,\n \"nation\": player.nation,\n \"team_id\": player.team_id,\n \"avatar\": player.avatar_json,\n }\n\n return jsonify(response)",
"def players(self, game: str) -> Response:\n\n endpoint = '/api/players'\n query = f'?game={game}'\n return self.fetch(endpoint, query)",
"def get(self, id=None):\n if not id:\n position = request.args.get(\"position\")\n logger.info(\n f\"Retrieving all players, optionally filtered by position={position}\"\n )\n\n return self._get_all_players(position), 200\n\n logger.info(f\"Retrieving player by id {id}\")\n\n try:\n return self._get_player_by_id(id), 200\n except NoResultFound:\n abort(404, message=\"Player not found\")",
"async def get_player(self) -> Optional[andesite.Player]:\n ...",
"def get_players_data(players):\n\n users_response = requests.get(\n url=f'{settings.USER_MANAGER_URL}/user-data/',\n params={'player_id': players},\n timeout=5 # in sec\n )\n if users_response.status_code == 200:\n return users_response.json().get('players')\n return {}",
"def getindex(self):\n players = [dict(plr) for plr in meta.Session.query(model.Player).all()]\n return {'success': True, 'data': players}",
"def getPlayerInfo(name):\n r = []\n ids = players[name]\n r_url = get('people', {'personIds': ids,\n 'ver': 'v1'})\n #constants.BASE_URL + \"/people/{}\".format(ids)\n\n return r_url",
"def get_player(self):\n return self.player",
"def get_player(self):\n return self.player",
"async def get_self(self) -> PlayerInfo:\n e = await self.request.request(url=f'https://users.roblox.com/v1/users/authenticated', method='get')\n a = PlayerInfo(player_id=e['id'], request=self.request)\n await a.update()\n return a",
"def players():\n try:\n return template('players.html', players=SERVER.players.values())\n except RoboBattleshipException as e:\n return JsonResponse.error(e)\n except:\n LOG.exception(\"Failed to show a list of all registered players on the \"\n \"server\")\n return JsonResponse.error(101)",
"def read_player_data(self, player_file):\n\n #read a single players file\n with open(self.player_path + player_file, 'r') as f:\n data = json.load(f)\n f.close()\n return(data)",
"def get_player(self):\r\n return self.player_control.get_player()",
"def get_player(self):\n return self._pubg.player(self.player_id, self.shard)",
"def player_list():\n page = request.args.get(\"page\", \"1\")\n count = request.args.get(\"count\", \"12\")\n team_id = request.args.get(\"team_id\")\n\n if not team_id:\n raise BadRequest(\"Nama team tidak boleh kosong\")\n\n # type conversion\n page = int(page)\n count = int(count)\n team_id = int(team_id)\n\n player = player_ctrl.get_list(page=page, count=count, team_id=team_id)\n\n response = {\n \"status\": 200 if player.items != [] else 204,\n \"has_next\": player.has_next,\n \"has_prev\": player.has_prev,\n \"total\": player.total,\n \"result\": _entity_player_list(player.items)\n }\n\n return jsonify(response)",
"def get(self, player_name):\n player = self._get_player(player_name)\n return player['data'''] if player else None",
"def get_player_origin(self, player_name: str, show_all_hits: bool = False) -> list:\n new_base_url: str = \"https://api.mozambiquehe.re/origin?\"\n new_base_url += f\"&player={player_name}\"\n if show_all_hits:\n new_base_url += \"&showAllHits\"\n return self._make_request(additional_params={}, new_base_url=new_base_url)",
"def read_by_api_id(player_api_id):\n if util.is_None(player_api_id):\n return None\n try:\n return Cache.get_element(player_api_id, \"PLAYER_BY_API_ID\")\n except KeyError:\n pass\n\n filter = {\"player_api_id\": player_api_id}\n try:\n sqllite_row = SQLLite.get_connection().select(\"Player\", **filter)[0]\n except IndexError:\n return None\n player = Player(sqllite_row[\"id\"])\n for attribute, value in sqllite_row.items():\n player.__setattr__(attribute, value)\n\n Cache.add_element(player.player_fifa_api_id, player, \"PLAYER_BY_FIFA_API_ID\")\n Cache.add_element(player.player_api_id, player, \"PLAYER_BY_API_ID\")\n Cache.add_element(player.player_name, player, \"PLAYER_BY_NAME\")\n Cache.add_element(player.id, player, \"PLAYER_BY_ID\")\n\n return player",
"def get_roster_players_via_api(self, team, season=None):\n # setting up empty list of players\n players = list()\n\n if season is None:\n season = str(retrieve_season())\n\n # creating stats api url with optional season parameter\n url = \"\".join((self.API_TEAM_SITE_PREFIX, str(team.team_id)))\n url_params = {\n 'expand': 'team.roster',\n 'season': \"%s%d\" % (season, int(season) + 1)\n }\n # retrieving data\n r = requests.get(url, params=url_params)\n team_data = r.json()\n\n if 'teams' not in team_data:\n logging.warn(\n \"+ %s not part of the league in %s/%d\" % (\n team, season, int(season) + 1))\n return players\n\n team_data = team_data['teams'][0]\n\n if 'roster' not in team_data:\n logging.warn(\n \"+ No roster found for %s/%d %s\" % (\n season, int(season) + 1, team))\n return players\n\n roster = team_data['roster']['roster']\n\n for plr_src in roster:\n # retrieving player if of current player in roster\n plr_id = plr_src['person']['id']\n # searching and optionally creating player with found player id\n plr = self.search_player_by_id(plr_id)\n players.append(plr)\n\n return players",
"def get_players_info(team_name):\n # hit this url in browser or postman like http://127.0.0.1:5000/getPlayersInfo/TeamName and it will return json data\n final_player_list = []\n if request.method == 'GET':\n team_res = Team.query.filter_by(team_name=team_name).first()\n if team_res:\n player_res = Player.query.filter_by(team_id=team_res.team_id).all()\n for rec in range(len(player_res)):\n player_info = {}\n player_info['Player_First_Name'] = player_res[rec].player_fname\n player_info['Player_Lirst_Name'] = player_res[rec].player_lname\n player_info['Team'] = team_name\n player_info['Player_ID'] = player_res[rec].player_id\n player_info['Team_ID'] = player_res[rec].team_id\n final_player_list.append(player_info)\n return json.dumps({\"TeamInformation\": final_player_list})\n else:\n return json.dumps({team_name: \"Team is not available\"})",
"async def do_playerlist():\n\n download = urllib.request.urlopen(server_api2)\n data = json.loads(download.read())\n player_list = []\n try:\n for i in data['players']['sample']:\n player_list.append(i['name'])\n except KeyError:\n if data['online'] == False:\n await bot.send_message(c, 'Failed. The server is offline.')\n return\n else:\n await bot.send_message(c, 'There are no players online.')\n return\n string = ''\n for i in player_list:\n string += '{}, '.format(i)\n await bot.send_message(c, string)",
"def get_currently_playing(self):\r\n return requests.get(\r\n f\"{API_URL}/me/player/currently-playing\",\r\n headers={\r\n \"Accept\": \"application/json\",\r\n \"Authorization\": f\"Bearer {self.access_token}\"\r\n }\r\n )"
]
| [
"0.6757945",
"0.6371991",
"0.6367396",
"0.6248512",
"0.6150757",
"0.6095749",
"0.60823464",
"0.6076938",
"0.6022456",
"0.6011816",
"0.60081345",
"0.5968988",
"0.59689283",
"0.5915473",
"0.59142816",
"0.5868256",
"0.5868256",
"0.5829394",
"0.57885414",
"0.5780477",
"0.5779154",
"0.57741165",
"0.57735527",
"0.5722641",
"0.5722567",
"0.5710327",
"0.57032776",
"0.5698842",
"0.56683415",
"0.5648685"
]
| 0.7371019 | 0 |
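
A usage sketch for the `get_rest_data_source` record above. The record assumes `requests`, a `parse_players_json` helper, and a `Player` type that are not shown; the stub parser and the URI below are hypothetical stand-ins.

import json

import requests

def parse_players_json(json_str):
    # Hypothetical stand-in: assume the API returns a JSON list of player objects.
    return json.loads(json_str)

def get_rest_data_source(uri):
    def players_from_rest():
        response = requests.get(uri)
        response.raise_for_status()
        return parse_players_json(response.text)
    return players_from_rest

# The factory returns a zero-argument callable; nothing is fetched until it is called.
source = get_rest_data_source('https://example.com/api/players')  # placeholder URI
# players = source()
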
Returns a file source of ``Player`` data. It is assumed the file contains player data in the expected JSON format. | def get_file_data_source(filename):
def players_from_file():
with open(filename, 'r') as f:
json_str = '\n'.join(f.readlines())
return parse_players_json(json_str)
return players_from_file | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_player_data(self, player_file):\n\n #read a single players file\n with open(self.player_path + player_file, 'r') as f:\n data = json.load(f)\n f.close()\n return(data)",
"def get_file(file):\n with open(file) as data_file:\n return json.load(data_file)",
"def load_player_list(self, team_file):\n\n # Returns a player object list\n\n team_data = self.read_team_data(team_file)\n data = []\n for player_file in team_data['player_files']:\n data.append(self.read_player_data(player_file))\n return(data)",
"def player_from_raw(data: Dict[str, Any]) -> andesite.Player:\n return build_from_raw(andesite.Player, data)",
"def loadPlayerFile (self):\n #print self.__filename\n if self.__filename == \"\":\n self.__setPlayerFilename()\n #print \"filename= \" + self.__filename \n try:\n #filename handled internally -- derive it from playerName\n# print self.__filename\n f = open(self.__filename, \"r\")\n tempIn = pickle.load(f)\n self.__playerName = tempIn.getPlayerName()\n self.setBestStepRun(tempIn.getBestStepRun())\n self.__songDictionary = tempIn.getAllSongs()\n self.setDifficulty(tempIn.getDifficulty())\n self.setHighScore(tempIn.getHighScore())\n self.setLevelReached(tempIn.getLevelReached())\n f.close() \n except IOError:\n raise PlayerIOError(\"Unable to read player info from file.\")",
"def load_player(filename):\n map_location = None if CUDA else lambda storage, loc: storage\n return torch.load(\"./Players/%s.pth\" % filename, map_location=map_location)",
"def get_file(filename: str) -> dict:\n return loads(open(f\"data/gamedata/{filename}.json\", \"r\", encoding=\"utf-8\").read())",
"def player_data(self):\n return self._player",
"def get_players(lf):\n players = {}\n if os.path.isfile(lf):\n for l in open(lf).readlines():\n m = re.match(r'(\\w+)\\s+(.*)$',l)\n if m:\n players[m.group(1)] = m.group(2)\n return players",
"def data_from_file(filename):\n with open(data_full_filename(filename)) as f:\n return json.loads(f.read())",
"def _get_data_file(self, data_path):\n\n return json.load(open(data_path))",
"def load_players_dict(self):\n player_dict = {}\n for player_file in os.listdir(self.player_path):\n player_name = player_file.split(sep='.')[0]\n player_dict[player_name] = player_file\n return(player_dict)",
"def get(self, player_name):\n player = self._get_player(player_name)\n return player['data'''] if player else None",
"def _read_source(self):\n \n if self.fileType == FTPythonCompiled or \\\n self.fileType == FTCompiledModule:\n return None\n \n filename = Filename(self.filename)\n filename.setExtension('py')\n try:\n file = open(filename, 'rU')\n except IOError:\n return None\n return file.read()",
"def get_data(self):\n with open(self.filepath, 'r') as openFile:\n data = json.load(openFile)\n logging.info('Loaded JSON data file.')\n return data",
"def get_file_data(filename):",
"def parsePlayerData():\n\ttry:\n\t\trawPlayerData = str(re.findall(bracketRegex, urllib.urlopen(mapURL).read())[0])\n\texcept:\n\t\tprint \"exception!\"\n\t\trawPlayerData = None\n\tif rawPlayerData is not None:\n\t\tfixedPlayerData = re.sub(\"'(\\d+)'\", '\\g<1>', rawPlayerData).replace(\"\\\\'\", \"\").replace(\"'\", '\"')\n\t\treturn json.loads(fixedPlayerData, 'latin1')",
"def read_team_data(self,team_file):\n with open(self.team_path + team_file, 'r') as f:\n data = json.load(f)\n f.close()\n return(data)",
"def load_source_file(self, idx: int, path: str = None) -> List[Any]:\n if self.dataset is None:\n raise ValueError('No known dataset found!')\n path = path if path is not None else self.path\n idx = int(max(0, min(idx, self._max_file_count-1)))\n file_name = self._file_format.format(idx)\n with gzip.open(os.path.join(path, file_name), 'r') as file:\n json_data = file.read().decode()\n data = json.loads(json_data)\n return data",
"def load(cls):\n playerdata = Data.raw_load(\"savedata.dat\")\n for key in playerdata:\n cls.name = playerdata[\"name\"]\n cls.max_hp = playerdata[\"max_hp\"]\n cls.hp = playerdata[\"hp\"]\n cls.lv = playerdata[\"lv\"]\n cls.exp = playerdata[\"exp\"]\n cls.atk = playerdata[\"atk\"]\n cls._def = playerdata[\"_def\"]\n cls.inventory = playerdata[\"inventory\"]\n cls.pin = playerdata[\"pin\"]",
"def get_file_obj(self, file):\n repository = \"{}/{}\".format(self.org, self.repo)\n ghrepo = self.github.get_repo(repository)\n obj = ghrepo.get_contents(file)\n return obj",
"def get_rest_data_source(uri):\n def players_from_rest():\n response = requests.get(uri)\n response.raise_for_status()\n return parse_players_json(response.text)\n return players_from_rest",
"def getPlayerFilename(self):\n if (self.__playerName != \"???\"):\n return self.__filename\n else:\n return \"\"",
"def make(filename):\r\n\r\n # Source file is csv file\r\n extension = \".csv\"\r\n if filename.endswith(extension):\r\n return JSONFromCSV(re.sub((extension + \"$\"), \"\", filename))\r\n\r\n return None",
"def load_user_data():\n try:\n with open(filename) as file_obj:\n username = json.load(file_obj)\n except FileNotFoundError:\n return None\n else:\n return username",
"def input_data(self):\n return read_json(self.file_path)",
"def load(source_file):\n return loads(source_file.read())",
"def _load_json_data(filename):\n\n relative_path = join(\"data\", filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n with open(absolute_path) as data_file:\n return json.loads(data_file.read())",
"def getTrialData():\n with open('data/trialdata.txt', 'r') as f:\n data = json.load(f)\n return data",
"async def json_protocol_source(tmp_path: Path) -> ProtocolSource:\n simple_protocol = (\n get_shared_data_root() / \"protocol\" / \"fixtures\" / \"6\" / \"simpleV6.json\"\n )\n return await ProtocolReader().read_saved(files=[simple_protocol], directory=None)"
]
| [
"0.7611685",
"0.615944",
"0.6125585",
"0.60973024",
"0.6078081",
"0.59851694",
"0.58206",
"0.5683008",
"0.5597726",
"0.55805624",
"0.5526197",
"0.55232203",
"0.552299",
"0.5507355",
"0.5463816",
"0.54605025",
"0.54236525",
"0.53874636",
"0.5384293",
"0.5369403",
"0.53625804",
"0.5355897",
"0.53512895",
"0.5349626",
"0.534545",
"0.5342711",
"0.5340639",
"0.53220004",
"0.53129935",
"0.5287675"
]
| 0.8262929 | 0 |
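
A usage sketch for the `get_file_data_source` record above, with the same hypothetical `parse_players_json` stub; the file path is a placeholder.

import json

def parse_players_json(json_str):
    # Hypothetical parser; the record assumes it but does not define it.
    return json.loads(json_str)

def get_file_data_source(filename):
    def players_from_file():
        with open(filename, 'r') as f:
            # f.read() gives the same JSON content as the record's
            # '\n'.join(f.readlines()), which only adds blank lines the parser ignores.
            return parse_players_json(f.read())
    return players_from_file

# players = get_file_data_source('players.json')()  # placeholder path
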
Returns a generated source of ``Player`` data. Each generated player is assigned a random skill rating for skating, shooting, and checking between ``min_rating`` and ``max_rating``. | def get_generated_data_source(num_players, min_rating=DEFAULT_MIN_RATING, max_rating=DEFAULT_MAX_RATING):
return lambda: generate_players(num_players, min_rating, max_rating) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_player_data():\n names = [\"Gunther O'Brian\",\n 'Workman Gloom',\n 'Esme Ramsey',\n 'Cornelius Games',\n 'Kline Greenlemon',\n 'Hotbox Sato',\n 'Famous Owens',\n 'Jenkins Good']\n nums = [77, 31, 37, 6, 14, 53, 7, 64]\n avgs = [0.40666, 0.118451, 0.400093, 0.335117,\n 0.425694, 0.353378, 0.179842, 0.246856]\n\n return names, nums, avgs",
"def get_player_stats() -> List[BaseStat]:\n return [BoostStat(),\n PositionalTendencies(),\n Averages(),\n BallDistanceStat(),\n ControlsStat(),\n SpeedTendencies(),\n CarryStat(),\n PerPossessionStat(),\n SpeedTendencies(),\n RumbleItemStat(),\n KickoffStat(),\n DropshotStats(),\n DemoStat()\n ]",
"def random(self=None, sample=100, min=0, max=100):\r\n\t\treturn DataStatistics([randint(min, max) for i in range(sample)])",
"def returnPlayerStats(self):\n\t\tplayerStats = [self.name, \n\t\t\t\t\t self.agility, \n\t\t\t\t\t self.personality, \n\t\t\t\t\t self.sanity, \n\t\t\t\t\t self.strength, \n\t\t\t\t\t self.progress]\n\t\treturn playerStats",
"def __init__(self,player,skill,difficulty):\n\n # Lowest possible skill roll == skill lvl - player lvl (or 0)\n lower_bound = max(0,skill.level - player.level)\n\n # Highest possible skill roll == skill lvl + 2*player level\n upper_bound = skill.level + (2*player.level)\n\n # Sets critical range (upper percentile to be considered crit)\n crit_range = player.crit_level / 100\n\n self.roll = random.randint(lower_bound,upper_bound)\n if (self.roll/upper_bound) > (1-crit_range):\n self.crit=True\n else:\n self.crit=False\n\n if self.roll >= difficulty:\n self.hit=True\n else:\n self.hit=False\n\n return self.hit, self.crit",
"def generateEnemyStats(healthRange, powerRange, smartsRating):\n\n stats = {\n 'healthRating': healthRange,\n 'powerRating': powerRange,\n 'smartsRating': smartsRating\n }\n return stats",
"def player_from_raw(data: Dict[str, Any]) -> andesite.Player:\n return build_from_raw(andesite.Player, data)",
"def _get_player_data(self, pids):\n\t\tplayer_ratings = np.full(len(pids), self.x0, dtype=float)\n\t\tplayer_variances = np.full(len(pids), self.P0, dtype=float)\n\t\tplayer_obs_times = np.full(len(pids), np.datetime64('2010-01-01'))\n\t\tfor i, pid in enumerate(pids):\n\t\t\tif pid in self.current_player_ratings.keys():\n\t\t\t\tplayer_ratings[i] = self.current_player_ratings[pid]['rating']\n\t\t\t\tplayer_variances[i] = self.current_player_ratings[pid]['variance']\n\t\t\t\tplayer_obs_times[i] = self.current_player_ratings[pid]['last_obs']\n\t\treturn player_ratings, player_variances, player_obs_times",
"def generate_random_character(name, max_health, min_health, max_strength,\n min_strength):\n return Character(random.randint(min_strength, max_strength),\n random.randint(min_health, max_health),\n name, random.random(), random.random())",
"def create_random_player(name=\"\", level=0, race=RACE.NONE, sex=SEX.NONE, way=WAY.NONE):\n if not name and name != \"\":\n log.bug(\"name non รจ un parametro valido: %r\" % name)\n return\n\n if level < 0 or level > config.max_level:\n log.bug(\"level non รจ un parametro valido: %d\" % level)\n return\n\n if not race:\n log.bug(\"race non รจ un parametro valido: %r\" % race)\n return\n\n if not sex:\n log.bug(\"sex non รจ un parametro valido: %r\" % sex)\n return\n\n if not way:\n log.bug(\"way non รจ un parametro valido: %r\" % way)\n return\n\n # -------------------------------------------------------------------------\n\n player = Player()\n player = create_random_mob(player, name, level, race, sex, way)\n\n # Ora che player possiede una razza ed un sesso puรฒ creare un nome\n # casuale se questo non รจ stato passato\n if not player.name:\n player.name = create_random_name(player.race, player.sex, is_player_name=True)\n player.code = remove_colors(player.name.lower())\n\n # Crea il giocatore con i dati di base\n # (TD) dovrei impostare casualmente tanti altri attributi\n player.flags.randomize()\n create_random_reputations(player)\n\n return player",
"def single_player_rater(player_name):\n ros_proj_b_list = BatterProjection.objects.all()\n ros_proj_p_list = PitcherProjection.objects.all()\n player = single_player_rater_html(player_name, ros_proj_b_list, ros_proj_p_list)\n player_stats = \"\"\n if any(\"P\" in pos for pos in player.pos):\n player_stats = (\"${player.dollarValue:^5.2f} - {player.name:^25} - {player.pos:^25}\" +\n \" - {player.wins:^3} - {player.svs:^2} - {player.sos:^3}\" +\n \"- {player.era:^4} - {player.whip:^4}\\n\").format(player=player)\n else:\n player_stats = (\"${player.dollarValue:^5.2f} - {player.name:^25} - {player.pos:^25}\" +\n \" - {player.runs:^3} - {player.hrs:^2} - {player.rbis:^3}\" +\n \" - {player.sbs:^2} - {player.ops:^5}\\n\").format(player=player)\n\n return player_stats",
"def create(self, validated_data):\n user_data = validated_data.pop('user')\n user = UserSerializer.create(UserSerializer(), validated_data=user_data)\n player, created = UserPlayer.objects.update_or_create(user=user,\n xp=validated_data.pop('xp'),\n score=validated_data.pop('score'))\n player.save()\n avatar_data = validated_data.pop('avatar')\n for avatar in avatar_data:\n player.avatar.add(avatar)\n player.save()\n return player",
"def sample(self):\r\n \r\n # the experiences of the minibatch are choosed randomly (the minibatch has the size batch_size)\r\n indices = np.random.randint(0, len(self.data), self.batch_size)\r\n states, actions, rewards, next_states, finishs = [], [], [], [], []\r\n \r\n # we add the experience in the minibatch\r\n for i in indices:\r\n states.append(self.data[i][0])\r\n actions.append(self.data[i][1])\r\n rewards.append(self.data[i][2])\r\n next_states.append(self.data[i][3])\r\n finishs.append(self.data[i][4])\r\n \r\n # converting numpy arrays to float tensors (pytorch can't work with numpy array)\r\n return states, torch.FloatTensor(actions), torch.FloatTensor(rewards), \\\r\n next_states, torch.FloatTensor(finishs)",
"def create_samples(self, skills_sample_fraction=1.0, users_sample_fraction=1.0):\n # Sampling\n self.sample_skills_to_be_covered(skills_sample_fraction)\n self.sample_users(users_sample_fraction)",
"def sample(self):\n\n global current_date # consider an alternative\n random_second = MAX_NUM_SECS_BETWEEN_EVENTS * random.random()\n current_date = current_date + dt.timedelta(seconds=random_second)\n data = {\n 'user_id': self.id,\n 'timestamp': current_date,\n 'location': {\n 'lat': fake.coordinate(\n center=self.home_location[0],\n radius=self.mobility\n ),\n 'lng': fake.coordinate(\n center=self.home_location[1],\n radius=self.mobility\n )\n },\n 'cough': random.random() > self.general_health,\n 'temperature': random.random() > self.general_health,\n }\n\n return data",
"def testrandom(self):\n for i in range(100):\n WeaponAbility()",
"def randomize(self):\n if self.randomize_players is True:\n random.shuffle(self.player_field)",
"def generate_data():\n player_df = get_players_df(2018)\n stats_df = construct(2018, player_df[\"PlayerID\"])\n stats_df['NAME'] = player_df['FirstName'] + \" \" + player_df['LastName']\n stats_df[\"MPG\"] = pd.to_numeric(stats_df[\"MPG\"])\n stats_df.drop(stats_df[stats_df[\"MPG\"] < 15].index, inplace=True)\n stats_df.to_csv(\"data.csv\", index=False)",
"def __init__(\r\n self,\r\n player_count=4,\r\n strategy=[HumanRandom(), HumanRandom(), HumanRandom(), HumanRandom()],\r\n rules=None,\r\n ):\r\n # shuffle the cards\r\n shuffle(self.community_cards)\r\n shuffle(self.chance_cards)\r\n\r\n self.player_positions = [0] * player_count\r\n self.current_player = randint(0, player_count - 1)\r\n self.player_list = []\r\n for i in range(player_count):\r\n self.player_list.append(\r\n Player(uid=i, token=self.token[i], strategy=strategy[i])\r\n )\r\n self.full_turn_count = 1",
"def load_ratings():\n\n print \"Ratings\"\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate users\n Rating.query.delete()\n\n # Read u.data file and insert data\n for row in open(\"seed_data/u.data\"):\n row = row.rstrip()\n user_id, movie_id, score, timestamp = row.split(\"\\t\")\n\n user_id = int(user_id)\n movie_id = int(movie_id)\n score = int(score)\n\n #from rating class take the movie_id and make it equal to the movie_id \n #from the for loop above. We are calling it to make an instance of the rating\n #class\n rating = Rating(movie_id=movie_id, user_id=user_id, score=score)\n \n #We need to add to the session or it won't ever be stored\n db.session.add(rating)\n\n #Once we're done, we should commit our work\n db.session.commit()",
"def skills():\n all_skills = [\"strength\", \"patience\", \"cleanliness\", \"leadership\", \"communication\",\n \"science\", \"math\", \"engineering\", \"intelligence\", \"driving\"]\n\n random.shuffle(all_skills)\n return all_skills[0:2]",
"def generate_products(self = random.sample, name = random.choice(result), price = random.randint(5, 100), weight = random.randint(5, 100), \nflammability= random.uniform(0, 2.5)):\n return sample",
"def get_player_data(self, player, season, mtgs=None, past=None, future=None, single=False):\n\n avail = []\n scheduled = []\n\n # Should be empty arrays if None\n if past is None:\n past = []\n if future is None:\n future = []\n\n nplayed = Schedule.objects.filter(meeting__in=past, player=player).count()\n nscheduled = Schedule.objects.filter(meeting__in=future, player=player).count()\n\n av = PlayerAvailability.objects.get_for_season_player(player, season)\n\n p = {\n 'name': player.first + ' ' + player.last,\n 'id': player.id,\n 'isavail': av.available,\n 'scheduled': av.scheduled,\n 'played': av.played,\n 'nplayed': nplayed,\n 'nscheduled': nscheduled + nplayed,\n 'single': single\n }\n\n return p",
"def get_player_codes():\n return {\n Player.GKP: Player.objects.goalkeepers().aggregate(Max('code'))['code__max'] or 1000,\n Player.DEF: Player.objects.defenders().aggregate(Max('code'))['code__max'] or 2000,\n Player.MID: Player.objects.midfielders().aggregate(Max('code'))['code__max'] or 3000,\n Player.STR: Player.objects.strikers().aggregate(Max('code'))['code__max'] or 4000,\n }",
"def load_ratings(self):\n logging.debug(\"Loading ratings data...\")\n\n # loading ratings\n data=requests.get(self.__URL_RATINGS)\n self.__dataframe_ratings=pd.DataFrame(data.json())\n # calculate implicit and explicit ratings\n # XXX use a function to calculate implicit rating considering the video lead time\n self.__dataframe_ratings['rating_implicit'] = (self.__dataframe_ratings['video_watch_time']/100) * 0.3\n self.__dataframe_ratings['rating_explicit'] = (self.__dataframe_ratings['rating_value']) * 0.7\n\n # create a new column to put implicit or explicit rating value\n self.__dataframe_ratings['overall_rating_value'] = self.__dataframe_ratings['rating_implicit'] + self.__dataframe_ratings['rating_explicit']\n\n logging.debug(\"Ratings data loaded! n=%s\" % self.__dataframe_ratings.shape[0])\n\n return self.__dataframe_ratings",
"def __init__(self, name=\"Player\", resources=[0,0,0,0,0,0,0,0], xor_resources=None,\\\n current_hand=None, structures=None, starting_gold=3, discounted_resources=None):\n if structures != None:\n self.structures = structures # by type? Should we have a structure type? \n else:\n self.structures = []\n \n self.name = name\n self.wonders = None \n player.west_natural= False\n player.west_manufactured = False\n player.east_natural= False\n player.east_manufactured= False\n\n if current_hand == None:\n self.current_hand = None\n else:\n self.current_hand = current_hand #I dont know if we need this\n self.starting_gold = starting_gold",
"def level_1_policy_player_1_func(self, y_max=1747, iteration=1, gp_samples=None):\n if not self.r2b2_light_player_1:\n # if R2-B2-Lite is not taken\n x_1, all_ucb = acq_max(ac=self.util_rr.utility, gp=self.gp_1, y_max=y_max, bounds=self.bounds, \\\n iteration=iteration, gp_samples=gp_samples, \\\n player_id=1, action_dist=self.action_dist_player_2, \\\n sub_domain_player_1=self.sub_domain_player_1, \\\n sub_domain_player_2=self.sub_domain_player_2, \\\n sampling_approximation=self.sampling_approximation)\n x_1 = x_1.reshape(1, -1)\n\n else:\n # if R2-B2-Lite is taken\n print(\"[R2-B2_Lite for Player 1]\")\n domain_ind = np.arange(self.sub_domain_player_2.shape[0])\n x_2_ind = np.random.choice(domain_ind, 1, p=self.action_dist_player_2)\n x_2_sim = self.sub_domain_player_2[x_2_ind, :]\n\n rep = np.tile(x_2_sim, (self.sub_domain_player_1.shape[0], 1))\n sub_domain = np.concatenate((self.sub_domain_player_1, rep), axis=1)\n\n para_dict = {\"gp\":self.gp_1, \"y_max\":y_max, \"iteration\":iteration, \"gp_samples\":gp_samples}\n all_ucb = []\n for x in sub_domain:\n all_ucb.append(-self.util.utility(x, para_dict)[0][0])\n all_ucb = np.array(all_ucb)\n x_1 = self.sub_domain_player_1[np.argmax(all_ucb)]\n x_1 = x_1.reshape(1, -1)\n\n return x_1, all_ucb",
"def __initStats(self):\n players = self.teamparser.getPlayers()\n try:\n stats = players[(self.team, self.position)]\n except KeyError, err:\n stats = (0, 0, 0, 0, 0, 0)\n raise TypeError, \"Invalid Team/Position: \" + self.team\n self.max = int(stats[0]) #maximum\n self.ma = int(stats[1]) #movement\n self.st = int(stats[2]) #strength\n self.ag = int(stats[3]) #agility\n self.av = int(stats[4]) #armor value\n self.costs = int(stats[5]) #costs\n self.injury = 0 #injury\n self.ssp = 0 #starplayerpoints\n self.touchdowns = 0 #touchdown\n self.completions = 0 #completions\n self.interceptions = 0 #interceptions\n self.casualties = 0 #casualties\n self.mvpawards = 0 #most valuable player awards",
"def get_current_ratings(self) -> DataFrame:\n df = self.player_df.copy()\n df[\"rating\"] = df[\"player\"].apply(lambda x: x.rating)\n df[\"n_games\"] = df[\"player\"].apply(lambda x: x.count_games())\n df = df.sort_values(\"player\", ascending=False).reset_index(drop=True)\n df[\"rank\"] = range(1, df.shape[0] + 1)\n df = df[[\"rank\", \"player_id\", \"n_games\", \"rating\"]]\n return df",
"def createPlayer(self):\n sw, ne = self.playerCreationRectangle\n x = self.random.randrange(sw.x, ne.x)\n y = 1.0\n z = self.random.randrange(sw.y, ne.y)\n player = Player(Vector(x, y, z), 2, self.seconds)\n for observer in self.observers:\n observer.playerCreated(player)\n self.players.append(player)\n return player"
]
| [
"0.5868942",
"0.5538956",
"0.52820337",
"0.5259538",
"0.5210891",
"0.51927793",
"0.51602036",
"0.5145461",
"0.51008767",
"0.5097428",
"0.5094346",
"0.5041324",
"0.50287414",
"0.50021684",
"0.5001608",
"0.50004065",
"0.49834913",
"0.49777955",
"0.49602842",
"0.49199477",
"0.48919204",
"0.48904672",
"0.48901805",
"0.48876446",
"0.48861265",
"0.4881533",
"0.48694837",
"0.48542503",
"0.48525086",
"0.48430222"
]
| 0.7219732 | 0 |
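
A sketch of the `get_generated_data_source` record above with the pieces it assumes filled in: `generate_players`, the rating defaults, and the `Player` shape are all hypothetical here.

import random

DEFAULT_MIN_RATING = 1    # assumed defaults; the record does not show them
DEFAULT_MAX_RATING = 100

def generate_players(num_players, min_rating, max_rating):
    # Hypothetical Player shape: a dict with one random rating per skill.
    skills = ('skating', 'shooting', 'checking')
    return [
        {'name': 'Player %d' % i,
         **{skill: random.randint(min_rating, max_rating) for skill in skills}}
        for i in range(num_players)
    ]

def get_generated_data_source(num_players,
                              min_rating=DEFAULT_MIN_RATING,
                              max_rating=DEFAULT_MAX_RATING):
    return lambda: generate_players(num_players, min_rating, max_rating)

print(get_generated_data_source(3)())
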
Return all of the links between two nodes in a topology | def getlinks2(topology, node1, node2):
allLinks = topology.loadResources({"resourceType":"Link"})
links = []
for l in allLinks:
(dstNode,dstPort) = linkednode2(l,node1)
if (dstNode,dstPort) == (None, None):
continue
(dstNode,dstPort) = linkednode2(l,node2)
if (dstNode,dstPort) == (None, None):
continue
links.append(l)
return links | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_links(self, node): # pragma: no cover\n\t\traise NotImplementedError",
"def get_links(node_tree, socket):\n return tuple(link for link in node_tree.links\n if (link.from_socket == socket or\n link.to_socket == socket))",
"def get_links(self) -> list:\n result = []\n path = self._path\n lp = len(path)\n for link in self._parent().get_links():\n if link._node[\"first\"][:lp] == path:\n result.append(link)\n elif link._node[\"second\"][:lp] == path:\n result.append(link)\n return result",
"def get_links(self, node):\n\n # TODO: send help\n\n if not node.label:\n return []\n cls = node.__class__\n\n return [\n (\n e.__src_dst_assoc__,\n psqlgraph.Node.get_subclass_named(e.__dst_class__).label,\n )\n for e in psqlgraph.Edge._get_edges_with_src(cls.__name__)\n if hasattr(psqlgraph.Node.get_subclass_named(e.__dst_class__), \"project_id\")\n ] + [\n (\n e.__dst_src_assoc__,\n psqlgraph.Node.get_subclass_named(e.__src_class__).label,\n )\n for e in psqlgraph.Edge._get_edges_with_dst(cls.__name__)\n if hasattr(psqlgraph.Node.get_subclass_named(e.__src_class__), \"project_id\")\n ]",
"def connecting(node1, node2):\n comp_list = []\n \"\"\":type : list[components.Component]\"\"\"\n if node1 == node2:\n return []\n for comp in node1.connected_comps:\n if comp.neg == node2:\n comp_list.append(comp)\n elif comp.pos == node2:\n comp_list.append(comp)\n return comp_list",
"def load_links(graph, nodes):\n for edge in graph.get_edges():\n src = edge.get_source()\n dst = edge.get_destination()\n assert(src in nodes)\n assert(dst in nodes)\n nodes[src]['down'].add(dst)\n nodes[dst]['up'].add(src)",
"def build_links(self):\n xygrid = self.xymap.xygrid\n\n # we must use the xygrid coordinates\n x, y = self.x, self.y\n\n # scan in all directions for links\n for direction, (dx, dy) in MAPSCAN.items():\n\n lx, ly = x + dx, y + dy\n\n if lx in xygrid and ly in xygrid[lx]:\n link = xygrid[lx][ly]\n\n # just because there is a link here, doesn't mean it has a\n # connection in this direction. If so, the `end_node` will be None.\n end_node, weight, steps = link.traverse(REVERSE_DIRECTIONS[direction])\n\n if end_node:\n # the link could be followed to an end node!\n\n self.first_links[direction] = link\n\n # check the actual direction-alias to use, since this may be\n # different than the xygrid cardinal directions. There must be\n # no duplicates out of this node or there will be a\n # multi-match error later!\n first_step_name = steps[0].direction_aliases.get(direction, direction)\n if first_step_name in self.closest_neighbor_names:\n raise MapParserError(\n f\"has more than one outgoing direction '{first_step_name}'. \"\n \"All directions out of a node must be unique.\",\n self,\n )\n self.closest_neighbor_names[first_step_name] = direction\n\n node_index = end_node.node_index\n self.weights[node_index] = weight\n self.links[direction] = end_node\n # this is useful for map building later - there could be multiple\n # links tied together until getting to the node\n self.xy_steps_to_node[direction] = steps\n\n # used for building the shortest path. Note that we store the\n # aliased link directions here, for quick display by the\n # shortest-route solver\n shortest_route = self.shortest_route_to_node.get(node_index, (\"\", [], BIGVAL))[\n 2\n ]\n if weight < shortest_route:\n self.shortest_route_to_node[node_index] = (first_step_name, steps, weight)",
"def nodes(topology):\n return topology.nodes()",
"def component_links(self) -> List:\n if self.type in [\"execution_node\", \"super_node\"]:\n return self._node[\"inputs\"][0].get(\"links\", [])\n else:\n # binding nodes do not contain links\n return []",
"def iter_links(self):\n for site in self.iter_sites():\n for u in range(self.dim):\n yield tuple(list(site) + [u])",
"def nodes(self) -> tuple[Node, Node]:\n self.lab.sync_topology_if_outdated()\n return self.node_a, self.node_b",
"def neighbors(node, topology):\n return [n for n in topology[node]]",
"def calculate_paths(topology):\n nodes = topology['nodes']\n edges = topology['links']\n\n dist = [[len(nodes) + 1 for x in range(len(nodes))] for y in range(len(nodes))]\n paths = [[[] for x in range(len(nodes))] for y in range(len(nodes))]\n\n for e in edges.values():\n s, d = int(e['source']), int(e['target'])\n dist[s][d] = dist[d][s] = 1\n paths[s][d] = [e['id']]\n paths[d][s] = [e['id']]\n\n for k in range(len(nodes)):\n for i in range(len(nodes)):\n for j in range(len(nodes)):\n if dist[i][k] + dist[k][j] < dist[i][j]:\n dist[i][j] = dist[i][k] + dist[k][j]\n paths[i][j] = paths[i][k] + paths[k][j]\n return paths",
"def enumerate_links_around_node(self, node):\n\n l0 = self.node_link[node]\n l = l0\n edges = []\n traversing = True\n while traversing:\n edges.append(l)\n v = l[0]\n if v == node:\n l = self.pred_right[l]\n else:\n l = self.pred_left[l]\n if l0 == l:\n traversing = False\n if l0[1] == l[0] and l0[0] == l[1]:\n traversing = False\n #print v, l\n #raw_input('here')\n return edges",
"def relations_from(self, start_node):",
"def node_targets(self, node):\r\n node = self.coalesce_node(node)\r\n nodes =[conn[1] for conn in self.connections if conn[0] == node]\r\n return nodes",
"def all_pairs(self):\n return chain(self.nx_graph.edges(), nx.non_edges(self.nx_graph))",
"def set_connection_between_nodes(self):\n\n for i, node in enumerate(self.list_empty_nodes):\n line = node.labyrinth_position[0]\n column = node.labyrinth_position[1]\n\n for j in range(i+1, len(self.list_empty_nodes)):\n line_j = self.list_empty_nodes[j].labyrinth_position[0]\n column_j = self.list_empty_nodes[j].labyrinth_position[1]\n \n if i != j and ((line == line_j and column == column_j - 1) \\\n or (line == line_j and column == column_j + 1) \\\n or (column == column_j and line == line_j - 1) \\\n or (column == column_j and line == line_j + 1)) \\\n and (not node in self.list_empty_nodes[j].connected_to) \\\n and (not self.list_empty_nodes[j] in node.connected_to):\n node.connected_to.append(self.list_empty_nodes[j])\n self.list_empty_nodes[j].connected_to.append(node)",
"def _add_links_from_mergers(self):\n for i, node_name in enumerate(self.node_list):\n self.builder.addDirectedLink(node_name, self, islot=i)",
"def getReferences(self, fromnode):\n\n node = self.findNode(fromnode)\n out_edges, _ = self.get_edges(node)\n return out_edges",
"def relations_to(self, end_node):",
"def generate_graph(nodes):\n \n nodes = nodes.dropna()\n l = []\n edge_format = \"{0} {1}\"\n \n for x1, x2 in zip(nodes.shift(), nodes):\n if not(pd.isnull(x1) or pd.isnull(x2)):\n if x1 != x2:\n l.append(edge_format.format(x1, x2))\n \n return l",
"def multi_edge():\n from networkx.readwrite import json_graph\n import networkx as nx\n import autonetkit\n # returns a house graph\n data = {'directed': False,\n 'graph': [],\n 'links': [{'_ports': {'r4': 2, 'r5': 1},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 1},\n {'_ports': {'r2': 3, 'r4': 1},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 3},\n {'_ports': {'r2': 4, 'r4': 3},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 3},\n {'_ports': {'r3': 3, 'r5': 2},\n 'raw_interfaces': {},\n 'source': 1,\n 'target': 4},\n {'_ports': {'r1': 1, 'r2': 1},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 3},\n {'_ports': {'r1': 3, 'r2': 5},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 3},\n {'_ports': {'r1': 2, 'r3': 1},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r1': 4, 'r3': 4},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r1': 5, 'r3': 5},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r2': 2, 'r3': 2},\n 'raw_interfaces': {},\n 'source': 3,\n 'target': 4}],\n 'multigraph': True,\n 'nodes': [{'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r4 to r2', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r4 to r5', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r4 to r2', 'id': 'eth2'}},\n 'asn': 2,\n 'device_type': 'router',\n 'id': 'r4',\n 'label': 'r4',\n 'x': 675,\n 'y': 300},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r5 to r4', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r5 to r3', 'id': 'eth1'}},\n 'asn': 2,\n 'device_type': 'router',\n 'id': 'r5',\n 'label': 'r5',\n 'x': 675,\n 'y': 500},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r1 to r2', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r1 to r2', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r1',\n 'label': 'r1',\n 'x': 350,\n 'y': 400},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r2 to r1', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r2 to r3', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r2 to r4', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r2 to r4', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r2 to r1', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r2',\n 'label': 'r2',\n 'x': 500,\n 'y': 300},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r3 to r2', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r3 to r5', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r3',\n 'label': 'r3',\n 'x': 500,\n 'y': 500}]}\n graph = json_graph.node_link_graph(data)\n anm = autonetkit.anm.NetworkModel()\n g_in = anm.add_overlay(\"input\")\n g_in._replace_graph(nx.MultiGraph(graph))\n # TODO: check if should build overlays here rather 
than clone in?\n g_phy = anm[\"phy\"]\n g_phy._replace_graph(graph)\n return anm",
"def links (self):\n return (link for src, dst, link in self.network.edges_iter(data=True) if\n link.type == Link.STATIC or link.type == Link.DYNAMIC)",
"def links_summary(self, is_print=True):\n if not self.nodes:\n self.get_nodes()\n if not self.links:\n self.get_links()\n\n _links_summary = []\n for _l in self.links:\n if not _l.nodes:\n continue\n _side_a = _l.nodes[0]\n _side_b = _l.nodes[1]\n _node_a = [x for x in self.nodes if x.node_id == _side_a[\"node_id\"]][0]\n _port_a = [\n x[\"name\"]\n for x in _node_a.ports\n if x[\"port_number\"] == _side_a[\"port_number\"]\n and x[\"adapter_number\"] == _side_a[\"adapter_number\"]\n ][0]\n _node_b = [x for x in self.nodes if x.node_id == _side_b[\"node_id\"]][0]\n _port_b = [\n x[\"name\"]\n for x in _node_b.ports\n if x[\"port_number\"] == _side_b[\"port_number\"]\n and x[\"adapter_number\"] == _side_b[\"adapter_number\"]\n ][0]\n endpoint_a = f\"{_node_a.name}: {_port_a}\"\n endpoint_b = f\"{_node_b.name}: {_port_b}\"\n if is_print:\n print(f\"{endpoint_a} ---- {endpoint_b}\")\n _links_summary.append((_node_a.name, _port_a, _node_b.name, _port_b))\n\n return _links_summary if not is_print else None",
"def find_connected_links(self, node):\n connected_link_set = []\n if node in self.outgoing_links:\n for links in self.outgoing_links[node].values():\n connected_link_set.extend(links)\n \n if node in self.incoming_links:\n for links in self.incoming_links[node].values():\n connected_link_set.extend(links)\n \n return connected_link_set",
"def links(self):\n\t\treturn self.list_of_links",
"def learn_my_links(self):\n assert (self.graph != None)\n links = self.graph.edges()\n mylinks = []\n\n for link in links:\n u, v = link[:2]\n if (v in self.switches or u in self.switches):\n self.graph[u][v]['mylink'] = True\n mylinks.append((u, v))\n\n # remove duplicates\n self.mylinks = list(set(mylinks))",
"def connectNodes(imgR,nodes,start,goal):\n alphabet = string.ascii_lowercase\n nodeConnections = [[] for i in range(len(nodes)+2)]\n for index, node in enumerate(nodes):\n paths = adjPaths(imgR,node)\n for path in paths:\n result = checkPath(imgR,nodes,node,path)\n if not result == 0: \n nIndex = nodes.index(result)\n nodeConnections[index+1].append(alphabet[nIndex+1])\n paths = adjPaths(imgR,start) # add start to nodes\n for path in paths:\n result = checkPath(imgR,nodes,start,path)\n if not result == 0: \n nIndex = nodes.index(result)\n nodeConnections[0].append(alphabet[nIndex+1])\n for node in nodeConnections[0]:\n nodeConnections[alphabet.index(node)].append(alphabet[0])\n paths = adjPaths(imgR,goal) # add goal to nodes\n for path in paths:\n result = checkPath(imgR,nodes,goal,path)\n if not result == 0: \n nIndex = nodes.index(result)\n nodeConnections[len(nodeConnections)-1].append(alphabet[nIndex+1])\n for node in nodeConnections[len(nodeConnections)-1]:\n nodeConnections[alphabet.index(node)].append(alphabet[len(nodeConnections)-1])\n return [alphabet[i] for i in range(len(nodes)+2)], nodeConnections",
"def node_sources(self, node):\r\n node = self.coalesce_node(node)\r\n nodes =[conn[0] for conn in self.connections if conn[1] == node]\r\n return nodes"
]
| [
"0.68479264",
"0.67361224",
"0.66511023",
"0.6613033",
"0.6471993",
"0.6390997",
"0.63857716",
"0.6317723",
"0.6274998",
"0.6270319",
"0.62160325",
"0.6210695",
"0.6173468",
"0.61675924",
"0.6159017",
"0.61007774",
"0.60911804",
"0.6051444",
"0.60479224",
"0.59839463",
"0.5983186",
"0.5975178",
"0.5941893",
"0.5937959",
"0.5925788",
"0.59157044",
"0.5889761",
"0.5882579",
"0.58816224",
"0.58520275"
]
| 0.80981374 | 0 |
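
The `getlinks2` record above leans on a controller-specific topology API (`loadResources`, `linkednode2`, resource containers). Below is a sketch of the same two-endpoint filter over plain dictionaries; every structure and helper is a hypothetical stand-in, not the real API.

def linkednode2(link, node_name):
    # Hypothetical helper: if the link touches node_name, return the far end as
    # (remote_node, remote_port); otherwise return (None, None).
    a, z = link['a'], link['z']
    if a['node'] == node_name:
        return z['node'], z['port']
    if z['node'] == node_name:
        return a['node'], a['port']
    return None, None

def getlinks2(links, node1, node2):
    # Keep only the links that touch both node1 and node2, as in the record.
    result = []
    for link in links:
        if linkednode2(link, node1) == (None, None):
            continue
        if linkednode2(link, node2) == (None, None):
            continue
        result.append(link)
    return result

links = [
    {'a': {'node': 'core', 'port': '1'}, 'z': {'node': 'hwswitch', 'port': '7'}},
    {'a': {'node': 'core', 'port': '2'}, 'z': {'node': 'swswitch', 'port': '3'}},
]
print(getlinks2(links, 'core', 'hwswitch'))  # only the first link survives
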
In the following logical topology, the OSCARS circuit ends on the port on the core router connected to the HwSwitch. This function returns the port on the HwSwitch that is connected to the core router where the OSCARS circuit terminates. | def getgriport(topology,hwswitch,core,griport):
hwswitchname = hwswitch.resourceName
corename = core.resourceName
links = getlinks2(topology, corename, hwswitchname)
if links == None or len(links) == 0:
print "No links from",corename,"to",hwswitchname
return False
corelink = None
for link in links:
(node,port) = linkednode2(link, hwswitchname)
if port != None and port == griport:
            # found the link between HwSwitch and Core that ends at the OSCARS circuit.
corelink = link
break
(node,hwport_tocore) = linkednode2(corelink,corename)
return hwport_tocore | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_switch(self,host):\n switch_list = self.__graph_dict[host]\n switch_num = switch_list[0]\n return switch_num",
"def swconnect(localpop, remotepop, mac, vc, meter):\n core = Container.fromAnchor(localpop.properties['CoreRouter'])\n corename = core.resourceName\n (corename,coredom,coreport,corevlan) = getvcnode(vc, corename)\n remotecore = Container.fromAnchor(remotepop.properties['CoreRouter'])\n remotecorename = remotecore.resourceName\n (remotecorename,remotecoredom,remotecoreport,remotecorevlan) = getvcnode(vc, remotecorename)\n\n hwswitch = Container.fromAnchor(localpop.properties['HwSwitch'])\n hwswitchname = hwswitch.resourceName\n swswitch = Container.fromAnchor(localpop.properties['SwSwitch'])\n swswitchname = swswitch.resourceName\n\n remotehwswitch = Container.fromAnchor(remotepop.properties['HwSwitch'])\n remotehwswitchname = remotehwswitch.resourceName\n remoteswswitch = Container.fromAnchor(remotepop.properties['SwSwitch'])\n remoteswswitchname = remoteswswitch.resourceName\n\n topology = Container.getContainer(localpop.properties['SwSwitch']['containerName'])\n\n # Find hwswitch/port - core/port\n hwport_tocore = getgriport(topology, hwswitch, core, coreport)\n # Find remotehwswitch/port - remotecore/port\n remotehwport_tocore = getgriport(topology, remotehwswitch, remotecore, remotecoreport)\n\n links = getlinks2(topology, hwswitchname, swswitchname)\n if links == None or len(links) == 0:\n print \"No links from \", hwswitchname, \" to \", swswitchname\n return None\n hwswlink = None\n for l in links:\n (node, port) = linkednode2(l, swswitchname)\n if port != None:\n # Found the (a) link\n hwswlink = l\n hwport_tosw = port\n break\n\n remotelinks = getlinks2(topology, remotehwswitchname, remoteswswitchname)\n if remotelinks == None or len(remotelinks) == 0:\n print \"No links from \", remotehwswitchname, \" to \", remoteswswitchname\n return None\n remotehwswlink = None\n for l in remotelinks:\n (node, port) = linkednode2(l, remoteswswitchname)\n if port != None:\n # Found the (a) link\n remotehwswlink = l\n remotehwport_tosw = port\n break\n\n # Find the ports on hwswitch and remotehwswitch that go to the corresponding software switches\n\n # Set up forwarding for broadcast traffic from the new local pop\n # Install outbound flow on hwswitch from swswitch to the GRI\n fh1 = SCC.SdnInstallForward1(javaByteArray2(hwswitch.properties['DPID']),\n 1,\n BigInteger.ZERO,\n str(hwport_tosw), # hw port facing software switch\n int(corevlan),\n \"00:00:00:00:00:00\",\n mac,\n str(hwport_tocore),\n int(corevlan),\n mac,\n 0,\n 0,\n meter)\n if fh1 == None:\n return None\n\n # Install inbound flow on remotehwswitch from GRI to remoteswswitch\n fh2 = SCC.SdnInstallForward1(javaByteArray2(remotehwswitch.properties['DPID']),\n 1,\n BigInteger.ZERO,\n str(remotehwport_tocore),\n int(remotecorevlan),\n \"00:00:00:00:00:00\",\n mac,\n str(remotehwport_tosw), # remotehw port facing remote software switch\n int(remotecorevlan),\n mac,\n 0,\n 0,\n meter)\n if fh2 == None:\n SCC.deleteforward(fh1)\n return None\n\n # Set up forwarding for broadcast traffic to the new local pop\n # Install inbound flow on hwswitch from GRI to swswitch\n fh3 = SCC.SdnInstallForward1(javaByteArray2(hwswitch.properties['DPID']),\n 1,\n BigInteger.ZERO,\n str(hwport_tocore),\n int(corevlan),\n \"00:00:00:00:00:00\",\n mac,\n str(hwport_tosw), # hw port facing software switch\n int(corevlan),\n mac,\n 0,\n 0,\n meter)\n if fh3 == None:\n SCC.deleteforward(fh1)\n SCC.deleteforward(fh2)\n return None\n\n # Install outbound flow on remotehwswitch from remoteswswitch to GRI\n fh4 = 
SCC.SdnInstallForward1(javaByteArray2(remotehwswitch.properties['DPID']),\n 1,\n BigInteger.ZERO,\n str(remotehwport_tosw), # remotehw port facing remote software switch\n int(remotecorevlan),\n \"00:00:00:00:00:00\",\n mac,\n str(remotehwport_tocore),\n int(remotecorevlan),\n mac,\n 0,\n 0,\n meter)\n if fh4 == None:\n SCC.deleteforward(fh1)\n SCC.deleteforward(fh2)\n SCC.deleteforward(fh3)\n return None\n\n # Return something\n return (fh1, fh2, fh3, fh4)",
"def get_logical_port(self):\n return None",
"def sw_port(self):\n return self.raw.get('sw_port')",
"def Port(self) -> int:",
"def get_res_port():\n return get_port() + 1",
"def head_port(self):\n return self.head_args.port[0] if self.head_args else None",
"def connectexitfanout(localpop,\n corevlan,\n forwards,\n meter,\n mac):\n\n hwswitch = Container.fromAnchor(localpop.properties['HwSwitch'])\n hwswitchname = hwswitch.resourceName\n swswitch = Container.fromAnchor(localpop.properties['SwSwitch'])\n swswitchname = swswitch.resourceName\n topology = Container.getContainer(localpop.properties['SwSwitch']['containerName'])\n\n # print \"connectexitfanout localpop\", localpop, \"corevlan\", corevlan, \"mac\", mac\n\n # Find the port on the software switch connected to the hardware switch\n links = getlinks2(topology, swswitchname, hwswitchname)\n if links == None or len(links) == 0:\n print \"No links from\", swswitchname, \"to\", hwswitchname\n return None\n hwswitchlink = None\n swport_tohw = None\n for link in links:\n (node, port) = linkednode2(link, hwswitchname)\n if port != None:\n # Found it!\n hwswitchlink = link\n swport_tohw = port\n break\n if swport_tohw == None:\n print \"No output port on\", swswitchname, \"facing\", hwswitchname\n return None\n\n for f in forwards:\n f.outPort = str(swport_tohw)\n # print \"FORW: outport\", f.outPort, \"vlan\", f.vlan, \"dstMac\", f.dstMac\n\n # Convert the list of forwarding destinations to a Java array.\n fwdsarr = jarray.array(forwards, SdnControllerClientL2Forward)\n\n # print \"dpid\", swswitch.props['dpid']\n fh = SCC.SdnInstallForward(javaByteArray2(swswitch.properties['DPID']),\n 1,\n BigInteger.ZERO,\n str(swport_tohw),\n int(corevlan),\n None,\n mac,\n fwdsarr,\n 0,\n 0,\n meter)\n\n return fh",
"def port(self) -> int:",
"def get_ofport(ifce):\n return check_output(\n split(\"sudo ovs-vsctl get Interface {} ofport\".format(ifce)))",
"def port(self):\n\n return self.config.dict[\"fhdhr\"][\"port\"]",
"def ethernet_switch_address(self):\n return self._props[\"optional\"].get(self._ethernet_switch_prop)",
"def getCurPort(self):\n cmd_string = '?6'\n data = self.sendRcv(cmd_string)\n with self._syringeErrorHandler():\n try:\n port = int(data)\n except ValueError:\n raise SyringeError(7, self.__class__.ERROR_DICT)\n self.state['port'] = port\n return port",
"def ws_port(self):\r\n return self._ws_port",
"def external_port(self):\r\n return self._external_port",
"def comm_port(self):\r\n return self._comm_port",
"def _get_port(self):\n return self.__port",
"def openCircuit(srv):",
"def masterPort(self):\r\n return self._masterPort",
"def __get_port(self) -> int:\n\t\ttry:\n\t\t\treturn int(os.getenv('MQTT_DRIVEN_PORT'))\n\t\texcept:\n\t\t\treturn 1883",
"def id(self): # pylint: disable=invalid-name\n return \"{}:{}\".format(self.switch.dpid, self.port_number)",
"def circuit(self):\n return jet.Circuit(num_wires=4, dim=2)",
"def _get_nport(self):\n return self.__nport",
"def get_port(self):\n return self.port",
"def head_port_monitoring(self):\n return self.head_args.port_monitoring if self.head_args else None",
"def launch ():\n def start_switch (event):\n log.info(\"switch %s has come up\" % event.dpid)\n log.info(event.connection.ports)\n sw = switches_by_dpid.get(event.dpid)\n\n if sw is None:\n # New switch\n sw = TopoSwitch(event.connection)\n switches_by_dpid[event.dpid] = sw\n sw.connect(event.connection)\n else:\n sw.connect(event.connection)\n core.openflow.addListenerByName(\"ConnectionUp\", start_switch)",
"def internal_switch(self) -> s11.InternalSwitch:\n return self.calobs.internal_switch",
"def __get_port(self) -> int:\n\t\ttry:\n\t\t\treturn int(os.getenv('MQTT_DRIVER_PORT'))\n\t\texcept:\n\t\t\treturn 1883",
"def port(name):\n\n words = name.upper().split('-', 1)\n\n if len(words) == 1:\n words.append(words[0][1])\n\n return int(f\"{ord(words[0][0])}{ord(words[1][0])}\")",
"def internal_port(self):\r\n return self._internal_port"
]
| [
"0.6056018",
"0.5864472",
"0.5744896",
"0.569074",
"0.5611226",
"0.5543881",
"0.55240554",
"0.54477835",
"0.5420691",
"0.5382262",
"0.5346929",
"0.5321753",
"0.528606",
"0.5251105",
"0.52226937",
"0.5214782",
"0.5148469",
"0.51428545",
"0.51317555",
"0.5099614",
"0.509746",
"0.50633",
"0.5058907",
"0.50329643",
"0.5015823",
"0.5014081",
"0.5010711",
"0.50041085",
"0.5003797",
"0.50030667"
]
| 0.6257958 | 0 |
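
A companion sketch for the `getgriport` record above, over the same hypothetical link dictionaries. `linkednode2` is repeated so the block runs on its own; its (remote node, remote port) return shape is an assumption inferred from how the record uses it, not the real ENOS API.

def linkednode2(link, node_name):
    # Hypothetical helper: far end of the link as (remote_node, remote_port),
    # or (None, None) if the link does not touch node_name.
    a, z = link['a'], link['z']
    if a['node'] == node_name:
        return z['node'], z['port']
    if z['node'] == node_name:
        return a['node'], a['port']
    return None, None

def getgriport(links, hwswitch_name, core_name, griport):
    # Find the core<->hwswitch link whose core-side port is the OSCARS circuit
    # port (griport), then return the hwswitch-side port of that same link.
    corelink = None
    for link in links:
        node, port = linkednode2(link, hwswitch_name)  # far end seen from the hwswitch
        if node == core_name and port == griport:
            corelink = link
            break
    if corelink is None:
        return None
    node, hwport_tocore = linkednode2(corelink, core_name)  # far end seen from the core
    return hwport_tocore

links = [
    {'a': {'node': 'core', 'port': '9/1'}, 'z': {'node': 'hwswitch', 'port': '1'}},
    {'a': {'node': 'core', 'port': '9/2'}, 'z': {'node': 'hwswitch', 'port': '2'}},
]
print(getgriport(links, 'hwswitch', 'core', '9/2'))  # -> '2'
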
Create entries on the local hardware switch that pass broadcast traffic to and from the connected host | def connecthostbroadcast(localpop,
hwport_tosite,
sitevlan,
meter=3,
broadcast_rewritemac = None):
hwswitch = Container.fromAnchor(localpop.properties['HwSwitch'])
hwswitchname = hwswitch.resourceName
swswitch = Container.fromAnchor(localpop.properties['SwSwitch'])
swswitchname = swswitch.resourceName
topology = Container.getContainer(localpop.properties['SwSwitch']['containerName'])
# Find the port on the HwSwitch connected to the software switch
links = getlinks2(topology, hwswitchname, swswitchname)
if links == None or len(links) == 0:
print "No links from", hwswitchname, "to", swswitchname
return False
hwport_tosw = None
for link in links:
(node, port) = linkednode2(link, swswitchname)
if port != None:
# Found the link we're looking for
hwport_tosw = port
break
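# Use the true broadcast MAC toward the site; if a rewrite MAC was supplied,
# that translated address is used on the software-switch side of the HwSwitch.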
broadcast = "FF:FF:FF:FF:FF:FF"
translated_broadcast = broadcast
if broadcast_rewritemac != None:
translated_broadcast = broadcast_rewritemac
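# Flow 1: frames arriving from the software-switch port addressed to the (possibly
# translated) broadcast MAC are restored to the true broadcast address and sent out
# the site-facing port.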
fh1 = SCC.SdnInstallForward1(javaByteArray2(hwswitch.properties['DPID']),
1,
BigInteger.ZERO,
str(hwport_tosw),
int(sitevlan),
"00:00:00:00:00:00",
translated_broadcast,
str(hwport_tosite),
int(sitevlan),
broadcast,
0,
0,
meter)
if fh1 == None:
return None
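# Flow 2: broadcast frames arriving on the site-facing port are rewritten to the
# translated broadcast MAC (if any) and forwarded toward the software switch.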
fh2 = SCC.SdnInstallForward1(javaByteArray2(hwswitch.properties['DPID']),
1,
BigInteger.ZERO,
str(hwport_tosite),
int(sitevlan),
"00:00:00:00:00:00",
broadcast,
str(hwport_tosw),
int(sitevlan),
translated_broadcast,
0,
0,
meter)
if fh2 == None:
SCC.deleteforward(fh1)
return None
return (fh1, fh2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hh_assign_ports():\n print('******* Assigning ports')\n for device in ci_addrs.switches_hh1:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_hh1_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_hh1_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_hh_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')\n\n for device in ci_addrs.switches_hh2:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_hh2_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_hh2_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_hh_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')",
"def connectentryfanoutmac(localpop,\n hostmac,\n hostvlan,\n forwards,\n meter,\n mac):\n hwswitch = Container.fromAnchor(localpop.properties['HwSwitch'])\n hwswitchname = hwswitch.resourceName\n swswitch = Container.fromAnchor(localpop.properties['SwSwitch'])\n swswitchname = swswitch.resourceName\n topology = Container.getContainer(localpop.properties['SwSwitch']['containerName'])\n\n # print \"connectentryfanout localpop\", localpop, \"host\", host, \"hostvlan\", hostvlan, \"mac\", mac\n\n # Find the port on the software switch connected to the hardware switch\n links = getlinks2(topology, swswitchname, hwswitchname)\n if links == None or len(links) == 0:\n print \"No links from\", swswitchname, \"to\", hwswitchname\n return None\n hwswitchlink = None\n swport_tohw = None\n for link in links:\n (node, port) = linkednode2(link, hwswitchname)\n if port != None:\n # Found it!\n hwswitchlink = link\n swport_tohw = port\n break\n if swport_tohw == None:\n print \"No output port on\", swswitchname, \"facing\", hwswitchname\n return None\n\n # The fanout flow is \"interesting\" in that the input plus the multiple outputs\n # all are on the same port (but different VLANs). Fill in the outputs.\n for f in forwards:\n f.outPort = str(swport_tohw)\n # print \"FORW: outport\", f.outPort, \"vlan\", f.vlan, \"dstMac\", f.dstMac\n\n # Convert the list of forwarding destinations to a Java array.\n fwdsarr = jarray.array(forwards, SdnControllerClientL2Forward)\n\n # print \"dpid\", swswitch.props['dpid']\n # This flow being installed is unusual in that it does a source MAC address\n # filter as well\n fh = SCC.SdnInstallForward(javaByteArray2(swswitch.properties['DPID']),\n 1,\n BigInteger.ZERO,\n str(swport_tohw),\n int(hostvlan),\n hostmac,\n mac,\n fwdsarr,\n 0,\n 0,\n meter)\n\n return fh",
"def topo_conf():\n for k in switches.keys():\n switches_ip[k] = IPAddr((192<<24)+int(k))\n switches_mac[k] = EthAddr(\"aa\"+ \"%010d\"%(k))",
"async def _hw_init(self):\n await self._write_async(b\":XR\\r\") # Broadcast: initialize + execute\n # Note: no need to consume reply here because there is none (since we are using broadcast)",
"def launch ():\n def start_switch (event):\n log.info(\"switch %s has come up\" % event.dpid)\n log.info(event.connection.ports)\n sw = switches_by_dpid.get(event.dpid)\n\n if sw is None:\n # New switch\n sw = TopoSwitch(event.connection)\n switches_by_dpid[event.dpid] = sw\n sw.connect(event.connection)\n else:\n sw.connect(event.connection)\n core.openflow.addListenerByName(\"ConnectionUp\", start_switch)",
"def broadcast(loopstate):\n cmdstring = 'sudo hcitool -i hci0 cmd ' # Send cmd to hci0\n cmdstring += '0x08 ' # Set group to BLE\n cmdstring += '0x0008 ' # Set command to HCI_LE_Set_Advertising_Data\n cmdstring += '0D ' # Length of entire following data, in bytes\n cmdstring += '02 ' # Length of flag info\n cmdstring += '01 ' # Use AD flags\n cmdstring += '02 ' # Flag value:\n # bit 0 (OFF) LE Limited Discoverable Mode\n # bit 1 (ON) LE General Discoverable Mode\n # bit 2 (OFF) BR/EDR Not Supported\n # bit 3 (ON) Simultaneous LE and BR/EDR to Same Device Capable (controller)\n # bit 4 (ON) Simultaneous LE and BR/EDR to Same Device Capable (Host)\n cmdstring += '09 ' # Length of following message, in bytes\n cmdstring += '07 ' # GAP value (07 = 128 Bit Complete Service UUID List)\n cmdstring += '42 69 63 79 63 6c 65 ' # Header to identify beacon message-\n # - and it's also is Bicycle in ASCII!\n if loopstate:\n cmdstring = cmdstring + LOOP_ON\n else:\n cmdstring = cmdstring + LOOP_OFF + ' >/dev/null 2>&1'\n subprocess.call(cmdstring, shell=True)\n subprocess.call('sudo hciconfig hci0 leadv 3 >/dev/null 2>&1', shell=True)",
"def get_topology_data(self, ev):\n switch_list = get_switch(self, None) # .topology_api_app\n dpids = []\n # TODO: Create only if not exisiting\n for switch in switch_list:\n dpids.append(switch.dp.id)\n # self.create_switch(switch)\n self.create_logical_router(switch)\n\n print (\"l3 Switch ENTER Done dpid-list:{}\".format(dpids))",
"def handle_connect(self):\n #print \"Switch initiated on: %s:%s\" % (self.address, self.port)\n self.buffer.append(messages.of_hello)\n self.buffer.append(messages.of_features_request)\n self.buffer.append(messages.of_set_config)",
"async def async_setup_entry(\n hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback\n) -> None:\n\n bhyve = hass.data[DOMAIN][entry.entry_id][CONF_CLIENT]\n\n switches = []\n devices = filter_configured_devices(entry, await bhyve.devices)\n programs = await bhyve.timer_programs\n\n device_by_id = {}\n\n for device in devices:\n device_id = device.get(\"id\")\n device_by_id[device_id] = device\n if device.get(\"type\") == DEVICE_SPRINKLER:\n if not device.get(\"status\"):\n _LOGGER.warning(\n \"Unable to configure device %s: the 'status' attribute is missing. Has it been paired with the wifi hub?\",\n device.get(\"name\"),\n )\n continue\n\n # Filter out any programs which are not for this device\n device_programs = [\n program for program in programs if program.get(\"device_id\") == device_id\n ]\n\n switches.append(\n BHyveRainDelaySwitch(hass, bhyve, device, \"weather-pouring\")\n )\n\n all_zones = device.get(\"zones\")\n for zone in all_zones:\n zone_name = zone.get(\"name\")\n # if the zone doesn't have a name, set it to the device's name if there is only one (eg a hose timer)\n if zone_name is None:\n zone_name = (\n device.get(\"name\") if len(all_zones) == 1 else \"Unnamed Zone\"\n )\n switches.append(\n BHyveZoneSwitch(\n hass,\n bhyve,\n device,\n zone,\n zone_name,\n device_programs,\n \"water-pump\",\n )\n )\n\n for program in programs:\n program_device = device_by_id.get(program.get(\"device_id\"))\n program_id = program.get(\"program\")\n if program_device is not None and program_id is not None:\n _LOGGER.info(\"Creating switch: Program %s\", program.get(\"name\"))\n switches.append(\n BHyveProgramSwitch(\n hass, bhyve, program_device, program, \"bulletin-board\"\n )\n )\n\n async_add_entities(switches, True)\n\n async def async_service_handler(service):\n \"\"\"Map services to method of BHyve devices.\"\"\"\n _LOGGER.info(\"%s service called\", service.service)\n method = SERVICE_TO_METHOD.get(service.service)\n if not method:\n _LOGGER.warning(\"Unknown service method %s\", service.service)\n return\n\n params = {\n key: value for key, value in service.data.items() if key != ATTR_ENTITY_ID\n }\n entity_ids = service.data.get(ATTR_ENTITY_ID)\n component = hass.data.get(SWITCH_DOMAIN)\n if entity_ids:\n target_switches = [component.get_entity(entity) for entity in entity_ids]\n else:\n return\n\n method_name = method[\"method\"]\n _LOGGER.debug(\"Service handler: %s %s\", method_name, params)\n\n for entity in target_switches:\n if not hasattr(entity, method_name):\n _LOGGER.error(\"Service not implemented: %s\", method_name)\n return\n await getattr(entity, method_name)(**params)\n\n for service, details in SERVICE_TO_METHOD.items():\n schema = details[\"schema\"]\n hass.services.async_register(\n DOMAIN, service, async_service_handler, schema=schema\n )",
"def broadcast_thread(self):\n while True:\n try:\n logger.info('broadcast routing table (dpid=%s)', dpid_to_str(self.dp.id))\n for port_no, port in self.ports.items():\n if port.neighbor_switch_dpid:\n self.switches[port.neighbor_switch_dpid].add_to_queue((port, self.tbl))\n self.switches[port.neighbor_switch_dpid].trigger_update()\n time.sleep(self.tbl.advertise_interval)\n except:\n logger.info('broadcast thread of dpid=%s is killed', dpid_to_str(self.dp.id))\n break",
"def create_logical_router(self, switch):\n if self.nb_api is None:\n self.nb_api = api_nb.NbApi.get_instance(False)\n\n # TODO: lswitch from nb api\n router_ports = []\n dpid = str(switch.dp.id)\n\n for port in switch.ports:\n # network = \"192.168.33.1/24\",\n network = None\n ip = None\n if dpid == '1':\n if port.port_no == 1:\n network = SUBNET1\n ip = DP1_PORT1_GATEWAY_IP\n else:\n network = SUBNET2\n ip = DP1_PORT2_GATEWAY_IP\n elif dpid == '2':\n if port.port_no == 1:\n network = SUBNET2\n ip = DP2_PORT1_GATEWAY_IP\n else:\n network = SUBNET3\n ip = DP2_PORT2_GATEWAY_IP\n elif dpid == '3':\n if port.port_no == 1:\n network = SUBNET3\n ip = DP3_PORT1_GATEWAY_IP\n else:\n network = SUBNET4\n ip = DP3_PORT2_GATEWAY_IP\n else:\n print \"Datapath {} not supported. Router not created!\".format(dpid)\n return\n if network and ip:\n router_port = l3.LogicalRouterPort(lswitch=\"{}\".format(switch.dp.id),\n topic=\"fake_tenant1\",\n network=network,\n gateway_ip=ip,\n mac=\"{}\".format(port.hw_addr),\n port_no=str(port.port_no),\n unique_key=4,\n id=\"{}:{}\".format(switch.dp.id, port.port_no))\n router_ports.append(router_port)\n\n router = l3.LogicalRouter(name=\"router_of_{}\".format(switch.dp.id),\n topic=\"fake_tenant1\",\n version=10,\n id=\"{}\".format(switch.dp.id),\n unique_key=5,\n ports=router_ports)\n self.nb_api.create(router)",
"def createWIFIAccessPoint():\n ifname = config.get(\"interface\", \"wifi\")\n ipaddress = config.get(\"hotspot\", \"ip\")\n prefix = int(config.get(\"hotspot\", \"prefix\"))\n ssid = config.get(\"hotspot\", \"ssid\")\n password = config.get(\"hotspot\", \"password\")\n ################################\n s_wifi = dbus.Dictionary(\n {\n \"ssid\": dbus.ByteArray(ssid.encode(\"utf-8\")),\n \"mode\": \"ap\",\n })\n s_wsec = dbus.Dictionary(\n {\n \"key-mgmt\": \"wpa-psk\",\n \"psk\": password\n })\n s_con = dbus.Dictionary(\n {\"type\": \"802-11-wireless\",\n \"interface-name\":ifname ,\n \"uuid\": str(uuid.uuid4()),\n \"id\": ssid,\n \"autoconnect\":dbus.Boolean(True)\n })\n addr1 = dbus.Dictionary({\"address\": ipaddress, \"prefix\": dbus.UInt32(prefix)})\n dns = []\n s_ip4 = dbus.Dictionary(\n {\n \"address-data\": dbus.Array([addr1], signature=dbus.Signature(\"a{sv}\")),\n \"dns\": dbus.Array(dns, signature=dbus.Signature('u'), variant_level=1),\n \"method\": \"manual\",\n })\n s_ip6 = dbus.Dictionary({\"method\": \"ignore\"})\n con = dbus.Dictionary(\n {\n \"802-11-wireless\": s_wifi,\n \"802-11-wireless-security\":s_wsec,\n \"connection\": s_con,\n \"ipv4\": s_ip4,\n \"ipv6\": s_ip6\n })\n try:\n logging.info(\"Creating hotspot connection: {} - {}\".format(s_con[\"id\"], s_con[\"uuid\"]))\n ##########\n bus = dbus.SystemBus()\n proxy = bus.get_object(\n \"org.freedesktop.NetworkManager\", \"/org/freedesktop/NetworkManager/Settings\"\n )\n settings = dbus.Interface(proxy, \"org.freedesktop.NetworkManager.Settings\")\n connection = settings.AddConnection(con)\n logging.info(f\"Created access point connection {connection}\")\n except Exception as e:\n logging.error(\"Hotspot connection creation failed\")\n logging.error(e)",
"def setupHw():\n\n pin.setupHw()\n pin.setupOutPins(traffic_lights)\n pin.setDebug(False)",
"def create_host(self, port=20298):\n # We use UDP to broadcast the host\n self.host = Networking.Host(0.5, 0.5, 5)\n\n \"\"\"\n # find unused ip address\n for i in range(255, 0, -1):W\n for j in range(255, 0, -1):\n try:\n print(\"Checking for free address at 192.168.\"+str(i)+\".\"+str(j))\n # try to connect to the specified ip address\n socket.gethostbyaddr(\"192.168.\"+str(i)+\".\"+str(j))\n except socket.error:\n print(\"Address is unused\")\n # if we cannot connect to the address, that means the ip address is unused\n server_ip = \"192.168.\"+str(i)+\".\"+str(j)\n break\n if server_ip is not None:\n break\n \"\"\"\n\n # port = self.get_open_port()\n # listen to all address\n server_ip = \"0.0.0.0\"\n client_ip = \"localhost\"\n\n try:\n self.host.connect(server_ip, port)\n self.host.accepting_allow()\n\n # Clear the broadcast blocker\n # self.stop_broadcast.clear()\n # broadcaster = threading.Thread(target=self.broadcast_game, args=(None, self.stop_broadcast))\n # broadcaster.start()\n\n # The host also acts as a client\n self.join_host(client_ip, 20298, GameStateModel.instance().host)\n except MastermindErrorSocket:\n logger.error(\"Failed to create a host\")",
"def connect_to_switches(self):\n for p4switch in self.topo.get_p4switches():\n thrift_port = self.topo.get_thrift_port(p4switch)\n self.controllers[p4switch] = SimpleSwitchThriftAPI(thrift_port)",
"def on_inner_dp_join(self, dp):\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n\n #Deletion of already existing OF flows\n LOG.debug('TOPO MNGR: Deleting flow table configuration of newly added forwarder ID: ' + str(dp.id) )\n dp.send_msg(parser.OFPFlowMod(datapath=dp, command=ofp.OFPFC_DELETE))\n dp.send_msg(parser.OFPFlowMod(datapath=dp, command=ofp.OFPFC_DELETE, table_id=OF_GPRS_TABLE))\n #TODO: Shall we wipe-out OFconfig data as well?\n\n\n ##########################\n ## Main table (0)\n ## TODO:change to echo only!\n\n ## Networks self-discovery using icmp messages\n ## Redirect all pings with ipv4_dst=DISCOVERY_IP_DST to controller\n LOG.debug('TOPO MNGR: Installing ICMP topology discovery flows on forwarder: ' + str(dp.id))\n match = parser.OFPMatch(eth_type=0x0800, ip_proto=1, icmpv4_type=8, icmpv4_code=0, ipv4_dst=DISCOVERY_IP_DST)\n actions = [ parser.OFPActionOutput(ofp.OFPP_CONTROLLER) ]\n ##TEST SHAPING ON FWD 11 on port 1 - trying to shape also measurement packets\n self.add_flow(dp, 100, match, actions, 0)\n\n ##Controller uses ARP to resolve mac_addresses of APNs\n ##All arp replies with target IP of DISCOVERY_ARP_IP are redirected to controller\n\n ## FIXED: All ARPs replies are redirected to the controller regardless of the target IP\n ##\n ## TODO: In general we should reply only to ARPs from the APNs subnet, and per APN basis (from the configuration)\n\n LOG.debug('TOPO MNGR: Installing ARP APN discovery flows on forwarder: ' + str(dp.id))\n match = parser.OFPMatch(eth_type=0x0806, arp_op=2, )\n actions = [ parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.add_flow(dp, 100, match, actions)\n\n\n # We match only ethertype, that means all IP packed data should be match\n # It is some kind of flow of last resort, therefore it has small priority\n if dp.id in LAN_TYPE_FORWARDERS:\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + 'is a LAN edge forwarder, installing additional rules' )\n match = parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.add_flow(dp, 2, match, actions)\n\n rate = 500\n # all fwds have set of meters\n for i in range(0,11):\n meter_id = i\n if i == 0:\n rate = 100000000\n elif i % 2 == 0:\n rate = 1000\n else:\n rate = 2000\n # KBPS is actually kbps\n meter = {'meter_id': meter_id, 'flags': 'KBPS',\n 'bands': [{'type': 'DROP', 'rate': rate}]}\n self.mod_meter_entry(dp, meter, meter_id, dp.ofproto.OFPMC_ADD)\n LOG.debug(\"FLOW MNGR: Added METER ID: \" + str(i) + \" with rate: \" + str(rate) + \" to FWD ID: \" + str(dp.id) )\n\n\n ##Following rules are applied only on forwarders bellonging to BSS_EDGE_FORWARDER group\n ##Rules are applied based on priority of match (highest priority first)\n if dp.id in BSS_EDGE_FORWARDER:\n\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' is an access edge forwarder, installing aditional rules')\n ## UDP 23000 is GPRS-NS and all packets that match this are forwarded to OF_GPRS_TABLE flow table\n inst = [ parser.OFPInstructionGotoTable(OF_GPRS_TABLE) ]\n match = parser.OFPMatch(eth_type=0x0800,ip_proto=inet.IPPROTO_UDP, udp_dst=VGSN_PORT)\n req = parser.OFPFlowMod(datapath=dp, priority=200, match=match, instructions=inst)\n dp.send_msg(req)\n\n ## VGSN_PHY and BSS_PHY ports are bridged -- DHCP, ARP, Abis & stuff\n ## XXX: what if vGSN is not on same forwarder as BSS\n actions = [ parser.OFPActionOutput(VGSN_PHY_PORT) ]\n inst = [ parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions) ]\n match = parser.OFPMatch(in_port=BSS_PHY_PORT)\n req = 
parser.OFPFlowMod(datapath=dp, priority=10, match=match, instructions=inst)\n dp.send_msg(req)\n actions = [ parser.OFPActionOutput(BSS_PHY_PORT) ]\n inst = [ parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions) ]\n match = parser.OFPMatch(in_port=VGSN_PHY_PORT)\n req = parser.OFPFlowMod(datapath=dp, priority=10, match=match, instructions=inst)\n dp.send_msg(req)\n\n\n #################\n ## OF_GPRS-TABLE (2)\n ##TODO: BSS <-> vGSS separate tunnel for communication!\n ##TODO: deletion, modification od PDP CNT\n\n ## if packet is not first segment of user data packet (IS part of sndcp fragmented packet) it's DROPED\n match = parser.OFPMatch( sndcp_first_segment=0 )\n actions = [ ]\n inst = [ parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions) ]\n req = parser.OFPFlowMod(datapath=dp, table_id=OF_GPRS_TABLE, priority=200, match=match, instructions=inst)\n dp.send_msg(req)\n\n ## if packet is first segment of SNDCP packet with more than one segment, it's forwarded to controller\n ## when controller recieves such packet it sends ICMP fragmentation_needed to its sender and drops original\n match = parser.OFPMatch( sndcp_first_segment=1, sndcp_more_segments=1 )\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER) ]\n inst = [ parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions) ]\n req = parser.OFPFlowMod(datapath=dp, table_id=OF_GPRS_TABLE, priority=200, match=match, instructions=inst)\n dp.send_msg(req)\n\n ##if it's SNDCP packet taht still wasnt matched (rules with higher priority are inserted on PDP CNT activation)\n ##we assume it's packet of unknown PDP CNT and we DROP it\n match = parser.OFPMatch( sndcp_first_segment=1 )\n actions = [ ]\n inst = [ parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions) ]\n req = parser.OFPFlowMod(datapath=dp, table_id=OF_GPRS_TABLE, priority=1, match=match, instructions=inst)\n dp.send_msg(req)\n\n ##Everything else is Signalzation and is forwarded either to BSS or vGSN\n # XXX: co ak bss a vgsn nie su spolu na jednom DPID?\n actions = [ parser.OFPActionOutput(VGSN_PHY_PORT) ]\n inst = [ parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions) ]\n match = parser.OFPMatch(in_port=BSS_PHY_PORT)\n req = parser.OFPFlowMod(datapath=dp, table_id=OF_GPRS_TABLE, priority=0, match=match, instructions=inst)\n dp.send_msg(req)\n actions = [ parser.OFPActionOutput(BSS_PHY_PORT) ]\n inst = [ parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions) ]\n match = parser.OFPMatch(in_port=VGSN_PHY_PORT)\n req = parser.OFPFlowMod(datapath=dp, table_id=OF_GPRS_TABLE, priority=0, match=match, instructions=inst)\n dp.send_msg(req)",
"def onRegisterNetworkBroadcast(self):\n pass",
"def register_traffic(self, intent):\n servers = collections.defaultdict(list)\n clients = collections.defaultdict(list)\n _trules = []\n for rule in intent:\n srchost = self.get_ep_host(rule['src'])\n dsthost = self.get_ep_host(rule['dst'])\n\n if not srchost:\n log.error(\"No host found for running traffic from IP : %s\",\n rule['src'])\n continue\n elif not dsthost:\n log.error(\"No host found for running traffic from IP : %s\",\n rule['dst'])\n continue\n\n servers[dsthost].append(rule)\n clients[srchost].append(rule)\n\n trule = self.create_traffic_rule(rule)\n _trules.append(trule)\n\n # Register at endpoint and create local representation.\n if config.get_param('TRAFFIC_START_SERVERS_FIRST'):\n # Start Servers first and then Clients.\n host_rules_map = [servers, clients]\n else:\n # Start Servers / Clients in single call.\n # May result in some cool off time required before the\n # traffic settles.\n for host, rules in clients.items():\n servers[host].extend(rules)\n host_rules_map = [servers]\n\n def _register_traffic_rules(host, rules):\n with LydianClient(host) as dclient:\n dclient.controller.register_traffic(rules)\n\n # Start Server before the client.\n for host_rules in host_rules_map:\n collection = [(host, (host, rules), {})\n for host, rules in host_rules.items()]\n ThreadPool(_register_traffic_rules, collection)\n\n self.rules_app.add_rules(_trules) # Persist rules to local db",
"def setup(env, channel, interT, station, mean):\r\n network = Network(env, channel)\r\n system = System(env, network, station, mean)\r\n transmitList = []\r\n \r\n while True:\r\n system.stations",
"def __init__(self,\n device_name,\n create_device_func,\n props,\n hub_name_prop,\n primary_port_prop,\n secondary_port_prop,\n ethernet_switch_prop,\n ethernet_port_prop,\n get_switchboard_if_initialized,\n power_and_data_share_cable=False,\n pre_off_func=None):\n super().__init__(device_name=device_name)\n\n self._create_device_func = create_device_func\n self._hub_name_prop = hub_name_prop\n self._primary_port_prop = primary_port_prop\n self._secondary_port_prop = secondary_port_prop\n self._props = props\n self._ethernet_switch = None\n\n # Set the properties\n self._get_switchboard_if_initialized = get_switchboard_if_initialized\n self._power_and_data_share_cable = power_and_data_share_cable\n self._pre_off_func = pre_off_func\n self._ethernet_switch_prop = ethernet_switch_prop\n self._ethernet_port_prop = ethernet_port_prop",
"def kk_assign_ports():\n print('******* Assigning ports')\n for device in ci_addrs.switches_kk_all:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_kk_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_kk_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_kk_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')",
"def start_sending_to_switch(self):\n self.switch_active = True\n for message in self.internal_switch_buffer:\n self.switch.buffer.append(message)\n self.internal_switch_buffer = []",
"def add_vport(self, switch_name):\n # Create tap devices for the VM\n tap_name = 'tap' + str(self._vport_id)\n self._vport_id += 1\n tap_cmd_list = ['sudo', 'ip', 'tuntap', 'del', tap_name, 'mode', 'tap']\n # let's assume, that all VMs have NIC QUEUES enabled or disabled\n # at the same time\n if int(settings.getValue('GUEST_NIC_QUEUES')[0]):\n tap_cmd_list += ['multi_queue']\n tasks.run_task(tap_cmd_list, self._logger,\n 'Creating tap device...', False)\n\n tap_cmd_list = ['sudo', 'ip', 'tuntap', 'add', tap_name, 'mode', 'tap']\n # let's assume, that all VMs have NIC QUEUES enabled or disabled\n # at the same time\n if int(settings.getValue('GUEST_NIC_QUEUES')[0]):\n tap_cmd_list += ['multi_queue']\n tasks.run_task(tap_cmd_list, self._logger,\n 'Creating tap device...', False)\n if settings.getValue('VSWITCH_JUMBO_FRAMES_ENABLED'):\n tasks.run_task(['ifconfig', tap_name, 'mtu',\n str(settings.getValue('VSWITCH_JUMBO_FRAMES_SIZE'))],\n self._logger, 'Setting mtu size', False)\n\n tasks.run_task(['sudo', 'ip', 'addr', 'flush', 'dev', tap_name],\n self._logger, 'Remove IP', False)\n tasks.run_task(['sudo', 'ip', 'link', 'set', 'dev', tap_name, 'up'],\n self._logger, 'Bring up ' + tap_name, False)\n\n bridge = self._bridges[switch_name]\n of_port = bridge.add_port(tap_name, [])\n return (tap_name, of_port)",
"def swconnect(localpop, remotepop, mac, vc, meter):\n core = Container.fromAnchor(localpop.properties['CoreRouter'])\n corename = core.resourceName\n (corename,coredom,coreport,corevlan) = getvcnode(vc, corename)\n remotecore = Container.fromAnchor(remotepop.properties['CoreRouter'])\n remotecorename = remotecore.resourceName\n (remotecorename,remotecoredom,remotecoreport,remotecorevlan) = getvcnode(vc, remotecorename)\n\n hwswitch = Container.fromAnchor(localpop.properties['HwSwitch'])\n hwswitchname = hwswitch.resourceName\n swswitch = Container.fromAnchor(localpop.properties['SwSwitch'])\n swswitchname = swswitch.resourceName\n\n remotehwswitch = Container.fromAnchor(remotepop.properties['HwSwitch'])\n remotehwswitchname = remotehwswitch.resourceName\n remoteswswitch = Container.fromAnchor(remotepop.properties['SwSwitch'])\n remoteswswitchname = remoteswswitch.resourceName\n\n topology = Container.getContainer(localpop.properties['SwSwitch']['containerName'])\n\n # Find hwswitch/port - core/port\n hwport_tocore = getgriport(topology, hwswitch, core, coreport)\n # Find remotehwswitch/port - remotecore/port\n remotehwport_tocore = getgriport(topology, remotehwswitch, remotecore, remotecoreport)\n\n links = getlinks2(topology, hwswitchname, swswitchname)\n if links == None or len(links) == 0:\n print \"No links from \", hwswitchname, \" to \", swswitchname\n return None\n hwswlink = None\n for l in links:\n (node, port) = linkednode2(l, swswitchname)\n if port != None:\n # Found the (a) link\n hwswlink = l\n hwport_tosw = port\n break\n\n remotelinks = getlinks2(topology, remotehwswitchname, remoteswswitchname)\n if remotelinks == None or len(remotelinks) == 0:\n print \"No links from \", remotehwswitchname, \" to \", remoteswswitchname\n return None\n remotehwswlink = None\n for l in remotelinks:\n (node, port) = linkednode2(l, remoteswswitchname)\n if port != None:\n # Found the (a) link\n remotehwswlink = l\n remotehwport_tosw = port\n break\n\n # Find the ports on hwswitch and remotehwswitch that go to the corresponding software switches\n\n # Set up forwarding for broadcast traffic from the new local pop\n # Install outbound flow on hwswitch from swswitch to the GRI\n fh1 = SCC.SdnInstallForward1(javaByteArray2(hwswitch.properties['DPID']),\n 1,\n BigInteger.ZERO,\n str(hwport_tosw), # hw port facing software switch\n int(corevlan),\n \"00:00:00:00:00:00\",\n mac,\n str(hwport_tocore),\n int(corevlan),\n mac,\n 0,\n 0,\n meter)\n if fh1 == None:\n return None\n\n # Install inbound flow on remotehwswitch from GRI to remoteswswitch\n fh2 = SCC.SdnInstallForward1(javaByteArray2(remotehwswitch.properties['DPID']),\n 1,\n BigInteger.ZERO,\n str(remotehwport_tocore),\n int(remotecorevlan),\n \"00:00:00:00:00:00\",\n mac,\n str(remotehwport_tosw), # remotehw port facing remote software switch\n int(remotecorevlan),\n mac,\n 0,\n 0,\n meter)\n if fh2 == None:\n SCC.deleteforward(fh1)\n return None\n\n # Set up forwarding for broadcast traffic to the new local pop\n # Install inbound flow on hwswitch from GRI to swswitch\n fh3 = SCC.SdnInstallForward1(javaByteArray2(hwswitch.properties['DPID']),\n 1,\n BigInteger.ZERO,\n str(hwport_tocore),\n int(corevlan),\n \"00:00:00:00:00:00\",\n mac,\n str(hwport_tosw), # hw port facing software switch\n int(corevlan),\n mac,\n 0,\n 0,\n meter)\n if fh3 == None:\n SCC.deleteforward(fh1)\n SCC.deleteforward(fh2)\n return None\n\n # Install outbound flow on remotehwswitch from remoteswswitch to GRI\n fh4 = 
SCC.SdnInstallForward1(javaByteArray2(remotehwswitch.properties['DPID']),\n 1,\n BigInteger.ZERO,\n str(remotehwport_tosw), # remotehw port facing remote software switch\n int(remotecorevlan),\n \"00:00:00:00:00:00\",\n mac,\n str(remotehwport_tocore),\n int(remotecorevlan),\n mac,\n 0,\n 0,\n meter)\n if fh4 == None:\n SCC.deleteforward(fh1)\n SCC.deleteforward(fh2)\n SCC.deleteforward(fh3)\n return None\n\n # Return something\n return (fh1, fh2, fh3, fh4)",
"def _add_to_switch(self, _switch, context):\n _network = context.current['id']\n _vlanid = context.current['provider:segmentation_id']\n\n # BRIDGE_PORT_URL = '{url_prefix}://{switch_name_or_ip}:{port}/networks/{vlan}/{network_id}/{port_id}'\n for _switchport in _switch.get('ports'):\n try:\n _request = requests.put(\n BRIDGE_PORT_URL.format(url_prefix=self.url_prefix,\n port=self.protocol_port,\n switch_name_or_ip=_switch.get('name'),\n vlan=unicode(_vlanid),\n network_id=_network,\n port_id=_switchport)\n )\n LOG.info(\n _LI('Sending PUT API Call to Switch %s'),\n _request.url\n )\n if _request.status_code != requests.codes.ok:\n LOG.error(\n _LE(\"Failed To Provision Switch %s\"), _request.text)\n raise MechanismDriverError()\n except ConnectionError:\n LOG.error(\n _LE('Failed to connect to switch %s'),\n _request.url\n )",
"def switch_features_handler(self, ev):\n\n super(RyuRest, self).switch_features_handler(ev) # Call the original switch features method\n datapath = ev.msg.datapath\n self.switches[datapath.id] = datapath\n self.mac_to_port.setdefault(datapath.id, {})",
"def __init__(\n self,\n on_device: Callable[[SwitcherBase], Any],\n broadcast_ports: List[int] = [\n SWITCHER_UDP_PORT_TYPE1,\n SWITCHER_UDP_PORT_TYPE1_NEW_VERSION,\n SWITCHER_UDP_PORT_TYPE2,\n SWITCHER_UDP_PORT_TYPE2_NEW_VERSION,\n ],\n ) -> None:\n self._on_device = on_device\n self._broadcast_ports = broadcast_ports\n self._is_running = False\n self._transports: Dict[int, Optional[BaseTransport]] = {}",
"def _start_streaming_ram_to_host(self):\n self.regs.SDRAM_HOST_READ_GO = 1\n self.regs.CSTREAM_CFG = 1",
"def get_bt_smarthub_data(self):\n import btsmarthub_devicelist\n\n data = btsmarthub_devicelist.get_devicelist(router_ip=self.host, only_active_devices=True)\n devices = {}\n for device in data:\n try:\n devices[device['UserHostName']] = {\n 'ip': device['IPAddress'],\n 'mac': device['PhysAddress'],\n 'host': device['UserHostName'],\n 'status': device['Active']\n }\n except (KeyError, 'no'):\n pass\n return devices",
"def _handle_HostEvent (self, event):\n self.host_alive.append(event.entry) \n print type(event.entry).__name__"
]
| [
"0.59532166",
"0.5937295",
"0.5799532",
"0.5706568",
"0.56275046",
"0.5578826",
"0.55391914",
"0.5535615",
"0.5502016",
"0.5501313",
"0.5489316",
"0.5470721",
"0.54283583",
"0.5427892",
"0.5426942",
"0.5419042",
"0.5402045",
"0.5376207",
"0.53679425",
"0.53496623",
"0.53475976",
"0.5331087",
"0.5302909",
"0.5284414",
"0.5282471",
"0.528059",
"0.52601594",
"0.5240479",
"0.522834",
"0.52208835"
]
| 0.6400162 | 0 |
Create fanout entry on source POP's software switch | def connectentryfanoutmac(localpop,
hostmac,
hostvlan,
forwards,
meter,
mac):
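# Install a fan-out flow on the POP's software switch: traffic entering from
# (hostmac, hostvlan) and destined to 'mac' is replicated to every destination
# listed in 'forwards'.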
hwswitch = Container.fromAnchor(localpop.properties['HwSwitch'])
hwswitchname = hwswitch.resourceName
swswitch = Container.fromAnchor(localpop.properties['SwSwitch'])
swswitchname = swswitch.resourceName
topology = Container.getContainer(localpop.properties['SwSwitch']['containerName'])
# print "connectentryfanout localpop", localpop, "host", host, "hostvlan", hostvlan, "mac", mac
# Find the port on the software switch connected to the hardware switch
links = getlinks2(topology, swswitchname, hwswitchname)
if links == None or len(links) == 0:
print "No links from", swswitchname, "to", hwswitchname
return None
hwswitchlink = None
swport_tohw = None
for link in links:
(node, port) = linkednode2(link, hwswitchname)
if port != None:
# Found it!
hwswitchlink = link
swport_tohw = port
break
if swport_tohw == None:
print "No output port on", swswitchname, "facing", hwswitchname
return None
# The fanout flow is "interesting" in that the input plus the multiple outputs
# all are on the same port (but different VLANs). Fill in the outputs.
for f in forwards:
f.outPort = str(swport_tohw)
# print "FORW: outport", f.outPort, "vlan", f.vlan, "dstMac", f.dstMac
# Convert the list of forwarding destinations to a Java array.
fwdsarr = jarray.array(forwards, SdnControllerClientL2Forward)
# print "dpid", swswitch.props['dpid']
# This flow being installed is unusual in that it does a source MAC address
# filter as well
fh = SCC.SdnInstallForward(javaByteArray2(swswitch.properties['DPID']),
1,
BigInteger.ZERO,
str(swport_tohw),
int(hostvlan),
hostmac,
mac,
fwdsarr,
0,
0,
meter)
return fh | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def connectexitfanout(localpop,\n corevlan,\n forwards,\n meter,\n mac):\n\n hwswitch = Container.fromAnchor(localpop.properties['HwSwitch'])\n hwswitchname = hwswitch.resourceName\n swswitch = Container.fromAnchor(localpop.properties['SwSwitch'])\n swswitchname = swswitch.resourceName\n topology = Container.getContainer(localpop.properties['SwSwitch']['containerName'])\n\n # print \"connectexitfanout localpop\", localpop, \"corevlan\", corevlan, \"mac\", mac\n\n # Find the port on the software switch connected to the hardware switch\n links = getlinks2(topology, swswitchname, hwswitchname)\n if links == None or len(links) == 0:\n print \"No links from\", swswitchname, \"to\", hwswitchname\n return None\n hwswitchlink = None\n swport_tohw = None\n for link in links:\n (node, port) = linkednode2(link, hwswitchname)\n if port != None:\n # Found it!\n hwswitchlink = link\n swport_tohw = port\n break\n if swport_tohw == None:\n print \"No output port on\", swswitchname, \"facing\", hwswitchname\n return None\n\n for f in forwards:\n f.outPort = str(swport_tohw)\n # print \"FORW: outport\", f.outPort, \"vlan\", f.vlan, \"dstMac\", f.dstMac\n\n # Convert the list of forwarding destinations to a Java array.\n fwdsarr = jarray.array(forwards, SdnControllerClientL2Forward)\n\n # print \"dpid\", swswitch.props['dpid']\n fh = SCC.SdnInstallForward(javaByteArray2(swswitch.properties['DPID']),\n 1,\n BigInteger.ZERO,\n str(swport_tohw),\n int(corevlan),\n None,\n mac,\n fwdsarr,\n 0,\n 0,\n meter)\n\n return fh",
"def setup_fan():\n global dev_fan\n dev_fan = iot_fan.Fan(config.option('pin_name', 'Fan'))\n fan_init()",
"def usrp_sink_make(*args):\n return _uhd_swig.usrp_sink_make(*args)",
"def hsdpa_physical_downlink_settings_carrier2(self):\r\r\n carrier = 2\r\r\n\r\r\n config_list = []\r\r\n\r\r\n config_list.append ( \"%-24s %-18s\" % (\"Channel( Carrier 2)\", \"Level\"))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"==================\", \"=====\"))\r\r\n\r\r\n pcpich_level = -11\r\r\n self.set_pcpich_code_level(carrier=carrier, leveldB=pcpich_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"P-CPICH\", pcpich_level))\r\r\n\r\r\n\r\r\n # *****************************************************************************\r\r\n # Configure 2 HS-SCCH: level, channelization code, UE ID and dummy UE ID\r\r\n # *****************************************************************************\r\r\n hssch_level_1 = -18.0\r\r\n hssch_level_2 = -18.0\r\r\n self.set_hssch_level(hssch_num=1, carrier=carrier, leveldB=hssch_level_1)\r\r\n self.set_hssch_level(hssch_num=2, carrier=carrier, leveldB=hssch_level_2)\r\r\n self.set_hssch_code(hssch_num=1, carrier=carrier, codeNum=2)\r\r\n self.set_hssch_code(hssch_num=2, carrier=carrier, codeNum=7)\r\r\n\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-SCCH #1\", hssch_level_1))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-SCCH #2\", hssch_level_2))\r\r\n\r\r\n self.set_default_ue_id_hssch(carrier=carrier)\r\r\n\r\r\n # HS-PDSCH Enhanced Settings\r\r\n self.set_hsdsch_mpo(carrier=carrier, control=\"AUTO\", pwrOffsetManual=\"\")\r\r\n self.hsdsch_unsched_frames(carrier=carrier, usFrameType='DUMMY')\r\r\n\r\r\n # *****************************************************************************\r\r\n # Configure HS-PDSCH: level and first channelization code number\r\r\n # *****************************************************************************\r\r\n hsdsch_level = -1.6\r\r\n self.set_hsdsch_level(carrier=carrier, leveldB = hsdsch_level)\r\r\n self.set_hsdsch_chanelisation_code(carrier=carrier, code=1)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-PDSCH\", hsdsch_level))\r\r\n\r\r\n config_list.append (\"\")\r\r\n\r\r\n for line in config_list:\r\r\n print line",
"def create_fan_service(accessory):\n service = accessory.add_service(ServicesTypes.FAN)\n\n cur_state = service.add_char(CharacteristicsTypes.ON)\n cur_state.value = 0\n\n direction = service.add_char(CharacteristicsTypes.ROTATION_DIRECTION)\n direction.value = 0\n\n speed = service.add_char(CharacteristicsTypes.ROTATION_SPEED)\n speed.value = 0",
"def create_fanv2_service(accessory):\n service = accessory.add_service(ServicesTypes.FAN_V2)\n\n cur_state = service.add_char(CharacteristicsTypes.ACTIVE)\n cur_state.value = 0\n\n direction = service.add_char(CharacteristicsTypes.ROTATION_DIRECTION)\n direction.value = 0\n\n speed = service.add_char(CharacteristicsTypes.ROTATION_SPEED)\n speed.value = 0\n\n swing_mode = service.add_char(CharacteristicsTypes.SWING_MODE)\n swing_mode.value = 0",
"def onSetRelayOutput(self, event):",
"def hsdpa_physical_downlink_settings(self):\r\r\n\r\r\n config_list = []\r\r\n\r\r\n config_list.append (\"\")\r\r\n\r\r\n config_list.append ( \"%-24s %-18s\" % (\"Channel( Carrier 1)\", \"Level\"))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"==================\", \"=====\"))\r\r\n\r\r\n pcpich_level = -10.2\r\r\n self.set_pcpich_code_level(carrier=1, leveldB=pcpich_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"P-CPICH\", pcpich_level))\r\r\n\r\r\n psch_level = -15.2\r\r\n ssch_level = psch_level\r\r\n pccpch_level = -12.2\r\r\n self.write('CONFigure:WCDMa:SIGN:DL:LEVel:PSCH %s' %psch_level)\r\r\n self.write('CONFigure:WCDMa:SIGN:DL:LEVel:SSCH %s' %ssch_level)\r\r\n self.write('CONFigure:WCDMa:SIGN:DL:LEVel:PCCPch %s' %pccpch_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"P-SCH\", psch_level))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"S-SCH\", ssch_level))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"P-CCPCH\", pccpch_level))\r\r\n\r\r\n\r\r\n # SCCPH power level and channelisation code\r\r\n sccpch_level = -12.2\r\r\n self.set_dl_chan_code_level(dl_chan='SCCPch', code=2, level_dB=sccpch_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"S-CCPCH\", sccpch_level))\r\r\n\r\r\n # PICH power level and channelisation code\r\r\n pich_level = -15.2\r\r\n self.set_dl_chan_code_level(dl_chan='PICH', code=2, level_dB=pich_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"PICH\", pich_level))\r\r\n\r\r\n # AICH power level and channelisation code\r\r\n aich_level = -15.2\r\r\n self.set_dl_chan_code_level(dl_chan='AICH', code=3, level_dB=aich_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"AICH\", aich_level))\r\r\n\r\r\n # DPCH power and channelisation code\r\r\n dpch_level = -18.2\r\r\n self.set_dl_chan_code_level(dl_chan='DPCH', code=3, level_dB=dpch_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"DPCH\", dpch_level))\r\r\n\r\r\n # F-DPCH power and channelisation ocde\r\r\n fdpch_level = -18.2\r\r\n self.set_dl_chan_code_level(dl_chan='FDPCh', code=6, level_dB=fdpch_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"F-DPCH\", fdpch_level))\r\r\n\r\r\n # DPCH enhanced settings\r\r\n self.configure_enhanced_dl_dpch()\r\r\n\r\r\n\r\r\n # *****************************************************************************\r\r\n # Configure 2 HS-SCCH: level, channelization code, UE ID and dummy UE ID\r\r\n # *****************************************************************************\r\r\n hssch_level_1 = -20.2\r\r\n hssch_level_2 = -20.2\r\r\n self.set_hssch_level(hssch_num=1, carrier=1, leveldB=hssch_level_1)\r\r\n self.set_hssch_level(hssch_num=2, carrier=1, leveldB=hssch_level_2)\r\r\n self.set_hssch_code(hssch_num=1, carrier=1, codeNum=2)\r\r\n self.set_hssch_code(hssch_num=2, carrier=1, codeNum=7)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-SCCH #1\", hssch_level_1))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-SCCH #2\", hssch_level_2))\r\r\n\r\r\n self.set_default_ue_id_hssch(carrier=1)\r\r\n\r\r\n # HS-PDSCH Enhanced Settings\r\r\n self.set_hsdsch_mpo(carrier=1, control=\"AUTO\", pwrOffsetManual=\"\")\r\r\n # unscheduled frame type for HSDPA\r\r\n # possible types are 'DUMMy', 'DTX'\r\r\n self.hsdsch_unsched_frames(carrier=1, usFrameType='DUMMY')\r\r\n\r\r\n # *****************************************************************************\r\r\n # Configure HS-PDSCH: level and first channelization code number\r\r\n # *****************************************************************************\r\r\n\r\r\n hsdsch_level = -1.2\r\r\n 
self.set_hsdsch_level(carrier=1, leveldB = hsdsch_level)\r\r\n self.set_hsdsch_chanelisation_code(code=1, carrier=1)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-PDSCH\", hsdsch_level))\r\r\n\r\r\n\r\r\n # // *****************************************************************************\r\r\n # Set level and channelization code of E-AGCH, E-HICH and E-RGCH.\r\r\n # *****************************************************************************\r\r\n eagch_level = -20.2\r\r\n ehich_level = -20.2\r\r\n ergch_level = -20.2\r\r\n self.set_dl_chan_code_level(dl_chan='EAGCh', code=3, level_dB=eagch_level)\r\r\n self.set_dl_chan_code_level(dl_chan='EHICh', code=6, level_dB=ehich_level)\r\r\n self.set_dl_chan_code_level(dl_chan='ERGCh', code=6, level_dB=ergch_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"E-AGCH\", eagch_level))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"E-HICH\", ehich_level))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"E-RGCH\", ergch_level))\r\r\n\r\r\n config_list.append (\"\")\r\r\n\r\r\n for line in config_list:\r\r\n print line\r\r\n\r\r\n if self.dc_hsdpa:\r\r\n\r\r\n self.hsdpa_physical_downlink_settings_carrier2()",
"def create_fanv2_service_with_min_step(accessory):\n service = accessory.add_service(ServicesTypes.FAN_V2)\n\n cur_state = service.add_char(CharacteristicsTypes.ACTIVE)\n cur_state.value = 0\n\n direction = service.add_char(CharacteristicsTypes.ROTATION_DIRECTION)\n direction.value = 0\n\n speed = service.add_char(CharacteristicsTypes.ROTATION_SPEED)\n speed.value = 0\n speed.minStep = 25\n\n swing_mode = service.add_char(CharacteristicsTypes.SWING_MODE)\n swing_mode.value = 0",
"def pumpprobe_signal_test_build():\r\n inst.write(\"*RST\")\r\n #inst.write(\"DISP OFF\")\r\n inst.write(\":TRAC:CHAN1 ON\") # enables channel coupling at output 1\r\n inst.write(\":FUNC1 USER\") # set signal output to Arbitrary\r\n inst.write(\"FUNC1:USER p_short_1per\") # set Arbitrary waveform to pump pulse\r\n #inst.query(\"FUNC1:USER?\")\r\n #inst.write(\"FUNC1 USER\")\r\n #inst.write(\"FUNC1:USER 100KHZ, 0.1\") #FUNC can't seem to overwrite freq & amp settings\r\n \r\n inst.write(\":VOLT1 1\")\r\n inst.write(\":FREQ1 200KHZ\") \r\n \r\n inst.write(\":AM1:DEPT 100PCT\")\r\n inst.write(\":AM1:STAT ON\") #AM needs activation using STATus ON\r\n \r\n inst.write(\":AM1:SOUR EXT\") #sets source of AM\r\n \r\n \r\n inst.write(\"FUNC2:USER probe_2_12_p\")\r\n #inst.query(\"FUNC2:USER?\")\r\n inst.write(\":VOLT2 .1\")\r\n inst.write(\":FREQ2 200KHZ\")\r\n inst.write(\":AM2:DEPT 100PCT\")\r\n inst.write(\":AM2:STAT ON\")\r\n \r\n inst.write(\"CHAN:MATH plus\") #adds both channels at one output\r\n inst.write(\":OUTP1 ON\")",
"def launch ():\n def start_switch (event):\n log.info(\"switch %s has come up\" % event.dpid)\n log.info(event.connection.ports)\n sw = switches_by_dpid.get(event.dpid)\n\n if sw is None:\n # New switch\n sw = TopoSwitch(event.connection)\n switches_by_dpid[event.dpid] = sw\n sw.connect(event.connection)\n else:\n sw.connect(event.connection)\n core.openflow.addListenerByName(\"ConnectionUp\", start_switch)",
"def __init__(self, spread, depth, bandwidth, delay, loss, fpga, fpga_bandwidth=None, fpga_delay=None,\n fpga_loss=None, poisson=None):\n logger = logging.getLogger(__name__)\n\n # Initialize topology #\n Topo.__init__(self)\n\n # Setup parameters\n fpga_bandwidth = bandwidth if fpga_bandwidth is None else fpga_bandwidth\n fpga_delay = delay if fpga_delay is None else halve_delay(fpga_delay)\n fpga_loss = loss * 2 if fpga_loss is None else fpga_loss\n\n if poisson:\n link_opts = dict(bw=bandwidth, delay=get_poisson_delay(delay), loss=loss, use_htb=True)\n fpga_link_opts = dict(bw=fpga_bandwidth, delay=get_poisson_delay(fpga_delay),\n loss=fpga_loss, use_htb=True)\n else:\n link_opts = dict(bw=bandwidth, delay=delay, loss=loss, use_htb=True)\n fpga_link_opts = dict(bw=fpga_bandwidth, delay=fpga_delay, loss=fpga_loss, use_htb=True)\n cloud_link_opts = dict(bw=1000, delay='0ms', loss=0, use_htb=True)\n\n # Add hosts and switches #\n\n # switch naming convention:\n # s[level][switch_number]\n\n switches = [[None for _ in range(spread ** (depth - 1))] for _ in range(depth - 1)]\n hosts = [None for _ in range(spread ** (depth - 1))]\n\n for i in range(depth):\n for j in range(spread ** i):\n if i == (depth - 1):\n hosts[j] = self.addHost('h' + str(j))\n else:\n sw_name = 's' + str(i) + str(j)\n switches[i][j] = self.addSwitch(sw_name)\n if fpga is not None and fpga == i:\n # Create host to serve as FPGA in switch\n # Will have one link to the relevant FPGA\n # The link will have the bandwidth and loss specified by the user, and half\n # the delay\n # These parameters are as if they were caused by the FPGA, rather than a\n # link\n # As a result, latency is halved since it will essentially be doubled by the\n # packet flowing in\n # and out of the host\n self.addHost('f{}'.format(j))\n self.addLink(sw_name, 'f{}'.format(j), **fpga_link_opts)\n\n # Add host to serve as cloud\n # Will have one high bandwidth, 0 latency link to root switch\n self.addHost('cloud')\n self.addLink(switches[0][0], 'cloud', **cloud_link_opts)\n\n # Add links #\n\n for i, row in enumerate(switches):\n for j, switch in enumerate(row):\n if switch is None:\n break\n if i == (depth - 2):\n for k in range(spread):\n # add a link between the current switch, and all hosts\n # directly beneath it.\n # (spread * j) + k will get all the appropriate hosts\n logger.debug(\"Adding standard link from switch[{}][{}] to \"\n \"host[{}]\".format(i, j, (spread * j) + k))\n self.addLink(switch, hosts[(spread * j) + k], **link_opts)\n\n else:\n for k in range(spread):\n # add a link between the current switch, and all\n # switches directly beneath it.\n # i + 1 refers to 1 level deeper in the tree, and\n # (spread * j) + k will get all the appropriate child\n # switches on that level.\n logger.debug(\"Adding standard link from switch[{}][{}] to \"\n \"switch[{}][{}]\".format(i, j, i + 1, (spread * j) + k))\n self.addLink(switch, switches[i + 1][(spread * j) + k], **link_opts)",
"def cbMqtt_dev_fan(client, userdata, message):\n if not mqtt_message_log(message):\n return\n command = iot.get_command_index(message.payload.decode('utf-8'))\n try:\n value = float(message.payload)\n except ValueError:\n value = None\n if message.topic == mqtt.topic_name('mqtt_topic_fan_command',\n mqtt.GROUP_DEFAULT):\n fan_pin = dev_fan.pin\n if command == iot.Command.ON and pi.is_pin_off(fan_pin):\n pi.pin_on(fan_pin)\n mqtt_publish_fan_status()\n elif command == iot.Command.OFF and pi.is_pin_on(fan_pin):\n pi.pin_off(fan_pin)\n mqtt_publish_fan_status()\n elif command == iot.Command.TOGGLE:\n if pi.is_pin_on(fan_pin):\n pi.pin_off(fan_pin)\n else:\n pi.pin_on(fan_pin)\n mqtt_publish_fan_status()\n elif command == iot.Command.STATUS:\n mqtt_publish_fan_state()\n elif command == iot.Command.RESET:\n fan_init()\n mqtt_publish_fan_state()\n elif message.topic == mqtt.topic_name('fan_command_percon'):\n if value is not None:\n dev_fan.percentage_on = value\n mqtt_publish_fan_percon()\n mqtt_publish_fan_tempon()\n logger.info('Updated fan percentage ON=%s%%', value)\n elif message.topic == mqtt.topic_name('fan_command_percoff'):\n if value is not None:\n dev_fan.percentage_off = value\n mqtt_publish_fan_percoff()\n mqtt_publish_fan_tempoff()\n logger.info('Updated fan percentage OFF=%s%%', value)\n elif message.topic == mqtt.topic_name('fan_command_tempon'):\n if value is not None:\n dev_fan.temperature_on = value\n mqtt_publish_fan_tempon()\n mqtt_publish_fan_percon()\n logger.info('Updated fan temperature ON=%sยฐC', value)\n elif message.topic == mqtt.topic_name('fan_command_tempoff'):\n if value is not None:\n dev_fan.temperature_off = value\n mqtt_publish_fan_tempoff()\n mqtt_publish_fan_percoff()\n logger.info('Updated fan temperature OFF=%sยฐC', value)\n # Unexpected command\n else:\n logger.debug(\n 'Unexpected topic \"%s\" with value: \"%s\"',\n message.topic,\n message.payload\n )",
"def connecthostbroadcast(localpop,\n hwport_tosite,\n sitevlan,\n meter=3,\n broadcast_rewritemac = None):\n\n hwswitch = Container.fromAnchor(localpop.properties['HwSwitch'])\n hwswitchname = hwswitch.resourceName\n swswitch = Container.fromAnchor(localpop.properties['SwSwitch'])\n swswitchname = swswitch.resourceName\n topology = Container.getContainer(localpop.properties['SwSwitch']['containerName'])\n\n # Find the port on the HwSwitch connected to the software switch\n links = getlinks2(topology, hwswitchname, swswitchname)\n if links == None or len(links) == 0:\n print \"No links from\", hwswitchname, \"to\", swswitchname\n return False\n hwport_tosw = None\n for link in links:\n (node, port) = linkednode2(link, swswitchname)\n if port != None:\n # Found the link we're looking for\n hwport_tosw = port\n break\n\n broadcast = \"FF:FF:FF:FF:FF:FF\"\n translated_broadcast = broadcast\n if broadcast_rewritemac != None:\n translated_broadcast = broadcast_rewritemac\n\n fh1 = SCC.SdnInstallForward1(javaByteArray2(hwswitch.properties['DPID']),\n 1,\n BigInteger.ZERO,\n str(hwport_tosw),\n int(sitevlan),\n \"00:00:00:00:00:00\",\n translated_broadcast,\n str(hwport_tosite),\n int(sitevlan),\n broadcast,\n 0,\n 0,\n meter)\n if fh1 == None:\n return None\n\n fh2 = SCC.SdnInstallForward1(javaByteArray2(hwswitch.properties['DPID']),\n 1,\n BigInteger.ZERO,\n str(hwport_tosite),\n int(sitevlan),\n \"00:00:00:00:00:00\",\n broadcast,\n str(hwport_tosw),\n int(sitevlan),\n translated_broadcast,\n 0,\n 0,\n meter)\n if fh2 == None:\n SCC.deleteforward(fh1)\n return None\n\n return (fh1, fh2)",
"def do_show_pinout(self, arg):\n try:\n if arg:\n showboard = int(arg)\n else:\n showboard = self.phil.read_reg('sys.status.board')['data']\n if showboard == 1:\n print(\"\"\"\nPHILIP-B -> BLUEPILL\n\n ____\n ___|__|___\n DUT_RST = B12 - | | - GND\n DUT_CTS = B13 - | | - GND\n DUT_RTS = B14 - | | - 3V3\nUSER_BTN = B15 - | | - NRST\n DUT_IC = A8 - | | - B11 = DUT_RX\n IF_TX = A9 - | | - B10 = DUT_TX\n IF_RX = A10 - | | - B1 = PM_V_ADC\n USB_DM = A11 - | | - B0 = PM_HI_ADC\n USB_DP = A12 - | | - A7 = PM_LO_ADC\n DUT_NSS = A15 - | | - A6 = DUT_ADC\n DUT_SCK = B3 - | | - A5 = TEST_FAIL\n DUT_MISO = B4 - | | - A4 = TEST_WARN\n DUT_MOSI = B5 - | | - A3 = TEST_PASS\n DUT_SCL = B6 - | | - A2 = DEBUG2\n DUT_SDA = B7 - | | - A1 = DEBUG1\n DUT_PWM = B8 - | | - A0 = DEBUG0\n DUT_DAC = B9 - | | - C15\n 5V - | | - C14\n GND - | | - C13 = LED0\n 3V3 - | | - VBAT\n __________\n ||||\n\"\"\")\n else:\n print(\"\"\"\nPHILIP-N -> NUCLEO-F103RB\nCN6\n\n DUT_SCL = PB8 = SCL/D15 -\n DUT_SDA = PB9 = SDA/D14 -\n AVDD -\n GND -\n- LED0 = PA5 = SCK/D13 -\n- IOREF MISO/D12 -\n- NRST PWM/MOSI/D11 -\n- 3V3 PWM/CS/D10 -\n- 5V PWM/D9 -\n- GND DUT_TX = PA9 = D8 -\n- GND |CN5|\n- VIN DUT_IC = PA8 = D7 -\n|CN6| PWM/D6 -\n- A0 = PA0 = TEST_WARN DUT_MISO = PB4 = PWM/D5 -\n- A1 = PA1 = TEST_FAIL DUT_MOSI = PB5 = D4 -\n- A2 = PA4 = TEST_PASS DUT_SCK = PB3 = PWM/D3 -\n- A3 = PB0 = DUT_ADC DUT_RX = PA10 = D2 -\n- A4 = PC1 = PM_HI_ADC IF_TX = PA2 = TX/D1 -\n- A5 = PC0 = PM_V_ADC IF_RX = PA3 = RX/D0 -\n|CN8| |CN9|\n\n -1 - DUT_DAC -1 - DUT_PWM\n -2 - DUT_SCL -2 -\n -3 - DUT_SDA -3 -\n -4 - -4 -\n -5 - -5 -\n -6 - LED0 -6 - DUT_RTS\n -7 - -7 - DUT_CTS\n -8 - -8 -\n DUT_NSS -9 - -9 -\n -10- -10-\n -11- DUT_TX -11- DUT_RST\n USER_BTN -12- DUT_IC -12-\n -13- -13- DEBUG2\n -14- TEST_WARN DUT_MISO -14- DEBUG1\n -15- TEST_FAIL DUT_MOSI -15- DEBUG0\n -16- TEST_PASS DUT_SCK -16-\n -17- DUT_ADC DUT_RX -17-\nPM_LO_ADC -18- PM_HI_ADC IF_TX -18-\n -19- PM_V_ADC IF_RX -19-\n |CN7| |CN10|\n\"\"\")\n except (ValueError) as exc:\n print(exc)",
"def send(self, msg):\r\n\r\n # don't need to handle barrier messages\r\n if not hasattr(msg, 'command'):\r\n return\r\n\r\n subcmd = OvsSender.subcmds[msg.command]\r\n \r\n\r\n # TODO: this is different for remote switches (ie, on physical network)\r\n dest = msg.switch.name\r\n\r\n params = []\r\n if msg.match.nw_src is not None:\r\n params.append(\"nw_src={0}\".format(msg.match.nw_src))\r\n if msg.match.nw_dst is not None:\r\n params.append(\"nw_dst={0}\".format(msg.match.nw_dst))\r\n if msg.match.dl_src is not None:\r\n params.append(\"dl_src={0}\".format(msg.match.dl_src))\r\n if msg.match.dl_dst is not None:\r\n params.append(\"dl_dst={0}\".format(msg.match.dl_dst))\r\n if msg.match.dl_type is not None:\r\n params.append(\"dl_type={0}\".format(msg.match.dl_type))\r\n\r\n params.append(\"priority={0}\".format(msg.priority))\r\n actions = [\"flood\" if a == OFPP_FLOOD else str(a) for a in msg.actions]\r\n\r\n if msg.command == OFPFC_ADD:\r\n params.append(\"action=output:\" + \",\".join(actions))\r\n\r\n paramstr = \",\".join(params)\r\n cmd = \"{0} {1} {2} {3}\".format(OvsSender.command,\r\n subcmd,\r\n dest,\r\n paramstr)\r\n ret = os.system(cmd)\r\n return ret",
"def on_inner_dp_join(self, dp):\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n\n #Deletion of already existing OF flows\n LOG.debug('TOPO MNGR: Deleting flow table configuration of newly added forwarder ID: ' + str(dp.id) )\n dp.send_msg(parser.OFPFlowMod(datapath=dp, command=ofp.OFPFC_DELETE))\n dp.send_msg(parser.OFPFlowMod(datapath=dp, command=ofp.OFPFC_DELETE, table_id=OF_GPRS_TABLE))\n #TODO: Shall we wipe-out OFconfig data as well?\n\n\n ##########################\n ## Main table (0)\n ## TODO:change to echo only!\n\n ## Networks self-discovery using icmp messages\n ## Redirect all pings with ipv4_dst=DISCOVERY_IP_DST to controller\n LOG.debug('TOPO MNGR: Installing ICMP topology discovery flows on forwarder: ' + str(dp.id))\n match = parser.OFPMatch(eth_type=0x0800, ip_proto=1, icmpv4_type=8, icmpv4_code=0, ipv4_dst=DISCOVERY_IP_DST)\n actions = [ parser.OFPActionOutput(ofp.OFPP_CONTROLLER) ]\n ##TEST SHAPING ON FWD 11 on port 1 - trying to shape also measurement packets\n self.add_flow(dp, 100, match, actions, 0)\n\n ##Controller uses ARP to resolve mac_addresses of APNs\n ##All arp replies with target IP of DISCOVERY_ARP_IP are redirected to controller\n\n ## FIXED: All ARPs replies are redirected to the controller regardless of the target IP\n ##\n ## TODO: In general we should reply only to ARPs from the APNs subnet, and per APN basis (from the configuration)\n\n LOG.debug('TOPO MNGR: Installing ARP APN discovery flows on forwarder: ' + str(dp.id))\n match = parser.OFPMatch(eth_type=0x0806, arp_op=2, )\n actions = [ parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.add_flow(dp, 100, match, actions)\n\n\n # We match only ethertype, that means all IP packed data should be match\n # It is some kind of flow of last resort, therefore it has small priority\n if dp.id in LAN_TYPE_FORWARDERS:\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + 'is a LAN edge forwarder, installing additional rules' )\n match = parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.add_flow(dp, 2, match, actions)\n\n rate = 500\n # all fwds have set of meters\n for i in range(0,11):\n meter_id = i\n if i == 0:\n rate = 100000000\n elif i % 2 == 0:\n rate = 1000\n else:\n rate = 2000\n # KBPS is actually kbps\n meter = {'meter_id': meter_id, 'flags': 'KBPS',\n 'bands': [{'type': 'DROP', 'rate': rate}]}\n self.mod_meter_entry(dp, meter, meter_id, dp.ofproto.OFPMC_ADD)\n LOG.debug(\"FLOW MNGR: Added METER ID: \" + str(i) + \" with rate: \" + str(rate) + \" to FWD ID: \" + str(dp.id) )\n\n\n ##Following rules are applied only on forwarders bellonging to BSS_EDGE_FORWARDER group\n ##Rules are applied based on priority of match (highest priority first)\n if dp.id in BSS_EDGE_FORWARDER:\n\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' is an access edge forwarder, installing aditional rules')\n ## UDP 23000 is GPRS-NS and all packets that match this are forwarded to OF_GPRS_TABLE flow table\n inst = [ parser.OFPInstructionGotoTable(OF_GPRS_TABLE) ]\n match = parser.OFPMatch(eth_type=0x0800,ip_proto=inet.IPPROTO_UDP, udp_dst=VGSN_PORT)\n req = parser.OFPFlowMod(datapath=dp, priority=200, match=match, instructions=inst)\n dp.send_msg(req)\n\n ## VGSN_PHY and BSS_PHY ports are bridged -- DHCP, ARP, Abis & stuff\n ## XXX: what if vGSN is not on same forwarder as BSS\n actions = [ parser.OFPActionOutput(VGSN_PHY_PORT) ]\n inst = [ parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions) ]\n match = parser.OFPMatch(in_port=BSS_PHY_PORT)\n req = 
parser.OFPFlowMod(datapath=dp, priority=10, match=match, instructions=inst)\n dp.send_msg(req)\n actions = [ parser.OFPActionOutput(BSS_PHY_PORT) ]\n inst = [ parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions) ]\n match = parser.OFPMatch(in_port=VGSN_PHY_PORT)\n req = parser.OFPFlowMod(datapath=dp, priority=10, match=match, instructions=inst)\n dp.send_msg(req)\n\n\n #################\n ## OF_GPRS-TABLE (2)\n ##TODO: BSS <-> vGSS separate tunnel for communication!\n ##TODO: deletion, modification od PDP CNT\n\n ## if packet is not first segment of user data packet (IS part of sndcp fragmented packet) it's DROPED\n match = parser.OFPMatch( sndcp_first_segment=0 )\n actions = [ ]\n inst = [ parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions) ]\n req = parser.OFPFlowMod(datapath=dp, table_id=OF_GPRS_TABLE, priority=200, match=match, instructions=inst)\n dp.send_msg(req)\n\n ## if packet is first segment of SNDCP packet with more than one segment, it's forwarded to controller\n ## when controller recieves such packet it sends ICMP fragmentation_needed to its sender and drops original\n match = parser.OFPMatch( sndcp_first_segment=1, sndcp_more_segments=1 )\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER) ]\n inst = [ parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions) ]\n req = parser.OFPFlowMod(datapath=dp, table_id=OF_GPRS_TABLE, priority=200, match=match, instructions=inst)\n dp.send_msg(req)\n\n ##if it's SNDCP packet taht still wasnt matched (rules with higher priority are inserted on PDP CNT activation)\n ##we assume it's packet of unknown PDP CNT and we DROP it\n match = parser.OFPMatch( sndcp_first_segment=1 )\n actions = [ ]\n inst = [ parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions) ]\n req = parser.OFPFlowMod(datapath=dp, table_id=OF_GPRS_TABLE, priority=1, match=match, instructions=inst)\n dp.send_msg(req)\n\n ##Everything else is Signalzation and is forwarded either to BSS or vGSN\n # XXX: co ak bss a vgsn nie su spolu na jednom DPID?\n actions = [ parser.OFPActionOutput(VGSN_PHY_PORT) ]\n inst = [ parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions) ]\n match = parser.OFPMatch(in_port=BSS_PHY_PORT)\n req = parser.OFPFlowMod(datapath=dp, table_id=OF_GPRS_TABLE, priority=0, match=match, instructions=inst)\n dp.send_msg(req)\n actions = [ parser.OFPActionOutput(BSS_PHY_PORT) ]\n inst = [ parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions) ]\n match = parser.OFPMatch(in_port=VGSN_PHY_PORT)\n req = parser.OFPFlowMod(datapath=dp, table_id=OF_GPRS_TABLE, priority=0, match=match, instructions=inst)\n dp.send_msg(req)",
"def set_sfp_info(self,port=0):\n if(uart.SerialTxEsc(self.device)):\n return 1\n if(port==0):\n sfp_add_sentence = \"sfp add \"+self.sfp0_pn+\" \"+str(self.sfp0_tx)+\" \"+str(self.sfp0_rx)+\" \"+str(self.sfp0_alpha)\n uart.SerialTx(self.device,sfp_add_sentence)\n elif(port==1):\n sfp_add_sentence = \"sfp add \"+self.sfp1_pn+\" \"+str(self.sfp1_tx)+\" \"+str(self.sfp1_rx)+\" \"+str(self.sfp1_alpha)+\" 1\"\n uart.SerialTx(self.device,sfp_add_sentence)\n else:\n print(\"Port is invalid.\")\n return 1\n print(uart.SerialTx(self.device,\"sfp show\"))\n return 0",
"def stream_conv(usage):\n if usage.startswith('UP') and len(usage) == 3:\n return \"AP\"+usage[-1]\n elif usage == \"UPMEAN\":\n return \"APM\"\n else:\n return usage",
"def __init__(self, upstream=None,\n downstream=None,\n name='',\n verbose=0, mdot = 0.0):\n global _mfccount\n if name == '':\n name = 'MFC_'+`_mfccount`\n _mfccount += 1\n FlowDevice.__init__(self,1,name,verbose)\n if upstream and downstream:\n self.install(upstream, downstream)\n if mdot:\n self.set(mdot = mdot)",
"def make(*args):\n return _uhd_swig.usrp_sink_make(*args)",
"def get_hosts_fanout(self, target, listener_type):",
"def mqtt_publish_fan_state():\n mqtt_publish_fan_status()\n mqtt_publish_fan_percon()\n mqtt_publish_fan_percoff()\n mqtt_publish_fan_tempon()\n mqtt_publish_fan_tempoff()",
"def hop_channel(self, channel):\n self.logger.info(\"Hopping to channel %s\", channel)\n os.system(f\"iwconfig {self.interface} channel {channel}\")",
"def create_command(self, on_or_off: bool, port: int):\n return self.power_strip.create_command(port, on_or_off)",
"def addFanOut(self,gate):\n assert type(gate)==Gate\n self.fanOut.append(gate)",
"def sweep_relay():",
"def launch ():\n #core.addListenerByName(\"UpEvent\", _go_up)\n core.registerNew(MAC_Filter)",
"def Online(port):\n\tport.write(\"F\")",
"def output(self):\n return {\n \"device\": self.device.id, \n \"action\": \"SetTarget\",\n \"arguments\": [\n {\n \"name\": \"newTargetValue\", \n \"value\": self.value\n }\n ], \n \"service\": \"urn:upnp-org:serviceId:SwitchPower1\"\n }"
]
| [
"0.5853953",
"0.56127334",
"0.5453903",
"0.53140277",
"0.5251082",
"0.52267796",
"0.5201543",
"0.51840484",
"0.5074265",
"0.5068683",
"0.50396186",
"0.5023897",
"0.5015812",
"0.49987084",
"0.49089834",
"0.49046764",
"0.48974884",
"0.48914778",
"0.48834187",
"0.48745808",
"0.48707557",
"0.48532808",
"0.48509157",
"0.48410892",
"0.48076344",
"0.48058558",
"0.48032993",
"0.48013952",
"0.47996554",
"0.47986165"
]
| 0.6274215 | 0 |
Create exit fanout flow on software switch of a destination POP. This handles broadcast traffic before it exits the network | def connectexitfanout(localpop,
corevlan,
forwards,
meter,
mac):
hwswitch = Container.fromAnchor(localpop.properties['HwSwitch'])
hwswitchname = hwswitch.resourceName
swswitch = Container.fromAnchor(localpop.properties['SwSwitch'])
swswitchname = swswitch.resourceName
topology = Container.getContainer(localpop.properties['SwSwitch']['containerName'])
# print "connectexitfanout localpop", localpop, "corevlan", corevlan, "mac", mac
# Find the port on the software switch connected to the hardware switch
links = getlinks2(topology, swswitchname, hwswitchname)
if links == None or len(links) == 0:
print "No links from", swswitchname, "to", hwswitchname
return None
hwswitchlink = None
swport_tohw = None
for link in links:
(node, port) = linkednode2(link, hwswitchname)
if port != None:
# Found it!
hwswitchlink = link
swport_tohw = port
break
if swport_tohw == None:
print "No output port on", swswitchname, "facing", hwswitchname
return None
for f in forwards:
f.outPort = str(swport_tohw)
# print "FORW: outport", f.outPort, "vlan", f.vlan, "dstMac", f.dstMac
# Convert the list of forwarding destinations to a Java array.
fwdsarr = jarray.array(forwards, SdnControllerClientL2Forward)
# print "dpid", swswitch.props['dpid']
fh = SCC.SdnInstallForward(javaByteArray2(swswitch.properties['DPID']),
1,
BigInteger.ZERO,
str(swport_tohw),
int(corevlan),
None,
mac,
fwdsarr,
0,
0,
meter)
return fh | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def on_inner_dp_join(self, dp):\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n\n #Deletion of already existing OF flows\n LOG.debug('TOPO MNGR: Deleting flow table configuration of newly added forwarder ID: ' + str(dp.id) )\n dp.send_msg(parser.OFPFlowMod(datapath=dp, command=ofp.OFPFC_DELETE))\n dp.send_msg(parser.OFPFlowMod(datapath=dp, command=ofp.OFPFC_DELETE, table_id=OF_GPRS_TABLE))\n #TODO: Shall we wipe-out OFconfig data as well?\n\n\n ##########################\n ## Main table (0)\n ## TODO:change to echo only!\n\n ## Networks self-discovery using icmp messages\n ## Redirect all pings with ipv4_dst=DISCOVERY_IP_DST to controller\n LOG.debug('TOPO MNGR: Installing ICMP topology discovery flows on forwarder: ' + str(dp.id))\n match = parser.OFPMatch(eth_type=0x0800, ip_proto=1, icmpv4_type=8, icmpv4_code=0, ipv4_dst=DISCOVERY_IP_DST)\n actions = [ parser.OFPActionOutput(ofp.OFPP_CONTROLLER) ]\n ##TEST SHAPING ON FWD 11 on port 1 - trying to shape also measurement packets\n self.add_flow(dp, 100, match, actions, 0)\n\n ##Controller uses ARP to resolve mac_addresses of APNs\n ##All arp replies with target IP of DISCOVERY_ARP_IP are redirected to controller\n\n ## FIXED: All ARPs replies are redirected to the controller regardless of the target IP\n ##\n ## TODO: In general we should reply only to ARPs from the APNs subnet, and per APN basis (from the configuration)\n\n LOG.debug('TOPO MNGR: Installing ARP APN discovery flows on forwarder: ' + str(dp.id))\n match = parser.OFPMatch(eth_type=0x0806, arp_op=2, )\n actions = [ parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.add_flow(dp, 100, match, actions)\n\n\n # We match only ethertype, that means all IP packed data should be match\n # It is some kind of flow of last resort, therefore it has small priority\n if dp.id in LAN_TYPE_FORWARDERS:\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + 'is a LAN edge forwarder, installing additional rules' )\n match = parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.add_flow(dp, 2, match, actions)\n\n rate = 500\n # all fwds have set of meters\n for i in range(0,11):\n meter_id = i\n if i == 0:\n rate = 100000000\n elif i % 2 == 0:\n rate = 1000\n else:\n rate = 2000\n # KBPS is actually kbps\n meter = {'meter_id': meter_id, 'flags': 'KBPS',\n 'bands': [{'type': 'DROP', 'rate': rate}]}\n self.mod_meter_entry(dp, meter, meter_id, dp.ofproto.OFPMC_ADD)\n LOG.debug(\"FLOW MNGR: Added METER ID: \" + str(i) + \" with rate: \" + str(rate) + \" to FWD ID: \" + str(dp.id) )\n\n\n ##Following rules are applied only on forwarders bellonging to BSS_EDGE_FORWARDER group\n ##Rules are applied based on priority of match (highest priority first)\n if dp.id in BSS_EDGE_FORWARDER:\n\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' is an access edge forwarder, installing aditional rules')\n ## UDP 23000 is GPRS-NS and all packets that match this are forwarded to OF_GPRS_TABLE flow table\n inst = [ parser.OFPInstructionGotoTable(OF_GPRS_TABLE) ]\n match = parser.OFPMatch(eth_type=0x0800,ip_proto=inet.IPPROTO_UDP, udp_dst=VGSN_PORT)\n req = parser.OFPFlowMod(datapath=dp, priority=200, match=match, instructions=inst)\n dp.send_msg(req)\n\n ## VGSN_PHY and BSS_PHY ports are bridged -- DHCP, ARP, Abis & stuff\n ## XXX: what if vGSN is not on same forwarder as BSS\n actions = [ parser.OFPActionOutput(VGSN_PHY_PORT) ]\n inst = [ parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions) ]\n match = parser.OFPMatch(in_port=BSS_PHY_PORT)\n req = 
parser.OFPFlowMod(datapath=dp, priority=10, match=match, instructions=inst)\n dp.send_msg(req)\n actions = [ parser.OFPActionOutput(BSS_PHY_PORT) ]\n inst = [ parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions) ]\n match = parser.OFPMatch(in_port=VGSN_PHY_PORT)\n req = parser.OFPFlowMod(datapath=dp, priority=10, match=match, instructions=inst)\n dp.send_msg(req)\n\n\n #################\n ## OF_GPRS-TABLE (2)\n ##TODO: BSS <-> vGSS separate tunnel for communication!\n ##TODO: deletion, modification od PDP CNT\n\n ## if packet is not first segment of user data packet (IS part of sndcp fragmented packet) it's DROPED\n match = parser.OFPMatch( sndcp_first_segment=0 )\n actions = [ ]\n inst = [ parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions) ]\n req = parser.OFPFlowMod(datapath=dp, table_id=OF_GPRS_TABLE, priority=200, match=match, instructions=inst)\n dp.send_msg(req)\n\n ## if packet is first segment of SNDCP packet with more than one segment, it's forwarded to controller\n ## when controller recieves such packet it sends ICMP fragmentation_needed to its sender and drops original\n match = parser.OFPMatch( sndcp_first_segment=1, sndcp_more_segments=1 )\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER) ]\n inst = [ parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions) ]\n req = parser.OFPFlowMod(datapath=dp, table_id=OF_GPRS_TABLE, priority=200, match=match, instructions=inst)\n dp.send_msg(req)\n\n ##if it's SNDCP packet taht still wasnt matched (rules with higher priority are inserted on PDP CNT activation)\n ##we assume it's packet of unknown PDP CNT and we DROP it\n match = parser.OFPMatch( sndcp_first_segment=1 )\n actions = [ ]\n inst = [ parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions) ]\n req = parser.OFPFlowMod(datapath=dp, table_id=OF_GPRS_TABLE, priority=1, match=match, instructions=inst)\n dp.send_msg(req)\n\n ##Everything else is Signalzation and is forwarded either to BSS or vGSN\n # XXX: co ak bss a vgsn nie su spolu na jednom DPID?\n actions = [ parser.OFPActionOutput(VGSN_PHY_PORT) ]\n inst = [ parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions) ]\n match = parser.OFPMatch(in_port=BSS_PHY_PORT)\n req = parser.OFPFlowMod(datapath=dp, table_id=OF_GPRS_TABLE, priority=0, match=match, instructions=inst)\n dp.send_msg(req)\n actions = [ parser.OFPActionOutput(BSS_PHY_PORT) ]\n inst = [ parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions) ]\n match = parser.OFPMatch(in_port=VGSN_PHY_PORT)\n req = parser.OFPFlowMod(datapath=dp, table_id=OF_GPRS_TABLE, priority=0, match=match, instructions=inst)\n dp.send_msg(req)",
"def connectentryfanoutmac(localpop,\n hostmac,\n hostvlan,\n forwards,\n meter,\n mac):\n hwswitch = Container.fromAnchor(localpop.properties['HwSwitch'])\n hwswitchname = hwswitch.resourceName\n swswitch = Container.fromAnchor(localpop.properties['SwSwitch'])\n swswitchname = swswitch.resourceName\n topology = Container.getContainer(localpop.properties['SwSwitch']['containerName'])\n\n # print \"connectentryfanout localpop\", localpop, \"host\", host, \"hostvlan\", hostvlan, \"mac\", mac\n\n # Find the port on the software switch connected to the hardware switch\n links = getlinks2(topology, swswitchname, hwswitchname)\n if links == None or len(links) == 0:\n print \"No links from\", swswitchname, \"to\", hwswitchname\n return None\n hwswitchlink = None\n swport_tohw = None\n for link in links:\n (node, port) = linkednode2(link, hwswitchname)\n if port != None:\n # Found it!\n hwswitchlink = link\n swport_tohw = port\n break\n if swport_tohw == None:\n print \"No output port on\", swswitchname, \"facing\", hwswitchname\n return None\n\n # The fanout flow is \"interesting\" in that the input plus the multiple outputs\n # all are on the same port (but different VLANs). Fill in the outputs.\n for f in forwards:\n f.outPort = str(swport_tohw)\n # print \"FORW: outport\", f.outPort, \"vlan\", f.vlan, \"dstMac\", f.dstMac\n\n # Convert the list of forwarding destinations to a Java array.\n fwdsarr = jarray.array(forwards, SdnControllerClientL2Forward)\n\n # print \"dpid\", swswitch.props['dpid']\n # This flow being installed is unusual in that it does a source MAC address\n # filter as well\n fh = SCC.SdnInstallForward(javaByteArray2(swswitch.properties['DPID']),\n 1,\n BigInteger.ZERO,\n str(swport_tohw),\n int(hostvlan),\n hostmac,\n mac,\n fwdsarr,\n 0,\n 0,\n meter)\n\n return fh",
"def connecthostbroadcast(localpop,\n hwport_tosite,\n sitevlan,\n meter=3,\n broadcast_rewritemac = None):\n\n hwswitch = Container.fromAnchor(localpop.properties['HwSwitch'])\n hwswitchname = hwswitch.resourceName\n swswitch = Container.fromAnchor(localpop.properties['SwSwitch'])\n swswitchname = swswitch.resourceName\n topology = Container.getContainer(localpop.properties['SwSwitch']['containerName'])\n\n # Find the port on the HwSwitch connected to the software switch\n links = getlinks2(topology, hwswitchname, swswitchname)\n if links == None or len(links) == 0:\n print \"No links from\", hwswitchname, \"to\", swswitchname\n return False\n hwport_tosw = None\n for link in links:\n (node, port) = linkednode2(link, swswitchname)\n if port != None:\n # Found the link we're looking for\n hwport_tosw = port\n break\n\n broadcast = \"FF:FF:FF:FF:FF:FF\"\n translated_broadcast = broadcast\n if broadcast_rewritemac != None:\n translated_broadcast = broadcast_rewritemac\n\n fh1 = SCC.SdnInstallForward1(javaByteArray2(hwswitch.properties['DPID']),\n 1,\n BigInteger.ZERO,\n str(hwport_tosw),\n int(sitevlan),\n \"00:00:00:00:00:00\",\n translated_broadcast,\n str(hwport_tosite),\n int(sitevlan),\n broadcast,\n 0,\n 0,\n meter)\n if fh1 == None:\n return None\n\n fh2 = SCC.SdnInstallForward1(javaByteArray2(hwswitch.properties['DPID']),\n 1,\n BigInteger.ZERO,\n str(hwport_tosite),\n int(sitevlan),\n \"00:00:00:00:00:00\",\n broadcast,\n str(hwport_tosw),\n int(sitevlan),\n translated_broadcast,\n 0,\n 0,\n meter)\n if fh2 == None:\n SCC.deleteforward(fh1)\n return None\n\n return (fh1, fh2)",
"def _flow_out(self):\n print(\"MESSENGER: flow_out online!\")\n while self.running:\n if self.sendbuffer:\n msg = self.sendbuffer.pop(0)\n for slc in (msg[i:i+1024] for i in range(0, len(msg), 1024)):\n self.sock.send(slc)\n time.sleep(self.sendtick)\n print(\"MESSENGER: flow_out exiting...\")",
"def sweep_relay():",
"def forwarder_state_changed(self, ev):\n\n\n dp = ev.dp\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n\n\n if ev.enter is True:\n # in plain MAC setup, this should install only ICMP and ARP re-route rules, watchout for hardcoded DP id\n self.on_inner_dp_join(dp)\n\t ##For evry new forwarder we send out discovery ICMP packets out of every port except OFPP_CONTROLLER\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' saying hello to Unifycore Controller, Unifycore warmly welcomes you!')\n for port in dp.ports:\n if port != (ofp.OFPP_CONTROLLER):\n LOG.debug('TOPO MNGR: Controller is sending topology discovery ICMPs to forwarder: ' + str(dp.id) + ', port: ' + str(port))\n _icmp_send(dp,port,DISCOVERY_IP_SRC, DISCOVERY_IP_DST)\n\n ##For evry new forwarder we send out discovery ARP packets out of every port except OFPP_CONTROLLER to find APN\n for apn in APN_POOL:\n if apn.ip_addr != None:\n LOG.debug('TOPO MNGR: Forwarder: '+str(dp.id)+', port: '+ str(port) + ' is looking for APN: ' + str(apn.name) +' at IP: '+str(apn.ip_addr)+' with ARP search source IP: ' + str(apn.arp_origin_ip))\n _arp_send(dp=dp, port_out=port, arp_code=1, ip_target=apn.ip_addr, ip_sender=apn.arp_origin_ip)\n\n\n\n\n\n if ev.enter is False:\n\t ##TODO: We need to scan if any tunnels were affected, and if so, if any PDP COntexts were affected\n ##JUST REMOVING NODE FROM TOPOLOGY ISNT ENOUGH!\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' is leaving topology. It was a pleasure for us!')\n topo.del_forwarder(dp.id)",
"def performOverflow(self, call):\n overFlowDest = self.getOverflowDest()\n if not overFlowDest:\n self.huntGroup.member_to_distribute = 0\n PrintLog(\"+++++++Debug: Under construction+++++\")\n return\n PrintLog(\"Waiting overflow timeout %s sec\" % self.overflowTimeout)\n time.sleep(self.overflowTimeout)\n if overFlowDest.tserver <> self.tserver:\n overFlowDest = self.trunk(self, overFlowDest)\n if InTrue(GetOption(\"CofFeature\")):\n call.ViaExtRouter = 1\n call.external = 1\n pt = self.partyToDistribute()\n thirdPartyDNRole = PartyRole.Destination\n if pt.Role == PartyRole.ConferenceMember and len(pt.Call.PartyList) >= 3:\n thirdPartyDNRole = PartyRole.ConferenceMember\n thirdPartyDN = \"Trunk\"\n addPrm = {\"ThirdPartyDN\": thirdPartyDN, \"ThirdPartyDNRole\": thirdPartyDNRole}\n if not self.routeRequestOnQueued:\n ev = self.mayBeEvent(EventName.Diverted, pt, timeout=3, addPrm=addPrm)\n else:\n addPrmRU = {\"ReferenceID\": 0, \"Reasons\": None, \"ThirdPartyDN\": thirdPartyDN,\n \"ThirdPartyDNRole\": thirdPartyDNRole}\n ev = self.mayBeEvent(EventName.RouteUsed, pt, timeout=3, addPrm=addPrmRU)\n ev = self.mayBeEvent(EventName.Diverted, pt, timeout=3, addPrm=addPrm)\n if not ev:\n pt.postponedAbandonedOrDiverted = 1\n self.postponedAbandonedOrDiverted = self.postponedAbandonedOrDiverted + 1\n pt.removeFromCall()\n ringPt = overFlowDest.ring(call)\n return ringPt",
"def stop(self):\n self.running = False\n self.hop_channel(\"auto\")",
"def deploy_flow_entry(self, subnet, outport, dstport):\n if outport is None:\n logger.warning('fail to deploy flow entry, cant find output port for %s', str(subnet))\n return\n\n # match by destination IP address\n match = ofctl_v1_0.to_match(self.dp, {'nw_dst': str(subnet), 'dl_type': '2048', 'nw_proto': '1'})\n \n # rewrite source MAC address with gateway's MAC address\n # rewrite destination MAC address with host's MAC address\n # set output port\n actions = []\n actions.append(self.dp.ofproto_parser.OFPActionSetDlSrc(outport.hw_addr.packed))\n actions.append(self.dp.ofproto_parser.OFPActionSetDlDst(dstport.hw_addr.packed))\n actions.append(self.dp.ofproto_parser.OFPActionOutput(outport.port_no))\n\n mod = self.dp.ofproto_parser.OFPFlowMod(\n datapath = self.dp, match = match,\n priority = 1, cookie = 0, actions = actions,\n idle_timeout = FLOW_IDLE_TIMEOUT,\n hard_timeout = FLOW_HARD_TIMEOUT,\n command = self.dp.ofproto.OFPFC_MODIFY)\n\n # send FlowMod\n self.dp.send_msg(mod)",
"def forward(p):\n try:\n if IP in p and p[IP].dst == RD_ADRRESS and p[Ether].src != GW_MAC_ADRRESS and p[Ether].dst == GW_MAC_ADRRESS:\n if p[IP].src not in black_list:\n send(p[1::], iface=IFACE, verbose=0)\n except:\n print(\"error in forward\")\n finally:\n sys.exit()",
"def add_flow_gateway_for_ip(self, datapath, out_port, dst_ip, new_src_mac, new_dst_mac):\n parser = datapath.ofproto_parser\n # eth_type ip : 0x0800\n match = parser.OFPMatch(eth_type=0x0800,\n ipv4_dst=dst_ip,\n )\n actions = [parser.OFPActionSetField(eth_src=new_src_mac),\n parser.OFPActionSetField(eth_dst=new_dst_mac),\n parser.OFPActionOutput(out_port),\n parser.OFPActionDecNwTtl()]\n\n self.add_flow(datapath, 1, match, actions)",
"def exit(self):\n DEBUG = GLOBAL_DEBUG and True\n if DEBUG: print \"exit()\"\n\n # exit() functionality is implemented with a special dst.\n exit_msg = Msg(\n dst = DST_EXIT,\n x = randint(0, UINT32_MAX),\n y = randint(0, UINT32_MAX),\n op = randint(0, UINT8_MAX),\n result = randint(0, UINT64_MAX))\n\n # First, bury a REQUEST.\n self.read(length=SZ_MSG)\n\n # Then, write the exit packet to TAP.\n self.write(str(exit_msg))\n\n # Exit the poller.\n return -1",
"def _handle_ConnectionUp(event):\n\n # construct of_flowmod message\n msg = of.ofp_flow_mod() # create of_flowmod message\n action = of.ofp_action_output(port=of.OFPP_FLOOD) # create an output to port action\n msg.actions.append(action) # append action to the of_flowmod\n\n # send it\n event.connection.send(msg) # send msg to the switch\n\n dest_pid = dpidToStr(event.dpid) # extract the destination(switch) process id\n log.debug(\"controller send %s to node %s.\" % (msg, dest_pid))\n log.info(\"%s act like a hub.\", dest_pid)",
"def shortest_forwarding(self, msg, eth_type, ip_src, ip_dst):\r\n\r\n pkt = packet.Packet(msg.data)\r\n icmp_pkt = pkt.get_protocol(icmp.icmp)\r\n if icmp_pkt:\r\n ip_protocol = 1\r\n print 'icmp processing!'\r\n self.icmp_forwarding(msg, ip_protocol, eth_type, ip_src, ip_dst)\r\n return\r\n datapath = msg.datapath\r\n in_port = msg.match['in_port']\r\n tcp_pkt = None\r\n udp_pkt = None\r\n dst_port = self.awareness.get_host_location(ip_dst)[1]\r\n tcp_pkt = pkt.get_protocol(tcp.tcp)\r\n udp_pkt = pkt.get_protocol(udp.udp)\r\n L4_port = None\r\n flow_info = None\r\n flow_info_reverse = None\r\n\r\n # if not icmp packet,Get ip_proto and L4 port number.\r\n result = self.get_sw(datapath.id, in_port, ip_src, ip_dst) # result = (src_sw, dst_sw)\r\n if (result):\r\n src_sw, dst_sw = result[0], result[1]\r\n if setting.enable_Flow_Entry_L4Port:\r\n ip_proto, L4_port, Flag = self.get_L4_info(tcp_pkt, udp_pkt)\r\n if result:\r\n if dst_sw:\r\n src_sw, dst_sw = result[0], result[1]\r\n if ip_proto and L4_port and Flag:\r\n if ip_proto == 6:\r\n L4_Proto = 'TCP'\r\n elif ip_proto == 17:\r\n L4_Proto = 'UDP'\r\n else:\r\n pass\r\n L4_port.reverse()\r\n flow_info = (eth_type, ip_src, ip_dst, in_port, ip_proto, Flag, L4_port)\r\n flow_info_reverse = (eth_type, ip_dst, ip_src, dst_port, ip_proto, Flag, L4_port)\r\n else:\r\n flow_info = (eth_type, ip_src, ip_dst, in_port)\r\n flow_info_reverse = (eth_type, ip_dst, ip_src, dst_port)\r\n else:\r\n flow_info = (eth_type, ip_src, ip_dst, in_port)\r\n flow_info_reverse = (eth_type, ip_dst, ip_src, dst_port)\r\n info = (ip_src, ip_dst, ip_proto, L4_port[0], L4_port[1])\r\n info2 = (ip_dst, ip_src, ip_proto, L4_port[1], L4_port[0])\r\n if (info in self.register) and (info2 in self.register):\r\n return\r\n self.register.append(info)\r\n self.register.append(info2)\r\n # dst_host and src_host link one same switch\r\n if self.newComingFlows['src'].has_key(ip_src):\r\n self.newComingFlows['src'][ip_src] += 1\r\n else:\r\n self.newComingFlows['src'][ip_src] = 1\r\n if self.newComingFlows['dst'].has_key(ip_dst):\r\n self.newComingFlows['dst'][ip_dst] += 1\r\n else:\r\n self.newComingFlows['dst'][ip_dst] = 1\r\n flowDemand = self._bandwidth_demand(ip_src, ip_dst)\r\n if src_sw == dst_sw:\r\n self.send_packet_out(datapath, msg.buffer_id, in_port, dst_port, msg.data)\r\n else:\r\n if not (str(src_sw).startswith('3') and str(dst_sw).startswith('3')):\r\n return\r\n paths = self.awareness.shortest_paths.get(src_sw).get(dst_sw)\r\n self.graph = self.monitor.graph\r\n path = self._select_paths1(flowDemand, paths)\r\n\r\n # path = self.get_path(src_sw, dst_sw, weight=self.weight)\r\n # Path has already been calculated, just get it.\r\n if path == None:\r\n return\r\n path.reverse()\r\n try:\r\n # bucket=self.swToSegments(path)\r\n # self.Segment_forwarding(flow_info,bucket)\r\n self.install_flow(self.datapaths, self.awareness.link_to_port, path, flow_info_reverse, msg.buffer_id,\r\n ip_dst, ip_src, msg.data)\r\n path.reverse()\r\n if len(flow_info_reverse) == 7:\r\n L4_port.reverse()\r\n self.install_flow(self.datapaths, self.awareness.link_to_port, path, flow_info, msg.buffer_id, ip_src,\r\n ip_dst, msg.data)\r\n # self.compute_runing_time()\r\n\r\n except:\r\n self.flood(msg)",
"def send_flow_mod(self, datapath, flow_info, src_port, dst_port):\r\n parser = datapath.ofproto_parser\r\n actions = []\r\n actions.append(parser.OFPActionOutput(dst_port))\r\n if len(flow_info) == 7:\r\n if flow_info[-3] == 6:\r\n if flow_info[-2] == True:\r\n match = parser.OFPMatch(\r\n in_port=src_port, eth_type=flow_info[0],\r\n ipv4_src=flow_info[1], ipv4_dst=flow_info[2],\r\n ip_proto=6, tcp_src=flow_info[-1][0],tcp_dst=flow_info[-1][1])\r\n else:\r\n pass\r\n elif flow_info[-3] == 17:\r\n if flow_info[-2] == True:\r\n match = parser.OFPMatch(\r\n in_port=src_port, eth_type=flow_info[0],\r\n ipv4_src=flow_info[1], ipv4_dst=flow_info[2],\r\n ip_proto=17, udp_src=flow_info[-1][0],udp_dst=flow_info[-1][1])\r\n else:\r\n pass\r\n elif len(flow_info) == 4:\r\n match = parser.OFPMatch(\r\n in_port=src_port, eth_type=flow_info[0],\r\n ipv4_src=flow_info[1], ipv4_dst=flow_info[2])\r\n elif len(flow_info)==5:\r\n match=parser.OFPMatch(in_port=src_port,eth_type=flow_info[0],ip_protocol=flow_info[1],\r\n ipv4_src = flow_info[2], ipv4_dst = flow_info[3])\r\n else:\r\n pass\r\n\r\n self.add_flow(datapath, 30, match, actions,\r\n idle_timeout=1, hard_timeout=0)",
"def clear_single_switch_rules(switch_id,in_port,out_port):\n print(\"** Remove flows from {}\".format(switch_id))\n in_rule = \"in_port={}\".format(in_port)\n out_rule = \"in_port={}\".format(out_port)\n subprocess.Popen([\"ovs-ofctl\",\"-O\",\"OpenFlow13\",\"del-flows\",switch_id,in_rule],\n stdout=subprocess.PIPE).wait()\n subprocess.Popen([\"ovs-ofctl\",\"-O\",\"OpenFlow13\",\"del-flows\",switch_id,out_rule],\n stdout=subprocess.PIPE).wait()\n\n ### If debugging, remove the comments below to see what the flow rules are\n # result = subprocess.Popen([\"ovs-ofctl\",\"-O\",\"OpenFlow13\",\"dump-flows\",switch_id],\n # stdout=subprocess.PIPE).communicate()[0]\n # print (result)",
"def add_redirect(self, expr, node_host, node_port, openflow_host, openflow_port):\n pusher = self.StaticFlowEntryPusher(openflow_host, openflow_port)\n device = self.Device(openflow_host, openflow_port)\n try:\n (_, connected_dpid, node_mac, node_vlan) = device.get(node_host)\n except KeyError:\n raise\n request_hands_off = {\n \"switch\": connected_dpid,\n \"name\": \"request_hands_off-\" + node_host + \"-\" + node_port + \"-\" + expr,\n \"priority\": \"32767\",\n \"ether-type\": 0x0800,\n \"protocol\": 0x06,\n \"src-ip\": node_host,\n \"src-mac\": node_mac,\n \"dst-ip\": expr,\n \"dst-port\":\"80\",\n \"vlan-id\":node_vlan,\n \"active\":\"true\",\n \"actions\":\"output=normal\"\n }\n request_in = {\n \"switch\": connected_dpid,\n \"name\": \"request_in-\" + node_host + \"-\" + node_port + \"-\" + expr,\n \"priority\": \"32766\",\n \"ether-type\": 0x0800,\n \"protocol\": 0x06,\n \"dst-ip\": expr,\n \"dst-port\": \"80\",\n \"vlan-id\":node_vlan,\n \"active\": \"true\",\n \"actions\": \"set-dst-mac=\" + node_mac + \",set-dst-ip=\" + node_host +\n \",set-dst-port=\" + node_port +\",output=normal\"\n }\n request_out = {\n \"switch\": connected_dpid,\n \"name\": \"request_out-\" + node_host + \"-\" + node_port + \"-\" + expr,\n \"cookie\": \"0\",\n \"priority\": \"32766\",\n \"ether-type\": 0x0800,\n \"protocol\": 0x06,\n \"src-ip\": node_host,\n \"src-mac\": node_mac,\n \"src-port\": node_port,\n \"vlan-id\":node_vlan,\n \"active\": \"true\",\n \"actions\": \"set-src-port=80,set-src-ip=\" + expr + \",output=normal\"\n }\n pusher.remove({\"name\":\"request_hands_off-\" + node_host + \"-\" + node_port + \"-\" + expr})\n pusher.remove({\"name\":\"request_out-\" + node_host + \"-\" + node_port + \"-\" + expr})\n pusher.remove({\"name\":\"request_in-\" + node_host + \"-\" + node_port + \"-\" + expr})\n pusher.set(request_hands_off)\n pusher.set(request_out)\n pusher.set(request_in)",
"def run_flat_delivery(args, seed=None):\n\n if seed is not None:\n HRLutils.set_seed(seed)\n seed = HRLutils.SEED\n\n net = nef.Network(\"run_flat_delivery\")\n\n if \"load_weights\" in args and args[\"load_weights\"] is not None:\n args[\"load_weights\"] += \"_%s\" % seed\n\n stateN = 1200\n contextD = 2\n context_scale = 1.0\n max_state_input = 2\n actions = [(\"up\", [0, 1]), (\"right\", [1, 0]),\n (\"down\", [0, -1]), (\"left\", [-1, 0])]\n\n # ##ENVIRONMENT\n\n env = deliveryenvironment.DeliveryEnvironment(\n actions, HRLutils.datafile(\"contextmap.bmp\"),\n colormap={-16777216: \"wall\", -1: \"floor\", -256: \"a\", -2088896: \"b\"},\n imgsize=(5, 5), dx=0.001, placedev=0.5)\n net.add(env)\n\n print \"generated\", len(env.placecells), \"placecells\"\n\n # ##NAV AGENT\n\n enc = env.gen_encoders(stateN, contextD, context_scale)\n enc = MU.prod(enc, 1.0 / max_state_input)\n\n with open(HRLutils.datafile(\"contextbmp_evalpoints_%s.txt\" % seed)) as f:\n evals = [[float(x) for x in l.split(\" \")] for l in f.readlines()]\n\n nav_agent = smdpagent.SMDPAgent(stateN, len(env.placecells) + contextD,\n actions, name=\"NavAgent\",\n state_encoders=enc, state_evals=evals,\n state_threshold=0.8, **args)\n net.add(nav_agent)\n\n print \"agent neurons:\", nav_agent.countNeurons()\n\n net.connect(nav_agent.getOrigin(\"action_output\"),\n env.getTermination(\"action\"))\n net.connect(env.getOrigin(\"placewcontext\"),\n nav_agent.getTermination(\"state_input\"))\n\n nav_term_node = terminationnode.TerminationNode(\n {terminationnode.Timer((0.6, 0.9)): None}, env, name=\"NavTermNode\",\n contextD=2)\n net.add(nav_term_node)\n net.connect(env.getOrigin(\"context\"),\n nav_term_node.getTermination(\"context\"))\n net.connect(nav_term_node.getOrigin(\"reset\"),\n nav_agent.getTermination(\"reset\"))\n net.connect(nav_term_node.getOrigin(\"learn\"),\n nav_agent.getTermination(\"learn\"))\n net.connect(nav_term_node.getOrigin(\"reset\"),\n nav_agent.getTermination(\"save_state\"))\n net.connect(nav_term_node.getOrigin(\"reset\"),\n nav_agent.getTermination(\"save_action\"))\n\n reward_relay = net.make(\"reward_relay\", 1, 1, mode=\"direct\")\n reward_relay.fixMode()\n net.connect(env.getOrigin(\"reward\"), reward_relay)\n net.connect(nav_term_node.getOrigin(\"pseudoreward\"), reward_relay)\n net.connect(reward_relay, nav_agent.getTermination(\"reward\"))\n\n # period to save weights (realtime, not simulation time)\n weight_save = 600.0\n HRLutils.WeightSaveThread(nav_agent.getNode(\"QNetwork\").saveParams,\n os.path.join(\"weights\", \"%s_%s\" %\n (nav_agent.name, seed)),\n weight_save).start()\n\n # data collection node\n data = datanode.DataNode(period=5,\n filename=HRLutils.datafile(\"dataoutput_%s.txt\" %\n seed))\n net.add(data)\n q_net = nav_agent.getNode(\"QNetwork\")\n data.record_avg(env.getOrigin(\"reward\"))\n data.record_avg(q_net.getNode(\"actionvals\").getOrigin(\"X\"))\n data.record_sparsity(q_net.getNode(\"state_pop\").getOrigin(\"AXON\"))\n data.record_avg(q_net.getNode(\"valdiff\").getOrigin(\"X\"))\n data.record_avg(nav_agent.getNode(\"ErrorNetwork\").getOrigin(\"error\"))\n\n# net.add_to_nengo()\n# net.run(10000)\n net.view()",
"def create_outbound(self, addr, use_new_connection=False):",
"def pswitchoff(chan) :\n s.phaseSwitching(False, chan)",
"def close(self):\n assert self.status in (Status.Active, Status.Passive_fin1), (self.dst_addr, self.status)\n #############################################################################\n # TODO: YOUR CODE HERE #\n #############################################################################\n\n if not self.dst_addr:\n self._close()\n else:\n with self.status_lock:\n if self.status == Status.Active:\n self.status = Status.Active_fin1\n elif self.status == Status.Passive_fin1:\n self.status = Status.Passive_fin2\n\n while not self.transmit_queue.empty() and len(self.waiting_for_ack) > 0:\n time.sleep(1)\n\n datagram = Datagram(fin=1)\n self._send(datagram=datagram)\n self.set_timer(datagram)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################",
"def OspfStopRouter(self, *args, **kwargs):\n payload = { \"Arg1\": self }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('ospfStopRouter', payload=payload, response_object=None)",
"def add_flow_drop_arp(self, datapath):\n proto = datapath.ofproto\n parser = datapath.ofproto_parser\n # eth_type arp : 0x0806\n match = parser.OFPMatch(eth_type=0x0806, )\n\n instruction = [\n parser.OFPInstructionActions(proto.OFPIT_CLEAR_ACTIONS, [])\n ]\n\n msg = parser.OFPFlowMod(datapath,\n # table_id=OFDPA_FLOW_TABLE_ID_ACL_POLICY,\n priority=1,\n command=proto.OFPFC_ADD,\n match=match,\n instructions=instruction\n )\n datapath.send_msg(msg)",
"def exit(self):\n self._status = \"\"\n self._sock.settimeout(1.0)\n self._sock.sendto(bytes(\"bla\", \"utf-8\"), (self._cfg.host, self._cfg.port))",
"def bulb_off():\n tx = zb_explicit_command\n tx[\"dest_addr_long\"] = GE_LINK_BULB_MAC\n tx[\"cluster\"] = CLUSTER_A\n tx[\"data\"] = DATA_OFF\n response = zb.Send(tx)",
"def get_hosts_fanout(self, target, listener_type):",
"def _handle_PacketIn(self, event):\n msg = of.ofp_packet_out()\n msg.data = event.ofp\n msg.in_port = event.port\n msg.actions.append(of.ofp_action_output(port=of.OFPP_FLOOD))\n event.connection.send(msg)",
"def main():\n args = TrafficScriptArg(\n [u\"src_mac\", u\"dst_mac\", u\"src_ip\", u\"dst_ip\", u\"dscp\"]\n )\n\n rxq = RxQueue(args.get_arg(u\"rx_if\"))\n txq = TxQueue(args.get_arg(u\"tx_if\"))\n\n src_mac = args.get_arg(u\"src_mac\")\n dst_mac = args.get_arg(u\"dst_mac\")\n src_ip = args.get_arg(u\"src_ip\")\n dst_ip = args.get_arg(u\"dst_ip\")\n dscp = int(args.get_arg(u\"dscp\"))\n\n ip_layer = IPv6 if ip_address(src_ip).version == 6 else IP\n\n sent_packets = list()\n pkt_send = (Ether(src=src_mac, dst=dst_mac) /\n ip_layer(src=src_ip, dst=dst_ip) /\n TCP())\n\n pkt_send /= Raw()\n sent_packets.append(pkt_send)\n txq.send(pkt_send)\n\n while True:\n pkt_recv = rxq.recv(2, sent_packets)\n if pkt_recv is None:\n raise RuntimeError(u\"ICMPv6 echo reply Rx timeout\")\n\n if pkt_recv.haslayer(ICMPv6ND_NS):\n # read another packet in the queue if the current one is ICMPv6ND_NS\n continue\n elif pkt_recv.haslayer(ICMPv6MLReport2):\n # read another packet in the queue if the current one is\n # ICMPv6MLReport2\n continue\n elif pkt_recv.haslayer(ICMPv6ND_RA):\n # read another packet in the queue if the current one is\n # ICMPv6ND_RA\n continue\n\n # otherwise process the current packet\n break\n\n if pkt_recv is None:\n raise RuntimeError(u\"Rx timeout\")\n\n if ip_layer == IP:\n check_ipv4(pkt_recv, dscp)\n else:\n check_ipv6(pkt_recv, dscp)\n\n sys.exit(0)",
"def exit(self): \n self.teo_exchange_intent = self.teo_wallet\n self.withdraw_intent = self.euro_wallet\n\n self.register_teo_exchange(self.teo_exchange_intent)\n self.register_withdraw(self.withdraw_intent)\n\n if self.teo_wallet + self.euro_wallet == 0:\n print('Agent exited: ', self.__class__.__name__)\n self.model.schedule.remove(self)",
"def close(self):\n self._udp_handler.send('exit'.encode(encoding='utf-8'))"
]
| [
"0.5967594",
"0.5906457",
"0.56219417",
"0.5474351",
"0.5386218",
"0.53010625",
"0.52660906",
"0.52272975",
"0.5142328",
"0.5108621",
"0.50874406",
"0.5077897",
"0.506066",
"0.49685788",
"0.4954655",
"0.49296337",
"0.4854221",
"0.48277456",
"0.48222342",
"0.47977006",
"0.4785342",
"0.47843796",
"0.4723173",
"0.47228217",
"0.47020602",
"0.46809807",
"0.4676101",
"0.46699035",
"0.46648732",
"0.46592325"
]
| 0.67179614 | 0 |
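The connectexitfanout row above hinges on one small pattern: scanning the topology links between the software and hardware switch to find the output port that faces the neighbour, then pointing every forward entry's outPort at it before handing the array to SdnInstallForward. The stand-alone sketch below illustrates only that port scan; the dict-shaped links structure and the names port_toward, remote_node and local_port are hypothetical stand-ins, not the real getlinks2()/linkednode2() topology API.

    # Illustrative only -- a pure-Python stand-in for the "find the port on this
    # switch that faces a given neighbour" scan used above. The real code walks
    # links returned by getlinks2() and resolves ports with linkednode2().
    def port_toward(links, neighbour_name):
        """Return the local port of the first link whose far end is neighbour_name."""
        for link in links:
            if link["remote_node"] == neighbour_name:
                return link["local_port"]
        return None

    links = [
        {"local_port": "eth1", "remote_node": "hwswitch-pop1", "remote_port": "23"},
        {"local_port": "eth2", "remote_node": "corerouter-1", "remote_port": "7"},
    ]
    print(port_toward(links, "hwswitch-pop1"))  # -> eth1, i.e. the swport_tohw role above

Once that port is known, the fanout itself is just the forwards list rewritten so that each destination leaves through that single port on its own VLAN, which is what the loop over forwards does in the row above.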
Make a Java array of bytes from unsigned bytes in Python. Note that Java bytes are signed, whereas in Python they may be either signed or unsigned. | def javaByteArray(a):
b = jarray.zeros(len(a), 'b')
for i in range(len(a)):
b[i] = struct.unpack('b', struct.pack('B', a[i]))[0]
return b | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def javaByteArray2(s):\n a = array.array('B', binascii.unhexlify(s))\n return javaByteArray(a)",
"def decode_u8_array(as_bytes: typing.List[int]) -> typing.List[int]:\n raise NotImplementedError()",
"def decode_byte_array(as_bytes: typing.List[int]) -> bytes:\n return bytes(as_bytes)",
"def bytify(binary):\n\tbytes = [0,0,0,0]\n\ti = 3\n\twhile binary:\n\n\t\tbytes[i] = binary&255\n\t\tbinary >>= 8\n\t\ti -= 1 \n\treturn bytes",
"def decode_u8(as_bytes: typing.List[int]) -> int:\n return le_bytes_to_int(as_bytes, False)",
"def bytes_list_to_array(bytes_list):\r\n digital_bytes = []\r\n for elem in bytes_list:\r\n if isinstance(elem, int):\r\n digital_bytes.append(elem.to_bytes(1, 'little'))\r\n elif isinstance(elem, str):\r\n digital_bytes.append(ord(elem).to_bytes(1, 'little'))\r\n digital_bytes_array = b''.join(digital_bytes)\r\n return digital_bytes_array",
"def toubytes(data, buf_len):\n buf = bytearray(data)\n buffer = (ctypes.c_ubyte * buf_len).from_buffer(buf)\n return buffer",
"def decode_vector_of_t(as_bytes: typing.List[int]) -> list:\n raise NotImplementedError()",
"def vec_to_bytes(val):\n return [int(x*255) for x in val]",
"def _from_java(cls, java_obj):\n swords = java_obj.getStopWords()[:-1] # strip the id\n return load_byte_array(swords)",
"def to_bytearray(x):\n if isinstance(x, bytearray):\n return x\n else:\n return bytearray(x)",
"def _pack_bytes_signed(byte_list):\n return int.from_bytes(byte_list, 'big', signed=True)",
"def decode_uref(as_bytes: typing.List[int]) -> str:\n raise NotImplementedError()",
"def _as_u64_array(array):\n return ffi.cast(\"unsigned long int*\", array.ctypes.data)",
"def byte2array(bytes):\n array = []\n for i, byte in enumerate(bytes):\n if i % 4 == 0:\n array.append([byte])\n else:\n array[i // 4].append(byte)\n return array",
"def bytes_as_char_array(b):\n return \"{ \" + \", \".join(\"0x%02x\" % x for x in b) + \" }\"",
"def _decode_byte_array(fp):\n return fp.read(_decode_int(fp))",
"def bs_to_array(bs):\n return np.array([int(x) for x in bs], dtype=\"int8\")",
"def deserialize(arr):\n return pickle.loads(arr.astype(np.uint8).tobytes())",
"def deserialize(arr):\n return pickle.loads(arr.astype(np.uint8).tobytes())",
"def toubyte(data):\n buf = bytearray(data)\n buffer = (ctypes.c_ubyte).from_buffer(buf)\n return buffer",
"def encode_u8_array(value: typing.List[int]) -> bytes:\n return encode_u32(len(value)) + bytes(value)",
"def to_bit_array(pkt):\n byte_list = list(raw(pkt))\n bin_list = [format(x, \"#010b\") for x in byte_list]\n bin_list = [x[2:] for x in bin_list]\n return bin_list",
"def tobinary_multiples(arr):\n return [np.array(arr_i).tobytes() for arr_i in arr]",
"def convertToByteArray(booleanArray: typing.List[bool]) -> typing.List[int]:\n ...",
"def bytes2binary(inputBytes):\r\n result = 0\r\n for i in inputBytes:\r\n result = result * 256 + int(i)\r\n return bin(result)[2:].rjust(len(inputBytes*8), \"0\")",
"def bytes_to_uuid_list(byte_array):\n result = []\n for i in range(0, len(byte_array)//16):\n result.append(uuid.UUID(bytes=bytes(byte_array[i*16:i*16+16])))\n return result",
"def _pack_bytes(byte_list):\n return int.from_bytes(byte_list, 'big', signed=False)",
"def decode_result(as_bytes: typing.List[int]):\n raise NotImplementedError()",
"def data_to_bitarray(data):\n ba = bitarray()\n ba.frombytes(data)\n return ba"
]
| [
"0.7003046",
"0.6871421",
"0.6711044",
"0.6243507",
"0.61276656",
"0.61108124",
"0.6078007",
"0.60172135",
"0.5982723",
"0.59610796",
"0.5933144",
"0.59135413",
"0.5878097",
"0.583533",
"0.57353103",
"0.5725388",
"0.5707536",
"0.5652822",
"0.56485355",
"0.56485355",
"0.5615048",
"0.56099224",
"0.55984044",
"0.5589713",
"0.55887926",
"0.5575626",
"0.55398965",
"0.5497822",
"0.548689",
"0.5466814"
]
| 0.7075934 | 0 |
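The javaByteArray row above reinterprets each unsigned Python byte (0..255) as a signed Java byte (-128..127) through a struct pack/unpack round trip. The same conversion step can be checked in plain CPython, without Jython or jarray; the sketch below is only an illustration of that one step, not the jarray allocation itself.

    # Pure-CPython check of the unsigned -> signed reinterpretation used above;
    # only the struct round trip is needed.
    import struct

    def to_signed(unsigned_values):
        return [struct.unpack('b', struct.pack('B', v))[0] for v in unsigned_values]

    print(to_signed([0x00, 0x7F, 0x80, 0xFF]))  # [0, 127, -128, -1]

Values above 127 wrap to their negative two's-complement equivalents, which is exactly what a Java byte[] expects.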
Starting the robot in a new thread | def _worker(self, robot_id):
robot = Robot(self, rid=robot_id, scroll_times=3)
self.robots.update({robot_id: robot})
d('Starting ROBO_%s' % str(robot_id))
robot.start()
d('End of robot_thread %s ' % str(robot_id))
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start(self):\n self.thread.start()",
"def start(self):\n self._setup_thread()\n self.thread.start()",
"def runRobot():",
"def start(self):\n self._thread.start()",
"def __init__(self):\n Thread.__init__(self)\n self.start()",
"def __init__(self):\n Thread.__init__(self)\n self.start()",
"def __init__(self):\n Thread.__init__(self)\n self.start() # start the thread",
"def run(self):\n self.thread = threading.Thread(target=self._main)\n self.thread.start()\n self.running = True",
"def connect(self):\n self.start()",
"def start(self):\n \n self.thread.start()\n self.state = \"running\"",
"def start(self) -> None:\n start_thread(super().start, self.__class__.__name__)",
"def run (self):\n t = threading.Thread(target=self.runController)\n t.start()",
"def start(self):\n\n # ioloop.install()\n threading.Thread(target=self.loop.start).start()\n time.sleep(1)",
"def start_thread(self):\n self.stop_thread()\n self.running = True\n self.run_thread = threading.Thread(target=self.run, daemon=True)\n self.run_thread.start()",
"def run(self):\n self.submit()\n self.start()",
"def _start_loop(self):\n self.p = tread.Thread(target=self._loop)\n self.p.start()",
"def _make_thread(self):\r\n pass",
"def start(self) -> None:\n self.stopping.clear()\n self.thread = threading.Thread(target=self._run, daemon=True, name=self.thread_name)\n self.thread.start()",
"def start(self):\n self.stop_recognising.clear()\n self.thread.start()",
"def start(self):\n def f():\n if (self.started): return\n self.started = True\n with client.ServerProxy(self.host) as proxy:\n while (not self.req_shutdown):\n self.update_speed(proxy)\n time.sleep(self.com_freq)\n self.started = False\n self.req_shutdwon = False\n\n Thread(target=f).start()",
"def activate(self):\n self.start()",
"def run(self):\n self.started()",
"def start(self):\n if not self._Thread__initialized:\n raise RuntimeError('thread.__init__() not called')\n if self._Thread__started.is_set():\n raise RuntimeError('threads can only be started once')\n with threading._active_limbo_lock:\n threading._limbo[self] = self\n try:\n start_new_background_thread(self.__bootstrap, ())\n except Exception:\n with threading._active_limbo_lock:\n del threading._limbo[self]\n raise\n self._Thread__started.wait()",
"def start(self) -> None:\n self.should_exit = False\n self._main_thread = threading.Thread(target=self._wrap_start, daemon=True)\n self._main_thread.start()",
"def start(self, robot):\n rospy.loginfo(\"Moving randomly\" + \" - \" + str(robot.robot_id))",
"def __init__(self):\n Thread.__init__(self)\n self.daemon = True\n self.running = True\n\n self.rate = rospy.Rate(10)\n self.point_head_goal = SimpleActionClient('/head_controller/point_head_action', PointHeadAction)\n\n camera_info_msg = rospy.wait_for_message('/xtion/rgb/camera_info', CameraInfo)\n self.camera_intrinsics = array(camera_info_msg.K).reshape((3, 3))\n\n self.looker = PointHeadGoal()\n\n self.looker.target.header.frame_id = '/base_link'\n self.looker.pointing_frame = '/head_2_link'\n\n self.looker.pointing_axis.x = 1.0\n self.looker.pointing_axis.y = 0.0\n self.looker.pointing_axis.z = 0.0\n self.looker.max_velocity = 0.3\n\n look_point = PointStamped()\n look_point.header.frame_id = '/base_link'\n look_point.point.x = 40.0\n look_point.point.y = 0.0\n look_point.point.z = 0.0\n\n self.looker.target = look_point\n self.start()",
"def do_start(self, *arg):\n self._keep_looping = True\n\n print_info(\"Starting sensors\")\n\n self._loop()",
"def start(self):\r\n monitor_thread = Thread(target = self.monitor)\r\n monitor_thread.setDaemon(True)\r\n monitor_thread.start()\r\n\r\n main_thread = Thread(target = self.run)\r\n main_thread.setDaemon(True)\r\n main_thread.start()",
"def start_background_thread(self):\n self.runner = Runner(queue=queue, app_id=self.app_id)\n self.runner.start()\n # TODO: stop the thread at some point?",
"def Start(self):\r\n # Attach a WorkerDispatcher to the current thread\r\n self.m_disp = ttapi.Dispatcher.AttachWorkerDispatcher()\r\n self.m_disp.BeginInvoke(Action(self.Init))\r\n self.m_disp.Run()"
]
| [
"0.72708154",
"0.71561426",
"0.7082271",
"0.70204693",
"0.6998391",
"0.6998391",
"0.69400173",
"0.6863858",
"0.68171334",
"0.6799912",
"0.67390585",
"0.6717737",
"0.66612923",
"0.663221",
"0.6610315",
"0.6556348",
"0.654235",
"0.6528905",
"0.646403",
"0.64595586",
"0.6442346",
"0.6420183",
"0.6374459",
"0.6362534",
"0.63440675",
"0.6335109",
"0.6323897",
"0.631811",
"0.63164467",
"0.63148963"
]
| 0.7174281 | 1 |
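The _worker row above registers a Robot per id and runs it from a dedicated thread (the function itself is presumably used as a thread target, as the "End of robot_thread" log suggests). The sketch below shows the same per-worker pattern using only the standard library; StubRobot and the module-level robots dict are hypothetical stand-ins for the real Robot class and its owner object, which the row does not show.

    # Minimal per-worker threading sketch; StubRobot is a hypothetical stand-in
    # for the Robot class referenced above.
    import threading

    class StubRobot:
        def __init__(self, rid):
            self.rid = rid
        def start(self):                 # plays the role of Robot.start()
            print("ROBO_%s running" % self.rid)

    robots = {}                          # mirrors self.robots.update({robot_id: robot})

    def worker(robot_id):
        robot = StubRobot(robot_id)
        robots[robot_id] = robot
        robot.start()

    threads = [threading.Thread(target=worker, args=(rid,)) for rid in (1, 2, 3)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()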
checks status of query job, and returns result id if successful | def _poll_for_new_result(session, job):
while job['status'] not in (3, 4):
response = session.get('{}/api/jobs/{}'.format(REDASH_HOST, job['id']))
job = response.json()['job']
time.sleep(POLL_INTERVAL)
if job['status'] == 3:
return job['query_result_id']
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def request_result(job_id):\n result = _database_operations.get_results(job_id, Session())\n if result is None:\n flask.abort(404)\n else:\n return result",
"def _check_job_status(self) -> str:\n self._assert_job_created()\n\n r = requests.post(\n f'https://{cc.ROUTE_PREFIX}.stratodem.com/jobs/status',\n headers=dict(\n Authorization=f'Bearer {get_api_token()}',\n ),\n json=dict(job_id=self._job_id)\n )\n\n if not r.status_code == 200:\n raise APIQueryFailedException('Failed to determine job status')\n\n r = r.json()\n\n if not r['success']:\n raise APIQueryFailedException(r)\n else:\n return r['message']",
"def __get_job_status_from_queue__(self):\n\n return (lambda job: (int(job[-1]['JobStatus']),\n job[-1]))(self.schedd.query(\"ClusterId =?= {0}\".format(self.id)))",
"def test_update_single_row_if_status_is_in_progress(self):\n first = generate_mock_result(status='IN_PROGRESS', success=False)\n self.db.insert_single_result(first)\n current = self.db.get_result_by_primary_key(first.get('id'))\n self.assertEqual(current.status, 'IN_PROGRESS')\n second = generate_mock_result(status='SUCCESS', success=True)\n self.db.insert_single_result(second)\n current = self.db.get_result_by_primary_key(first.get('id'))\n self.assertEqual(current.status, 'SUCCESS')",
"def check_job_status(self, jobid=None):\n\n if jobid is None:\n if hasattr(self, 'current_job'):\n jobid = self.current_job\n else:\n jobid = self.current_job\n\n response = self._request(\n 'GET', CosmoSim.QUERY_URL + '/{}'.format(jobid) + '/phase',\n auth=(self.username, self.password), data={'print': 'b'},\n cache=False)\n\n log.info(\"Job {}: {}\".format(jobid, response.content))\n return response.content",
"def check_job_status_by_id(job_id):\n print('=' * 40)\n print('check_status_by_job_id', job_id)\n print('=' * 40)\n\n it_worked = check_job_status(job)\n if it_worked:\n return ok_resp(job)\n\n user_msg = ('PreprocessJob still in process: %s') % (job_id)\n return err_resp(user_msg)",
"def test_successful_job(self, _is_coalesced):\n successful_job = json.loads(BASE_JSON % (SUCCESS, 1433166610, 1, 1433166609))[0]\n self.assertEquals(self.query_api.get_job_status(successful_job), SUCCESS)",
"def check_slurm_job_submission(expected_name):\n cmd = ['scontrol', 'show', 'job']\n job_id = 0\n found_job = False\n while True:\n while True:\n try:\n out = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()[0]\n break\n except:\n sleep(1)\n out = out.split('\\n')\n if 'error' in out[0]:\n sleep(1)\n msg = 'Error checking job status for {0}'.format(expected_name)\n logging.warning(msg)\n continue\n for line in out:\n for word in line.split():\n if 'JobId' in word:\n index = word.find('=') + 1\n job_id = int(word[index:])\n # continue\n if 'Name' in word:\n index = word.find('=') + 1\n if word[index:] == expected_name:\n found_job = True\n\n if found_job and job_id != 0:\n return found_job, job_id\n sleep(1)\n return found_job, job_id",
"def some_job():\r\n\tfor row in rows:\r\n\t\treceipt_number = row[0]\r\n\t\tphone_number = row[2]\r\n\t\treturn case_status_check(receipt_number, phone_number)",
"def check_results(request):\n \n # Check if an ID was supplied.\n if ('ID' not in request.GET):\n response = HttpResponse()\n response.status_code = 400 # Bad Request\n response.reason_phrase = (\"No ID was passed. The ID used to start \"\n \"the classification job must be sent to \"\n \"check the progress. The ID should be \"\n \"passed in a parameter named 'ID'.\")\n return response\n \n # Ensure a file exists with the specified ID.\n id = request.GET['ID']\n if (not File.objects.filter(file_name=id).exists()):\n response = HttpResponse()\n response.status_code = 400 # Bad Request\n response.reason_phrase = ('The passed ID was invalid. If the ID you '\n 'sent was returned by a validate request, '\n 'it is possible the ID has expired and the '\n 'job was deleted.')\n \n # Retrieve the job for the requested file.\n file = File.objects.get(file_name=id)\n job = file.job\n \n # If the job is complete, send the results. Otherwise, send all of the\n # updates for the job.\n has_result = JobResult.objects.filter(job=job).exists()\n return job_results(request, job) if has_result else \\\n job_updates(request, job)",
"async def set_job_status(job_id: str) -> int:\n async with js.WDBConnection() as conn:\n async with conn.cursor() as cur:\n num_rows_affected = await cur.execute(FINALIZE_SUBMISSION_SQL, (job_id,))\n await conn.commit()\n return num_rows_affected",
"def GetResult(jobid, g_params): # {{{\n # retrieving result from the remote server for this job\n gen_logfile = g_params['gen_logfile']\n gen_errfile = g_params['gen_errfile']\n\n webcom.loginfo(f\"GetResult for {jobid}.\\n\", gen_logfile)\n\n path_static = g_params['path_static']\n path_result = os.path.join(path_static, 'result')\n path_cache = g_params['path_cache']\n finished_date_db = g_params['finished_date_db']\n name_server = g_params['name_server']\n\n rstdir = os.path.join(path_result, jobid)\n runjob_logfile = os.path.join(rstdir, \"runjob.log\")\n runjob_errfile = os.path.join(rstdir, \"runjob.err\")\n outpath_result = os.path.join(rstdir, jobid)\n if not os.path.exists(outpath_result):\n os.mkdir(outpath_result)\n\n remotequeue_idx_file = os.path.join(rstdir, \"remotequeue_seqindex.txt\")\n\n torun_idx_file = os.path.join(rstdir, \"torun_seqindex.txt\")\n finished_idx_file = os.path.join(rstdir, \"finished_seqindex.txt\")\n query_parafile = os.path.join(rstdir, \"query.para.txt\")\n\n query_para = {}\n if os.path.exists(query_parafile):\n content = myfunc.ReadFile(query_parafile)\n if content != \"\":\n try:\n query_para = json.loads(content)\n except ValueError:\n query_para = {}\n failed_idx_file = os.path.join(rstdir, \"failed_seqindex.txt\")\n\n starttagfile = os.path.join(rstdir, \"runjob.start\")\n cnttry_idx_file = os.path.join(rstdir, \"cntsubmittry_seqindex.txt\") # index file to keep log of tries\n tmpdir = os.path.join(rstdir, \"tmpdir\")\n finished_seq_file = os.path.join(outpath_result, \"finished_seqs.txt\")\n\n if not os.path.exists(tmpdir):\n os.mkdir(tmpdir)\n\n finished_info_list = [] # [info for finished record]\n finished_idx_list = [] # [origIndex]\n failed_idx_list = [] # [origIndex]\n resubmit_idx_list = [] # [origIndex]\n keep_queueline_list = [] # [line] still in queue\n\n cntTryDict = {}\n if os.path.exists(cnttry_idx_file):\n with open(cnttry_idx_file, 'r') as fpin:\n try:\n cntTryDict = json.load(fpin)\n except Exception:\n cntTryDict = {}\n\n # in case of missing queries, if remotequeue_idx_file is empty but the job\n # is still not finished, force recreating torun_idx_file\n if 'DEBUG' in g_params and g_params['DEBUG']:\n try:\n webcom.loginfo(\"DEBUG: %s: remotequeue_idx_file=%s, size(remotequeue_idx_file)=%d, content=\\\"%s\\\"\\n\" %(jobid, remotequeue_idx_file, os.path.getsize(remotequeue_idx_file), myfunc.ReadFile(remotequeue_idx_file)), gen_logfile)\n except Exception:\n pass\n if ((not os.path.exists(remotequeue_idx_file) or # {{{\n os.path.getsize(remotequeue_idx_file) < 1)):\n idlist1 = []\n idlist2 = []\n if os.path.exists(finished_idx_file):\n idlist1 = myfunc.ReadIDList(finished_idx_file)\n if os.path.exists(failed_idx_file):\n idlist2 = myfunc.ReadIDList(failed_idx_file)\n\n completed_idx_set = set(idlist1 + idlist2)\n\n jobinfofile = os.path.join(rstdir, \"jobinfo\")\n jobinfo = myfunc.ReadFile(jobinfofile).strip()\n jobinfolist = jobinfo.split(\"\\t\")\n if len(jobinfolist) >= 8:\n numseq = int(jobinfolist[3])\n\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: len(completed_idx_set)=%d+%d=%d, numseq=%d\\n\"%(len(idlist1), len(idlist2), len(completed_idx_set), numseq), gen_logfile)\n\n if len(completed_idx_set) < numseq:\n all_idx_list = [str(x) for x in range(numseq)]\n torun_idx_str_list = list(set(all_idx_list)-completed_idx_set)\n for idx in torun_idx_str_list:\n try:\n cntTryDict[int(idx)] += 1\n except (ValueError, IndexError, KeyError):\n cntTryDict[int(idx)] = 1\n 
myfunc.WriteFile(\"\\n\".join(torun_idx_str_list)+\"\\n\", torun_idx_file, \"w\", True)\n\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"recreate torun_idx_file: jobid = %s, numseq=%d, len(completed_idx_set)=%d, len(torun_idx_str_list)=%d\\n\"%(jobid, numseq, len(completed_idx_set), len(torun_idx_str_list)), gen_logfile)\n else:\n myfunc.WriteFile(\"\", torun_idx_file, \"w\", True)\n else:\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: %s: remotequeue_idx_file %s is not empty\\n\" %(jobid, remotequeue_idx_file), gen_logfile)\n# }}}\n\n text = \"\"\n if os.path.exists(remotequeue_idx_file):\n text = myfunc.ReadFile(remotequeue_idx_file)\n if text == \"\":\n return 1\n lines = text.split(\"\\n\")\n\n nodeSet = set([])\n for i in range(len(lines)):\n line = lines[i]\n if not line or line[0] == \"#\":\n continue\n strs = line.split(\"\\t\")\n if len(strs) != 6:\n continue\n node = strs[1]\n nodeSet.add(node)\n\n myclientDict = {}\n for node in nodeSet:\n wsdl_url = f\"http://{node}/pred/api_submitseq/?wsdl\"\n try:\n myclient = Client(wsdl_url, cache=None, timeout=30)\n myclientDict[node] = myclient\n except Exception as e:\n webcom.loginfo(f\"Failed to access {wsdl_url} with errmsg {e}\", gen_logfile)\n pass\n\n for i in range(len(lines)): # {{{\n line = lines[i]\n\n if 'DEBUG' in g_params and g_params['DEBUG']:\n myfunc.WriteFile(f\"Process {line}\\n\", gen_logfile, \"a\", True)\n if not line or line[0] == \"#\":\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: line empty or line[0] = '#', ignore\", gen_logfile)\n continue\n strs = line.split(\"\\t\")\n if len(strs) != 6:\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: len(strs)=%d (!=6), ignore\\n\"%(len(strs)), gen_logfile)\n continue\n origIndex = int(strs[0])\n node = strs[1]\n remote_jobid = strs[2]\n description = strs[3]\n seq = strs[4]\n submit_time_epoch = float(strs[5])\n subfoldername_this_seq = f\"seq_{origIndex}\"\n outpath_this_seq = os.path.join(outpath_result, subfoldername_this_seq)\n\n try:\n myclient = myclientDict[node]\n except KeyError:\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: node (%s) not found in myclientDict, ignore\"%(node), gen_logfile)\n keep_queueline_list.append(line)\n continue\n try:\n rtValue = myclient.service.checkjob(remote_jobid)\n except Exception as e:\n msg = \"checkjob(%s) at node %s failed with errmsg %s\"%(remote_jobid, node, str(e))\n webcom.loginfo(msg, gen_logfile)\n rtValue = []\n pass\n isSuccess = False\n isFinish_remote = False\n status = \"\"\n if len(rtValue) >= 1:\n ss2 = rtValue[0]\n if len(ss2) >= 3:\n status = ss2[0]\n result_url = ss2[1]\n errinfo = ss2[2]\n\n if errinfo and errinfo.find(\"does not exist\") != -1:\n if 'DEBUG' in g_params and g_params['DEBUG']:\n msg = \"Failed for remote_jobid %s with errmsg %s\"%(remote_jobid, str(errinfo))\n webcom.loginfo(msg, gen_logfile)\n\n isFinish_remote = True\n\n if status == \"Finished\": # {{{\n isFinish_remote = True\n outfile_zip = f\"{tmpdir}/{remote_jobid}.zip\"\n isRetrieveSuccess = False\n myfunc.WriteFile(\"\\tFetching result for %s/seq_%d from %s \" % (\n jobid, origIndex, result_url), gen_logfile, \"a\", True)\n if myfunc.IsURLExist(result_url, timeout=5):\n try:\n myfunc.urlretrieve(result_url, outfile_zip, timeout=10)\n isRetrieveSuccess = True\n myfunc.WriteFile(f\" succeeded on node {node}\\n\", gen_logfile, \"a\", True)\n except Exception as e:\n myfunc.WriteFile(\" failed with %s\\n\"%(str(e)), 
gen_logfile, \"a\", True)\n pass\n if os.path.exists(outfile_zip) and isRetrieveSuccess:\n cmd = [\"unzip\", outfile_zip, \"-d\", tmpdir]\n webcom.RunCmd(cmd, gen_logfile, gen_errfile)\n rst_fetched = os.path.join(tmpdir, remote_jobid)\n if name_server.lower() == \"pconsc3\":\n rst_this_seq = rst_fetched\n elif name_server.lower() == \"boctopus2\":\n rst_this_seq = os.path.join(rst_fetched, \"seq_0\", \"seq_0\")\n rst_this_seq_parent = os.path.join(rst_fetched, \"seq_0\")\n else:\n rst_this_seq = os.path.join(rst_fetched, \"seq_0\")\n\n if os.path.islink(outpath_this_seq):\n os.unlink(outpath_this_seq)\n elif os.path.exists(outpath_this_seq):\n shutil.rmtree(outpath_this_seq)\n\n if os.path.exists(rst_this_seq) and not os.path.exists(outpath_this_seq):\n cmd = [\"mv\", \"-f\", rst_this_seq, outpath_this_seq]\n webcom.RunCmd(cmd, gen_logfile, gen_errfile)\n if name_server.lower() == \"boctopus2\":\n # move also seq.fa and time.txt for boctopus2\n file1 = os.path.join(rst_this_seq_parent, \"seq.fa\")\n file2 = os.path.join(rst_this_seq_parent, \"time.txt\")\n for f in [file1, file2]:\n if os.path.exists(f):\n try:\n shutil.move(f, outpath_this_seq)\n except:\n pass\n\n fafile_this_seq = os.path.join(outpath_this_seq, \"seq.fa\")\n if webcom.IsCheckPredictionPassed(outpath_this_seq, name_server):\n # relpace the seq.fa with original description\n myfunc.WriteFile('>%s\\n%s\\n'%(description, seq), fafile_this_seq, 'w', True)\n isSuccess = True\n\n if isSuccess:\n # delete the data on the remote server\n try:\n rtValue2 = myclient.service.deletejob(remote_jobid)\n except Exception as e:\n msg = \"Failed to deletejob(%s) on node %s with errmsg %s\"%(remote_jobid, node, str(e))\n webcom.loginfo(msg, gen_logfile)\n rtValue2 = []\n pass\n\n logmsg = \"\"\n if len(rtValue2) >= 1:\n ss2 = rtValue2[0]\n if len(ss2) >= 2:\n status = ss2[0]\n errmsg = ss2[1]\n if status == \"Succeeded\":\n logmsg = \"Successfully deleted data on %s \"\\\n \"for %s\"%(node, remote_jobid)\n else:\n logmsg = \"Failed to delete data on %s for \"\\\n \"%s\\nError message:\\n%s\\n\"%(node, remote_jobid, errmsg)\n else:\n logmsg = \"Failed to call deletejob %s via WSDL on %s\\n\"%(remote_jobid, node)\n\n # delete the downloaded temporary zip file and\n # extracted file\n if os.path.exists(outfile_zip):\n os.remove(outfile_zip)\n if os.path.exists(rst_fetched):\n shutil.rmtree(rst_fetched)\n\n # create or update the md5 cache\n if name_server.lower() == \"prodres\" and query_para != {}:\n md5_key = hashlib.md5((seq+str(query_para)).encode('utf-8')).hexdigest()\n else:\n md5_key = hashlib.md5(seq.encode('utf-8')).hexdigest()\n subfoldername = md5_key[:2]\n md5_subfolder = \"%s/%s\"%(path_cache, subfoldername)\n cachedir = \"%s/%s/%s\"%(path_cache, subfoldername, md5_key)\n\n # copy the zipped folder to the cache path\n origpath = os.getcwd()\n os.chdir(outpath_result)\n shutil.copytree(\"seq_%d\"%(origIndex), md5_key)\n cmd = [\"zip\", \"-rq\", \"%s.zip\"%(md5_key), md5_key]\n webcom.RunCmd(cmd, runjob_logfile, runjob_errfile)\n if not os.path.exists(md5_subfolder):\n os.makedirs(md5_subfolder)\n shutil.move(\"%s.zip\"%(md5_key), \"%s.zip\"%(cachedir))\n shutil.rmtree(md5_key) # delete the temp folder named as md5 hash\n os.chdir(origpath)\n\n # Add the finished date to the database\n date_str = time.strftime(g_params['FORMAT_DATETIME'])\n MAX_TRY_INSERT_DB = 3\n cnttry = 0\n while cnttry < MAX_TRY_INSERT_DB:\n t_rv = webcom.InsertFinishDateToDB(date_str, md5_key, seq, finished_date_db)\n if t_rv == 0:\n break\n cnttry += 1\n 
time.sleep(random.random()/1.0)\n\n# }}}\n elif status in [\"Failed\", \"None\"]:\n # the job is failed for this sequence, try to resubmit\n isFinish_remote = True\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: %s, status = %s\\n\"%(remote_jobid, status), gen_logfile)\n\n if status != \"Wait\" and not os.path.exists(starttagfile):\n webcom.WriteDateTimeTagFile(starttagfile, runjob_logfile, runjob_errfile)\n\n if isSuccess: # {{{\n time_now = time.time()\n runtime1 = time_now - submit_time_epoch # in seconds\n timefile = os.path.join(outpath_this_seq, \"time.txt\")\n runtime = webcom.ReadRuntimeFromFile(timefile, default_runtime=runtime1)\n info_finish = webcom.GetInfoFinish(\n name_server, outpath_this_seq,\n origIndex, len(seq), description,\n source_result=\"newrun\", runtime=runtime)\n finished_info_list.append(\"\\t\".join(info_finish))\n finished_idx_list.append(str(origIndex))\n # }}}\n\n # if the job is finished on the remote but the prediction is failed,\n # try resubmit a few times and if all failed, add the origIndex to the\n # failed_idx_file\n if isFinish_remote and not isSuccess:\n cnttry = 1\n try:\n cnttry = cntTryDict[int(origIndex)]\n except KeyError:\n cnttry = 1\n if cnttry < g_params['MAX_RESUBMIT']:\n resubmit_idx_list.append(str(origIndex))\n cntTryDict[int(origIndex)] = cnttry+1\n else:\n failed_idx_list.append(str(origIndex))\n\n if not isFinish_remote:\n time_in_remote_queue = time.time() - submit_time_epoch\n # for jobs queued in the remote queue more than one day (but not\n # running) delete it and try to resubmit it. This solved the\n # problem of dead jobs in the remote server due to server\n # rebooting)\n if (\n status != \"Running\"\n and status != \"\"\n and time_in_remote_queue > g_params['MAX_TIME_IN_REMOTE_QUEUE']):\n # delete the remote job on the remote server\n try:\n rtValue2 = myclient.service.deletejob(remote_jobid)\n except Exception as e:\n webcom.loginfo(\"Failed to run myclient.service.deletejob(%s) on node %s with msg %s\"%(remote_jobid, node, str(e)), gen_logfile)\n rtValue2 = []\n pass\n else:\n keep_queueline_list.append(line)\n# }}}\n # Finally, write log files\n finished_idx_list = list(set(finished_idx_list))\n failed_idx_list = list(set(failed_idx_list))\n resubmit_idx_list = list(set(resubmit_idx_list))\n\n if len(finished_info_list) > 0:\n myfunc.WriteFile(\"\\n\".join(finished_info_list)+\"\\n\", finished_seq_file,\n \"a\", True)\n if len(finished_idx_list) > 0:\n myfunc.WriteFile(\"\\n\".join(finished_idx_list)+\"\\n\", finished_idx_file,\n \"a\", True)\n if len(failed_idx_list) > 0:\n myfunc.WriteFile(\"\\n\".join(failed_idx_list)+\"\\n\", failed_idx_file, \"a\",\n True)\n if len(resubmit_idx_list) > 0:\n myfunc.WriteFile(\"\\n\".join(resubmit_idx_list)+\"\\n\", torun_idx_file,\n \"a\", True)\n\n if len(keep_queueline_list) > 0:\n keep_queueline_list = list(set(keep_queueline_list))\n myfunc.WriteFile(\"\\n\".join(keep_queueline_list)+\"\\n\",\n remotequeue_idx_file, \"w\", True)\n else:\n myfunc.WriteFile(\"\", remotequeue_idx_file, \"w\", True)\n\n with open(cnttry_idx_file, 'w') as fpout:\n json.dump(cntTryDict, fpout)\n\n return 0",
"def query_job_progress():\n pass",
"def request_status(job_id):\n status = _database_operations.get_status(job_id, Session())\n if status is None:\n flask.abort(404)\n else:\n return json.dumps({\n 'status': status.status,\n 'finished': status.finished\n })",
"def run(self,id=None):\n # loop until the process is running or halted.\n while 1:\n\n my_job_status, my_job = self.find_job_and_job_status()\n\n if not my_job_status:\n time.sleep(5)\n continue\n\n if sum(map(lambda st: int(st==my_job_status), self.return_status)) > 0:\n return (my_job_status, my_job.printOld())\n\n time.sleep(5)\n continue",
"def checkjob(self, taskid):\n\t\tfrom subprocess import Popen,PIPE\n\t\timport os\n\n\t\ttry:\n\t\t\tp = self.qstatoutput\n\t\texcept:\n\t\t\t#command = [ 'qstat','-j',id ]\n\t\t\tcommand = [ 'qstat','-u',os.getenv(\"USER\"),'-g','d' ]\n\t\t\tp = Popen( command ,stdout=PIPE,stderr=PIPE ).communicate()\n\t\t\tself.qstatoutput = p\n\n\t\tisincluster = False\n\t\ttaskstatus = {}\n\t\tfor line in p[0].split(\"\\n\"):\n\t\t\tif not str(self.jobsid) in line:\n\t\t\t\tcontinue\n\t\t\tparseline = line.split()\n\t\t\tstatus= parseline[4]\n\t\t\ttry:\n\t\t\t\ttask = int(parseline[9])\n\t\t\texcept IndexError:\n\t\t\t\t# Implies it is still waiting\n\t\t\t\ttask = int(parseline[8])\n\t\t\ttaskstatus[task] = status\n\t\t\tisincluster = True\n\n\t\tif not isincluster:\n\t\t\t# Checking if the outputfiles are there\n\t\t\tif not os.path.exists(self.outputfiles[taskid]):\n\t\t\t\tmessage = \"\\033[1;31mclustermanager.checkjob: Something went wrong in the cluster:\\033[1;m\"\n\t\t\t\tmessage += \"The task '\"+str(taskid)+\"' of the job '\"+str(self.jobsid)\n\t\t\t\tmessage += \"' is already finish but there is no output root file '\"\n\t\t\t\tmessage += self.outputfiles[taskid]+\"'\\n\"\n\t\t\t\tmessage += \"Check the cluster outputs file\"\n\t\t\t\traise message\n\n\t\t\t# Gathering the file outputs in order to add\n\t\t\tself.taskstatus[\"Done\"].append(taskid)\n\t\t\treturn self.outputfiles[taskid]\n\n\t\t# Still in cluster\n\t\t#statustaskdict = dict( [ (status,[]) for status in taskstatus.values() ] )\n\t\tfor task,status in taskstatus.iteritems():\n\t\t\tif status == \"r\" or status == \"t\":\n\t\t\t\tself.taskstatus[\"r\"].append(task)\n\t\t\telif status == \"qw\":\n\t\t\t\tself.taskstatus[\"qw\"].append(task)\n\t\t\telse:\n\t\t\t\tself.taskstatus[\"Undefined\"].append(task)",
"def get_status(self):\n\t\treturn call_sdk_function('PrlJob_GetStatus', self.handle)",
"async def status(self) -> JobStatus:\n async with self._redis.pipeline(transaction=True) as tr:\n tr.exists(result_key_prefix + self.job_id) # type: ignore[unused-coroutine]\n tr.exists(in_progress_key_prefix + self.job_id) # type: ignore[unused-coroutine]\n tr.zscore(self._queue_name, self.job_id) # type: ignore[unused-coroutine]\n is_complete, is_in_progress, score = await tr.execute()\n\n if is_complete:\n return JobStatus.complete\n elif is_in_progress:\n return JobStatus.in_progress\n elif score:\n return JobStatus.deferred if score > timestamp_ms() else JobStatus.queued\n else:\n return JobStatus.not_found",
"def check_result(self, result):\n self.log.info(\"--check_result, result= %s\", result)\n if result[0]['exit_status'] != 0:\n self.fail(\"##Error detected from check_result\")\n else:\n self.log.info(\"--check_result passed\")",
"def test_status(self):\n\n url = '/%s/jobs/?status=RUNNING' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['job_type']['id'], self.job1.job_type.id)",
"def check_result(self, params, server):\n if server['building']:\n # I assume Server and client are on the same TimeZone\n # the API doesn't tell me where is the server (only /systemInfo)\n job_started = datetime.fromtimestamp(int(server['timestamp']) / 1000)\n time_delta = (params['now'] - job_started)\n\n # New in version 2.7 --> datetime.timedelta.total_seconds\n # we want python >= 2.4 so we will do it ourselves\n seconds_since_start = time_delta.seconds + time_delta.days * 86400\n job_duration = self.seconds2human(seconds_since_start)\n if (seconds_since_start >= params['critical'] * 60):\n msg = '%s has been running for %s, see %sconsole#footer' % (\n params['job'],\n job_duration,\n server['url'])\n status = 'CRITICAL'\n elif (seconds_since_start >= params['warning'] * 60):\n msg = '%s has been running for %s, see %sconsole#footer' % (\n params['job'],\n job_duration,\n server['url'])\n status = 'WARNING'\n else:\n msg = '%s still running after %s, watch it on %sconsole#footer' % (\n params['job'],\n job_duration,\n server['url'])\n status = 'OK'\n else:\n # Easy part, the job has completed ...\n if server['result'] == 'SUCCESS':\n duration = self.seconds2human(server['duration'] / 1000)\n msg = '%s exited normally after %s' % (params['job'], duration)\n status = 'OK'\n\n elif server['result'] == 'UNSTABLE':\n duration = self.seconds2human(server['duration'] / 1000)\n msg = '%s is marked as unstable after %s, see %sconsole#footer' % (\n params['job'], duration, server['url'])\n status = 'WARNING'\n\n elif server['result'] == 'FAILURE':\n msg = '%s exited with an error, see %sconsole#footer' % (\n params['job'], server['url'])\n status = 'CRITICAL'\n\n elif server['result'] == 'ABORTED':\n msg = '%s has been aborted, see %sconsole#footer' % (\n params['job'], server['url'])\n status = 'UNKNOWN'\n else:\n # If you get there, patch welcome\n msg = '%s is in a not known state, Jenkins API issue ? see %s' % (\n params['job'], server['url'])\n status = 'UNKNOWN'\n\n return(status, msg)",
"def checkjob(sid, jid):\n with slycat.web.server.remote.get_session(sid) as session:\n return session.checkjob(jid)",
"def get_query_status(self, query_key):\n try:\n while True:\n query_status = getBG(endpoint='interactions/query/status/%s' % (query_key),\n base_url=base_url, data={})\n if query_status['status'] != 'running':\n # query has finished\n break\n else:\n time.sleep(1)\n except requests.HTTPError as e:\n return {\n 'error': str(e)\n }\n return query_status",
"def test_successful_job(self):\n\n successful_job = json.loads(TREEHERDER_JOB % (\"success\", \"completed\"))\n self.assertEquals(self.query_api.get_job_status(successful_job), SUCCESS)",
"def check_backup(self):\n res = 0\n sql = '''select status\n FROM v$rman_backup_job_details\n WHERE start_time > SYSDATE - 1\n ORDER BY END_TIME '''\n self.cur.execute(sql)\n curres = self.cur.fetchall()\n rescount = (self.cur.rowcount)\n if rescount == 0:\n res = 99\n print(res)\n else:\n for i in curres:\n if re.search('FAILED|ERROR', i[0]):\n res = res + 1\n print(res)",
"def check_result(self, coro_id):\n try:\n status, response = self.coros_result.get(coro_id)\n if status != CoroStatus.Queued:\n self.remove_coro(coro_id)\n return status, response\n except KeyError:\n raise CoroMissingException(\"Coroutine Id {}\"\n \" is not Active\".format(coro_id))",
"def test_successful(self):\n\n url = '/%s/jobs/' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 3)\n for entry in result['results']:\n expected = None\n if entry['id'] == self.job1.id:\n expected = self.job1\n elif entry['id'] == self.job2.id:\n expected = self.job2\n elif entry['id'] == self.job3.id:\n expected = self.job3\n else:\n self.fail('Found unexpected result: %s' % entry['id'])\n self.assertEqual(entry['job_type']['name'], expected.job_type.name)\n self.assertEqual(entry['job_type_rev']['job_type']['id'], expected.job_type.id)\n self.assertEqual(entry['is_superseded'], expected.is_superseded)",
"def _wait_for_query_finish(self, job_id: str, max_wait: int = 60) -> str:\n url_inputs = {'redash_host': self.redash_host, 'job_id': job_id}\n query_url = REDASH_TRACK_JOB_ENDPOINT.format(**url_inputs)\n\n query_result_id: Optional[str] = None\n max_time = time.time() + max_wait\n\n while time.time() < max_time:\n resp = r.get(query_url, headers=self.headers)\n resp_json = resp.json()\n\n LOGGER.debug('Received response from Redash job %s: %s', job_id, resp_json)\n\n job_info = resp_json['job']\n job_status = RedashApiResponse(job_info['status'])\n\n if job_status == RedashApiResponse.SUCCESS:\n query_result_id = job_info['query_result_id']\n break\n\n elif job_status == RedashApiResponse.FAILURE:\n raise RedashQueryCouldNotCompleteException(job_info['error'])\n time.sleep(.5)\n\n if query_result_id is None:\n raise RedashQueryCouldNotCompleteException('Query execution took too long')\n\n return query_result_id",
"def test_job_id(self):\n\n url = '/%s/jobs/?job_id=%s' % (self.api, self.job1.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['id'], self.job1.id)",
"def CheckIfJobFinished(jobid, numseq, to_email, g_params): # {{{\n bsname = \"job_final_process\"\n path_result = os.path.join(g_params['path_static'], 'result')\n rstdir = os.path.join(path_result, jobid)\n gen_logfile = g_params['gen_logfile']\n gen_errfile = g_params['gen_errfile']\n name_server = g_params['name_server']\n g_params['jobid'] = jobid\n g_params['numseq'] = numseq\n g_params['to_email'] = to_email\n jsonfile = os.path.join(rstdir, f\"{bsname}.json\")\n myfunc.WriteFile(json.dumps(g_params, sort_keys=True), jsonfile, \"w\")\n binpath_script = os.path.join(g_params['webserver_root'], \"env\", \"bin\")\n\n finished_idx_file = \"%s/finished_seqindex.txt\"%(rstdir)\n failed_idx_file = \"%s/failed_seqindex.txt\"%(rstdir)\n py_scriptfile = os.path.join(binpath_script, f\"{bsname}.py\")\n finished_idx_list = []\n failed_idx_list = []\n if os.path.exists(finished_idx_file):\n finished_idx_list = list(set(myfunc.ReadIDList(finished_idx_file)))\n if os.path.exists(failed_idx_file):\n failed_idx_list = list(set(myfunc.ReadIDList(failed_idx_file)))\n\n lockname = f\"{bsname}.lock\"\n lock_file = os.path.join(g_params['path_result'], g_params['jobid'],\n lockname)\n\n num_processed = len(finished_idx_list)+len(failed_idx_list)\n if num_processed >= numseq: # finished\n if ('THRESHOLD_NUMSEQ_CHECK_IF_JOB_FINISH' in g_params\n and numseq <= g_params['THRESHOLD_NUMSEQ_CHECK_IF_JOB_FINISH']):\n cmd = [\"python\", py_scriptfile, \"-i\", jsonfile]\n (isSubmitSuccess, t_runtime) = webcom.RunCmd(cmd, gen_logfile, gen_errfile)\n elif not os.path.exists(lock_file):\n bash_scriptfile = f\"{rstdir}/{bsname},{name_server},{jobid}.sh\"\n code_str_list = []\n code_str_list.append(\"#!/bin/bash\")\n cmdline = f\"python {py_scriptfile} -i {jsonfile}\"\n code_str_list.append(cmdline)\n code = \"\\n\".join(code_str_list)\n myfunc.WriteFile(code, bash_scriptfile, mode=\"w\", isFlush=True)\n os.chmod(bash_scriptfile, 0o755)\n os.chdir(rstdir)\n cmd = ['sbatch', bash_scriptfile]\n cmdline = \" \".join(cmd)\n verbose = False\n if 'DEBUG' in g_params and g_params['DEBUG']:\n verbose = True\n webcom.loginfo(\"Run cmdline: %s\"%(cmdline), gen_logfile)\n (isSubmitSuccess, t_runtime) = webcom.RunCmd(cmd, gen_logfile, gen_errfile, verbose)\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"isSubmitSuccess: %s\"%(str(isSubmitSuccess)), gen_logfile)"
]
| [
"0.65486574",
"0.6471142",
"0.6466445",
"0.6453059",
"0.6433005",
"0.6426885",
"0.63764495",
"0.6318302",
"0.62912136",
"0.62787354",
"0.6262894",
"0.62122035",
"0.6210269",
"0.6176431",
"0.61736745",
"0.6141012",
"0.6130964",
"0.6082434",
"0.6055645",
"0.6053044",
"0.60484564",
"0.60039973",
"0.600078",
"0.59841734",
"0.5966555",
"0.5955238",
"0.5944097",
"0.5938657",
"0.5930759",
"0.59089476"
]
| 0.7408653 | 0 |
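For reference, the result-cache step in the large queue-checking snippet quoted above shards cached results into two-character subfolders keyed by an md5 hash of the sequence (or of seq plus query_para for the prodres case). A minimal sketch of that layout follows; path_cache and seq are hypothetical placeholders, not values taken from the source.

# Minimal sketch of the md5-keyed cache layout used in the snippet above (hypothetical inputs).
import hashlib
import os

path_cache = "/path/to/cache"              # assumed cache root
seq = "MKTIIALSYIFCLVFA"                   # assumed query sequence
md5_key = hashlib.md5(seq.encode('utf-8')).hexdigest()
subfolder = md5_key[:2]                    # two-character shard, as in the snippet
cached_zip = os.path.join(path_cache, subfolder, md5_key + ".zip")
# e.g. path_cache/ab/ab012...ef.zip holds the zipped result folder for this sequence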
Check for endpoint relative position and overlap. Used in scan_P__(). | def comp_edge(_P, P): # Used in scan_P_().
_x0 = _P['x0']
_xn = _x0 + _P['L']
x0 = P['x0']
xn = x0 + P['L']
if _xn < xn: # End-point relative position.
return True, x0 < _xn # Overlap.
else:
return False, _x0 < xn | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _in_bounds(self, x, y):\r\n return 0 <= x < 8 and 0 <= y < 8",
"def is_valid_overlap_xy(dir_id, p1, p2, pattern_catalog, pattern_width, adjacency_directions):\n #dir_corrected = (0 - adjacency_directions[dir_id].x, 0 - adjacency_directions[dir_id].y)\n dir_corrected = (0 + adjacency_directions[dir_id].x, 0 + adjacency_directions[dir_id].y)\n dimensions = (1,0)\n not_a_number = -1\n #TODO: can probably speed this up by using the right slices, rather than rolling the whole pattern...\n #print(d, p2, p1)\n shifted = np.roll(np.pad(pattern_catalog[p2], pattern_width, mode='constant', constant_values = not_a_number), dir_corrected, dimensions)\n compare = shifted[pattern_width:pattern_width+pattern_width, pattern_width:pattern_width+pattern_width]\n left = max(0,0 + dir_corrected[0])\n right = min(pattern_width, pattern_width + dir_corrected[0])\n top = max(0,0 + dir_corrected[1])\n bottom = min(pattern_width, pattern_width + dir_corrected[1])\n a = pattern_catalog[p1][top:bottom,left:right]\n b = compare[top:bottom,left:right]\n res = np.array_equal(a,b)\n #print(f\"res: {res}\")\n return res",
"def check_allowed_positions(scan, psi, probe_shape):\n int_scan = scan // 1\n less_than_one = int_scan < 1\n greater_than_psi = np.stack(\n (int_scan[..., -2] >= psi.shape[-2] - probe_shape[-2],\n int_scan[..., -1] >= psi.shape[-1] - probe_shape[-1]),\n -1,\n )\n if np.any(less_than_one) or np.any(greater_than_psi):\n x = np.logical_or(less_than_one, greater_than_psi)\n raise ValueError(\"These scan positions exist outside field of view:\\n\"\n f\"{scan[np.logical_or(x[..., 0], x[..., 1])]}\")",
"def endpoints(image):\n return _neighbors_conv(image) == 1",
"def _overlapping(self, atom1, atom2):\n\n if np.linalg.norm(atom1.pos-atom2.pos) < (atom1.rad+atom2.rad):\n return True\n else:\n return False",
"def in_bounds(self, x, y):\n return x >= 0 and x < 8 and y >= 0 and y < 8",
"def find_isolated_endpoints(lines):\n \n isolated_endpoints = []\n count = len(lines)\n print(\"Finding isolated end points 2/3\")\n pb = pbar.ProgressBar(count)\n for i, line in enumerate(lines):\n pb += 1\n other_lines = lines[:i] + lines[i+1:]\n for q in [0,-1]:\n endpoint = Point(line.coords[q])\n if any(endpoint.touches(another_line) \n for another_line in other_lines):\n continue\n else:\n isolated_endpoints.append(endpoint)\n del pb\n return isolated_endpoints",
"def check_positions_in_range(self):\n reachable = 0\n total = 0\n reachable, total = self.check_positions_in_range_for_list(reachable, total, self.close_positions_world)\n reachable, total = self.check_positions_in_range_for_list(reachable, total, self.medium_positions_world)\n reachable, total = self.check_positions_in_range_for_list(reachable, total, self.far_positions_world)\n\n return float(reachable) / float(total)",
"def check_coord_in_range(self, x, y):\n return 0 <= x < self.cols and 0 <= y < self.lines",
"def near_segment(point:tuple, edge:tuple)->bool:\n return between(point[0], edge[0][0], edge[1][0]) and between(point[1], edge[0][1], edge[1][1])",
"def check_positions_in_range_for_list(self, reachable, total, list):\n for pose in list:\n total += 1\n distance_to_base = math.sqrt(pose[0] ** 2 + pose[1] ** 2 + pose[2] ** 2)\n if distance_to_base < self.robot_reachable_distance:\n reachable += 1\n else:\n rospy.logwarn('Position not in range: {}, distance to base: {}'.format(pose, distance_to_base))\n return reachable, total",
"def intersects(self):\n match = False\n for i in range(len(self.__points) - 1):\n p1 = self.__points[i]\n p2 = self.__points[i + 1]\n bounds = self.__line_segment(p1, p2)\n if not bounds is None:\n xmin = bounds[0]\n ymin = bounds[1]\n xmax = bounds[0]\n ymax = bounds[1]\n for j in range(len(bounds)):\n if not (j % 2):\n if bounds[j] < xmin:\n xmin = bounds[j]\n elif bounds[j] > xmax:\n xmax = bounds[j]\n else:\n if bounds[j] < ymin:\n ymin = bounds[j]\n elif bounds[j] > ymax:\n ymax = bounds[j]\n x = self.x\n y = self.y\n # TODO: Determine direction, and check two leading edge points; ie. last vector ----> then points are x+width,y+width x+width,y-width\n if x > xmin and x < xmax and y > ymin and y < ymax:\n match = True\n break\n return match",
"def on_segment(point_p, point_q, point_r):\n if (point_q.x <= max(point_p.x, point_r.x)\n and point_q.x >= min(point_p.x, point_r.x)\n and point_q.y <= max(point_p.y, point_r.y)\n and point_q.y >= min(point_p.y, point_r.y)):\n return True\n return False",
"def is_underlapping(\n geom: LineString,\n trace: LineString,\n endpoint: Point,\n snap_threshold: float,\n snap_threshold_error_multiplier: float,\n) -> Optional[bool]:\n split_results = list(split(geom, trace).geoms)\n if len(split_results) == 1:\n # Do not intersect\n return True\n if len(split_results) > 1:\n for segment in split_results:\n if (\n segment.distance(endpoint)\n < snap_threshold * snap_threshold_error_multiplier\n ):\n # Dangling end, overlapping\n return False\n log_prints = {\n \"geom\": geom,\n \"trace\": trace,\n \"endpoint\": endpoint,\n \"snap_threshold\": snap_threshold,\n \"snap_threshold_error_multiplier\": snap_threshold_error_multiplier,\n }\n log.error(f\"Expected is_underlapping to be resolvable.\\nvalues:{log_prints}\")\n return None",
"def contains_point(self, p):\n return self.begin <= p < self.end",
"def __is_position_overlapped(self, position, exon):\n start, end = self.__get_exon_coordinates(exon)\n return position >= start and position <= end",
"def relative_interior_contains(self, point):\n try:\n p = vector(point)\n except TypeError: # point not iterable or no common ring for elements\n if len(point)>0:\n return False\n else:\n p = vector(self.field(), [])\n\n if len(p)!=self.ambient_dim():\n return False\n \n for eq in self.equation_generator():\n if not eq.contains(p):\n return False\n\n for ine in self.inequality_generator():\n if not ine.interior_contains(p):\n return False\n\n return True",
"def is_within(point, surface, offset):\r\n return (point[0] >= offset[0] and point[0] < offset[0] + surface.get_width() \\\r\n and point[1] >= offset[1] and point[1] < offset[1] + surface.get_height())",
"def overlap_checker(x1, y1, x2, y2, all_coord):\n overlaps = False\n i = 0\n start = 0\n for i in range(int(len(all_coord)/4)):\n b = all_coord[start:start + 4]\n start += 4\n try:\n if (max(b[0], b[2]) <= min(x1, x2) or max(x1, x2) <= min(b[0], b[2]) or max(b[1], b[3]) <= min(y1, y2) or max(y1, y2) <= min(b[1], b[3])):\n if not (min(x1, x2) <= min(b[0], b[2]) and min(y1, y2) <= min(b[1], b[3]) and max(x1, x2) >= max(b[0], b[2]) and max(y1, y2) >= max(b[1], b[3])):\n if not (min(b[0], b[2]) <= min(x1, x2) and min(b[1], b[3]) <= min(y1, y2) and max(b[0], b[2]) >= max(x1, x2) and max(b[1], b[3]) >= max(y1, y2)):\n overlaps = False\n else:\n return True\n else:\n return True\n else:\n return True\n except TypeError:\n overlaps = False\n if not overlaps:\n return False",
"def onSegment(self, p, q, r):\n if ((q.x <= max(p.x, r.x)) and (q.x >= min(p.x, r.x)) and\n (q.y <= max(p.y, r.y)) and (q.y >= min(p.y, r.y))):\n return True\n return False",
"def contains ( self, pos ):\n \n poly = Polygon(array(self.edges).reshape(-1,2)[:,0],array(self.edges).reshape(-1,2)[:,1])\n dists = poly.is_inside(pos[0,:],pos[1,:]) \n if self.include_border:\n inds = dists >= -self.abs_tol\n else:\n inds = dists > 0\n \n \n # if none inside, take nearest\n if ~inds.any() and self.default_nearest:\n dr2 = array(self.edges).reshape(-1,2).mean(0)\n inds[argmin(dr2)] = True\n \n return inds",
"def fetchbounds(self):\n pnts = [x for x in [self.out_start, self.start, self.in_start, \\\n self.in_end, self.end, self.out_end] \\\n if x is not None]\n return min(pnts), max(pnts)",
"def overlap(x,y):\n if (x[0]<=y[-1] and x[-1]>y[0]) or (y[0]<=x[-1] and y[-1]>x[0]):\n return 1\n else: return 0",
"def check_overlap(current, hit, overlap = 200):\n for prev in current:\n p_coords = prev[2:4]\n coords = hit[2:4]\n if get_overlap(coords, p_coords) >= overlap:\n return True\n return False",
"def in_range(x, y):\n if (x < 0 or x > width or y < 0 or y > length):\n return False\n else:\n return True",
"def overlap(component1, component2):\n if component1[0].start <= component2[0].stop and component2[0].start <= component1[0].stop:\n if component1[1].start <= component2[1].stop and component2[1].start <= component1[1].stop:\n return True\n return False",
"def contains_point(self, x, y):\r\n if self.m == None:\r\n if abs(x - self.start[0]) > 0.6:\r\n return False\r\n else:\r\n if (y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1]):\r\n return True\r\n else:\r\n return False\r\n else: \r\n y0 = int(self.m * x + self.n)\r\n if abs(y - y0) > 0.6: \r\n return False \r\n else: \r\n if ((x >= self.start[0] and x <= self.end[0]) or \\\r\n (x <= self.start[0] and x >= self.end[0])) and \\\r\n ((y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1])): \r\n return True\r\n else:\r\n return False",
"def CheckOverlap(self, via):\r\n\r\n for item in self.overlappings:\r\n if type(item) is pcbnew.PAD:\r\n if item.GetBoundingBox().Intersects(via.GetBoundingBox()):\r\n return True\r\n elif type(item) is pcbnew.PCB_VIA:\r\n # Overlapping with vias work best if checking is performed by intersection\r\n if item.GetBoundingBox().Intersects(via.GetBoundingBox()):\r\n return True\r\n elif type(item) in [pcbnew.ZONE, pcbnew.FP_ZONE]:\r\n if item.GetBoundingBox().Intersects(via.GetBoundingBox()):\r\n return True\r\n elif type(item) is pcbnew.PCB_TRACK:\r\n if item.GetBoundingBox().Intersects(via.GetBoundingBox()):\r\n width = item.GetWidth()\r\n dist, _ = pnt2line(via.GetPosition(), item.GetStart(), item.GetEnd())\r\n if dist <= self.clearance + width // 2 + via.GetWidth() / 2:\r\n return True\r\n return False",
"def contains(self, possible_point):\n# if possible_point == self.endpoints[0] or possible_point == self.endpoints[1]:\n# return False\n distance = sum(possible_point.distance_to(p) for p in self.endpoints)\n return abs(distance - self.length()) < 0.0000001",
"def inrange(cc, point):\n return point.row in range(cc.top, cc.bottom+1) and point.col in range(cc.left, cc.right+1)"
]
| [
"0.6494828",
"0.61780626",
"0.61478144",
"0.61463255",
"0.61320376",
"0.6116703",
"0.61136043",
"0.6108635",
"0.60967755",
"0.606722",
"0.60669786",
"0.6033708",
"0.6022681",
"0.60138786",
"0.6001213",
"0.5966237",
"0.59654135",
"0.59575963",
"0.59564316",
"0.5953769",
"0.59290415",
"0.5928847",
"0.5923114",
"0.59226096",
"0.59173083",
"0.5895177",
"0.5842998",
"0.5838095",
"0.5835294",
"0.58351225"
]
| 0.6398601 | 1 |
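A minimal usage sketch of the comp_edge helper from the row above; the two P dicts are hypothetical and supply only the 'x0' and 'L' keys the function reads.

# Assumes comp_edge as defined in the row above.
_P = {'x0': 0, 'L': 5}    # spans x = 0..5
P = {'x0': 3, 'L': 6}     # spans x = 3..9
print(comp_edge(_P, P))   # (True, True): _P's end point comes first, and the spans overlap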
Cluster P vertically, stop at the end of segment | def cluster_vertical(P): # Used in form_segment_().
if len(P['down_fork_']) == 1 and len(P['down_fork_'][0]['up_fork_']) == 1:
down_fork = P.pop('down_fork_')[0] # Only 1 down_fork.
down_fork.pop('up_fork_') # Only 1 up_fork.
down_fork.pop('y')
down_fork.pop('sign')
return [P] + cluster_vertical(down_fork) # Plus next P in segment
return [P] # End of segment | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_clusters(self, p: float):\n pass",
"def compute_clusters(self, p: float):\n w = self.w\n h = self.h\n self.p = p\n self.sample = self._get_sample(p)\n self.cluster = np.zeros((w + 1, h + 1), dtype=int)\n visited = np.full((w + 1, h + 1), False)\n k = 0 # cluster index\n myvertex = 1\n stack = []\n # as long as we havent treated the last myvertex, continue\n while myvertex < (w + 1) * (h + 1):\n # put the next site in myvertex in to the stack if the site is\n # unvisited, otherwise myvertex ++\n iv = (myvertex - 1) % (w + 1)\n jv = (myvertex - 1) // (w + 1)\n if not visited[iv, jv]:\n stack.append([iv, jv])\n k += 1 # increment cluster index\n else:\n myvertex += 1\n\n while stack:\n # pop the current myvertex from the stack and set its cluster\n # label to k and mark as visited\n i, j = stack.pop(0)\n self.cluster[i, j] = k\n visited[i, j] = True\n # check all of its 4 neighbors, if neighbor is unvisited and\n # connected to current site,\n # then set its cluster label to k and marked visited and\n # push this site into stack, otherwise do nothing\n # check the left neighbor, first coordinate must >0 to have\n # a left neighbor\n if i > 0 and not visited[i - 1, j] and \\\n self.sample[i - 1, j, 0] == 1:\n self.cluster[i - 1, j] = k\n visited[i - 1, j] = True\n stack.append([i - 1, j])\n # check the right neighbor, first coordinate must be < w\n # to have a right neighbor\n if i < w and not visited[i + 1, j] and \\\n self.sample[i, j, 0] == 1:\n self.cluster[i + 1, j] = k\n visited[i + 1, j] = True\n stack.append([i + 1, j])\n # check the up neighbor, second coordinate must be < h\n # to have such a neighbor\n if j < h and not visited[i, j + 1] and \\\n self.sample[i, j, 1] == 1:\n self.cluster[i, j + 1] = k\n visited[i, j + 1] = True\n stack.append([i, j + 1])\n # check the bottom neighbor, second coordinate must be > 0\n if j > 0 and not visited[i, j - 1] and \\\n self.sample[i, j - 1, 1] == 1:\n self.cluster[i, j - 1] = k\n visited[i, j - 1] = True\n stack.append([i, j - 1])",
"def compute_clusters(self, p: float):\n w = self.w\n h = self.h\n self.p = p\n self.sample = self._get_sample(p)\n self.cluster = np.zeros((w, h), dtype=int)\n x = self.sample\n visited = np.full((w, h), False)\n k = 0 # cluster index\n myvertex = 1\n stack = []\n # as long as we havent treated the last myvertex, continue\n while myvertex < w * h + 1:\n # put the next site in myvertex in to the stack if the site is\n # unvisited, otherwise myvertex ++\n iv = (myvertex - 1) % w\n jv = (myvertex - 1) // w\n if not visited[iv, jv] and x[iv, jv] == 1:\n stack.append([iv, jv])\n k += 1 # increment cluster index\n else:\n myvertex += 1\n\n while stack:\n # pop the current myvertex from the stack and set its cluster\n # label to k and mark as visited\n i, j = stack.pop(0)\n self.cluster[i, j] = k\n visited[i, j] = True\n # check all of its six neighbors, if neighbor is unvisited and\n # connected to current site,\n # then set its cluster label to k and marked visited and\n # push this site into stack, otherwise do nothing\n # check the 12clock neighbor\n if j < h-1 and not visited[i, j+1] and x[i, j+1] == 1:\n self.cluster[i, j+1] = k\n visited[i, j+1] = True\n stack.append([i, j+1])\n # check the 2clock neighbor\n if i < w-1 and not visited[i+1, j] and x[i+1, j] == 1:\n self.cluster[i+1, j] = k\n visited[i+1, j] = True\n stack.append([i+1, j])\n # check the 4clock neighbor\n if i < w-1 and j > 0 and not visited[i+1, j-1] \\\n and x[i+1, j-1] == 1:\n self.cluster[i+1, j-1] = k\n visited[i+1, j-1] = True\n stack.append([i+1, j-1])\n # check the 6clock neighbor\n if j > 0 and not visited[i, j-1] and x[i, j-1] == 1:\n self.cluster[i, j-1] = k\n visited[i, j-1] = True\n stack.append([i, j-1])\n # check the 8clock neighbor\n if i > 0 and not visited[i-1, j] and x[i-1, j] == 1:\n self.cluster[i-1, j] = k\n visited[i-1, j] = True\n stack.append([i-1, j])\n # check the 10clock neighbor\n if i > 0 and j < h-1 and not visited[i-1, j+1] \\\n and x[i-1, j+1] == 1:\n self.cluster[i-1, j+1] = k\n visited[i-1, j+1] = True\n stack.append([i-1, j+1])",
"def cluster(self):\n center_index = np.random.choice(range(100), self.K, replace=False)\n self.centers = np.array([self.X[i] for i in center_index])\n self.cluster_sizes = np.zeros(self.K)\n member_of = np.zeros(100, dtype=int)\n min_dist = np.array([distance.euclidean(self.centers[0], point) for point in self.X])\n self.cluster_sizes[0] = 100\n flag = True\n while flag:\n flag = False\n for i, point in enumerate(self.X):\n for j, center in enumerate(self.centers):\n if member_of[i] != j:\n dist = distance.euclidean(point, center)\n if dist < min_dist[i]:\n flag = True\n current = member_of[i]\n self.cluster_sizes[current] -= 1\n self.cluster_sizes[j] += 1\n member_of[i] = j\n min_dist[i] = dist\n if np.count_nonzero(self.cluster_sizes) != self.K:\n return self.cluster()\n self.centers = np.zeros((self.K, 2), dtype='d')\n for i, point in enumerate(self.X):\n center = member_of[i]\n self.centers[center] += point\n for i, center in enumerate(self.centers):\n center /= self.cluster_sizes[i]",
"def plot(self, p: int):\n self.compute_clusters(p)\n self.plot_clusters()",
"def segment(X, MU, k, r):\n cls = cluster(r)\n new_x = X.copy()\n for i in range(k):\n new_x[cls == i, :] = MU[i]\n return new_x",
"def segment(data):",
"def _cluster_segments_all_way(self, segmented_instances, labels, \\\n end_points, stats, cluster_thresh=0.5):\n\n #self.showme(segmented_instances, 'main img')\n segment_association_list = []\n max_num_end_points= 0\n\n # for each stem segment\n for i in range(0, len(labels)):\n # each end point in the current segment i\n if max_num_end_points < len(end_points[i]):\n max_num_end_points = len(end_points[i])\n for k in range(0, len(end_points[i])):\n angle_list=[]\n # find the segment that is most likely connected to segment i at end point[i][k]\n for j in range(0, len(labels)):\n # make sure we are not trying to connect the segment to itself\n if i!= j:\n # angle calculates the angle between the line stats['centroid'][i]-end_points[i][k]\n # and stats['centroid'][i]-stats['centroid'][j]\n\n angle = self._ang([stats['centroid'][i],end_points[i][k]], \\\n [stats['centroid'][i], stats['centroid'][j]] )\n # if the angle value is within the acceptable range of +/- angle_thresh\n if angle<=self.angle_thresh or angle>=360-self.angle_thresh:\n other_angle, other_seg_section, end_point_dist = self._get_best_fit(segmented_instances, \\\n len(labels), \\\n stats, end_points,\\\n i, j, k, pos_angle=angle<=self.angle_thresh)\n # if the best fit segment also has a small angle between its\n # end point-centroid line and centroid-centroid line,\n # add it to segments connected to segment i\n if other_angle!=None and other_angle<=self.angle_thresh:\n angle_list.append((j, other_seg_section, other_angle, end_point_dist, angle))\n #Sort the list of stem segments connected to i by end_point_dist\n angle_list = sorted(angle_list, key=lambda x:x[3])\n #Sorting by the Euclidian distance of the end_point_dist and the other_angle does not change end result\n #angle_list = sorted(angle_list, key=lambda x:(math.sqrt(x[3]**2.0+x[2]**2.0)))\n # the angle value reflects how far segment k is from the straight line\n # going through the centroids\n if len(angle_list)>0:\n # (i, j, k, l, angle between i and centroid line, angle between j and centroid line, distance between closest end points k in seg i and l in seg j)\n segment_association_list.append((i,angle_list[0][0],k, angle_list[0][1], angle_list[0][4], angle_list[0][2], angle_list[0][3]))\n\n\n # sort slope differences in an increasing order\n segment_association_list = sorted(segment_association_list,key=lambda x:(x[6]))\n\n # find best match by iteretively selecting the smallest difference\n # and adding it to the ith cluster\n cluster_list = []\n cluster = np.full(len(labels),None)\n colored_clusterImg = np.zeros(segmented_instances.shape).astype(np.uint8)\n #clusterImg = np.zeros(segmented_instances.shape).astype(np.uint8)\n\n # initialize cluster list to single clusters contianing only each individual segment\n for i in range(0, len(labels)):\n cluster[i]=i\n cluster_list.append([i])\n #self.showme(clusterImg, str(i))\n\n visited=np.full((len(labels),max_num_end_points), False)\n\n #cluster=np.frompyfunc(list,1,1)(cluster) # allows us to append to only the specified list end_points[i]\n new_cluster_num=0\n color_offset=len(labels)\n\n # for each pair of segments in our list of best fit segments\n for curr_tuple in segment_association_list:\n img = np.zeros(segmented_instances.shape)\n i = curr_tuple[0] # index of first segment\n j = curr_tuple[1] # index of second segment in the tuple\n i_section = curr_tuple[2] #end point number in segment i\n j_section = curr_tuple[3] #end point number in segment j\n angle = curr_tuple[4]\n other_angle = curr_tuple[5]\n 
end_point_dist = curr_tuple[6] #distance between the connecting end points of segments i and j\n img[segmented_instances== i]= 255\n img[segmented_instances== j]= 255\n if (visited[i][i_section]==False)and(visited[j][j_section]==False):\n #cv2.line(clusterImg,(end_points[i][i_section][0],end_points[i][i_section][1]),\\\n # (end_points[j][j_section][0], end_points[j][j_section][1]),150,2)\n #self.showme(clusterImg, str(i))\n visited[i][i_section]=True\n visited[j][j_section]=True\n cluster_num = cluster[i]\n if cluster[i]!=cluster[j]:\n other_cluster_num = cluster[j]\n cluster_list[cluster_num] = list(set(cluster_list[cluster_num]+\\\n copy.deepcopy(cluster_list[other_cluster_num])))\n # update cluster numbers for all segments moved into new cluster\n for seg in cluster_list[other_cluster_num]:\n cluster[seg]=cluster_num\n # update cluster numbers for clusters larger than cluster to be removed\n for idx in range(0, len(cluster)):\n if (cluster[idx]>other_cluster_num):\n cluster[idx]= cluster[idx]-1\n del cluster_list[other_cluster_num]\n\n\n #show clustered segments\n color = 0\n cluster_num = 0\n cluster_mask=[]\n\n for c in cluster_list:\n color = color+0.1\n cluster_mask.append(np.zeros(segmented_instances.shape).astype(np.uint8))\n\n for i in c:\n cluster_mask[cluster_num][(segmented_instances == labels[i])]=1\n colored_clusterImg[(segmented_instances == labels[i])]= int(color*255)\n \"\"\"if self.key in ['../data/images/image1672', '../data/images/image1289']:\n self.showme(colored_clusterImg)\"\"\"\n cluster_num +=1\n\n return cluster_mask, colored_clusterImg",
"def initialize_pp(img: np.ndarray):\n\n h, w, c = img.shape\n pixels = img.copy().reshape(h*w, c)\n\n # Choose one center uniformly at random \n # from among the data points\n r = np.random.randint(h*w)\n current_cluster_centers[0, 0, :] = pixels[r, :]\n\n # remove that point from the data set\n pixels = np.delete(pixels, r, axis=0)\n\n # For each data point x, compute D(x), \n # the distance between x and the nearest center \n # that has already been chosen.\n for k in range(1, numclusters):\n dist_sq = np.zeros(pixels.shape[0])\n for i in range(pixels.shape[0]): # over data points\n dist = []\n for j in range(k): # over current clusters\n # calculate distance to the cluster\n diff = pixels[i, :] - current_cluster_centers[j, 0, :]\n dist.append(np.inner(diff, diff))\n \n # choose the distance closest to the cluster\n dist_sq.itemset(i, min(dist))\n\n probs = dist_sq / dist_sq.sum()\n cumprobs = probs.cumsum()\n r = np.random.uniform()\n for i, prob in enumerate(cumprobs):\n if r <= prob:\n index = i\n break\n \n # add a new cluster\n current_cluster_centers[k, 0, :] = pixels[index, :]\n\n # remove that point from the data set\n pixels = np.delete(pixels, index, axis=0)\n\n\n print(\"Current clusters:\\n\", current_cluster_centers)",
"def hierarical_clustering(p_df, method=\"average\"):\n pdf_values = p_df.values\n np.fill_diagonal(pdf_values, 0)\n pdf_values_1_d = matrix_to_squareform(pdf_values)\n cluster_matrix = linkage(pdf_values_1_d, method)\n return cluster_matrix",
"def prepareParrallelize(self,segs):\n\n angles = numpy.array([s.angle for s in segs ])\n angles[numpy.where(angles<0)] += _pi # we care about direction, not angle orientation\n clList = clusterValues(angles, 0.15, refScaleAbs='abs')\n\n for cl in clList:\n meanA = angles[list(cl)].mean()\n for i in cl:\n seg = segs[i]\n seg.newAngle = meanA if seg.angle>=0. else meanA-_pi",
"def grow_cluster(self):\n # Need a new list to store cluster points\n new_cluster_pts = []\n\n # Loop through the active points in the perimeter, add the point the cluster with probability p_init\n active_perimeter = [k for k, v in self.perimeter.items() if bool(v)]\n for pt in active_perimeter:\n if self.p >= np.random.rand():\n new_cluster_pts.append(pt)\n else:\n # self.cluster[pt] = False\n self.perimeter[pt] = False\n\n # Check if there are no new points being added to the cluster\n if len(new_cluster_pts) == 0:\n self.stopped_growing = True\n return False\n\n # Loop through the new cluster points and add their nearest neighbors to the perimeter\n for pt in new_cluster_pts:\n if pt[0] in range(0, self.N) and pt[1] in range(0, self.N):\n self.cluster[pt] = True\n self.perimeter[pt] = False\n self.no_particles += 1\n self.world[pt[1], pt[0]] = 1\n self.add_perimeter(pt)\n else:\n self.reached_end = True\n return False",
"def find_cluster(self, point: tuple) -> tuple:\r\n\r\n # set quickness\r\n quickness = 1\r\n\r\n # Create initial bounding box\r\n bb = [point[0], point[1], point[0], point[1]]\r\n\r\n # Get the first direction to expand the box\r\n direction = self.touching_border(bb, quickness)\r\n\r\n # loop until the box has no green on the perimeter\r\n while direction != 'EDGE':\r\n\r\n # Check if there is an error\r\n if bb[2] - bb[0] > self.size[0] / 4 \\\r\n or bb[3] - bb[1] > self.size[1] / 4:\r\n \r\n bb[0] = 0\r\n bb[2] = len(self.bin_pic[0]) - 1\r\n return ('ERROR',\r\n [(x, y)\r\n for x in range(bb[0], bb[2] + 1)\r\n for y in range(bb[1], bb[3] + 1)],\r\n bb)\r\n\r\n # Expand Down and Right\r\n if direction == 'BR':\r\n bb[2] += quickness\r\n bb[3] += quickness\r\n\r\n # Expand Down and Left\r\n elif direction == 'BL':\r\n bb[0] -= quickness\r\n bb[3] += quickness\r\n\r\n # Expand Right\r\n elif direction == 'RIGHT':\r\n bb[2] += quickness\r\n\r\n # Expand Down \r\n elif direction == 'BOTTOM':\r\n bb[3] += quickness\r\n\r\n # Expand Left \r\n elif direction == 'LEFT':\r\n bb[0] -= quickness\r\n\r\n # Expand Up\r\n elif direction == 'TOP':\r\n bb[1] -= quickness\r\n\r\n # Check the area directly around the current box\r\n elif direction == 'NONE':\r\n cntn = False\r\n \r\n for i in range(1, 3):\r\n\r\n # if there is a green pixel just outside of the box,\r\n # expand the box to cover it and continue searching\r\n tb = self.touching_border([bb[0] - i,\r\n bb[1] - i,\r\n bb[2] + i,\r\n bb[3] + i])\r\n \r\n if tb != 'NONE':\r\n direction = tb\r\n cntn = True\r\n break\r\n \r\n if cntn:\r\n continue\r\n\r\n break\r\n \r\n # Default case\r\n else:\r\n raise IndexError(str(direction) + ' is not a valid direction!')\r\n\r\n # Get new direction to expand in\r\n direction = self.touching_border(bb, quickness)\r\n\r\n # Gather all the green pixels within the bounding box \r\n cluster = [(x, y)\r\n for x in range(bb[0], bb[2] + 1)\r\n for y in range(bb[1], bb[3] + 1)\r\n if self.bin_pic[y][x]]\r\n\r\n # Don't count the plant if it's touching the edge of the picture\r\n if direction == 'EDGE':\r\n if len(cluster) > 250:\r\n return (bb, cluster)\r\n else:\r\n return (None, cluster, bb)\r\n \r\n return (bb, cluster)",
"def clustering(pcd: o3d.geometry.PointCloud):\n with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug) as cm:\n labels = np.array(pcd.cluster_dbscan(eps=1, min_points=30, print_progress=True))\n\n max_label = labels.max()\n print(f\"point cloud has {max_label + 1} clusters\")\n colors = plt.get_cmap(\"tab20b\")(labels / (max_label if max_label > 0 else 1))\n colors[labels < 0] = 0\n pcd.colors = o3d.utility.Vector3dVector(colors[:, :3])\n return pcd, labels",
"def grow_cluster(self):\n fate = np.random.rand(len(self.perimeter)) <= self.p\n new_cluster_pts = []\n new_dead_pts = []\n for pt, f in zip(self.perimeter, fate):\n if f:\n new_cluster_pts.append(pt)\n else:\n new_dead_pts.append(pt)\n self.perimeter = set()\n for pt in new_dead_pts:\n self.dead.add(pt)\n self.world[pt] = DEAD\n for pt in new_cluster_pts:\n self.cluster.add(pt)\n self.world[pt] = ALIVE\n self.add_perimeter(pt)",
"def peel_clusters(self, plot_step=0):\n\n def peel_edge(cluster, vertex):\n \"\"\"\n :param cluster current active cluster\n :param vertex pendant vertex of the edge to be peeled\n\n Recursive function which peels a branch of the tree if the input vertex is a pendant vertex\n\n If there is only one neighbor of the input vertex that is in the same cluster, this vertex is a pendant vertex and can be peeled. The function calls itself on the other vertex of the edge leaf.\n \"\"\"\n plot = True if self.plot and plot_step else False\n num_connect = 0\n\n for wind in self.graph.wind:\n (NV, NE) = vertex.neighbors[wind]\n if NE.support == 2:\n new_cluster = find_cluster_root(NV.cluster)\n if new_cluster is cluster and not NE.peeled:\n num_connect += 1\n edge, new_vertex = NE, NV\n if num_connect > 1:\n break\n if num_connect == 1:\n edge.peeled = True\n if vertex.state:\n edge.state = not edge.state\n edge.matching = True\n vertex.state = False\n new_vertex.state = not new_vertex.state\n if plot:\n self.uf_plot.plot_edge_step(edge, \"match\")\n self.uf_plot.plot_strip_step_anyon(vertex)\n self.uf_plot.plot_strip_step_anyon(new_vertex)\n else:\n if plot:\n self.uf_plot.plot_edge_step(edge, \"peel\")\n peel_edge(cluster, new_vertex)\n\n for vertex in self.graph.V.values():\n if vertex.cluster is not None:\n cluster = find_cluster_root(vertex.cluster)\n peel_edge(cluster, vertex)\n\n if self.plot and not plot_step:\n self.uf_plot.plot_removed(self.graph, \"Peeling completed.\")",
"def _delete_point_cluster(self, pts, starting_pt, max_dist=None):\n\n if max_dist == None:\n max_dist=self.maximum_edge_point_distance\n first = starting_pt\n last = first\n # Find end of cluster\n k = last+1\n while last < len(pts)-1 and k< len(pts)-1 and abs(pts[k]-pts[last])<max_dist:\n k = k+1\n last+=1\n # Find beginning of cluster\n k=first-1\n while first> 0 and k> 0 and abs(pts[k]-pts[first])<max_dist:\n k=k-1\n first=first-1\n\n # Delete cluster pixels\n for i in range(first,last+1):\n del pts[i]\n\n return pts",
"def plot_clusters(self):\n pass",
"def draw_clusters(img, p1, p2, k, label, thres, padding):\n for i in range(k):\n color = np.random.uniform(low=0, high=255, size=3)\n index = np.where(label == i)[0]\n if len(index) <= thres:\n continue\n\n # plot for one cluster\n start = p1[index]\n end = p2[index]\n img = draw_circles(img, start, color)\n img = draw_circles(img, end, color)\n img = draw_arrows(img, start, end, color)\n min_x, min_y = np.amin(end, axis=0).astype(int) - padding\n max_x, max_y = np.amax(end, axis=0).astype(int) + padding\n img = cv2.rectangle(img, (min_x, min_y), (max_x, max_y), color, 2)\n return img",
"def __initCluster(self):\n data_size, cluster_center = self.data_size, self.cluster_center\n self.cluster_temp = np.zeros(data_size, dtype=int)\n self.cluster_upper_bound = np.full(len(cluster_center), float('inf'), dtype=float)\n for center in cluster_center:\n self.cluster_temp[center] = center",
"def KMeansClustering(VSA, A, DuplicateSectionsArray, labels,K=None):\r\n Q=labels.max()\r\n NewLabelsInOne=[0]*len(DuplicateSectionsArray)\r\n centroids=[]\r\n AllSeperatedLabels=[]\r\n for o in range(len(DuplicateSectionsArray)):\r\n NewLabelsList=[]\r\n p = DuplicateSectionsArray[o] \r\n plt.figure()\r\n plt.imshow(labels==p)\r\n plt.title(f'Problematic Section {p}')\r\n if K==None:\r\n K=round(A[p-1]/mean(VSA)) #p is the number of the section, but has the index p-1 (python starts at 0)\r\n print(A[p-1]/mean(VSA))\r\n print(K)\r\n y,x=np.where(labels==p)\r\n Coord=np.array([x,y]).T\r\n n_clusters=int(K)\r\n kmeans=KMeans(n_clusters)\r\n kmeans=kmeans.fit(Coord)\r\n kmeans_labels = kmeans.predict(Coord)\r\n centroids=kmeans.cluster_centers_\r\n \r\n plt.figure()\r\n plt.imshow(labels==p)\r\n plt.plot(centroids[:,0],centroids[:,1],'r+', mew=2)\r\n plt.title(f'Problematic Section {p} with K-means clustering centroids displayed')\r\n \r\n plt.figure()\r\n plt.imshow(labels==p)\r\n seperatedlabels=[]\r\n for n in range(max(kmeans_labels)+1):\r\n validlabelx=[]\r\n validlabely=[]\r\n newlabel=np.zeros(np.shape(labels))\r\n for m in range(len(x)):\r\n if kmeans_labels[m]==n:\r\n validlabelx.append(x[m])\r\n validlabely.append(y[m])\r\n newlabel[y[m]][x[m]]=n+1+Q\r\n seperatedlabels.append(newlabel)\r\n plt.plot(validlabelx,validlabely)\r\n plt.title(f'Proposed seperation of problematic section {p}')\r\n AllSeperatedLabels.append(seperatedlabels)\r\n NewLabelsList=NewLabelsList+seperatedlabels\r\n NewLabelsInOne[o]=sum(NewLabelsList).astype(np.int32)\r\n Q=NewLabelsInOne[o].max()\r\n print(Q)\r\n \r\n #skimage slic for k means clustering\r\n return (AllSeperatedLabels,NewLabelsInOne,K)",
"def cluster(self):\n print(\"Calculating distances\")\n self.all_distances()\n\n print(\"Start making sets\")\n clusters = self.clusters\n\n # Generates a set with neighbours for each point\n for row in self.distances:\n clusters.append(set(np.where(row < self.distance_threshold)[0].tolist()))\n\n print(\"Merging sets\")\n for cluster1 in range(self.point_count):\n for cluster2 in range(self.point_count):\n if clusters[cluster2] is not None and clusters[cluster1] is not None:\n if not clusters[cluster1].isdisjoint(clusters[cluster2]) and cluster1 != cluster2:\n clusters[cluster1].update(clusters[cluster2])\n clusters[cluster2] = None\n # Deletes empty clusters\n clusters = [points for points in clusters if points is not None]\n # Sorts clusters by their size\n clusters.sort(key=len, reverse=True)\n # Builds main set\n for point_set in clusters[0:self.cluster_count_threshold]:\n self.main_cluster.update(point_set)\n\n self.main_cluster = list(self.main_cluster)\n self.clusters = clusters",
"def cluster(self):\r\n\t\tself.clusterer.fit(self.koopman_feature_array)\r\n\t\tself.labels = self.clusterer.labels_\r\n\t\tfor j in range(max(self.labels)+1):\r\n\t\t\tself.koop_cluster_list.append([self.koop_list[i] for i in range(len(self.labels)) if self.labels[i] == j])\r\n\t\t\tself.koop_cluster_memb_prob_list.append([self.clusterer.probabilities_[i] for i in range(len(self.labels)) if self.labels[i] == j])",
"def segment_func2(self):\n # computing neighboors graph\n A = self.boundaryprob_graph()\n\n # SpectralClustering segmentation\n sc = SpectralClustering(3, affinity='precomputed', n_init=10, assign_labels='discretize')\n labels = sc.fit_predict(A)\n\n return labels",
"def miller(points):\r\n\r\n N = np.cross(points[1] - points[0], points[2] - points[0])\r\n O = np.array([0, 0, 0])\r\n P = points[0] # point of plane\r\n Ccs = map(np.array, [[1.0, 0, 0], [0, 1.0, 0], [0, 0, 1.0]])\r\n segments = ([np.dot(P - O, N) / np.dot(ort, N) if np.dot(ort, N) != 0 else\r\n np.nan for ort in Ccs])\r\n if any(x == 0 for x in segments): # Plane goes through origin.\r\n vertices = [ # top:\r\n np.array([1.0, 1.0, 1.0]),\r\n np.array([0.0, 0.0, 1.0]),\r\n np.array([1.0, 0.0, 1.0]),\r\n np.array([0.0, 1.0, 1.0]),\r\n # bottom, except 0,0,0:\r\n np.array([1.0, 0.0, 0.0]),\r\n np.array([0.0, 1.0, 0.0]),\r\n np.array([1.0, 1.0, 1.0]),\r\n ]\r\n for vertex in vertices:\r\n if np.dot(vertex - O, N) != 0: # vertex not in plane\r\n new_origin = vertex\r\n break\r\n # obtain new axes with center in new origin\r\n X = np.array([1 - new_origin[0], new_origin[1], new_origin[2]])\r\n Y = np.array([new_origin[0], 1 - new_origin[1], new_origin[2]])\r\n Z = np.array([new_origin[0], new_origin[1], 1 - new_origin[2]])\r\n new_Ccs = [X - new_origin, Y - new_origin, Z - new_origin]\r\n segments = ([np.dot(P - new_origin, N) / np.dot(ort, N) if\r\n np.dot(ort, N) != 0 else np.nan for ort in new_Ccs])\r\n # fix signs of indices: 0 -> 1, 1 -> -1 (\r\n segments = (1 - 2 * new_origin) * segments\r\n\r\n return sub_miller(segments)",
"def parks(self):\n point_array = [0, 2, 8, 12, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14]\n park_coords = []\n parks_sorted = []\n for i in range(4):\n for j in range(4):\n if self.as_list[i][j] == 'p':\n park_coords.append(tuple([i, j]))\n while len(park_coords) > 0:\n x, y = park_coords.pop(0)\n if len(parks_sorted) == 0:\n parks_sorted.append([(x, y)])\n else:\n borders_bool = []\n for block_no, park_block in enumerate(parks_sorted):\n borders_bool.append(False)\n for i, j in park_block:\n if abs(x - i) + abs(y - j) == 1:\n borders_bool[block_no] = True\n if (num_true := borders_bool.count(True)) == 1:\n parks_sorted[borders_bool.index(True)].append((x, y))\n elif num_true > 1:\n new_parks_sorted = []\n i_mega_park = None\n for block_no, park_block in enumerate(parks_sorted):\n if borders_bool[block_no]: # If it is bordering\n if i_mega_park is None:\n i_mega_park = block_no\n new_parks_sorted.append(park_block)\n else:\n new_parks_sorted[i_mega_park] += park_block\n new_parks_sorted[i_mega_park] += [(x, y)]\n parks_sorted = new_parks_sorted\n else:\n new_parks_sorted.append(park_block)\n parks_sorted = new_parks_sorted\n else:\n parks_sorted.append([(x, y)])\n\n return sum([point_array[len(block)] for block in parks_sorted])",
"def cluster(M, point, eps): # zwraca punkty dla ktorych dystans z punktu point jest mniejszy od eps\n seeds = []\n for i in range(0, M.shape[0]):\n if eps_neighborhood(M, point, i, eps):\n seeds.append(i)\n return seeds",
"def pc_cluster(data, clusters):\n dist = MorningstarPCA.pc_distance(data, clusters)\n return MorningstarPCA.get_column_with_min_value(dist)",
"def recluster( g, clusters, max_csize ) :\n while (len( clusters ) > 1) :\n # Step 1: Calculates the cohesion scores for all pairs of clusters.\n clusters = sorted( clusters, cmp = lambda x, y : len( x ) - len( y ) )\n cohesion = [] # Element = (cluster-index pair, score)\n n = len( clusters )\n for i in range( n - 1 ) :\n for j in range( i + 1, n ) :\n cohesion_score = calc_cohesion( g, clusters[i], clusters[j], max_csize )\n if (cohesion_score > 0) :\n cohesion.append( (i, j, cohesion_score,) )\n\n if (not cohesion) :\n break\n \n # Step 2: Finds the smallest cluster with the highest cohesion score. We can do this by sorting `cohesion' twice:\n # (1) descendingly by scores, and then\n # (2) ascendingly by size of the smaller cluster in the pair.\n # Python's sort algorithm is stable.\n cohesion = sorted( cohesion, cmp = lambda x, y : sign( y[2] - x[2] ) )\n cohesion = sorted( cohesion, cmp = lambda x, y : x[0] - y[0] )\n\n # Step 3: Collapses the first pair of clusters in `cohesion'.\n i = cohesion[0][0]\n j = cohesion[0][1]\n collapsed = clusters[i] + clusters[j]\n del clusters[j]\n del clusters[i]\n clusters.append( collapsed )\n \n return clusters",
"def clustering(cluster_list):\n while len(cluster_list) > 1:\n x = 0\n y = 0\n distance_min = 10\n\n for i in range(0,len(cluster_list)):\n\n for j in range(0,len(cluster_list)):\n\n if i != j:\n distance = cluster_list[i].linkage(cluster_list[j])\n if distance < distance_min:\n x = i\n y = j\n distance_min = distance\n \n \n clusX = cluster_list[x]\n clusY = cluster_list[y]\n cluster_list.pop(cluster_list.index(clusX))\n cluster_list.pop(cluster_list.index(clusY))\n\n cluster_list.append(Cluster(clusX,clusY))\n return cluster_list[0]"
]
| [
"0.64368105",
"0.6374936",
"0.634104",
"0.60059184",
"0.58162886",
"0.5787876",
"0.57370514",
"0.5631855",
"0.5605895",
"0.55195105",
"0.5509005",
"0.54823226",
"0.54415303",
"0.54227644",
"0.54128027",
"0.5396293",
"0.53785056",
"0.536136",
"0.5358855",
"0.5355567",
"0.5338538",
"0.532763",
"0.5317456",
"0.5308498",
"0.5299408",
"0.5285686",
"0.52795124",
"0.52556735",
"0.5232597",
"0.5212186"
]
| 0.7829764 | 0 |
Form blobs from a given list of segments. Each blob is formed from a number of connected segments. | def form_blob_(seg_, root_fork):
# Determine params type:
if 'M' not in seg_[0]: # No M.
Dert_keys = (*aDERT_PARAMS[:2], *aDERT_PARAMS[3:], "S", "Ly")
else:
Dert_keys = (*aDERT_PARAMS, "S", "Ly") if nI != 1 \
else (*gDERT_PARAMS, "S", "Ly")
# Form blob:
blob_ = []
for blob_seg_ in cluster_segments(seg_):
# Compute boundary box in batch:
y0, yn, x0, xn = starmap(
lambda func, x_: func(x_),
zip(
(min, max, min, max),
zip(*[(
seg['y0'], # y0_ .
seg['y0'] + seg['Ly'], # yn_ .
seg['x0'], # x0_ .
seg['xn'], # xn_ .
) for seg in blob_seg_]),
),
)
# Compute mask:
mask = np.ones((yn - y0, xn - x0), dtype=bool)
for blob_seg in blob_seg_:
for y, P in enumerate(blob_seg['Py_'], start=blob_seg['y0']):
x_start = P['x0'] - x0
x_stop = x_start + P['L']
mask[y - y0, x_start:x_stop] = False
dert__ = root_fork['dert__'][:, y0:yn, x0:xn]
dert__.mask[:] = mask
blob = dict(
Dert=dict(
zip(
Dert_keys,
[*map(sum,
zip(*map(op.itemgetter(*Dert_keys),
blob_seg_)))],
)
),
box=(y0, yn, x0, xn),
seg_=blob_seg_,
sign=blob_seg_[0].pop('sign'), # Pop the remaining segment's sign.
dert__=dert__,
root_fork=root_fork,
fork_=defaultdict(list),
)
blob_.append(blob)
# feedback(blob)
return blob_ | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def construct_segments(self):\n for strand in self.strand_list:\n strand.construct_segment()",
"def create_from_segments(self, segment, origin=0):\r\n n = origin\r\n if segment[origin]['T'] != 'soma': # if it's a soma, only one compartment\r\n while (len(segment[n]['children']) == 1) and (segment[n]['T'] != 'soma'): # Go to the end of the branch\r\n n += 1\r\n # End of branch\r\n branch = segment[origin:n + 1]\r\n # Set attributes\r\n self.diameter, self.length, self.area, self.x, self.y, self.z = \\\r\n zip(*[(seg['diameter'], seg['length'], seg['area'], seg['x'], seg['y'], seg['z']) for seg in branch])\r\n self.diameter, self.length, self.area, self.x, self.y, self.z = array(self.diameter), array(self.length), \\\r\n array(self.area), array(self.x), array(self.y), array(self.z)\r\n self.type = segment[n]['T'] # normally same type for all compartments in the branch\r\n # Create children (list)\r\n self.children = [Morphology().create_from_segments(segment, origin=c) for c in segment[n]['children']]\r\n # Create dictionary of names (enumerates children from number 1)\r\n for i, child in enumerate(self.children):\r\n self._namedkid[str(i + 1)] = child\r\n # Name the child if possible\r\n if child.type in ['soma', 'axon', 'dendrite']:\r\n if child.type in self._namedkid:\r\n self._namedkid[child.type] = None # two children with the same name: erase (see next block)\r\n else:\r\n self._namedkid[child.type] = child\r\n # Erase useless names\r\n for k in self._namedkid.keys():\r\n if self._namedkid[k] is None:\r\n del self._namedkid[k]\r\n # If two kids, name them L (left) and R (right)\r\n if len(self.children) == 2:\r\n self._namedkid['L'] = self._namedkid['1']\r\n self._namedkid['R'] = self._namedkid['2']\r\n return self",
"def segment(raw_sents:List[str], segment=\"jieba\") -> List[List[str]]:\n\t# segment_list = [\"pkuseg\", \"jieba\"]\n\t# if segment.strip() not in segment_list:\n\t# \treturn []\n\n\tseg_sents = []\n\tif segment == \"pkuseg\":\n\t\timport pkuseg\n\n\t\t## init the seg\n\t\tseg = pkuseg.pkuseg()\n\n\t\t## segment the sentence by pkuseg\n\t\tfor sent in raw_sents:\n\t\t\tres_seg = seg.cut(sent)\n\t\t\tseg_sents.append(res_seg)\n\t\t# print(seg_sents)\n\telif segment == \"jieba\":\n\t\timport jieba\n\t\tfor sent in raw_sents:\n\t\t\tres_seg = jieba.lcut(sent)\n\t\t\tsentence = \" \".join(res_seg)\n\t\t\tpattern4 = re.compile(\" +\", re.S)\n\t\t\tsentence = pattern4.sub(\" \", sentence)\n\t\t\tres_seg = sentence.split(\" \")\n\t\t\tseg_sents.append(res_seg)\n\n\treturn seg_sents",
"def segment_heads(classes, data):\n\n segmented_data =[]\n\n # gather and organize needed data\n output_dir = PROJECT_ROOT + \"/data/segmented_head_images/\"\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n img_ids_file = open(PROJECT_ROOT + '/data/CUB_200_2011/images.txt').readlines()\n img_ids_file = [i.strip().split(' ') for i in img_ids_file]\n\n parts_file = open(PROJECT_ROOT +'/data/CUB_200_2011/parts/part_locs.txt').readlines()\n parts_file = [i.strip().split(' ') for i in parts_file]\n\n # <image_id> <x> <y> <width> <height>\n bounding_file = open(PROJECT_ROOT +'/data/CUB_200_2011/bounding_boxes.txt').readlines()\n bounding_file = [i.strip().split(' ') for i in bounding_file]\n\n img_ids = {}\n for i in img_ids_file:\n img_ids[i[1]] = int(i[0])\n\n part_ids = {}\n for i in parts_file:\n part_ids[(int(i[0]), int(i[1]))] = list(map(lambda x:int(float(x)), i[2:]))\n\n boudning_ids = {}\n for i in bounding_file:\n boudning_ids[int(i[0])] = list(map(lambda x:int(float(x)), i[1:]))\n\n for r in data:\n # print(\"~~~SEGMENTING HEAD: \", r[1])\n\n img_id = r[1].split('/')\n img_id = img_id[len(img_id)-2] + '/' + img_id[len(img_id)-1].replace('png', 'jpg')\n img_id = img_ids[img_id]\n\n # get location of bird parts\n # [x, y, visible or not]\n nape = part_ids[(img_id, 10)]\n tail = part_ids[(img_id, 14)]\n throat = part_ids[(img_id, 15)]\n bounds = boudning_ids[img_id]\n\n # if any of that parts not visible\n if nape[2] == 0 or tail[2] == 0 or throat[2] == 0 or nape[1] - throat[1] == 0:\n continue\n\n #A=(x1,y1) to B=(x2,y2) a point P=(x,y) f\n #d=(xโx1)(y2โy1)โ(yโy1)(x2โx1)\n\n # compute on what side of nape-throat line tail is on\n tail_side = (tail[0] - nape[0])*(throat[1] - nape[1])-(tail[1] - nape[1])*(throat[0]-nape[0])\n\n img = cv2.imread(r[1])\n (rows, cols, _) = img.shape\n\n # all pixels on same side of nape-throat line as tail turn off\n for y in range(0,rows):\n for x in range(0,cols):\n side = (x - nape[0])*(throat[1] - nape[1])-(y - nape[1])*(throat[0]-nape[0])\n\n if np.sign(tail_side) == np.sign(side):\n img[y, x, :] = 0\n\n # img = cv2.circle(img, (nape[0], nape[1]), 3, (255, 0, 0))\n # img = cv2.circle(img, (tail[0], tail[1]), 3, (0, 255, 0))\n # img = cv2.circle(img, (throat[0], throat[1]), 3, (0, 0, 255))\n\n # crop by boudning box\n img = img[bounds[1]:bounds[1]+bounds[3], bounds[0]:bounds[0]+bounds[2], :]\n\n # save\n filename = r[1].split(\"/\")\n filename = filename[len(filename)-1].split(\".\")[0]\n if not os.path.exists(output_dir+classes[r[0]]):\n os.makedirs(output_dir+classes[r[0]])\n cv2.imwrite(output_dir+classes[r[0]]+\"/\"+filename+\".png\", img)\n segmented_data.append((r[0],output_dir+classes[r[0]]+\"/\"+filename+\".png\"))\n\n return segmented_data",
"def segment(args):\n from jcvi.formats.base import SetFile\n\n p = OptionParser(segment.__doc__)\n p.add_option(\n \"--chain\",\n default=1,\n type=\"int\",\n help=\"Allow next N genes to be chained\",\n )\n opts, args = p.parse_args(args)\n\n if len(args) != 2:\n sys.exit(not p.print_help())\n\n idsfile, bedfile = args\n bed = Bed(bedfile)\n order = bed.order\n ids = SetFile(idsfile)\n losses = Grouper()\n skip = opts.chain\n for i, a in enumerate(bed):\n a = a.accn\n for j in range(i + 1, i + 1 + skip):\n if j >= len(bed):\n break\n b = bed[j].accn\n if a in ids:\n losses.join(a, a)\n if a in ids and b in ids:\n losses.join(a, b)\n\n losses = list(losses)\n singletons = [x for x in losses if len(x) == 1]\n segments = [x for x in losses if len(x) > 1]\n ns, nm, nt = len(singletons), len(segments), len(losses)\n assert ns + nm == nt\n\n # Summary for all segments\n for x in sorted(singletons) + sorted(segments):\n print(\n \"\\t\".join(\n str(x)\n for x in (\"|\".join(sorted(x)), len(x), estimate_size(x, bed, order))\n )\n )\n\n # Find longest segment stretch\n if segments:\n mx, maxsegment = max([(len(x), x) for x in segments])\n print(\"Longest stretch: run of {0} genes\".format(mx), file=sys.stderr)\n print(\" {0}\".format(\"|\".join(sorted(maxsegment))), file=sys.stderr)\n seg_asize = sum(estimate_size(x, bed, order) for x in segments)\n seg_bsize = sum(\n estimate_size(x, bed, order, conservative=False) for x in segments\n )\n else:\n seg_asize = seg_bsize = 0\n\n sing_asize = sum(estimate_size(x, bed, order) for x in singletons)\n sing_bsize = sum(\n estimate_size(x, bed, order, conservative=False) for x in singletons\n )\n total_asize = sing_asize + seg_asize\n total_bsize = sing_bsize + seg_bsize\n print(\n \"Singleton ({0}): {1} - {2} bp\".format(ns, sing_asize, sing_bsize),\n file=sys.stderr,\n )\n print(\n \"Segment ({0}): {1} - {2} bp\".format(nm, seg_asize, seg_bsize), file=sys.stderr\n )\n print(\n \"Total ({0}): {1} - {2} bp\".format(nt, total_asize, total_bsize),\n file=sys.stderr,\n )\n print(\n \"Average ({0}): {1} bp\".format(nt, (total_asize + total_bsize) / 2),\n file=sys.stderr,\n )",
"def _draw_segments(frame, segments):\n for segment in segments:\n cv2.line(frame, segment[0], segment[1],\n color=(0, 255, 255), thickness=2)\n cv2.circle(frame, segment[0], radius=3,\n color=(255, 0, 0), thickness=-1)\n cv2.circle(frame, segment[1], radius=3,\n color=(255, 0, 0), thickness=-1)",
"def combineSegments(self):\n\n remaining_segments = list(self.segments)\n if not remaining_segments:\n return []\n\n chains = []\n # @TODO: Why is count computed this way?\n max_count = len(remaining_segments) * 2\n count = 0\n while remaining_segments and count < max_count:\n if chains and linked_a_chain_from(chains, remaining_segments):\n count += 1\n continue\n\n chains.append([remaining_segments.pop()])\n\n # grab the vertex indicies for each chain (aka face)\n newFaces = [[segment[2] for segment in chain] for chain in chains]\n self.faces.extend(newFaces)\n\n # lets compute some textureCoords for these new faces\n # based on their vertex coords in world space.\n # this works well for floors and ceilings.\n # flats are always 64x64 aligned to world coords\n [self.textureCoords.append(\n [(segment[0].x/64., segment[0].y/64.) for segment in chain])\n for chain in chains]",
"def parse(all_blobs, all_angles):",
"def split_segments_vertically(segments, img_shape):\n left, right = [], []\n split = img_shape[1] // 2\n for (p1, p2) in segments:\n if p1[0] < split and p2[0] < split:\n left.append((p1, p2))\n elif p1[0] > split and p1[0] > split:\n right.append((p1, p2))\n\n return left, right",
"def convert_segments(segments):\n polygons = []\n interiors = []\n linestrings = []\n for segment in segments:\n ls = LineString(segment)\n if segment[0][0] == segment[-1][0] and segment[0][1] == segment[-1][1]:\n lr = LinearRing(ls)\n if not lr.is_ccw:\n polygons.append(Polygon(segment))\n else:\n interiors.append(lr)\n continue\n linestrings.append(ls)\n\n return polygons, interiors, linestrings",
"def compress_segments(map_, wav_id, file_path, segments, outpath):\n try:\n audio = AudioSegment.from_wav(file_path)\n #print(\"\\nSegments:\", len(segments))\n for _, row in segments.iterrows():\n start = row[2] * 1000\n end = row[3] * 1000\n audio_chunk = audio[start:end]\n save_path = \"{}/{}_chunk_{}_{}.wav\".format(outpath, wav_id, start, end)\n audio_chunk.export(save_path, format='wav')\n compress_file(map_=map_, \n name=row[0],\n save_path=save_path)\n except Exception as e:\n print(\"ERR:\",e)\n print(\"Failed files:\", file_path)",
"def features_from_labels(audio_file, segments):\n segments_features = []\n #for each segment\n for segment in segments:\n features = features_from_label(audio_file, segment)\n #and append it to the list\n segments_features.append(features)\n return segments_features",
"def submitlist(jb, ls):\n segstart, segend = calculatestartend(ls) # Get the segment id for the current segment\n seg = None\n opp = None\n with jb.lock: # Lock the segments dictionary\n segments = jb.segments\n if segstart in segments:\n seg, opp = segments.pop(segstart, None)\n elif segend in segments:\n seg, opp = segments.pop(segend, None)\n if seg:\n segments.pop(opp)\n else:\n segments[segstart] = (ls, segend)\n segments[segend] = (ls, segstart)\n if seg:\n reqq.put((\"merge\", (ls, seg)), )",
"def get_as_mb(segments):\n\n ref_x = 0.0\n ref_y = 0.0\n\n mbs = []\n \n for segment in segments:\n\n xa = segment['x']\n ya = segment['y']\n\n for x,y in zip(xa,ya):\n\n if x==0 and y==0:\n continue\n\n dx = x - ref_x\n dy = y - ref_y\n\n mb = \"\"\n\n if dx == 0:\n if dy >= 0:\n mb = 'n 0 e {}'.format(dy)\n else:\n mb = 's 0 w {}'.format(-dy)\n else:\n if dy == 0:\n if dx >= 0:\n mb = 'n 90 e {}'.format(dx)\n else:\n mb = 'n 90 w {}'.format(-dx)\n else:\n\n ang = math.degrees(math.atan(dx/dy))\n ang = round(ang,10)\n\n if dy >= 0:\n mb = \"n {}\".format(abs(ang))\n if dx >= 0:\n mb += \" e\"\n else:\n mb += \" w\"\n else:\n mb = \"s {}\".format(abs(ang))\n if dx >= 0:\n mb += \" e\"\n else:\n mb += \" w\"\n\n length = math.sqrt(dx**2 + dy**2)\n length = round(length,10)\n\n mb += \" {}\".format(length)\n \n ref_x = x\n ref_y = y\n\n mbs.append(mb)\n \n return mbs",
"def consolidate_instances_all_way(self, stats, segmented_instances):\n\n img = np.zeros(segmented_instances.shape).astype(np.uint8)\n\n #get all pixel labels in the segmented_instances mask\n segment_numbers = np.unique(segmented_instances)\n\n # remove the background label\n segment_numbers=segment_numbers[segment_numbers!=0]\n\n end_points = np.empty((len(segment_numbers),),dtype=np.object_)\n end_points.fill([])\n\n for curr_segment in segment_numbers:\n idx=[]\n i=curr_segment-1\n if curr_segment!=0:\n #Show all segments of curr_segment. Only useful to view results\n img[segmented_instances== curr_segment]= 255\n #get indeces of the segments for curr_segment\n idx = np.argwhere(segmented_instances == curr_segment)\n if len(idx>0):\n end_points[i]= self._get_end_points(segmented_instances, i, \\\n stats, idx)\n # add point markers and lines connecting each end point to centroid.\n # useful only to view results\n \"\"\"for pt_num, pt in enumerate(end_points[i]):\n cv2.circle(img, (pt[0],pt[1]), 3, 100, -1)\n cv2.line(img,(pt[0],pt[1]),\\\n (stats['centroid'][i,0], stats['centroid'][i,1]),150,2)\n cv2.circle(img, (stats['centroid'][i,0], stats['centroid'][i,1]), 3, 200, -1)\"\"\"\n #self.showme(img, 'line '+str(i))\n\n # cluster segments into stem instances\n cluster_mask, clustered_instances = self._cluster_segments_all_way(segmented_instances,\\\n segment_numbers, end_points, \\\n stats)\n\n #put all instances in one layer\n if len(cluster_mask)>0:\n single_layer_cluster_mask=np.zeros(cluster_mask[0].shape)\n for i in xrange(len(cluster_mask)):\n single_layer_cluster_mask[cluster_mask[i]>0]= i+1\n\n # self.showObjects(clustered_instances);\n return single_layer_cluster_mask, clustered_instances",
"def merge_segments(lst):\n ii = 0\n while True:\n jj = ii + 1\n if len(lst) <= jj:\n return lst\n seg1 = lst[ii]\n seg2 = lst[jj]\n if seg1.merge(seg2):\n if seg2.empty():\n del lst[jj]\n else:\n ii += 1\n else:\n ii += 1\n return lst",
"def apply_segmentations(classes, data):\n\n segmented_data =[]\n\n output_dir = PROJECT_ROOT + \"/data/segmented_images/\"\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n for r in data:\n # print(\"~~~ SEGMENTING: \"+r[1])\n seg = cv2.imread(r[2])\n img = cv2.imread(r[1])\n img2 = cv2.bitwise_and(img, seg)\n filename = r[1].split(\"/\")\n filename = filename[len(filename)-1].split(\".\")[0]\n if not os.path.exists(output_dir+classes[r[0]]):\n os.makedirs(output_dir+classes[r[0]])\n cv2.imwrite(output_dir+classes[r[0]]+\"/\"+filename+\".png\", img2)\n segmented_data.append((r[0],output_dir+classes[r[0]]+\"/\"+filename+\".png\"))\n\n return segmented_data",
"def getmergedblobs (lblob,lmergeset,bmerged): \n lblobnew = [] # list of new blobs\n for i,blob in enumerate(lblob):\n if not bmerged[i]: lblobnew.append(blob) # non-merged blobs are copied as is\n for mergeset in lmergeset: # now go through the list of mergesets and create the new blobs\n lblobtmp = [lblob[ID] for ID in mergeset]\n for i,blob in enumerate(lblobtmp):\n if i == 0:\n box = bbox(blob.left,blob.right,blob.bottom,blob.top)\n peakF = blob.peakF\n minF = blob.minF\n maxF = blob.maxF\n minT = blob.minT\n maxT = blob.maxT\n peakT = blob.peakT\n maxpos = blob.maxpos\n maxval = blob.maxval\n minval = blob.minval\n else:\n box = box.getunion(blob)\n minF = min(minF, blob.minF)\n maxF = max(maxF, blob.maxF)\n minT = min(minT, blob.minT)\n maxT = max(maxT, blob.maxT)\n if blob.maxval > maxval:\n peakF = blob.peakF\n peakT = blob.peakT\n maxpos = blob.maxpos\n maxval = blob.maxval\n if blob.minval < minval:\n minval = blob.minval\n blob.left,blob.right,blob.bottom,blob.top = box.left,box.right,box.bottom,box.top\n blob.minF,blob.maxF,blob.peakF,blob.minT,blob.maxT,blob.peakT=minF,maxF,peakF,minT,maxT,peakT\n blob.maxpos,blob.maxval = maxpos,maxval\n blob.minval = minval\n lblobnew.append(blob)\n return lblobnew",
"def buildSegments(array, segLength: int):\n s = math.floor(len(array)/segLength)\n segments = []\n for i in range(s+1):\n segments.append(array[i*segLength:(i+1)*segLength])\n return np.array(segments)",
"def mergeRoundaboutChunks(segments):\n \n roundabouts = segments[segments.tag.apply(lambda x : ('junction' in x and(x['junction'] in ['circular','roundabout'] )))]\n roundabouts=roundabouts.nodes.apply(lambda x : pd.Series((x[0],x[-1]),index=['in','out']))\n roundabouts=roundabouts[roundabouts['in']!=roundabouts['out']]\n incopmleteRoundabouts = roundabouts[~roundabouts['in'].isin(roundabouts['out'])].index.values\n while(len(incopmleteRoundabouts)!=0):\n roundabouts.drop(incopmleteRoundabouts,inplace=True)\n incopmleteRoundabouts = roundabouts[~roundabouts['in'].isin(roundabouts['out'])].index.values\n\n\n def getRoundaboutFromChunks(startSeg,chunks):\n \"\"\"\n connect roundabout chunks\n \"\"\"\n \n sequence=[startSeg]\n nextIndex=startSeg\n while len(sequence)<=1 or sequence[0]!=sequence[-1]:\n outValue = chunks.loc[nextIndex]['out']\n nextIndex = chunks[chunks['in']==outValue].index[0]\n sequence.append(nextIndex)\n return sequence[:-1]\n\n sequences=roundabouts.index.map(lambda x : getRoundaboutFromChunks(x,roundabouts))\n\n sequences=pd.Series(sequences)\n\n sequences = sequences.groupby(lambda x : sorted(sequences.loc[x])[0]).first()\n\n sequences=sequences.apply(lambda x : np.concatenate([ segments.nodes.loc[seg] for seg in x]).tolist())\n \n segments.drop(roundabouts.loc[~roundabouts.index.isin(sequences.index)].index,inplace=True)\n for idx in sequences.index:\n segments.at[idx,'nodes']=sequences.loc[idx]",
"def upload_semantic_segments_to_boxes(self, data):\n #data_str = 'array[\"' + '\",\"'.join(data) + '\"]'\n data_str = \"array['\" + \"','\".join(data) + \"']\"\n sql = f\"SET role {self.write_role}; \" \\\n + f\"update results.boxes \" \\\n + f\"set semantic_segment_bottom_edge_mode = ({data_str})[id];\"\n return sql",
"def create_segments(data, bbox):\n\n nb, sb, eb, wb = bbox\n G = ox.graph_from_bbox(nb, sb, eb, wb)\n dist = 0.0001\n edges = ox.utils_graph.graph_to_gdfs(G, nodes=False, fill_edge_geometry=True)\n edges['index'] = range(1, len(edges)+1)\n\n # Get closest point on each segment\n lng = data['long']\n lat = data['lat']\n ne, dist = ox.distance.nearest_edges(G, lng, lat, return_dist=True)\n\n # Format edges to upload to mapbox\n all_edges = []\n all_lines = []\n for n in ne:\n u, v, key = n\n edge = edges.loc[(u, v, key), \"geometry\"]\n index = edges.loc[(u, v, key), \"index\"]\n if edge not in all_edges:\n feature = Feature(id=int(index), geometry=edge)\n all_edges.append(edge)\n all_lines.append(feature)\n all_lines = FeatureCollection(all_lines)\n\n return all_lines",
"def update_nets_with_segments(pcb_data: List[Dict[str, Any]], nets: List[Net]):\n segments = get_all_dicts_by_key(pcb_data, 'segment')\n for segment in segments:\n start: Coords = get_dict_by_key(segment['segment'], 'start')['start']\n start[1] = str(-1*float(start[1]))\n end: Coords = get_dict_by_key(segment['segment'], 'end')['end']\n end[1] = str(-1 * float(end[1]))\n width: str = get_dict_by_key(segment['segment'], 'width')['width']\n layer_data: str = get_dict_by_key(segment['segment'], 'layer')['layer']\n layers: List[Layer] = convert_to_layers(layer_data)\n new_segment: Segment = Segment(start=start, end=end, width=width, layers=layers)\n net_id: str = get_dict_by_key(segment['segment'], 'net')['net']\n for net in nets:\n if float(net.net_id) == float(net_id):\n net.segments.append(new_segment)",
"def get_image_segments(connected_components):\n image_segments = []\n for cc in connected_components:\n min_row = cc[0][0]\n min_col = cc[0][1]\n max_row = cc[0][0]\n max_col = cc[0][1]\n for pixel in cc:\n if pixel[0] < min_row:\n min_row = pixel[0]\n if pixel[0] > max_row:\n max_row = pixel[0]\n if pixel[1] < min_col:\n min_col = pixel[1]\n if pixel[1] > max_col:\n max_col = pixel[1]\n height = max_row - min_row + CUSHION*2\n width = max_col - min_col + CUSHION*2\n # TODO: this can break! need to check max width and height of image\n bottom_right_row = max_row + CUSHION\n bottom_right_col = max_col + CUSHION\n top_left_row = max(min_row - CUSHION, 0)\n top_left_col = max(min_col - CUSHION, 0)\n image_segments.append((top_left_row, top_left_col, bottom_right_row, bottom_right_col))\n return image_segments",
"def create_network_segments(self, tenant_id, network_id,\n network_name, segments):",
"def segment(data):",
"def add_semantic_segms_blobs(blobs, roidb, im_scale, batch_idx, data):\n num_cls = cfg.MODEL.NUM_CLASSES\n rescale_factor = cfg.SEMANTIC_NET.RESCALE_FACTOR\n polys_gt_inds = np.where(\n (roidb['gt_classes'] > 0) & (roidb['is_crowd'] == 0)\n )[0]\n polys_gt = [roidb['segms'][i] for i in polys_gt_inds]\n\n # Define size variables\n inp_h, inp_w = data.shape[2], data.shape[3]\n out_h, out_w = int(inp_h * rescale_factor), int(inp_w * rescale_factor)\n\n if polys_gt_inds.shape[0] > 0:\n # class label for the mask\n gt_class_labels = roidb['gt_classes'][polys_gt_inds]\n semantic_segms = blob_utils.zeros((num_cls, out_h, out_w), int32=True)\n # narrow scale and size\n scale = im_scale * rescale_factor\n im_h, im_w = roidb['height'], roidb['width']\n im_label_h, im_label_w = int(im_h * scale), int(im_w * scale)\n\n # add\n for i in range(polys_gt_inds.shape[0]):\n cls_label = gt_class_labels[i]\n poly_gt = polys_gt[i]\n # Rasterize the portion of the polygon mask within the given fg roi\n # to an im_label_h x im_label_w binary image\n mask = segm_utils.polys_to_mask_scaled(poly_gt, im_h, im_w, scale)\n mask = np.array(mask > 0, dtype=np.int32) # Ensure it's binary\n semantic_segms[cls_label, 0:im_label_h, 0:im_label_w] = np.maximum(\n semantic_segms[cls_label, 0:im_label_h, 0:im_label_w], mask,\n dtype=np.int32\n )\n\n semantic_segms = np.reshape(semantic_segms, (1,num_cls*out_h*out_w))\n\n else:\n # The network cannot handle empty blobs, so we must provide a mask\n # We simply take the first bg roi, given it an all -1's mask (ignore\n # label), and label it with class zero (bg).\n\n # We give it an -1's blob (ignore label)\n semantic_segms = -blob_utils.ones((1, num_cls*out_h*out_w), int32=True)\n\n blobs['semantic_segms_int32'] = semantic_segms\n blobs['img_rois'] = np.array([batch_idx, 0, 0, inp_w-1, inp_h-1], dtype=np.float32)[np.newaxis, :]",
"def split_segments(old_seg_ends, B):\n new_segment_ends = []\n for q in range(0, B.size):\n new_ends = list(np.linspace(old_seg_ends[q], old_seg_ends[q + 1], B[q] + 1))\n new_segment_ends.extend(new_ends[:-1])\n new_segment_ends.extend([1])\n new_segment_ends = np.asarray(new_segment_ends)\n return new_segment_ends",
"def load_segments(filename):\n coordinates_struct = struct.Struct('4d')\n segments = []\n adjuster = CoordinatesHash()\n\n with open(filename, \"rb\") as bo_file:\n packed_segment = bo_file.read(32)\n while packed_segment:\n coordinates = coordinates_struct.unpack(packed_segment)\n raw_points = [Point(coordinates[0:2]), Point(coordinates[2:])]\n adjusted_points = [adjuster.hash_point(p) for p in raw_points]\n segments.append(Segment(adjusted_points))\n packed_segment = bo_file.read(32)\n\n return adjuster, segments",
"def make_chunks(self, audio_segment, chunk_length):\r\n\t\tnumber_of_chunks = math.ceil(len(audio_segment) / float(chunk_length))\r\n\t\treturn [audio_segment[i * chunk_length:(i + 1) * chunk_length]\r\n\t\t\t\tfor i in range(int(number_of_chunks))]"
]
| [
"0.6240448",
"0.573545",
"0.5672107",
"0.56235355",
"0.55338395",
"0.5473892",
"0.54547447",
"0.5373943",
"0.5372714",
"0.52542466",
"0.5249314",
"0.52047896",
"0.51399314",
"0.51182914",
"0.51067835",
"0.50957716",
"0.5091512",
"0.5088217",
"0.5078772",
"0.50569415",
"0.5052178",
"0.5037505",
"0.5034501",
"0.5025919",
"0.50236225",
"0.5013536",
"0.49927875",
"0.49869016",
"0.49800313",
"0.49762806"
]
| 0.58002687 | 1 |
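The form_blob_ document in the row above calls a cluster_segments helper that is referenced but not included in this row. As a rough, hypothetical sketch only — not the project's actual implementation — the following groups segments into connected components with a union-find, assuming a made-up segments_adjacent(a, b) predicate that reports whether two segments touch:

def cluster_segments_sketch(seg_, segments_adjacent):
    """Group segments into connected components via union-find (illustrative only)."""
    parent = list(range(len(seg_)))

    def find(i):
        while parent[i] != i:
            parent[i] = parent[parent[i]]  # path halving
            i = parent[i]
        return i

    def union(i, j):
        ri, rj = find(i), find(j)
        if ri != rj:
            parent[rj] = ri

    # Union every pair of touching segments (O(n^2); acceptable for a sketch).
    for i in range(len(seg_)):
        for j in range(i + 1, len(seg_)):
            if segments_adjacent(seg_[i], seg_[j]):
                union(i, j)

    # Collect connected components as lists of segments.
    components = {}
    for i, seg in enumerate(seg_):
        components.setdefault(find(i), []).append(seg)
    return list(components.values())

form_blob_ could then consume each returned component list exactly as the loop over blob_seg_ does above.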
Sets the upgrade_time of this NextUpgradeInfo. | def upgrade_time(self, upgrade_time):
self._upgrade_time = upgrade_time | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def upgrade_time_epoch(self, upgrade_time_epoch):\n\n self._upgrade_time_epoch = upgrade_time_epoch",
"def update_time(self, update_time):\n\n self._update_time = update_time",
"def mod_time(self, mod_time):\n\n self._mod_time = mod_time",
"def mod_time(self, mod_time):\n\n self._mod_time = mod_time",
"def up_time(self, up_time):\n\n self._up_time = up_time",
"def set_time(self, time):\n self._time = time",
"def SetModTime(self, modtime):\n self.file.SetModTime(modtime)",
"def time_updated(self, time_updated):\n self._time_updated = time_updated",
"def delivery_time(self, delivery_time):\n\n self._delivery_time = delivery_time",
"def evolution_time(self, evolution_time: float) -> None:\n self._evolution_time = evolution_time",
"def set_sleep_time(self, time):\n self.sleep_time = time",
"def set_time(self, set_time):\n\n self._set_time = set_time",
"def time(self, time):\n\n self._time = time",
"def time(self, time):\n\n self._time = time",
"def time(self, time):\n\n self._time = time",
"def time(self, time):\n\n self._time = time",
"def time(self, time):\n\n self._time = time",
"def dep_time(self, dep_time):\n\n self._dep_time = dep_time",
"def status_switch_time(self, status_switch_time):\n\n self._status_switch_time = status_switch_time",
"def set_umeastime(self, time):\n self.utime = time",
"def completion_time(self, completion_time: datetime):\n\n self._completion_time = completion_time",
"def registration_time(self, registration_time):\n\n self._registration_time = registration_time",
"async def on_upgrade_complete(self, upgrade: UpgradeId):",
"def time(self, time: float) -> None:\n self._time = time",
"def __init__(self, upgrade_time=None, stable=None, major_version=None, minor_version=None, mandatory=None, upgrade_time_epoch=None): # noqa: E501 # noqa: E501\n\n self._upgrade_time = None\n self._stable = None\n self._major_version = None\n self._minor_version = None\n self._mandatory = None\n self._upgrade_time_epoch = None\n self.discriminator = None\n\n if upgrade_time is not None:\n self.upgrade_time = upgrade_time\n if stable is not None:\n self.stable = stable\n if major_version is not None:\n self.major_version = major_version\n if minor_version is not None:\n self.minor_version = minor_version\n if mandatory is not None:\n self.mandatory = mandatory\n if upgrade_time_epoch is not None:\n self.upgrade_time_epoch = upgrade_time_epoch",
"async def async_set_position_updated_at(self, time):\n self._position_updated_at = time",
"def rt_dep_time(self, rt_dep_time):\n\n self._rt_dep_time = rt_dep_time",
"def scan_time(self, scan_time):\n\n self._scan_time = scan_time",
"def date_time(self, date_time):\n\n self._date_time = date_time",
"def transaction_time(self, transaction_time):\n\n self._transaction_time = transaction_time"
]
| [
"0.6725681",
"0.62003857",
"0.5695385",
"0.5695385",
"0.55752087",
"0.55294573",
"0.54945123",
"0.5405532",
"0.5399842",
"0.5302209",
"0.52906084",
"0.52381265",
"0.5231574",
"0.5231574",
"0.5231574",
"0.5231574",
"0.5231574",
"0.52093667",
"0.5196944",
"0.51616",
"0.50660056",
"0.5055394",
"0.5025547",
"0.502395",
"0.50219476",
"0.5012252",
"0.49888346",
"0.49536097",
"0.49158117",
"0.48941582"
]
| 0.8613101 | 0 |
Sets the stable of this NextUpgradeInfo. | def stable(self, stable):
self._stable = stable | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stable():\n env.branch = 'stable'",
"def get_stable(self: _R) -> _R:\n return self._replace(\n BaseVersion(\n epoch=0,\n release=(self.major, self.minor, self.micro),\n pre=None,\n post=None,\n dev=None,\n local=None,\n )\n )",
"def is_stable(self) -> bool:\n return not self.is_prerelease",
"def cluster_setslot_stable(self, slot_id: int) -> ResponseT:\n return self.execute_command(\"CLUSTER SETSLOT\", slot_id, \"STABLE\")",
"def test_beta_to_stable(self):\n self.change_version(self.version_1_2_0, '1.2beta')\n self.change_status(self.version_1_2_0, amo.STATUS_BETA)\n\n version, file = self.get('1.2beta', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_2",
"def test_beta_updates_to_stable(self):\n self.change_version(self.version_1_2_0, '1.2beta')\n self.change_status(self.version_1_2_0, amo.STATUS_BETA)\n self.change_status(self.version_1_2_2, amo.STATUS_BETA)\n\n version, file = self.get('1.2beta', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_1",
"def set_upgraded(self, val: bool) -> None:\n self._upgraded = val",
"def version(self, newVersion=None):\n pass",
"def setSat ( self, newsat ):\n if isinstance( newsat, int ):\n newsat /= 100.0\n if newsat > 1.0:\n newsat = 1.0\n if newsat < 0.0:\n newsat = 0.0\n self.s = newsat\n self.hsl[1] = newsat\n self.hsla[1] = newsat\n self.updateFromHsl()",
"def set_version(self, version: str) -> None:\n if self.current_version == version:\n return\n self.current_version = version\n self._del_cached_property(\"version\")",
"def switch_to_version(self, version):\n self.current_version = version\n self.save()",
"def setHeadway(self, new_headway: int):\n self.headway = new_headway",
"def set_toVersion(self):\n if not self.data.get('toVersion') or LooseVersion(self.data.get('toVersion', '99.99.99')) >= TO_VERSION_5_9_9:\n if self.verbose:\n click.echo('Setting toVersion field')\n self.data['toVersion'] = TO_VERSION_5_9_9",
"def update_version(self, version):\n self.version = CPE.escape_for_cpe23_fs(version)",
"def test_request_estable_version(self):\n current_stable_version = get_stable_version()\n self.assertIsNotNone(current_stable_version)",
"def stable(self):\n return(self.zeta > 0)",
"def set_installed_version(vcs, version):\n version_path = _get_version_path(vcs)\n with open(version_path, 'w') as f:\n f.write(version)",
"def set_repository_software_version(branchenv: lmdb.Environment,\n ver_str: str,\n *,\n overwrite: bool = False) -> bool:\n versionKey = repo_version_db_key()\n ver_spec = repo_version_raw_spec_from_raw_string(v_str=ver_str)\n versionVal = repo_version_db_val_from_raw_val(v_spec=ver_spec)\n branchTxn = TxnRegister().begin_writer_txn(branchenv)\n try:\n success = branchTxn.put(versionKey, versionVal, overwrite=overwrite)\n finally:\n TxnRegister().commit_writer_txn(branchenv)\n return success",
"def _update_version(self) -> None:\n # Implement in child class.\n raise NotImplementedError",
"def SetVersion(self, addonVersion):\n self._addonVersion = addonVersion",
"def test_release_update_available_MINOR(self):\n NEXT = '%d.%d-%d' % (MAJOR, MINOR + 1, 0)\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR, MINOR + 1, NEXT): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(NEXT, next)",
"def SetToolPacking(self, packing):\r\n\r\n self._tool_packing = packing",
"def minor_version_auto_upgrade(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"minor_version_auto_upgrade\")",
"def switch_to_latest_version(self):\n self.current_version = Version.objects.filter(is_published=True).latest()\n self.save()",
"def __init__(self, upgrade_time=None, stable=None, major_version=None, minor_version=None, mandatory=None, upgrade_time_epoch=None): # noqa: E501 # noqa: E501\n\n self._upgrade_time = None\n self._stable = None\n self._major_version = None\n self._minor_version = None\n self._mandatory = None\n self._upgrade_time_epoch = None\n self.discriminator = None\n\n if upgrade_time is not None:\n self.upgrade_time = upgrade_time\n if stable is not None:\n self.stable = stable\n if major_version is not None:\n self.major_version = major_version\n if minor_version is not None:\n self.minor_version = minor_version\n if mandatory is not None:\n self.mandatory = mandatory\n if upgrade_time_epoch is not None:\n self.upgrade_time_epoch = upgrade_time_epoch",
"def auto_minor_version_upgrade(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")",
"def auto_minor_version_upgrade(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")",
"def set_active(cls, directory: Path, ver: str) -> None:\n\n if directory.is_dir() is False:\n raise Failure(f\"{directory} is not a valid directory\")\n\n if not cls.is_installed(ver):\n raise Failure(f\"{ver} is not installed, cannot set as active\")\n\n logger.info(f\"Setting python version {ver} as active in {directory}\")\n\n os.chdir(directory)\n run([cls.command, \"local\", ver])",
"def major_version(self, major_version):\n\n self._major_version = major_version",
"def major_version(self, major_version):\n\n self._major_version = major_version"
]
| [
"0.62132746",
"0.574145",
"0.5200357",
"0.5139811",
"0.51368827",
"0.50893146",
"0.48866063",
"0.48249158",
"0.46558702",
"0.46255958",
"0.46187514",
"0.46168604",
"0.46053055",
"0.4556065",
"0.45172706",
"0.45138413",
"0.4506797",
"0.45026952",
"0.4471989",
"0.4469258",
"0.44681567",
"0.44639236",
"0.44635424",
"0.44519562",
"0.44499037",
"0.44438174",
"0.44438174",
"0.44436538",
"0.44373807",
"0.44373807"
]
| 0.7988852 | 0 |
Sets the major_version of this NextUpgradeInfo. | def major_version(self, major_version):
self._major_version = major_version | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _major_version(self):\n version_tuple = StrictVersion(self.plugin.version).version\n major = '.'.join(map(str, version_tuple[:2]))\n\n return major",
"def minor_version(self, minor_version):\n\n self._minor_version = minor_version",
"def minor_version(self, minor_version):\n\n self._minor_version = minor_version",
"def major_version(self) -> str:\n return pulumi.get(self, \"major_version\")",
"def _getVersionMajor(self):\n return int(self.model.getroot().attrib['versionMajor'])",
"def major_version(self):\n return self.unpack_dword(0x14)",
"def bump_major(self: _R, inc: int = 1) -> _R:\n if not self.is_stable and self.minor == 0 and self.micro == 0:\n return self.get_stable().bump_major(inc - 1)\n\n return self._replace(\n BaseVersion(\n epoch=0,\n release=(self.major + inc, 0, 0),\n pre=None,\n post=None,\n dev=None,\n local=None,\n )\n )",
"def min_affected_version(self, min_affected_version):\n\n self._min_affected_version = min_affected_version",
"def browser_version_major(self, browser_version_major):\n # type: (string_types) -> None\n\n if browser_version_major is not None:\n if not isinstance(browser_version_major, string_types):\n raise TypeError(\"Invalid type for `browser_version_major`, type has to be `string_types`\")\n\n self._browser_version_major = browser_version_major",
"def version_major(self):\n assert self._version_major != NotImplemented\n return self._version_major",
"def min_tls_version(self, value):\n self._set_attr('min-tls-version', value)",
"def major_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"major_version\")",
"def minimum_agent_version(self, minimum_agent_version):\n\n self._minimum_agent_version = minimum_agent_version",
"def major_version(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"major_version\")",
"def operatingsystem_version_major(self, operatingsystem_version_major):\n # type: (string_types) -> None\n\n if operatingsystem_version_major is not None:\n if not isinstance(operatingsystem_version_major, string_types):\n raise TypeError(\"Invalid type for `operatingsystem_version_major`, type has to be `string_types`\")\n\n self._operatingsystem_version_major = operatingsystem_version_major",
"def major_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"major_version\")",
"def set_version(self, version: str) -> None:\n if self.current_version == version:\n return\n self.current_version = version\n self._del_cached_property(\"version\")",
"def SetVersion(self, addonVersion):\n self._addonVersion = addonVersion",
"def version(self, version: int):\n\n self._version = version",
"def version(self, version):\n \n self._version = version",
"def version(self, version):\n self._version = version",
"def version(self, version):\n self._version = version",
"def version(self, version: str):\n\n self._version = version",
"def version(self, version: str):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version"
]
| [
"0.6609667",
"0.64588195",
"0.64588195",
"0.6412141",
"0.64004534",
"0.6383321",
"0.63680845",
"0.62749135",
"0.62064695",
"0.617141",
"0.6109517",
"0.60880303",
"0.6085801",
"0.60802436",
"0.6033241",
"0.5941284",
"0.5857889",
"0.57498246",
"0.5686118",
"0.56828374",
"0.5679289",
"0.5679289",
"0.56535304",
"0.56535304",
"0.5647721",
"0.5647721",
"0.5647721",
"0.5647721",
"0.5647721",
"0.5647721"
]
| 0.826978 | 0 |
Sets the minor_version of this NextUpgradeInfo. | def minor_version(self, minor_version):
self._minor_version = minor_version | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def operatingsystem_version_minor(self, operatingsystem_version_minor):\n # type: (string_types) -> None\n\n if operatingsystem_version_minor is not None:\n if not isinstance(operatingsystem_version_minor, string_types):\n raise TypeError(\"Invalid type for `operatingsystem_version_minor`, type has to be `string_types`\")\n\n self._operatingsystem_version_minor = operatingsystem_version_minor",
"def minor_version(self) -> str:\n return pulumi.get(self, \"minor_version\")",
"def _getVersionMinor(self):\n return int(self.model.getroot().attrib['versionMinor'])",
"def browser_version_minor(self, browser_version_minor):\n # type: (string_types) -> None\n\n if browser_version_minor is not None:\n if not isinstance(browser_version_minor, string_types):\n raise TypeError(\"Invalid type for `browser_version_minor`, type has to be `string_types`\")\n\n self._browser_version_minor = browser_version_minor",
"def bump_minor(self: _R, inc: int = 1) -> _R:\n if not self.is_stable and self.micro == 0:\n return self.get_stable().bump_minor(inc - 1)\n\n return self._replace(\n BaseVersion(\n epoch=0,\n release=(self.major, self.minor + inc, 0),\n pre=None,\n post=None,\n dev=None,\n local=None,\n )\n )",
"def version_minor(self):\n assert self._version_patch != NotImplemented\n return self._version_patch",
"def minor_version(self):\n return self.unpack_dword(0x18)",
"def operatingsystem_version_minor(self):\n # type: () -> string_types\n return self._operatingsystem_version_minor",
"def commcare_minor_release(self):\n return '%d.%d' % self.build_spec.minor_release()",
"def auto_minor_version_upgrade(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")",
"def auto_minor_version_upgrade(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")",
"def test_minor(self):\n self.assertEqual(1, self._version1.minor())\n self.assertEqual(3, self._version2.minor())",
"def auto_minor_version_upgrade(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")",
"def auto_minor_version_upgrade(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")",
"def auto_minor_version_upgrade(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")",
"def auto_minor_version_upgrade(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")",
"def major_version(self, major_version):\n\n self._major_version = major_version",
"def major_version(self, major_version):\n\n self._major_version = major_version",
"def browser_version_minor(self):\n # type: () -> string_types\n return self._browser_version_minor",
"def minor_version_auto_upgrade(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"minor_version_auto_upgrade\")",
"def test_get_next_version_MINOR(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR + 1, MAJOR, MINOR + 1, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR + 1, 0), ver)",
"def version(self):\n return \"%d.%d\" % (self._vmajor, self._vminor)",
"def max_affected_version(self, max_affected_version):\n\n self._max_affected_version = max_affected_version",
"def test_minor(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n new_version_parts[1] = int(new_version_parts[1]) + 1\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is True",
"def _getMajorMinorVersion( self, sVersion ):\n\n\t\ttry:\n\t\t\trgs = sVersion.split( '.' )\n\t\t\tif len( rgs ) == 2:\n\t\t\t\treturn sVersion\n\n\t\t\treturn rgs[ 0 ] + '.' + rgs[ 1 ]\n\n\t\texcept Exception, e:\n\t\t\terrMsg( 'error getting major.minor version' )\n\t\t\terrMsg( e )\n\t\t\treturn ''",
"def DoCheckManifestMinorVersionTest(self, minor_version, payload_type):\n # Create the test object.\n payload = self.MockPayload()\n payload.manifest.minor_version = minor_version\n payload_checker = checker.PayloadChecker(payload)\n payload_checker.payload_type = payload_type\n report = checker._PayloadReport()\n\n should_succeed = (\n (minor_version == 0 and payload_type == checker._TYPE_FULL) or\n (minor_version == 1 and payload_type == checker._TYPE_DELTA) or\n (minor_version == 2 and payload_type == checker._TYPE_DELTA) or\n (minor_version == 3 and payload_type == checker._TYPE_DELTA) or\n (minor_version == 4 and payload_type == checker._TYPE_DELTA) or\n (minor_version == 5 and payload_type == checker._TYPE_DELTA))\n args = (report,)\n\n if should_succeed:\n self.assertIsNone(payload_checker._CheckManifestMinorVersion(*args))\n else:\n self.assertRaises(PayloadError,\n payload_checker._CheckManifestMinorVersion, *args)",
"def test_minor_property(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n expected = 2\n\n self.assertEqual(v1.minor, expected)",
"def SetVersion(self, addonVersion):\n self._addonVersion = addonVersion",
"def get_ilo_firmware_version_as_major_minor(self):\n try:\n manager, reset_uri = self._get_ilo_details()\n ilo_fw_ver_str = (\n manager['Oem']['Hp']['Firmware']['Current']['VersionString']\n )\n return common.get_major_minor(ilo_fw_ver_str)\n except Exception:\n return None",
"def annotate_minor_heap(self):\n minor_start = get_value_safe(\"caml_young_base\", size_t)\n minor_end = get_value_safe(\"caml_young_end\", size_t)\n if minor_start is None or minor_end is None:\n return\n minor_size = minor_end - minor_start\n\n memrange = self.get_range(minor_start)\n if memrange is not None:\n self.annotate_split_range(minor_start, minor_size, MemoryType.MinorHeap, \"Minor heap\")\n else:\n new_range = MemoryRange(minor_start, minor_size, \"gdb\", \"Minor Heap\", MemoryType.MinorHeap)\n self.set_inaccurate(\"minor heap memory map info\")\n bisect.insort(self.ranges, new_range)"
]
| [
"0.6780061",
"0.6694908",
"0.6519205",
"0.6498734",
"0.6280392",
"0.62734467",
"0.6271302",
"0.59974277",
"0.58754146",
"0.5872266",
"0.5872266",
"0.5766906",
"0.57591933",
"0.57591933",
"0.57591933",
"0.57591933",
"0.5721738",
"0.5721738",
"0.5715131",
"0.5521791",
"0.54774046",
"0.54574454",
"0.54071426",
"0.5380227",
"0.53679127",
"0.5311225",
"0.5259588",
"0.52221835",
"0.5213848",
"0.5186878"
]
| 0.8593409 | 0 |
Sets the mandatory of this NextUpgradeInfo. | def mandatory(self, mandatory):
self._mandatory = mandatory | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def required(self, required):\n\n self._required = required",
"def required(self, required):\n\n self._required = required",
"def required(self, required):\n\n self._required = required",
"def required(self, required):\n\n self._required = required",
"def explicitly_required(self, required):\n\n self._explicit = required\n self._update_state()",
"def setRequired(self, *args):\n return _libsbml.SBMLDocumentPlugin_setRequired(self, *args)",
"def set_required(self, val):\n if not contain_in_list_equal(val, PARAM_REQUIRED):\n raise ArgumentError(\"[WARNING] `required`, should be `true or `false`\")\n if val == False or val == \"false\" or val == \"[]\" or val == \"[ ]\":\n self._required = \"false\"\n elif val == True or val == \"true\" or val == \"[x]\":\n self._required = \"true\"\n else:\n self._required = val\n pass",
"def setRequired(self, *args):\n return _libsbml.CompSBMLDocumentPlugin_setRequired(self, *args)",
"def mandatory(self):\n return self._mandatory",
"def setPkgRequired(self, *args):\n return _libsbml.SBMLDocument_setPkgRequired(self, *args)",
"def is_required(self, is_required):\n\n self._is_required = is_required",
"def setPackageRequired(self, *args):\n return _libsbml.SBMLDocument_setPackageRequired(self, *args)",
"def implicitly_required(self, required):\n\n self._implicit = required\n self._update_state()",
"def required(self, value: Optional[List[str]]):\n self._required = value",
"def required(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"required\")",
"def isMandatory(self, is_set, get_value):\n\t\tif self._mandatory and not is_set(self.name):\treturn True\n\t\treturn False",
"def set_upgraded(self, val: bool) -> None:\n self._upgraded = val",
"def isSetRequired(self):\n return _libsbml.SBMLDocumentPlugin_isSetRequired(self)",
"def Mandatory(cls, **_kwargs):\n\n kwargs = dict(min_occurs=1, nillable=False)\n if cls.get_type_name() is not cls.Empty:\n kwargs['type_name'] = '%s%s%s' % (const.MANDATORY_PREFIX,\n cls.get_type_name(), const.MANDATORY_SUFFIX)\n kwargs.update(_kwargs)\n if issubclass(cls, Unicode):\n kwargs.update(dict(min_len=1))\n\n elif issubclass(cls, Array):\n (k,v), = cls._type_info.items()\n if v.Attributes.min_occurs == 0:\n cls._type_info[k] = Mandatory(v)\n\n return cls.customize(**kwargs)",
"def required(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"required\")",
"def required(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"required\")",
"def is_required(self) -> bool:\n return self.required",
"def require(self, path: str, up_to_date: UpToDate) -> None:\n self.required[path] = up_to_date",
"def __mandatory_is_given(self):\n\n strTestName = 'Mandatory parameter is given (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('mandatory_parameter', 'Mandatory parameter')\n RxCSObject.mandatory_parameter = 1\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def required(self):\n\n return bool(self.qualifiers.get(\"required\", False))",
"def is_required(self):\r\n return self.default == self.NotSpecified",
"def required(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"required\")",
"def isSetPkgRequired(self, *args):\n return _libsbml.SBMLDocument_isSetPkgRequired(self, *args)",
"def required(self) -> bool | None:\n return self._underlying.required",
"def _set_minimum(self):\n self._level_gen.minimum_length = self._minimum_length_spinbox.value()\n self._refresh_view()"
]
| [
"0.6098085",
"0.6098085",
"0.6098085",
"0.6098085",
"0.5943629",
"0.5823603",
"0.5807848",
"0.57494354",
"0.5626224",
"0.55867773",
"0.54788595",
"0.53794473",
"0.53619933",
"0.5344923",
"0.53359044",
"0.52371824",
"0.5211088",
"0.51647735",
"0.5157812",
"0.51299804",
"0.51299804",
"0.5125399",
"0.5104481",
"0.5081528",
"0.50759083",
"0.50620514",
"0.5056589",
"0.5048536",
"0.501238",
"0.49950945"
]
| 0.72124577 | 0 |
Sets the upgrade_time_epoch of this NextUpgradeInfo. | def upgrade_time_epoch(self, upgrade_time_epoch):
self._upgrade_time_epoch = upgrade_time_epoch | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def upgrade_time(self, upgrade_time):\n\n self._upgrade_time = upgrade_time",
"def set_epoch(self, epoch=None):\n params = {'epoch': epoch if epoch is not None else int(time.time())}\n return self._jadeRpc('set_epoch', params)",
"def set_epoch(self, epoch):\n self.epoch = epoch",
"def set_epoch(self, epoch):\r\n pass",
"def setepoch(self, value):\n return _coordsys.coordsys_setepoch(self, value)",
"def release_epoch(self, release_epoch):\n\n self._release_epoch = release_epoch",
"def update_time(self, update_time):\n\n self._update_time = update_time",
"def gps_epoch_timing_info(self, gps_epoch_timing_info):\n\n self._gps_epoch_timing_info = gps_epoch_timing_info",
"def update_epoch(self):\n raise NotImplementedError",
"def up_time(self, up_time):\n\n self._up_time = up_time",
"def mod_time(self, mod_time):\n\n self._mod_time = mod_time",
"def mod_time(self, mod_time):\n\n self._mod_time = mod_time",
"def __init__(self, upgrade_time=None, stable=None, major_version=None, minor_version=None, mandatory=None, upgrade_time_epoch=None): # noqa: E501 # noqa: E501\n\n self._upgrade_time = None\n self._stable = None\n self._major_version = None\n self._minor_version = None\n self._mandatory = None\n self._upgrade_time_epoch = None\n self.discriminator = None\n\n if upgrade_time is not None:\n self.upgrade_time = upgrade_time\n if stable is not None:\n self.stable = stable\n if major_version is not None:\n self.major_version = major_version\n if minor_version is not None:\n self.minor_version = minor_version\n if mandatory is not None:\n self.mandatory = mandatory\n if upgrade_time_epoch is not None:\n self.upgrade_time_epoch = upgrade_time_epoch",
"def evolution_time(self, evolution_time: float) -> None:\n self._evolution_time = evolution_time",
"def set_time(self, time):\n self._time = time",
"def set_umeastime(self, time):\n self.utime = time",
"def set_train_epoch(self, epoch: int):\n self._train_epoch = epoch",
"def set_sleep_time(self, time):\n self.sleep_time = time",
"def SetModTime(self, modtime):\n self.file.SetModTime(modtime)",
"def set_train_epoch(self, epoch: int):\n if hasattr(self, 'cls_head'):\n self.cls_head.set_train_epoch(epoch)",
"def downtime(self, down_time=0):\n self.down_time = down_time\n return down_time",
"def set_last_count_update_time(self, update_time):\n from datetime import datetime\n\n if isinstance(update_time, datetime):\n self.__last_count_update_time = update_time.strftime(self._isoFmt)\n # Fix issue on some systems, e.g. Debian, where %Y doesn't zero-pad\n tpadding = \"\"\n if 10 > update_time.year:\n tpadding = \"000\"\n elif 100 > update_time.year:\n tpadding = \"00\"\n elif 1000 > update_time.year:\n tpadding = \"0\"\n if \"0\" != self.__last_count_update_time[0:1]:\n self.__last_count_update_time = tpadding + \\\n self.__last_count_update_time\n else:\n self.__last_count_update_time = update_time",
"def set_step_time(self, us):\n if us < 20: # 20 us is the shortest possible for esp8266\n self.step_time = 20\n else:\n self.step_time = us",
"def generation_timestamp(self, generation_timestamp):\n\n self._generation_timestamp = generation_timestamp",
"def time_updated(self, time_updated):\n self._time_updated = time_updated",
"async def async_set_position_updated_at(self, time):\n self._position_updated_at = time",
"def cd_sync_epoch(self, cd_sync_epoch):\n if (self._configuration.client_side_validation and\n cd_sync_epoch is not None and cd_sync_epoch < 0): # noqa: E501\n raise ValueError(\"Invalid value for `cd_sync_epoch`, must be a value greater than or equal to `0`\") # noqa: E501\n\n self._cd_sync_epoch = cd_sync_epoch",
"def _set_current_step(self, epoch: int):\n self._cur_step = epoch * self._steps_per_epoch",
"def last_update_timestamp(self, last_update_timestamp):\n\n self._last_update_timestamp = last_update_timestamp",
"def lastmod_time(self, lastmod_time):\n\n self._lastmod_time = lastmod_time"
]
| [
"0.71056724",
"0.58541226",
"0.58303434",
"0.56607425",
"0.5544668",
"0.55109674",
"0.5417715",
"0.5276821",
"0.5267726",
"0.5082459",
"0.5068214",
"0.5068214",
"0.50546193",
"0.50347763",
"0.5013292",
"0.4975316",
"0.49132764",
"0.49092776",
"0.48097897",
"0.47805026",
"0.4725613",
"0.4722906",
"0.46905005",
"0.46825817",
"0.46805978",
"0.46795878",
"0.46659204",
"0.46615562",
"0.4653108",
"0.46193308"
]
| 0.85634923 | 0 |
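The six setter rows above (upgrade_time, stable, major_version, minor_version, mandatory, upgrade_time_epoch) all belong to the same generated NextUpgradeInfo model; its constructor appears among the negatives. A minimal usage sketch, assuming a hypothetical import path for the generated client package:

# Hypothetical module path; the real package name depends on the code generator.
from generated_client.models.next_upgrade_info import NextUpgradeInfo

info = NextUpgradeInfo(stable=True, major_version=5, minor_version=2)
info.upgrade_time = "2024-01-15T03:00:00Z"  # each assignment goes through the property setter shown above
info.mandatory = False
info.upgrade_time_epoch = 1705287600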
Given a node (as an input or as a neuron), a dictionary mapping input names to their values, and a dictionary mapping neuron names to their outputs, returns the output value of the node. This function does NOT do any computation; it simply looks up values in the provided dictionaries. | def node_value(node, input_values, neuron_outputs):  # PROVIDED BY THE STAFF
if isinstance(node, str):
# A string node (either an input or a neuron)
if node in input_values:
return input_values[node]
if node in neuron_outputs:
return neuron_outputs[node]
raise KeyError("Node '{}' not found in either the input values or neuron outputs dictionary.".format(node))
if isinstance(node, (int, float)):
# A constant input, such as -1
return node
raise TypeError("Node argument is {}; should be either a string or a number.".format(node)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def map_output_and_node(cls, onnx_model: onnx.ModelProto):\n output2node = dict()\n for node in onnx_model.graph.node:\n for output_name in node.output:\n output2node[output_name] = node\n return output2node",
"def _lookup_input(nodes, name, value, definition):\n # containers\n if isinstance(value, list):\n return [_lookup_input(nodes, name, elem, definition) for elem in value]\n\n if isinstance(value, dict):\n return {k: _lookup_input(nodes, name, v, definition) for k, v in value.items()}\n\n # node reference\n if not isinstance(value, six.string_types):\n raise ValueError(\n \"Invalid definition for node '%s': invalid reference '%s' of type '%s' in inputs\"\n % (name, value, type(value))\n )\n # node not yet discovered yet\n if not value in nodes:\n # Look for it in the definition items:\n for found_name, d in definition.items():\n if value != found_name:\n continue\n # Load the node into nodes\n _process_kwargs(found_name, d, definition, nodes)\n\n break\n\n if not value in nodes:\n raise ValueError(\n \"Invalid definition for node '%s': reference to nonexistent node '%s' in inputs\" % (name, value)\n )\n node = nodes[value]\n\n # copy in debug mode\n if settings[\"DEBUG\"]:\n node = deepcopy(node)\n\n return node",
"def _run_node(cls,\n onnx_node,\n inputs,\n handle,\n forward,\n opset_version=_known_opset_version):\n outputs = forward(*inputs) if handle is None else forward(\n handle, *inputs)\n if not isinstance(outputs, collections.Iterable):\n outputs = [outputs]\n outputs_dict = OrderedDict()\n for (key, val) in zip(onnx_node.outputs, outputs):\n outputs_dict[key] = val\n return outputs_dict",
"def compute_output(self):\n x, y = self.input_nodes\n print(x.name, y.name)\n self.output_value = backend.dot(x.output_value, y.output_value)\n return self.output_value",
"def _learn_node_parameter_var(outputs, weights, inputs):\n var = 0.\n\n \"\"\" YOUR CODE HERE \"\"\"\n temp = 0\n N_observe = outputs.shape[0]\n if inputs is None:\n temp = np.sum((outputs-weights[0])**2)\n else:\n for i in range(N_observe):\n temp += (outputs[i] - (np.sum(weights[1:] * inputs[i]) +weights[0]))**2\n var = temp/N_observe\n\n\n\n \"\"\" END YOUR CODE HERE \"\"\"\n\n return var",
"def onnx_extract_operator(node, model, nodes_dict):\n op_type = node.op_type\n input_tensors = []\n output_tensors = []\n\n \"\"\" input_tensors\n each input_tensor has its own soure op, but same dest op\n so both have single string\n \"\"\"\n input_names = []\n # name list\n input_tensor_names = node.input\n for input_tensor_name in input_tensor_names:\n origin_tensor_name, input_tensor_name = util.names_from_input(input_tensor_name)\n try:\n pre_node = nodes_dict[nodes_dict[origin_tensor_name]].node\n except BaseException:\n pre_node = nodes_dict[origin_tensor_name].node\n \n data = None\n if pre_node in model.initializer():\n data = to_array(pre_node)\n else:\n if (pre_node not in model.graph().input) and (pre_node.op_type == 'Constant'):\n data = to_array(pre_node.attribute[0].t)\n if isinstance(data, np.ndarray):\n dtype = util.get_data_dtype(data)\n shape = list(data.shape) if data.shape != () else [1]\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[],\n dest_op=[node.name],\n shape=shape,\n data=data,\n dtype=dtype\n )\n input_tensors.append(input_tensor)\n\n else:\n input_tensor = Tensor(name=input_tensor_name,\n source_op=[pre_node.name],\n dest_op=[node.name],\n shape=None,\n data=None,\n dtype=None\n )\n input_tensors.append(input_tensor)\n input_names.append(node.name)\n\n \"\"\" output_tensors\n in onnx, NodeProto has the output attribute\n \"\"\"\n output_tensor_names = node.output\n for output_tensor_name in output_tensor_names:\n output_tensor_name = util.names_from_input(output_tensor_name)[1]\n output_tensor = Tensor(name=output_tensor_name,\n source_op=[node.name],\n dest_op=nodes_dict[node.name].outputs,\n shape=None,\n data=None,\n dtype=None\n )\n\n output_tensors.append(output_tensor)\n\n return op_type, input_tensors, output_tensors",
"def build_output(self, oname, **inputs):\n if oname == 'invscale':\n return self.build_invscale(**inputs)\n elif oname == 'loc':\n return self.build_loc(**inputs)\n elif oname == 'main':\n return self.build_main(**inputs)\n else:\n raise ValueError(\"`oname` {} is not an output name for \"\n \"this node\".format(oname))",
"def _retrieve_or_adapt_input_to_graph_set(\n fx_node_arg: fx_type_utils.Argument,\n fx_name_to_onnxscript_value: Dict[\n str,\n Union[\n onnxscript_graph_building.TorchScriptTensor,\n Tuple[onnxscript_graph_building.TorchScriptTensor, ...],\n ],\n ],\n tracer: onnxscript_graph_building.TorchScriptTracingEvaluator,\n):\n\n onnx_tensor = fx_node_arg\n if isinstance(onnx_tensor, torch.fx.Node):\n # 1. fx_node_arg is a torch.fx.Node, which means\n # fx_node_arg stands for the output of that torch.fx.Node.\n # 2. fx_node_arg (variable in torch.fx.Graph) is be mapped to\n # torch.jit.Value, fx_name_to_onnxscript_value[fx_node_arg.name],\n # in TorchScript graph.\n return fx_name_to_onnxscript_value[onnx_tensor.name]\n if isinstance(onnx_tensor, (tuple, list)) and any(\n isinstance(node, torch.fx.Node)\n and isinstance(node.meta.get(\"val\"), torch.SymInt)\n for node in onnx_tensor\n ):\n # This intends to handle dynamic axes. for example, if the input size of op.Expand\n # is dynamic, each dimension would be variable (i.e., sym variable in Pytorch\n # FX graph. Note that sym variable is mapped to tensor in ONNX Script world)\n # calculated by other operators.\n sequence_mixed_elements: List[\n Union[\n onnxscript_graph_building.TorchScriptTensor,\n List[int],\n ]\n ] = []\n for tensor in onnx_tensor:\n if isinstance(tensor, torch.fx.Node) and isinstance(\n tensor.meta.get(\"val\"), torch.SymInt\n ):\n sequence_mixed_elements.append(fx_name_to_onnxscript_value[tensor.name])\n elif isinstance(tensor, int):\n # NOTE: op.Concat doesn't support scalar, so we need to wrap it with\n # dim, and onnx-script will promote it to tensot(int64)\n sequence_mixed_elements.append([tensor])\n # Concat all the elements in the sequence.\n # shapes are mapped to tensors in ONNX graph (TorchScriptGraph),\n # so list of sym_ints is concatenated to a tensor before calling ONNX op.\n\n # For example:\n # inputs: [[2], [4], fx.Node(SymIntA), [1], fx.Node(SymIntB)]\n # outputs: op.Concat([op.Constant(2), op.Constant(4), TorchScriptTensor(A), op.Constant(1), TorchScriptTensor(B)])\n\n # onnx-script auto wraps python number with op.Constants,\n # so we don't need to specifically process them.\n with onnxscript.evaluator.default_as(tracer):\n output = onnxscript.opset18.Concat(*sequence_mixed_elements, axis=0)\n output.dtype = torch.int64\n output.shape = [len(sequence_mixed_elements)]\n return output\n elif isinstance(onnx_tensor, (tuple, list)) and all(\n isinstance(node, torch.fx.Node) or node is None for node in onnx_tensor\n ):\n sequence_elements: List[\n Union[\n Optional[onnxscript_graph_building.TorchScriptTensor],\n Tuple[\n onnxscript_graph_building.TorchScriptTensor,\n ...,\n ],\n ]\n ] = []\n for tensor in onnx_tensor:\n sequence_elements.append(\n fx_name_to_onnxscript_value[tensor.name] if tensor is not None else None\n )\n return sequence_elements\n if isinstance(onnx_tensor, torch.dtype):\n onnx_tensor = int(\n jit_type_utils.JitScalarType.from_dtype(onnx_tensor).onnx_type()\n )\n # NOTE: if device is specified in kwargs (not consumed), it's free to ignored. But\n # if it's in args, we need to set it to string for dispatcher to match schema.\n if isinstance(onnx_tensor, torch.device):\n # torch.device is not supported by onnxscript (no op). We turn it into\n # a string.\n return str(onnx_tensor)\n\n # all other cases, we do nothing.\n return onnx_tensor",
"def compute_output(self):\n x, y = self.input_nodes\n self.output_value = backend.multiply(x.output_value, y.output_value)\n return self.output_value",
"def _get_input_output_node_names(nodes):\n input_names, output_names = set(), set()\n extension_output_names = set()\n for node in nodes:\n tf_node = node if isinstance(node,\n TensorflowNode) else TensorflowNode(node)\n output_names.add(node.name)\n # Add outputs for Split, Switch TensorArrayV3\n if tf_node.op_type == \"Split\":\n for i in range(1, tf_node.attr[\"num_split\"]):\n output_names.add(tf_node.name + \":{}\".format(i))\n if tf_node.op_type == \"Switch\":\n output_names.add(tf_node.name + \":1\")\n extension_output_names.add((tf_node.name, tf_node.name + \":1\"))\n if tf_node.op_type == \"TensorArrayV3\":\n output_names.add(tf_node.name + \":1\")\n extension_output_names.add((tf_node.name, tf_node.name + \":1\"))\n input_names.update(\n set([inp if inp[0] != \"^\" else inp[1:] for inp in tf_node.inputs]))\n inputs = input_names - output_names\n outputs = output_names - input_names\n while extension_output_names:\n ext_names = extension_output_names.pop()\n for name in ext_names:\n if name in outputs:\n outputs -= set(ext_names)\n break\n inputs.discard(None)\n return list(inputs), list(outputs)",
"def map_input_and_node(cls, onnx_model: onnx.ModelProto):\n\n input2node: Dict[str, List] = dict()\n for node in onnx_model.graph.node:\n for idx, input_name in enumerate(node.input):\n if input_name not in input2node:\n input2node[input_name] = []\n input2node[input_name].append([node, idx])\n return input2node",
"def test_onnx_node_name_to_input_output_names_util(self):\n model = models.resnet18(pretrained=False)\n dummy_input = torch.randn(1, 3, 224, 224)\n torch.onnx.export(model, dummy_input, './data/resnet18.onnx')\n onnx_utils.OnnxSaver.set_node_names('./data/resnet18.onnx', model, dummy_input, is_conditional=False,\n module_marker_map={})\n onnx_model = onnx.load('./data/resnet18.onnx')\n\n # Get Dict mapping node name to the input and output names\n node_to_io_dict,_ = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n\n node_0 = onnx_model.graph.node[0]\n assert node_0.input == node_to_io_dict[node_0.name].inputs\n assert node_0.output == node_to_io_dict[node_0.name].outputs",
"def compute_output(self):\n x, y = self.input_nodes\n self.output_value = backend.add(x.output_value, y.output_value)\n return self.output_value",
"def get_output_node_gene(key, config):\n gene1 = OutputNodeGene(key, config)\n gene1.aggregation = 'a'\n gene1.bias = 0\n gene2 = OutputNodeGene(key, config)\n gene2.aggregation = 'b'\n gene2.bias = 1\n return gene1, gene2",
"def _learn_node_parameter_w(outputs, inputs=None):\n num_inputs = 0 if inputs is None else inputs.shape[1]\n weights = np.zeros(shape=num_inputs + 1)\n\n \"\"\" YOUR CODE HERE \"\"\"\n # Ax = b, A\n N_observe = outputs.shape[0]\n A = np.zeros(shape = (num_inputs+1, num_inputs+1))\n for i in range(A.shape[0]):\n for j in range(A.shape[1]):\n if i==0 and j==0:\n A[i][j] = N_observe\n elif i==0 and j!=0:\n A[i][j] = np.sum(inputs[:,j-1])\n elif i!=0 and j==0:\n A[i][j] = np.sum(inputs[:,i-1])\n else:\n for k in range(N_observe):\n A[i][j] += inputs[k,i-1]*inputs[k, j-1]\n b = np.zeros(shape=num_inputs + 1)\n for i in range(len(b)):\n if i==0:\n b[i] = np.sum(outputs)\n else:\n for k in range(N_observe):\n b[i] += inputs[k,i-1]*outputs[k]\n\n weights = np.linalg.solve(A, b)\n \"\"\" END YOUR CODE HERE \"\"\"\n\n return weights",
"def get_outputs(self, input_dict: Dict) -> Dict[str, np.ndarray]:\n activation_values = self.session.run(self.activation_names, input_dict)\n return dict(zip(self.sanitized_activation_names, activation_values))",
"def _update_output_after_create_node(self):\n # Constants and parameter should not exist for input and output.\n filtered_node = {NodeTypeEnum.CONST.value, NodeTypeEnum.PARAMETER.value}\n for node in self._normal_node_map.values():\n for src_name, input_attr in node.inputs.items():\n src_node = self._get_normal_node(node_name=src_name)\n if src_node.type in filtered_node:\n continue\n\n src_node.add_outputs(node.name, input_attr)",
"def compute(self, node, input_vals):\r\n #assert len(input_vals) == 1\r\n return input_vals[0].astype(node.dtype)",
"def get_node_outputs(node_path):\n \n item = ix.get_item(node_path)\n\n obj_array = ix.api.OfItemArray(1)\n obj_array[0] = item\n item_outputs = ix.api.OfItemVector()\n\n ix.application.get_factory().get_items_outputs(obj_array, item_outputs, False)\n\n node_outputs = []\n for item_ in range(item_outputs.get_count()):\n\n for i in range(item_outputs[item_].get_attribute_count()):\n\n attr= item_outputs[item_].get_attribute(i)\n\n if attr.get_texture():\n\n if str(attr.get_texture()) == item.get_full_name():\n\n #attrs[attr] = target_node.get_full_name()\n node_outputs.append(attr)\n return node_outputs",
"def compute(self, node, input_vals):\r\n \"\"\"TODO: Your code here\"\"\"\r\n \"\"\"Given values of two input nodes, return result of element-wise multiplication.\"\"\"\r\n assert len(input_vals) == 1\r\n #print(input_vals[0].shape)\r\n #print(node.name)\r\n #print(np.max(input_vals[0]))\r\n #print(np.sum(input_vals[0]))\r\n #assert np.mean(np.array(np.less(input_vals[0],750).astype(float32)))==1\r\n return np.exp(input_vals[0])",
"def compute(self, node, input_vals):\n assert len(input_vals) == 2\n return input_vals[0] + input_vals[1]",
"def get_output_data(\n self,\n inputs: Dict[str, Any]) -> Any:\n return inputs",
"def compute(self, node, input_vals):\r\n assert len(input_vals) == 2\r\n return input_vals[0] + input_vals[1]\r\n #print(input_vals[0])\r\n #print(input_vals[1])\r\n #print(input_vals[0]+input_vals[1])\r",
"def update_key_value(\n self, outputs: torch.Tensor, targets: torch.Tensor\n ) -> Dict[str, float]:\n tn, fp, fn, tp, support, _ = self.update(outputs=outputs, targets=targets)\n return {\"fn\": fn, \"fp\": fp, \"support\": support, \"tn\": tn, \"tp\": tp}",
"def update_key_value(\n self, outputs: torch.Tensor, targets: torch.Tensor\n ) -> Dict[str, float]:\n tn, fp, fn, tp, support, _ = self.update(outputs=outputs, targets=targets)\n return {\"fn\": fn, \"fp\": fp, \"support\": support, \"tn\": tn, \"tp\": tp}",
"def compute(self, node, input_vals):\r\n \"\"\"TODO: Your code here\"\"\"\r\n \"\"\"Given values of two input nodes, return result of element-wise multiplication.\"\"\"\r\n assert len(input_vals) == 1\r\n #assert np.mean(np.array(np.greater(input_vals[0],0).astype(float32)))==1\r\n #print(input_vals)\r\n return np.log(input_vals[0])",
"def _graph_fn_get_action_layer_outputs(self, nn_output, nn_input):\n nn_input = next(iter(nn_input.values()))\n\n ret = FlattenedDataOp()\n for flat_key, action_adapter in self.action_adapters.items():\n ret[flat_key] = action_adapter.get_logits(nn_output, nn_input)\n\n return ret",
"def update_key_value(\n self, outputs: torch.Tensor, targets: torch.Tensor\n ) -> Dict[str, float]:\n tn, fp, fn, tp, support = self.update(outputs=outputs, targets=targets)\n return {\"fn\": fn, \"fp\": fp, \"support\": support, \"tn\": tn, \"tp\": tp}",
"def calculate(self, assignments):\n # Build a tuple of the relevant input states from the set of\n # assignments given.\n states = tuple([assignments[v] for v in self.inputs])\n\n # Look them up\n try:\n results = self.lookup[states]\n except KeyError:\n raise RuntimeError(\"Error in {} with key {}\".format(self, states))\n\n # Now, construct a mapping over th output variables and return that.\n return dict(zip(self.outputs, results))",
"def compute(self, node, input_vals):\n assert False, \"placeholder values provided by feed_dict\""
]
| [
"0.63742995",
"0.5989175",
"0.587564",
"0.5840289",
"0.58103305",
"0.5776675",
"0.5659506",
"0.5615959",
"0.559921",
"0.556679",
"0.5563925",
"0.55307496",
"0.54789686",
"0.54659796",
"0.54319817",
"0.5390829",
"0.53307354",
"0.5327316",
"0.5283891",
"0.52830815",
"0.52765304",
"0.52676904",
"0.5256168",
"0.52379894",
"0.52379894",
"0.52331996",
"0.5224938",
"0.52220684",
"0.5214271",
"0.51921713"
]
| 0.7839648 | 0 |
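
A minimal usage sketch of the node_value helper shown in the record above; the node names ('x1', 'x2', 'n1') and the values are made up for illustration and are not part of the dataset.

# Illustrative only: hypothetical input/neuron names and values.
example_inputs = {'x1': 3, 'x2': -1}
example_neuron_outputs = {'n1': 0.5}
node_value('x1', example_inputs, example_neuron_outputs)   # -> 3, found in the input dictionary
node_value('n1', example_inputs, example_neuron_outputs)   # -> 0.5, found in the neuron outputs
node_value(-1, example_inputs, example_neuron_outputs)     # -> -1, numeric constants pass through unchanged
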
Given a neural net and a dictionary of input values, performs forward propagation with the given threshold function to compute the binary output. | def forward_prop(net, input_values, threshold_fn=stairstep):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def forward(inputs,weights,function=sigmoid,step=-1):\n if step == 0:\n return inputs\n elif step == -1:\n step = len(weights) #go to output layer \n output = np.append(1, inputs)\n for i in range(step):\n output = np.append(1, function(np.dot(weights[i], output))) #calculating activation\n return output[1:]",
"def forward(network: dict, x: np.array) -> np.array:\n W1, W2, W3 = network['W1'], network['W2'], network['W3']\n b1, b2, b3 = network['b1'], network['b2'], network['b3']\n z1 = _forward(x, W1, b1, 'sigmoid')\n z2 = _forward(z1, W2, b2, 'sigmoid')\n y = _forward(z2, W3, b3, 'identity')\n return y",
"def forward(self, propagation_matrix, input_features):\n if self.args.model == \"exact\":\n propagation_matrix = torch.nn.functional.dropout(propagation_matrix, p = self.args.dropout, training = self.training)\n abstract_features_1 = self.page_rank_convolution_1(propagation_matrix, input_features, self.args.dropout, True, False)\n abstract_features_2 = self.page_rank_convolution_2(propagation_matrix, abstract_features_1, self.args.dropout, True, True)\n abstract_features_3 = self.page_rank_convolution_3(propagation_matrix, abstract_features_2, 0, False, True)\n predictions = torch.nn.functional.log_softmax(abstract_features_3, dim=1)\n return predictions",
"def forward(self, pred, gt, weight=None):\n num_pos = torch.relu(torch.sum(gt) - 1) + 1\n num_neg = torch.relu(torch.sum(1 - gt) - 1) + 1\n if weight is not None:\n loss = nn.BCEWithLogitsLoss(reduction='none')(pred, gt.float())\n loss = torch.mean(loss * weight)\n elif self.balanced is False:\n loss = nn.BCEWithLogitsLoss(reduction='mean')(pred, gt.float())\n else:\n loss = nn.BCEWithLogitsLoss(pos_weight=num_neg * 1.0 / num_pos, reduction='mean')(pred, gt.float())\n\n # compute precision, recall, f1\n pred_labels = pred > 0\n gt, pred_labels, pred = gt.detach().cpu().numpy(), pred_labels.detach().cpu().numpy(), pred.detach().cpu().numpy()\n precision = precision_score(gt[0], pred_labels[0])\n recall = recall_score(gt[0], pred_labels[0])\n f1 = f1_score(gt[0], pred_labels[0])\n mean_logit_true = np.sum(pred * gt) / max(1, np.sum(gt))\n mean_logit_false = np.sum(pred * (1 - gt)) / max(1, np.sum(1 - gt))\n\n eval_stats = {\n \"loss\": loss,\n \"precision\": float(precision),\n \"recall\": float(recall),\n \"f1\": float(f1),\n \"logit_true\": float(mean_logit_true),\n \"logit_false\": float(mean_logit_false)\n }\n return eval_stats",
"def forward_propagate(X, W, b):\n m = X.shape[1]\n \n \"\"\" We do the linear transformation of our entire dataset. (W.X + b) \"\"\"\n linear_transformation = np.matmul(W.T, X) + b\n \n \"\"\" Apply sigmoid activation to bring outputs within range [0..1] \"\"\"\n activation = sigmoid(linear_transformation)\n \n return activation",
"def apply_network(inputs):\n return apply_layer(tf.sigmoid(apply_layer(inputs, 64)), 1)",
"def activate(self, inputs):\n\t\tstrength = np.dot(self.weights, inputs)\n\t\t# if strength <= self.threshold:\n\t\t# \tself.result = 0\n\t\t# else:\n\t\t# \tself.result = 1\n\t\t# return self.result\n\t\treturn int(strength > self.threshold)",
"def feedForward(self):\n # Calculate the current values of the first layer\n self.layer1 = sigmoid(np.dot(self.input, self.weights1))\n\n # Calculate the sigmoid of the second layer which is the output\n self.output = sigmoid(np.dot(self.layer1, self.weights2))",
"def makeFastFeedForwardFunction(self):\n\n\t\toutWeightMatrix = []\n\t\tfor unit in self.outputLayer:\n\n\t\t\trow = []\n\t\t\tfor b in unit.branchesIn:\n\t\t\t\tprint b.weight\n\t\t\t\trow.append(b.weight)\n\t\t\t\n\t\t\toutWeightMatrix.append(row)\n\t\toutWeightMatrix = np.array(outWeightMatrix).squeeze()\n\n\t\thiddenMatrices = []\n\t\tfor layer in self.hiddenLayers:\n\t\t\tmatrix = []\n\t\t\t#ignore the bias unit, since it has no branches in\n\t\t\tfor unit in layer[1:]:\n\t\t\t\trow = []\n\t\t\t\tfor b in unit.branchesIn:\n\t\t\t\t\trow.append(b.weight)\n\n\t\t\t\tmatrix.append(row)\n\t\t\tmatrix = np.array(matrix)\n\n\t\t\thiddenMatrices.append(matrix)\n\n\t\thidActFunc = (self.hiddenLayers[0])[1].activationFunction\n\t\toutActFunc = self.outputLayer[0].activationFunction\n\n\t\tdef ffFunc(inp):\n\t\n\t\t\tforward = np.insert(inp.T,0,1.0,axis=0)\n\t\t\tfor matrix in hiddenMatrices:\n\t\t\t\tnext = np.dot(matrix,forward)\n\t\t\t\tnext = hidActFunc(next)\n\t\t\t\tforward = np.insert(next,0,1.0,axis=0)\n\n\t\t\tout = np.dot(outWeightMatrix,forward)\n\n\t\t\treturn outActFunc(out)\n\n\t\treturn ffFunc",
"def forward(W,X):\n return activation_func(np.dot(add_bias(X),W))",
"def _forward(z: np.array, W: np.array, b: np.array,\n activation: str) -> np.array:\n a = np.dot(z, W) + b\n if activation == 'sigmoid':\n return sigmoid(a)\n elif activation == 'identity':\n return identity(a)",
"def binary_focal_loss(loss_fn, threshold=0.5, alpha=0.2, gamma=2.0):\n\n def _binary_focal_loss(y_true, y_pred):\n\n # apply threshold to get clearly positive and negative predictions\n y_true_binary = tf.keras.backend.greater(y_true, threshold)\n\n # compute the focal loss\n alpha_factor = tf.keras.backend.ones_like(y_true, dtype=tf.float32) * alpha # create an array with alpha values, same shape as y_true\n alpha_factor = tf.where(y_true_binary, alpha_factor, 1 - alpha_factor) # alpha on true, 1-alpha on false\n alpha_factor = alpha_factor * 2 # we don't want to half the learning rate\n\n focal_weight = tf.where(y_true_binary, 1 - y_pred, y_pred)\n\n # this is needed, because the output contains 0.0 after applying to the input grid\n focal_weight = tf.clip_by_value(focal_weight, tf.keras.backend.epsilon(), 1.0) \n\n focal_weight = alpha_factor * focal_weight**gamma\n focal_weight = tf.squeeze(focal_weight, axis=-1)\n focal_weight = tf.identity(focal_weight, name=\"focal_weight\")\n\n cls_loss = focal_weight * loss_fn(y_true, y_pred)\n cls_loss = tf.identity(cls_loss, name=\"cls_loss\")\n\n # compute the normalizer: the number of positive anchors\n normalizer = tf.where(y_true_binary)\n normalizer = tf.keras.backend.cast(tf.keras.backend.shape(normalizer)[0], tf.keras.backend.floatx())\n normalizer = tf.keras.backend.maximum(tf.keras.backend.cast_to_floatx(1), normalizer)\n\n cls_loss_sum = tf.keras.backend.sum(cls_loss)\n loss = cls_loss_sum / normalizer\n\n loss = tf.identity(loss, name=\"focal_loss\")\n return loss #tf.keras.backend.sum(cls_loss) / normalizer\n\n return _binary_focal_loss",
"def forward(X,W,b,V,d):\n H = sigmoid(X, W, b)\n Y = softmax(H, V, d)\n return H, Y",
"def forward(self, inputs):\n #print(\"w1 shape\", self.w1.shape)\n z1 = np.dot(inputs, self.w1)\n self.a1 = sigmoid(z1)\n \n z2 = np.dot(self.a1, self.w2)\n self.a2 = sigmoid(z2)\n \n z3 = np.dot(self.a2, self.w3)\n self.y = sigmoid(z3)\n \n return self.y",
"def forwardPropagate(self, inputMatrix):\r\n\r\n return 1.0/(1.0+np.exp(-np.einsum('kji, kli->klj', self.WeightMatrixT, inputMatrix)-self.BiasVector))",
"def forwardPropagation(self, inputs, label):\n node_hidden = np.dot(inputs, self.input_W)\n node_hidden = np.add(node_hidden, self.input_B)\n node_hidden = np.maximum(0, node_hidden)\n node_output = np.dot(node_hidden, self.hidden_W)\n node_output = np.add(node_output, self.hidden_B)\n #print(node_output)\n exp_node_output = np.exp(node_output)\n node_output = exp_node_output / np.sum(exp_node_output, axis=1, keepdims=True)\n #print(node_output)\n #node_output = self.softmax(node_output)\n loss = np.sum(-np.log(node_output[range(inputs.shape[0]),label]))/(inputs.shape[0])+0.5 * self.regularizer*np.sum(self.input_W *self.input_W)+0.5 * self.regularizer*np.sum(self.hidden_W *self.hidden_W)\n \"\"\"Loss= Input data loss + Loss correction by penalizing the loss, here we use 0.2 as an experimental value\"\"\"\n #loss = np.sum(-np.log(node_output[range(inputs.shape[0]), label])) / (inputs.shape[0]) + 0.2 * self.regularizer * np.sum(self.input_W ^ 2) + 0.2 * self.regularizer * np.sum(self.hidden_W ^ 2)\n return loss, node_hidden, node_output",
"def binary_predict(probs, threshold = 0.5):\n return (probs >= threshold) * np.ones(len(probs))",
"def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n if not torch.is_tensor(input):\n raise TypeError(\n \"Input type is not a torch.Tensor. Got {}\".format(type(input))\n )\n if input.device != target.device:\n raise ValueError(\n \"input and target must be in the same device. Got: {}\".format(\n input.device, target.device\n )\n )\n\n # filter labels\n target = target.type(torch.long)\n\n if self.activation_type == 'sigmoid':\n multi_hot_key = target\n logits = torch.sigmoid(input)\n zero_hot_key = 1 - multi_hot_key\n focal_loss = -self.alpha * multi_hot_key * \\\n torch.pow((1 - logits), self.gamma) * \\\n (logits + self.epsilon).log()\n focal_loss += -(1 - self.alpha) * zero_hot_key * \\\n torch.pow(logits, self.gamma) * \\\n (1 - logits + self.epsilon).log()\n weights = torch.ones_like(\n focal_loss, dtype=focal_loss.dtype, device=focal_loss.device\n )\n else:\n input_mask = target != self.ignore_index\n target = target[input_mask]\n input = input[input_mask]\n # compute softmax over the classes axis\n pt = F.softmax(input, dim=1)\n logpt = F.log_softmax(input, dim=1)\n\n # compute focal loss\n pt = pt.gather(1, target.unsqueeze(-1)).squeeze()\n logpt = logpt.gather(1, target.unsqueeze(-1)).squeeze()\n focal_loss = -1 * (1 - pt) ** self.gamma * logpt\n\n weights = torch.ones_like(\n focal_loss, dtype=focal_loss.dtype, device=focal_loss.device\n )\n if self.alpha is not None:\n if isinstance(self.alpha, float):\n alpha = torch.tensor(self.alpha, device=input.device)\n weights = torch.where(target > 0, 1 - alpha, alpha)\n elif torch.is_tensor(self.alpha):\n alpha = self.alpha.to(input.device)\n weights = alpha.gather(0, target)\n\n tmp_loss = focal_loss * weights\n if self.reduction == \"none\":\n loss = tmp_loss\n elif self.reduction == \"mean\":\n loss = (\n tmp_loss.sum() / weights.sum()\n if torch.is_tensor(self.alpha)\n else torch.mean(tmp_loss)\n )\n elif self.reduction == \"sum\":\n loss = tmp_loss.sum()\n else:\n raise NotImplementedError(\n \"Invalid reduction mode: {}\".format(self.reduction)\n )\n return loss",
"def predict(data_point,weights): \n return sigmoidFunction(np.dot(np.append([1.0],data_point),weights))",
"def forward(self, x):\n for l in self.layers:\n w = l.weights\n b = l.biases\n x = self.sigmoid(np.dot(x, w) + b)\n return x",
"def _pcnfwd(self,inputs):\n\n outputs = np.dot(inputs,self.weights)\n\n # Threshold the outputs\n return self.f(outputs)",
"def predict(self, inputs: np.array, logits: bool = False, threshold: float = 0.5) -> np.array:\n\n preds = softmax(np.dot(inputs, self.theta), axis=1)\n if not logits:\n return preds >= threshold\n else:\n return preds",
"def classify(self, predict_wx, threshold):\n # predict_wx = self.compute_wx(data_instances, self.model_weights.w_, self.model_weights.intercept_)\n\n def predict(x):\n prob = activation.sigmoid(x)\n pred_label = 1 if prob > threshold else 0\n return prob, pred_label\n\n predict_table = predict_wx.mapValues(predict)\n return predict_table",
"def forwardPass(self, inputs):\n #############################################################################\n # TODO: Implement the forward phase of the model. It has two hidden layers \n # and the output layer. The activation function of the two hidden layers is \n # sigmoid function. The output layer activation function is the softmax function\n # because we are working with multi-class classification. \n #############################################################################\n\n # layer 1 \n # compute the forward pass on the first hidden layer with the sigmoid function\n\n self.hidden1 = np.dot(inputs, self.weights1) #(9000, 785) (785, 5)\n # self.hidden1 = self.sigmoid_fun(self.hidden1) #(9000, 5)\n self.hidden1 = 1.0 / (1.0 + np.exp(-self.beta*self.hidden1))\n self.hidden1 = np.concatenate((self.hidden1, -np.ones((np.shape(inputs)[0], 1))), axis=1) # (9000,6)\n\n\n # layer 2\n # compute the forward pass on the second hidden layer with the sigmoid function\n self.hidden2 = np.dot(self.hidden1, self.weights2) # (9000,6) (6, 5)\n self.hidden2 = self.sigmoid_fun(self.hidden2) # (9000,5)\n self.hidden2 = np.concatenate((self.hidden2, -np.ones((np.shape(self.hidden1)[0], 1))), axis=1) # (9000,6)\n\n # output layer\n # compute the forward pass on the output layer with softmax function\n outputs = np.dot(self.hidden2, self.weights3) # (9000,6) (6, 10)\n normalisers = np.sum(np.exp(outputs), axis=1)*np.ones((1, np.shape(outputs)[0]))\n outputs = np.transpose(np.transpose(np.exp(outputs)) / normalisers)\n # outputs = self.softmax_fun(outputs) # (9000,10)\n # print(outputs)\n #############################################################################\n # END of YOUR CODE \n #############################################################################\n return outputs",
"def hard_sigmoid(x):\n x = (0.2 * x) + 0.5\n x = F.threshold(-x, -1, -1)\n x = F.threshold(-x, 0, 0)\n return x",
"def activate(self, inputvaluelist: List[float]):\n if len(inputvaluelist) != len(self.inputWeight):\n raise Exception(f\"The length input is {len(inputvaluelist)} and is not equal\"\n f\" to length of weights({len(self.inputWeight)})\")\n self.inputvaluelist = inputvaluelist\n inputlist = list(zip(inputvaluelist, self.inputWeight))\n\n input_sum = 0\n for inp in inputlist:\n input_sum += inp[0] * inp[1]\n input_sum += self.bias\n\n self.output = sigmoid(input_sum)\n\n return self.output",
"def forward(self, inputs): \n self.z1 = self.af(np.dot(inputs, self.hidden1_weights)) \n self.z2 = self.af(np.dot(self.z1, self.hidden2_weights))\n self.z3 = sigmoid(np.dot(self.z2, self.output3_weights)) # because the output interval must be [0, 1]\n return self.z3 # so the activation function of last layer must be sigmoid",
"def forward_propagate(self, x):\n Zh = np.dot(x, self.weights_ih)\n Ah = self.sigmoid(Zh)\n Zo = np.dot(Ah, self.weights_ho)\n output = self.softmax(Zo)\n return output",
"def forwardPropagate(self, inputMatrix):\r\n return 1.0/(1.0+np.exp(-np.einsum('ji, li->lj', self.WeightMatrixT, inputMatrix)-self.BiasVector))",
"def get_predict(prediction, threshold):\n\n prediction[prediction < threshold] = 0\n prediction[prediction >= threshold] = 1\n \n return prediction"
]
| [
"0.6250236",
"0.60710454",
"0.5927202",
"0.589811",
"0.58545476",
"0.57951117",
"0.5756987",
"0.57506436",
"0.5733161",
"0.57154197",
"0.57070035",
"0.57040226",
"0.5698928",
"0.5695584",
"0.5671023",
"0.5650407",
"0.5640358",
"0.5620086",
"0.56128013",
"0.560631",
"0.5605319",
"0.55858403",
"0.5582811",
"0.55801123",
"0.5577787",
"0.5562907",
"0.55470395",
"0.5543604",
"0.5543568",
"0.5535102"
]
| 0.76103055 | 0 |
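
The forward_prop document above is only a stub (raise NotImplementedError). Below is a minimal sketch of the idea the query describes, assuming the network is given as an ordered list of (neuron, [(incoming_node, weight), ...]) pairs rather than the lab's NeuralNet object, and reusing node_value from the earlier record; the stairstep stand-in and all names here are assumptions, not taken from the source.

def stairstep(x, threshold=0):
    # Stand-in threshold function (assumed semantics): fire (1) when the weighted sum reaches the threshold.
    return 1 if x >= threshold else 0

def forward_prop_sketch(net, input_values, threshold_fn=stairstep):
    # net: ordered list of (neuron, [(incoming_node, weight), ...]) pairs,
    # listed so each neuron appears after every node that feeds into it.
    neuron_outputs = {}
    for neuron, incoming in net:
        weighted_sum = sum(weight * node_value(node, input_values, neuron_outputs)
                           for node, weight in incoming)
        neuron_outputs[neuron] = threshold_fn(weighted_sum)
    final_output = neuron_outputs[net[-1][0]]  # last listed neuron is taken as the output neuron
    return final_output, neuron_outputs
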
Given an unknown function of three variables and a list of three values representing the current inputs into the function, increments each variable by +/- step_size or 0, with the goal of maximizing the function output. | def gradient_ascent_step(func, inputs, step_size):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def step_4(f: Callable[..., float], x: float, y: np.array, params: Tuple,\\\n h: float, k3: np.array) -> np.array:\n\n # Initialize the output vector.\n n = len(y)\n y_int = np.zeros(n)\n\n # Find dym/dx using the given function, then use it to compute dym-1/dx.\n y_int[0] = f(x + h, y + k3, *params) * h\n\n # Starting with dym-1/dx, compute the other values down to y/dx.\n for i in range(1, n):\n y_int[i] = (y[n-i] + k3[n-1]) * h\n\n # Reverse the output vector so y/dx is on top.\n y_int = np.flipud(y_int)\n\n return y_int",
"def step_3(f: Callable[..., float], x: float, y: np.array, params: Tuple,\\\n h: float, k2: np.array) -> np.array:\n\n # Initialize the output vector.\n n = len(y)\n y_int = np.zeros(n)\n\n # Find dym/dx using the given function, then use it to compute dym-1/dx.\n y_int[0] = f(x + (h / 2), y + (k2 / 2), *params) * h\n\n # Starting with dym-1/dx, compute the other values down to y/dx.\n for i in range(1, n):\n y_int[i] = (y[n-i] + (k2[n-i] / 2)) * h\n\n # Reverse the output vector so y/dx is on top.\n y_int = np.flipud(y_int)\n\n return y_int",
"def instability_bf(funcs, step = 10, maximum = 300, guess = 0, tolerance=0.01):\n if guess < maximum:\n s = 1 # to increase\n else:\n s = -1 # to decrease\n step = s*abs(step) # correct step\n # offset to ensure that data moves to maximum even if actual data is stable\n offset = [f(maximum) for f in funcs]\n val_prev = np.array([f(guess-step) for f in funcs]+offset) # first y values with offset\n acc = 0 # accumulator to interchange when to add offset and when not\n while s*(maximum-guess)>0: # check approximation to maximum\n val = [f(guess) for f in funcs] # get y values\n if acc%2: # interchange\n val = np.array(val+offset) # values with offset\n else:\n val = np.array(val+val) # just values\n # np.repeat(np.mean(val),val.size)\n # check minimization\n if np.allclose(val, val_prev, tolerance, tolerance): # it means instability\n return True, guess # success!\n guess += step # updata step\n acc += 1 # update accumulator\n val_prev = val # update previous data\n return False, guess # not found or limit reached",
"def __slide_function__(func, x, dt, params, window_size, step_size, kernel_name, solver=None):\n\n # get smoothing kernel\n if not window_size % 2: # then make odd\n window_size += 1\n ker = KERNELS[kernel_name](window_size)\n\n x_hat_list = []\n dxdt_hat_list = []\n weight_list = []\n\n for p in range(0, len(x), step_size):\n # deal with end points\n start = p - int((window_size-1)/2)\n end = p + int((window_size-1)/2)+1\n\n ker_start = 0\n ker_end = window_size\n\n if start < 0:\n ker_start = np.abs(start)\n start = 0\n if end > len(x):\n ker_end = window_size - (end-len(x))\n end = len(x)\n\n # weights\n w = ker[ker_start:ker_end]\n w = w/np.sum(w)\n\n # run the function on the window\n _x = x[start:end]\n x_hat, dxdt_hat = func(_x, dt, params, options={'weights': w, 'solver': solver})\n\n # stack results\n z_x_hat = np.zeros([len(x)])\n z_x_hat[start:end] = x_hat\n x_hat_list.append(z_x_hat)\n\n z_dxdt_hat = np.zeros([len(x)])\n z_dxdt_hat[start:end] = dxdt_hat\n dxdt_hat_list.append(z_dxdt_hat)\n\n z_weights = np.zeros([len(x)])\n z_weights[start:end] = w\n weight_list.append(z_weights)\n\n # column norm weights\n weights = np.vstack(weight_list)\n for col in range(weights.shape[1]):\n weights[:, col] = weights[:, col] / np.sum(weights[:, col])\n\n # stack and weight x_hat and dxdt_hat\n x_hat = np.vstack(x_hat_list)\n dxdt_hat = np.vstack(dxdt_hat_list)\n\n x_hat = np.sum(weights*x_hat, axis=0)\n dxdt_hat = np.sum(weights*dxdt_hat, axis=0)\n\n return x_hat, dxdt_hat",
"def evaluate(f, a):\n if f == []:\n return 0\n result = f[-1] # begin with leading coefficient\n i = len(f) - 1 # number of times to iterate \n while i >= 0:\n result = f[i] + a*result\n i -= 1\n return result",
"def iterate_run(self, stepsize: float, **kwargs):\r\n # TODO extend to non mono-dimensional variables\r\n\r\n # exchange dual variables with neighbors\r\n lambda_neigh = self.agent.neighbors_exchange(self.lambd, dict_neigh=True)\r\n deltalambda = np.zeros(self.x_shape)\r\n\r\n for j in self.agent.in_neighbors:\r\n deltalambda += self.lambd[j] - lambda_neigh[j]\r\n \r\n # build local problem\r\n x = Variable(self.x_shape[0])\r\n obj_function = self.agent.problem.objective_function + deltalambda @ x\r\n pb = Problem(obj_function, self.agent.problem.constraints)\r\n\r\n # solve problem and save data\r\n x = pb.solve()\r\n\r\n # exchange primal variables with neighbors\r\n x_neigh = self.agent.neighbors_exchange(x)\r\n\r\n self._update_local_solution(x, x_neigh, stepsize, **kwargs)",
"def gd(func, grad, x0, numIter, stepSize):\r\n \r\n # initialize current location\r\n x = x0\r\n\r\n # set up storage for trajectory of function values\r\n trajectory = zeros(numIter + 1)\r\n trajectory[0] = func(x)\r\n\r\n # begin iterations\r\n for iter in range(numIter):\r\n # compute the gradient at the current location\r\n g = grad(x)\r\n\r\n\r\n # compute the step size\r\n eta = stepSize/(sqrt(iter+1))\r\n\r\n\r\n # step in the direction of the gradient\r\n x = x - eta * g\r\n\r\n\r\n # record the trajectory\r\n trajectory[iter+1] = func(x)\r\n\r\n # return the solution\r\n return (x, trajectory)",
"def minimize_neldermead(func, x0, args=(), callback=None,\n maxiter=None, maxfev=None, disp=False,\n return_all=False, initial_simplex=None,\n xatol=1e-4, fatol=1e-4, **unknown_options):\n maxfun = maxfev\n retall = return_all\n\n rho = 1\n chi = 2\n psi = 0.5\n sigma = 0.5\n nonzdelt = 0.05\n zdelt = 0.00025\n\n if initial_simplex is None:\n N = len(x0)\n\n sim = numpy.zeros((N + 1, N), dtype=x0.dtype)\n sim[0] = x0\n for k in range(N):\n y = numpy.array(x0, copy=True)\n if y[k] != 0:\n y[k] = (1 + nonzdelt) * y[k]\n else:\n y[k] = zdelt\n sim[k + 1] = y\n\n maxiter = 10\n maxfun = 10\n\n one2np1 = list(range(1, N + 1))\n fsim = numpy.zeros((N + 1,), float)\n\n for k in range(N + 1):\n fsim[k] = func(sim[k])\n\n ind = numpy.argsort(fsim)\n fsim = numpy.take(fsim, ind, 0)\n # sort so sim[0,:] has the lowest function value\n sim = numpy.take(sim, ind, 0)\n raise Exception()\n print('aaaaffaaaaaa')\n\n iterations = 1\n\n while iterations < maxiter:\n if (numpy.max(numpy.ravel(numpy.abs(sim[1:] - sim[0]))) <= xatol and\n numpy.max(numpy.abs(fsim[0] - fsim[1:])) <= fatol):\n break\n logger.debug('itr: %s' % iterations)\n print('aaaaaaaaaa')\n xbar = numpy.add.reduce(sim[:-1], 0) / N\n xr = (1 + rho) * xbar - rho * sim[-1]\n fxr = func(xr)\n doshrink = 0\n\n if fxr < fsim[0]:\n xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]\n fxe = func(xe)\n\n if fxe < fxr:\n sim[-1] = xe\n fsim[-1] = fxe\n else:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fsim[0] <= fxr\n if fxr < fsim[-2]:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fxr >= fsim[-2]\n # Perform contraction\n if fxr < fsim[-1]:\n xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]\n fxc = func(xc)\n\n if fxc <= fxr:\n sim[-1] = xc\n fsim[-1] = fxc\n else:\n doshrink = 1\n else:\n # Perform an inside contraction\n xcc = (1 - psi) * xbar + psi * sim[-1]\n fxcc = func(xcc)\n\n if fxcc < fsim[-1]:\n sim[-1] = xcc\n fsim[-1] = fxcc\n else:\n doshrink = 1\n\n if doshrink:\n for j in one2np1:\n sim[j] = sim[0] + sigma * (sim[j] - sim[0])\n fsim[j] = func(sim[j])\n\n ind = numpy.argsort(fsim)\n sim = numpy.take(sim, ind, 0)\n fsim = numpy.take(fsim, ind, 0)\n if callback is not None:\n callback(sim[0])\n iterations += 1\n\n x = sim[0]\n fval = numpy.min(fsim)\n warnflag = 0\n\n result = OptimizeResult(fun=fval, nit=iterations, nfev=0,\n status=warnflag, success=(warnflag == 0),\n message=None, x=x, final_simplex=(sim, fsim))\n return result",
"def body(i, *args):\n del args\n fn_result = fn(ctx, iterator.get_next())\n flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)\n with ops.control_dependencies([fn_result]):\n return [i + 1] + flat_last_step_outputs",
"def step(self, delta_l11, delta_l12, delta_l13, delta_l21, delta_l22, delta_l23):\n self.l11 += delta_l11; self.l12 += delta_l12; self.l13 += delta_l13\n self.l21 += delta_l11; self.l22 += delta_l12; self.l23 += delta_l13\n self.l21 += delta_l21; self.l22 += delta_l22; self.l23 += delta_l23\n # check that all tendon lenghts are within limit\n self.l11 = self.l1min if self.l11 < self.l1min else self.l11\n self.l12 = self.l1min if self.l12 < self.l1min else self.l12\n self.l13 = self.l1min if self.l13 < self.l1min else self.l13\n self.l11 = self.l1max if self.l11 > self.l1max else self.l11\n self.l12 = self.l1max if self.l12 > self.l1max else self.l12\n self.l13 = self.l1max if self.l13 > self.l1max else self.l13\n self.l21 = self.l2min if self.l21 < self.l2min else self.l21\n self.l22 = self.l2min if self.l22 < self.l2min else self.l22\n self.l23 = self.l2min if self.l23 < self.l2min else self.l23\n self.l21 = self.l2max if self.l21 > self.l2max else self.l21\n self.l22 = self.l2max if self.l22 > self.l2max else self.l22\n self.l23 = self.l2max if self.l23 > self.l2max else self.l23\n old_tip_vec = self.tip_vec2 # used for potential reward\n self.update_variables()\n new_tip_vec = self.tip_vec2 # used for potential reward\n reward = self.r_static\n return reward",
"def f(x, y, z, a = 10, b = 20, c = 30):\r\n \r\n return((x + y + z) * (a + b + c))",
"def func(x, a, b, c):\r\n return a + (b * x) + (c * x * x)",
"def backtracking(obj_func, x, step_size, grad, is_valid_x_func\n , alpha: float = 0.25, beta: float = 0.8):\n t = 1\n while t > beta * t:\n if not is_valid_x_func(x + t * step_size):\n t *= beta\n continue\n elif obj_func(x + t * step_size) < obj_func(x) + alpha * t * grad.T.dot(step_size).squeeze():\n break\n t *= beta\n return t",
"def sliding_window_analysis(sequence, function,\n window_size=100000, step_size=50000):\n for start in range(0, len(sequence), step_size):\n end = start + window_size\n if end > len(sequence):\n break\n yield start, end, function(sequence[start:end])",
"def update(self, function_values, es, bounds=None):\r\n if bounds is None:\r\n bounds = self.bounds\r\n if bounds is None or (bounds[0] is None and bounds[1] is None): # no bounds ==> no penalty\r\n return self # len(function_values) * [0.0] # case without voilations\r\n\r\n N = es.N\r\n ### prepare\r\n # compute varis = sigma**2 * C_ii\r\n varis = es.sigma**2 * array(N * [es.C] if np.isscalar(es.C) else ( # scalar case\r\n es.C if np.isscalar(es.C[0]) else # diagonal matrix case\r\n [es.C[i][i] for i in xrange(N)])) # full matrix case\r\n\r\n # dmean = (es.mean - es.gp.into_bounds(es.mean)) / varis**0.5\r\n dmean = (es.mean - es.gp.geno(es.gp.into_bounds(es.gp.pheno(es.mean)))) / varis**0.5\r\n\r\n ### Store/update a history of delta fitness value\r\n fvals = sorted(function_values)\r\n l = 1 + len(fvals)\r\n val = fvals[3*l // 4] - fvals[l // 4] # exact interquartile range apart interpolation\r\n val = val / np.mean(varis) # new: val is normalized with sigma of the same iteration\r\n # insert val in history\r\n if np.isfinite(val) and val > 0:\r\n self.hist.insert(0, val)\r\n elif val == inf and len(self.hist) > 1:\r\n self.hist.insert(0, max(self.hist))\r\n else:\r\n pass # ignore 0 or nan values\r\n if len(self.hist) > 20 + (3*N) / es.popsize:\r\n self.hist.pop()\r\n\r\n ### prepare\r\n dfit = np.median(self.hist) # median interquartile range\r\n damp = min(1, es.sp.mueff/10./N)\r\n\r\n ### set/update weights\r\n # Throw initialization error\r\n if len(self.hist) == 0:\r\n raise _Error('wrongful initialization, no feasible solution sampled. ' +\r\n 'Reasons can be mistakenly set bounds (lower bound not smaller than upper bound) or a too large initial sigma0 or... ' +\r\n 'See description of argument func in help(cma.fmin) or an example handling infeasible solutions in help(cma.CMAEvolutionStrategy). ')\r\n # initialize weights\r\n if (dmean.any() and (not self.weights_initialized or es.countiter == 2)): # TODO\r\n self.gamma = array(N * [2*dfit])\r\n self.weights_initialized = True\r\n # update weights gamma\r\n if self.weights_initialized:\r\n edist = array(abs(dmean) - 3 * max(1, N**0.5/es.sp.mueff))\r\n if 1 < 3: # this is better, around a factor of two\r\n # increase single weights possibly with a faster rate than they can decrease\r\n # value unit of edst is std dev, 3==random walk of 9 steps\r\n self.gamma *= exp((edist>0) * np.tanh(edist/3) / 2.)**damp\r\n # decrease all weights up to the same level to avoid single extremely small weights\r\n # use a constant factor for pseudo-keeping invariance\r\n self.gamma[self.gamma > 5 * dfit] *= exp(-1./3)**damp\r\n # self.gamma[idx] *= exp(5*dfit/self.gamma[idx] - 1)**(damp/3)\r\n elif 1 < 3 and (edist>0).any(): # previous method\r\n # CAVE: min was max in TEC 2009\r\n self.gamma[edist>0] *= 1.1**min(1, es.sp.mueff/10./N)\r\n # max fails on cigtab(N=12,bounds=[0.1,None]):\r\n # self.gamma[edist>0] *= 1.1**max(1, es.sp.mueff/10./N) # this was a bug!?\r\n # self.gamma *= exp((edist>0) * np.tanh(edist))**min(1, es.sp.mueff/10./N)\r\n else: # alternative version, but not better\r\n solutions = es.pop # this has not been checked\r\n r = self.feasible_ratio(solutions) # has to be the averaged over N iterations\r\n self.gamma *= exp(np.max([N*[0], 0.3 - r], axis=0))**min(1, es.sp.mueff/10/N)\r\n es.more_to_write += list(self.gamma) if self.weights_initialized else N * [1.0]\r\n ### return penalty\r\n # es.more_to_write = self.gamma if not np.isscalar(self.gamma) else N*[1]\r\n return self # bound penalty values\r",
"def g_iter(n):\n \"*** YOUR CODE HERE ***\"\n vals = [1, 2, 3]\n if n <= 3:\n return vals[n-1]\n for i in range(n - 3):\n new_val = 3 * vals[0] + 2 * vals[1] + 1 * vals[2]\n vals = vals[1:] + [new_val]\n return vals[-1]",
"def test_find_maximum_list_of_funcs(ntrain, n_inits=5, dtype = tf.float32):\n tf.reset_default_graph()\n\n xdim = 2\n xmin = -10.\n xmax = 10.\n \n funcs = [lambda x: tf.sin( tf.matmul(x,x,transpose_b=True) ),\n lambda x: tf.cos( tf.matmul(x,x,transpose_b=True)) + 2.0 ]\n n_funcs = len(funcs)\n\n initializers = tf.random.uniform(shape=(n_funcs, n_inits, xdim), dtype=dtype) * (xmax - xmin) + xmin\n\n xs, xs_list, fvals = gen_fval_xs(funcs, n_inits, xdim, xmin, xmax, dtype=dtype, name='test_max_listf')\n\n assign_inits = []\n for i in range(n_funcs):\n for j in range(n_inits):\n assign_inits.append( tf.assign(xs_list[i][j], tf.reshape(initializers[i,j,:], shape=(1,xdim))) )\n\n optimizer = tf.train.AdamOptimizer()\n\n trains, max_vals, max_inputs = find_maximum_list_of_funcs(xdim, n_inits, n_funcs, xs, xs_list, fvals, optimizer=optimizer, dtype=dtype, name=\"opt_list_funcs\")\n\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n sess.run(assign_inits)\n\n\n for i in range(ntrain):\n _, max_vals_val, max_inputs_val, xs_val, fvals_val = sess.run([trains, max_vals, max_inputs, xs, fvals])\n\n if i == ntrain - 1 or i == 1:\n print('')\n print('max input = ', max_inputs_val)\n print('max output = ', max_vals_val)\n print('xs = ', xs_val)\n print('fvals = ', fvals_val)",
"def step(self, objective_fn, *args, **kwargs):\n # will single out one variable to change at a time\n # these hold the arguments not getting updated\n before_args = []\n after_args = list(args)\n\n # mutable version of args to get updated\n args_new = list(args)\n\n for index, arg in enumerate(args):\n # removing current arg from after_args\n del after_args[0]\n\n if getattr(arg, \"requires_grad\", True):\n x_flat = np.fromiter(_flatten(arg), dtype=float)\n\n # version of objective function that depends on a flattened version of\n # just the one argument. All others held constant.\n objective_fn_flat = lambda x_flat, arg_kw=arg: objective_fn(\n *before_args, unflatten(x_flat, arg_kw), *after_args, **kwargs\n )\n\n # updating each parameter in current arg\n for d, _ in enumerate(x_flat):\n x_flat = self._rotosolve(objective_fn_flat, x_flat, d)\n\n args_new[index] = unflatten(x_flat, arg)\n\n # updating before_args for next loop\n before_args.append(args_new[index])\n\n # unwrap arguments if only one, backward compatible and cleaner\n if len(args_new) == 1:\n return args_new[0]\n return args_new",
"def fn(i):\n if i < 0: return 0\n return max(fn(i-1), fn(i-2) + nums[i])",
"def __call__(self, origin, function, state, **kwargs):\n direction = state['direction']\n ak = 0.\n if 'initial_alpha_step' in state:\n bk = state['initial_alpha_step']\n else:\n bk = self.stepSize\n v_bk = function(origin + bk * direction)\n\n while abs(bk - ak) > self.minStepSize:\n v_ak = function(origin + ak * direction)\n g_ak = numpy.dot(function.gradient(origin + ak * direction), direction)\n ck = ak - .5 * (ak - bk) * g_ak / (g_ak - (v_ak - v_bk) / (ak - bk))\n v_ck = function(origin + ck * direction)\n\n bk = ak\n ak = ck\n v_bk = v_ak\n v_ak = v_ck\n\n state['alpha_step'] = ck\n return origin + ck * direction",
"def max_continuous(func: Callable[[Tuple], np.ndarray], over: Iterable[Tuple],\\\n state: Tuple[Union[int, float]]) -> Tuple[float, Tuple[Union[int, float]], None]:\n statebounds = tuple(zip(state, state))\n init = tuple([*state, *np.random.uniform(*zip(*over))])\n funcarg = lambda x: -func(np.asarray(x).reshape(1,-1))[0, 0]\n res = minimize(funcarg, x0=init, bounds=(*statebounds, *over))\n return (-funcarg(res.x), tuple(res.x[len(state):]), None)",
"def main(n):\n return sum(f(i) for i in xrange(n))",
"def _incremental_steps(start, end, steps, stepsize=None):\n if stepsize is None: step_size = (end - start) / np.maximum((steps - 1), 1)\n gradient = []\n for i in range(steps):\n value = start + step_size * i\n gradient.append(value)\n\n return gradient[0:steps]",
"def linear_warmup(base_value, max_warmup_iter, cur_step):\n if max_warmup_iter <= cur_step:\n return base_value\n return base_value * cur_step / max_warmup_iter",
"def _chunk_vmapped_function(vmapped_fun, chunk_size, argnums=0):\n\n if chunk_size is None:\n return vmapped_fun\n\n if isinstance(argnums, int):\n argnums = (argnums,)\n\n def _fun(*args):\n\n n_elements = jax.tree_leaves(args[argnums[0]])[0].shape[0]\n n_chunks, n_rest = divmod(n_elements, chunk_size)\n\n if n_chunks == 0 or chunk_size >= n_elements:\n y = vmapped_fun(*args)\n else:\n # split inputs\n def _get_chunks(x):\n x_chunks = jax.tree_map(lambda x_: x_[: n_elements - n_rest, ...], x)\n x_chunks = _chunk(x_chunks, chunk_size)\n return x_chunks\n\n def _get_rest(x):\n x_rest = jax.tree_map(lambda x_: x_[n_elements - n_rest :, ...], x)\n return x_rest\n\n args_chunks = [\n _get_chunks(a) if i in argnums else a for i, a in enumerate(args)\n ]\n args_rest = [\n _get_rest(a) if i in argnums else a for i, a in enumerate(args)\n ]\n\n y_chunks = _unchunk(\n scanmap(vmapped_fun, scan_append, argnums)(*args_chunks)\n )\n\n if n_rest == 0:\n y = y_chunks\n else:\n y_rest = vmapped_fun(*args_rest)\n y = jax.tree_map(\n lambda y1, y2: jnp.concatenate((y1, y2)), y_chunks, y_rest\n )\n return y\n\n return _fun",
"def initial_estimator(f, x, step,k):\n\n fx = f(x)\n\n if decide(fx > 0):\n sign1 = 1\n else:\n sign1 = -1\n k_step = k\n h = fx / derivative(f, x)\n\n for k1 in range(1, 50000):\n step = step + 1\n x_new = x - k_step * h\n k_step = k_step * 2 # make the k double in each iteration\n fx_new = f(x_new)\n if decide(fx_new > 0):\n sign2 = 1\n else:\n sign2 = -1\n\n if not (sign1 == sign2):\n return x_new, step\n\n print(\"limit need to Increase\")",
"def minimize(fun, \n bounds = None, \n value_limit = math.inf,\n num_retries = 1000,\n logger = None,\n workers = mp.cpu_count(),\n popsize = 31, \n max_evaluations = 50000, \n capacity = 500,\n stop_fittness = None,\n optimizer = None,\n ):\n\n if optimizer is None:\n optimizer = de_cma(max_evaluations, popsize, stop_fittness) \n store = Store(bounds, capacity = capacity, logger = logger)\n return retry(fun, store, optimizer.minimize, num_retries, value_limit, workers)",
"def step_1(f: Callable[..., float], x: float, y: np.array, params: Tuple,\\\n h: float) -> np.array:\n\n # Initialize the output vector.\n n = len(y)\n y_int = np.zeros(n)\n\n # Find dym/dx using the given function, then use it to compute dym-1/dx.\n y_int[0] = f(x, y, *params) * h\n\n # Starting with dym-1/dx, compute the other values down to y/dx.\n for i in range(1, n):\n y_int[i] = y[n-i] * h\n\n # Reverse the output vector so y/dx is on top.\n y_int = np.flipud(y_int)\n\n return y_int",
"def basis_fns(n=0):\n return lambda x: np.sum(x ** (n+1), axis=1)",
"def make_function(self):\n result = {}\n x0 = 0\n for i, x in enumerate(self.step_vals):\n vals_range = xrange(x0, x)\n for k in vals_range:\n result[k] = i\n x0 = x\n self.function = result"
]
| [
"0.5863381",
"0.5583114",
"0.5476102",
"0.54370964",
"0.53885406",
"0.53238237",
"0.5317676",
"0.5294501",
"0.5279447",
"0.5264133",
"0.52436185",
"0.5241336",
"0.52220637",
"0.5213319",
"0.5204717",
"0.5185854",
"0.5183404",
"0.5178885",
"0.51723236",
"0.5156619",
"0.5156295",
"0.5155214",
"0.51367813",
"0.51281196",
"0.5110909",
"0.5087873",
"0.5085143",
"0.50778943",
"0.50718033",
"0.5068485"
]
| 0.5839692 | 1 |
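
The gradient_ascent_step document above is likewise a stub. A sketch of one way to do what the query describes: try +step_size, -step_size, and 0 for each of the three inputs and keep the combination that maximizes the function. The return format (best output plus the inputs that produced it) is an assumption, not confirmed by the source.

from itertools import product

def gradient_ascent_step_sketch(func, inputs, step_size):
    best_output, best_inputs = None, None
    # Enumerate all 3**3 = 27 combinations of perturbing each input by +step, -step, or 0.
    for deltas in product((step_size, -step_size, 0), repeat=len(inputs)):
        candidate = [x + d for x, d in zip(inputs, deltas)]
        output = func(*candidate)
        if best_output is None or output > best_output:
            best_output, best_inputs = output, candidate
    return best_output, best_inputs
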
Returns repository url from Cekit config file's repositories section | def _get_repo_url(self, descriptor):
configured_repositories = config.get('repositories')
# We need to remove the custom "__name__" element before we can show
# which repository keys are defined in the configuration
configured_repository_names = configured_repositories.keys()
if '__name__' in configured_repository_names:
configured_repository_names.remove('__name__')
if descriptor['name'] not in configured_repositories:
if len(configured_repository_names):
logger.warning("Package repository '%s' used in descriptor is not "
"available in Cekit configuration file. "
"Available repositories: %s"
% (descriptor['name'], ' '.join(configured_repository_names)))
else:
logger.warning("Package repository '%s' used in descriptor is not "
"available in Cekit configuration file. "
% descriptor['name'])
return None
return configured_repositories[descriptor['name']] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def repo_url(self):\n return self._repo_url",
"def _get_config(repository, config):\n confp = ConfigParser()\n for config_path in CONF_LOCATIONS:\n confp.read(config_path / 'config.ini')\n\n local_config = Path(repository) / 'cricic/config.ini'\n confp.read(local_config)\n confp.read(config)\n return confp",
"def _fetch_base_urls(repository_url):\n repo_config = _url_as_ini_file(repository_url)\n config = configparser.ConfigParser()\n config.read_file(repo_config)\n\n base_urls = list()\n for repo in config.sections():\n base_urls.append((config.get(repo, 'name'),\n config.get(repo, 'baseurl')))\n\n return base_urls",
"def get_repository_uri(self) -> str:\n raise NotImplementedError",
"def lookup_scm_url(package_location):\n scm_cfg = configparser.ConfigParser()\n if os.path.exists('%s/.git' % package_location):\n scm_cfg.read('%s/.git/config' % package_location)\n if 'remote \"origin\"' in scm_cfg:\n return scm_cfg['remote \"origin\"'].get('url')\n elif os.path.exists('%s/.hg' % package_location):\n scm_cfg.read('%s/.hg/hgrc' % package_location)\n if 'paths' in scm_cfg:\n return scm_cfg['paths'].get('default')",
"def RepositoryUrl(name):\n repository = ndb.Key(Repository, name).get()\n if not repository:\n raise KeyError('Unknown repository name: ' + name)\n return repository.urls[0]",
"def repo_config(self, repo_config, args=None):\n return repo_config",
"def config_url(config):\n if 'url' not in config:\n raise Exception('The config file does not contain \"url\"')\n return config['url']",
"def get_repo_url(package_name):\n package_info = get_package_info(package_name)\n\n if package_info and package_info.get('links'):\n links = package_info['links']\n\n if links.get('repository'):\n return links['repository']",
"def getProjectURL():",
"def _github_config(self, config_file_name):\n home = os.path.abspath(os.environ.get('HOME', ''))\n config_file_path = os.path.join(home, config_file_name)\n return config_file_path",
"def svn_info_t_repos_root_URL_get(svn_info_t_self): # real signature unknown; restored from __doc__\n return \"\"",
"def config_file_address() -> str:\n\n config_files = json_files_from_folder(\"config\")\n config_file = choose_config(config_files) # Choice a config file if there is more then 1 in config folder\n return config_file",
"def getUrls(self):\n # in case you need to move from a read only Url to a writeable one, here it gets replaced\n repopath = self.repositoryUrl().replace(\"[git]\", \"\")\n repoString = utils.replaceVCSUrl(repopath)\n [repoUrl, repoBranch, repoTag] = utils.splitVCSUrl(repoString)\n if not repoBranch and not repoTag:\n repoBranch = \"master\"\n print(\"|\".join([repoUrl, repoBranch, repoTag]))\n return True",
"def query_repo_url(repo_name):\n return buildapi.query_repo_url(repo_name)",
"def repositories(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ConfigurationServiceGitRepositoryArgs']]]]:\n return pulumi.get(self, \"repositories\")",
"def test_config_repository(self):\n self._ucr({\n 'repository/online': 'no',\n 'repository/online/server': 'example.net',\n 'repository/online/port': '1234',\n 'repository/online/prefix': 'prefix',\n 'repository/online/sources': 'yes',\n 'repository/online/httpmethod': 'POST',\n })\n self.u.config_repository()\n self.assertFalse(self.u.online_repository)\n self.assertEqual(self.u.repository_server, 'example.net')\n self.assertEqual(self.u.repository_port, '1234')\n self.assertEqual(self.u.repository_prefix, 'prefix')\n self.assertTrue(self.u.sources)\n self.assertEqual(U.UCSHttpServer.http_method, 'POST')",
"def query_repos(self):\n return [self.config[\"repo\"]]",
"def get_config(name):\n db = dbm.open(config_file, 'c')\n url = db[name]\n db.close()\n return url",
"def config_path(self):\n if os.path.exists(self._config_path):\n if pyhocon.ConfigFactory.parse_file(self._config_path):\n return os.path.realpath(self._config_path)\n # TODO if string is url/git repo, download file locally first\n return None",
"def repo_info():\n return TEST_REPOS_INFO[0]",
"def add_repo_url(image, repository, repositories):\n try:\n path = repositories[repository]\n path = path.strip(\"/\").replace(\"https://\", \"\").replace(\"http://\", \"\")\n image = \"/\".join([path, image])\n except KeyError:\n raise KeyError(f\"Repository {repository} not defined!\")\n return image",
"def base_path(self):\n return \"/repos/{}/{}\".format(self.owner, self.label)",
"def _get_base_url(self):\n template = config.baseurl_template\n # get distro name and arch\n base_url = template.format(\n host=config.gitbuilder_host,\n proj=self.project,\n pkg_type=self.pkg_type,\n arch=self.arch,\n dist=self.distro,\n flavor=self.flavor,\n uri=self.uri_reference,\n )\n return base_url",
"def get_repository_uri(self) -> str:\n try:\n url = subprocess.check_output(\n ['git', 'config', '--get', 'remote.origin.url']\n ).decode('utf-8').strip()\n return self.norm_uri(url)\n except subprocess.CalledProcessError as error:\n # no remote origin defined, log and continue\n logger.debug('Unable to get remote origin {}'.format(str(error)))\n return None",
"def get_remote(repo, name='origin'):\n config_name = 'remote.{}.url'.format(name)\n return subprocess.check_output(['git', 'config', '--get',\n config_name], cwd=repo).rstrip()",
"def __get_repo_url_by_name(self, name, repos_list):\n for repo in repos_list:\n if repo['name'] == name:\n return repo['commits_url'].split('{')[0]",
"def config(self,item):\n config = GitConfigParser(environ['HOME']+\"/.gitconfig\")\n return config.get_value('meta', item)",
"def source_repo_url(branch_url_mode, vcs, source_repo, source_repo_branch):\n return {\n 'short': source_repo_branch,\n 'medium': '{source_repo.strpath}#{source_repo_branch}'.format(**locals()),\n 'long': '{vcs}+{source_repo.strpath}#{source_repo_branch}'.format(**locals())\n }[branch_url_mode]",
"def get_config_from_remote_git(git_url):\n raise ConfigError('%s is an URL to a git repo but this functionality is '\n 'currently unsupported' % (git_url))"
]
| [
"0.68390673",
"0.6761129",
"0.6745644",
"0.67428726",
"0.6676351",
"0.6610542",
"0.6607609",
"0.65621877",
"0.6534249",
"0.6315402",
"0.6299453",
"0.6285862",
"0.62839335",
"0.62542546",
"0.6166379",
"0.61340797",
"0.61335737",
"0.6127935",
"0.611482",
"0.61136746",
"0.61121434",
"0.6074674",
"0.60499126",
"0.6043083",
"0.60257965",
"0.6014845",
"0.59791064",
"0.595841",
"0.5938208",
"0.59317327"
]
| 0.749481 | 0 |
Solver for 1D diffusion equations with constant diffusion coefficient and | def diffusion_solver(I, alpha=1, L=1, Nx=11, T=1, F=0.5, udim='1D'):
assert alpha > 0, f'Diffusion coefficient alpha must be greater than 0'
assert F <= 0.5, \
f'Stability criterion F=alpha*dt/dx**2 <= 0.5 not satisfied with F={F}'
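    # Assumes `numpy` is available as `np` at module level and homogeneous
    # Dirichlet boundary conditions u(0, t) = u(L, t) = 0.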
x = np.linspace(0, L, Nx + 1) # mesh points in space
dx = x[1] - x[0] # constant mesh spacing in x
dt = F * dx**2 / alpha # constant mesh spacing in t
Nt = int(T / dt)
t = np.linspace(0, T, Nt + 1) # mesh points in time
u_arr = np.zeros((Nt + 1, Nx + 1)) # solution array
u = np.zeros(Nx + 1) # 1D solution array
u1 = I(x) # initial condition
u1[0] = u1[Nx] = 0.0 # boundary conditions
u_arr[0] = u1
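    # Time loop: explicit forward Euler in time, central differences in space
    # (FTCS): u_i^{n+1} = u_i^n + F*(u_{i-1}^n - 2*u_i^n + u_{i+1}^n),
    # where F = alpha*dt/dx**2 is the mesh Fourier number.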
for n in range(0, Nt):
# Update all inner points
u[1:Nx] = u1[1:Nx] + F * (u1[0:Nx - 1] - 2 * u1[1:Nx] + u1[2:Nx + 1])
# Boundary conditions
u[0] = 0.0
u[Nx] = 0.0
        u_arr[n + 1] = u  # row 0 keeps the initial condition; later rows hold levels 1..Nt
# Update u1 before next step
u1, u = u, u1 # just switch references
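        # After the swap u1 references the newest time level, so it holds the
        # final solution once the loop finishes.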
if udim == '2D':
return u_arr, x, t
else:
        return u1, x, t | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def define_ufl_equations_diff(self):\n\n # Derivatives of velocity integration equation.\n if self.f1 != 0:\n self.df1_du = dlf.derivative(self.f1, self.displacement, self.trial_vector)\n self.df1_dv = dlf.derivative(self.f1, self.velocity, self.trial_vector)\n else:\n self.df1_du = 0\n self.df1_dv = 0\n self.df1_dp = 0 # This is always zero.\n\n # Derivatives of momentum equation.\n if self.displacement != 0:\n self.df2_du = dlf.derivative(self.f2, self.displacement, self.trial_vector)\n else:\n self.df2_du = 0\n\n if self.velocity != 0:\n self.df2_dv = dlf.derivative(self.f2, self.velocity, self.trial_vector)\n else:\n self.df2_dv = 0\n\n if self.pressure != 0:\n self.df2_dp = dlf.derivative(self.f2, self.pressure, self.trial_scalar)\n else:\n self.df2_dp = 0\n\n # Derivatives of incompressibility equation.\n if self.f3 != 0:\n if self.displacement != 0:\n self.df3_du = dlf.derivative(self.f3, self.displacement, self.trial_vector)\n else:\n self.df3_du = 0\n\n if self.velocity != 0:\n self.df3_dv = dlf.derivative(self.f3, self.velocity, self.trial_vector)\n else:\n self.df3_dv = 0\n\n self.df3_dp = dlf.derivative(self.f3, self.pressure, self.trial_scalar)\n else:\n self.df3_du = 0\n self.df3_dv = 0\n self.df3_dp = 0\n\n return None",
"def diffusion_steady_fixed_source(Dims,Lengths,BCs,D,Sigma,Q, tolerance=1.0e-12, LOUD=False):\n I = Dims[0]\n J = Dims[1]\n K = Dims[2]\n L = I*J*K\n Nx = Lengths[0]\n Ny = Lengths[1]\n Nz = Lengths[2]\n \n hx,hy,hz = np.array(Lengths)/np.array(Dims)\n ihx2,ihy2,ihz2 = (1.0/hx**2,1.0/hy**2,1.0/hz**2)\n \n #allocate the A matrix, and b vector\n A = sparse.lil_matrix((L,L))\n b = np.zeros(L)\n \n temp_term = 0\n for k in range(K):\n for j in range(J):\n for i in range(I):\n temp_term = Sigma[i,j,k]\n row = coordLookup_l(i,j,k,I,J)\n b[row] = Q[i,j,k]\n #do x-term left\n if (i>0):\n Dhat = 2* D[i,j,k]*D[i-1,j,k] / (D[i,j,k] + D[i-1,j,k])\n temp_term += Dhat*ihx2\n A[row, coordLookup_l(i-1,j,k,I,J)] = -Dhat*ihx2\n else:\n bA,bB,bC = BCs[0,:]\n if (np.abs(bB) > 1.0e-8):\n if (i<I-1):\n temp_term += -1.5*D[i,j,k]*bA/bB/hx\n b[row] += -D[i,j,k]/bB*bC/hx\n A[row, coordLookup_l(i+1,j,k,I,J)] += 0.5*D[i,j,k]*bA/bB/hx\n else:\n temp_term += -0.5*D[i,j,k]*bA/bB/hx\n b[row] += -D[i,j,k]/bB*bC/hx\n else:\n temp_term += D[i,j,k]*ihx2*2.0\n b[row] += D[i,j,k]*bC/bA*ihx2*2.0\n #do x-term right\n if (i < I-1):\n Dhat = 2* D[i,j,k]*D[i+1,j,k] / (D[i,j,k] + D[i+1,j,k])\n temp_term += Dhat*ihx2\n A[row, coordLookup_l(i+1,j,k,I,J)] += -Dhat*ihx2\n else:\n bA,bB,bC = BCs[1,:]\n if (np.abs(bB) > 1.0e-8):\n if (i>0):\n temp_term += 1.5*D[i,j,k]*bA/bB/hx\n b[row] += D[i,j,k]/bB*bC/hx\n A[row, coordLookup_l(i-1,j,k,I,J)] += -0.5*D[i,j,k]*bA/bB/hx\n else:\n temp_term += -0.5*D[i,j,k]*bA/bB/hx\n b[row] += -D[i,j,k]/bB*bC/hx\n \n else:\n temp_term += D[i,j,k]*ihx2*2.0\n b[row] += D[i,j,k]*bC/bA*ihx2*2.0\n #do y-term\n if (j>0):\n Dhat = 2* D[i,j,k]*D[i,j-1,k] / (D[i,j,k] + D[i,j-1,k])\n temp_term += Dhat*ihy2\n A[row, coordLookup_l(i,j-1,k,I,J)] += -Dhat*ihy2\n else:\n bA,bB,bC = BCs[2,:]\n if (np.abs(bB) > 1.0e-8):\n if (j<J-1):\n temp_term += -1.5*D[i,j,k]*bA/bB/hy\n b[row] += -D[i,j,k]/bB*bC/hy\n A[row, coordLookup_l(i,j+1,k,I,J)] += 0.5*D[i,j,k]*bA/bB/hy\n else:\n temp_term += -0.5*D[i,j,k]*bA/bB/hy\n b[row] += -D[i,j,k]/bB*bC/hy\n else:\n temp_term += D[i,j,k]*ihy2*2.0\n b[row] += D[i,j,k]*bC/bA*ihy2*2.0\n if (j < J-1):\n Dhat = 2* D[i,j,k]*D[i,j+1,k] / (D[i,j,k] + D[i,j+1,k])\n temp_term += Dhat*ihy2\n A[row, coordLookup_l(i,j+1,k,I,J)] += -Dhat*ihy2\n else:\n bA,bB,bC = BCs[3,:]\n if (np.abs(bB) > 1.0e-8):\n if (j>0):\n temp_term += 1.5*D[i,j,k]*bA/bB/hy\n b[row] += D[i,j,k]/bB*bC/hy\n A[row, coordLookup_l(i,j-1,k,I,J)] += -0.5*D[i,j,k]*bA/bB/hy\n else:\n temp_term += 0.5*D[i,j,k]*bA/bB/hy\n b[row] += D[i,j,k]/bB*bC/hy\n \n else:\n temp_term += D[i,j,k]*ihy2*2.0\n b[row] += D[i,j,k]*bC/bA*ihy2*2.0\n #do z-term\n if (k>0):\n Dhat = 2* D[i,j,k]*D[i,j,k-1] / (D[i,j,k] + D[i,j,k-1])\n temp_term += Dhat*ihz2\n A[row, coordLookup_l(i,j,k-1,I,J)] += -Dhat*ihz2\n else:\n bA,bB,bC = BCs[4,:]\n if (np.abs(bB) > 1.0e-8):\n if (k<K-1):\n temp_term += -1.5*D[i,j,k]*bA/bB/hz\n b[row] += -D[i,j,k]/bB*bC/hz\n A[row, coordLookup_l(i,j,k+1,I,J)] += 0.5*D[i,j,k]*bA/bB/hz\n else:\n temp_term += -0.5*D[i,j,k]*bA/bB/hz\n b[row] += -D[i,j,k]/bB*bC/hz\n else: \n temp_term += D[i,j,k]*ihz2*2.0\n b[row] += D[i,j,k]*bC/bA*ihz2*2.0\n if (k < K-1):\n Dhat = 2* D[i,j,k]*D[i,j,k+1] / (D[i,j,k] + D[i,j,k+1])\n temp_term += Dhat*ihz2\n A[row, coordLookup_l(i,j,k+1,I,J)] += -Dhat*ihz2\n else:\n bA,bB,bC = BCs[5,:]\n if (np.abs(bB) > 1.0e-8):\n if (k>0):\n temp_term += 1.5*D[i,j,k]*bA/bB/hz\n b[row] += D[i,j,k]/bB*bC/hz\n A[row, coordLookup_l(i,j,k-1,I,J)] += -0.5*D[i,j,k]*bA/bB/hz\n else:\n temp_term += 0.5*D[i,j,k]*bA/bB/hz\n b[row] += 
D[i,j,k]/bB*bC/hz\n \n else:\n temp_term += D[i,j,k]*ihz2*2.0\n b[row] += D[i,j,k]*bC/bA*ihz2*2.0\n A[row,row] += temp_term\n phi,code = splinalg.cg(A,b, tol=tolerance)\n if (LOUD):\n print(\"The CG solve exited with code\",code)\n phi_block = np.zeros((I,J,K))\n for k in range(K):\n for j in range(J):\n for i in range(I):\n phi_block[i,j,k] = phi[coordLookup_l(i,j,k,I,J)]\n x = np.linspace(hx*.5,Nx-hx*.5,I)\n y = np.linspace(hy*.5,Ny-hy*.5,J)\n z = np.linspace(hz*.5,Nz-hz*.5,K)\n if (I*J*K <= 10):\n print(A.toarray())\n return x,y,z,phi_block",
"def test_solver():\n # Choice of nonlinear coefficient\n m = 2\n\n def q(u):\n return (1+u)**m\n\n def Dq(u):\n return m*(1+u)**(m-1)\n\n u_exact = Expression(\n 'pow((pow(2, m+1)-1)*x[0] + 1, 1.0/(m+1)) - 1', m=m)\n linear_solver = 'direct'\n errors = []\n for method in 'alg_Newton', 'pde_Newton':\n for J_comp in 'manual', 'automatic':\n for degree in 1, 2, 3:\n error_prev = -1\n for divisions in [(10, 10), (20, 20), (40, 40)]:\n u = solver(\n q, Dq, f, divisions, degree,\n method, J_comp,\n linear_solver,\n abs_tol_Krylov=1E-10,\n rel_tol_Krylov=1E-10,\n abs_tol_Newton=1E-10,\n rel_tol_Newton=1E-10)\n\n # Find max error\n u_e = interpolate(u_exact, u.function_space())\n import numpy as np\n error = np.abs(u_e.vector().array() -\n u.vector().array()).max()\n # Expect convergence as h**(degree+1)\n if error_prev > 0:\n frac = abs(error - error_prev/2**(degree+1))\n errors.append(frac)\n error_prev = error\n tol = 4E-5\n for error_reduction in errors:\n assert error_reduction < tol, error_reduction",
"def efSolver2(self):\n dx = self.dh[0] # dx\n dy = self.dh[1] # dy\n dz = self.dh[2] # dz\n \n \"\"\"\n for i in np.arange(0, self.ni):\n for j in np.arange(0, self.nj):\n for k in np.arange(0, self.nk):\n \"\"\"\n\n ##x-component#\n #if i==0: \n #x-component#\n \"\"\"\n if i==0: \n # forward\n self.ef[i][j][k][0] = -(-3*self.phi[i][j][k]+\\\n 4*self.phi[i+1][j][k]-\\\n self.phi[i+2][j][k])/(2*dx)\n \"\"\"\n \n # forward\n self.ef[0,0:self.nj,0:self.nk,0] = -(-3*self.phi[0,0:self.nj,0:self.nk]+\\\n 4*self.phi[1,0:self.nj,0:self.nk]-\\\n self.phi[2,0:self.nj,0:self.nk])/(2*dx)\n \n #elif i==self.ni-1: \n \"\"\"\n elif i==self.ni-1: \n # backward\n self.ef[i][j][k][0] = -(self.phi[i-2][j][k]-\\\n 4*self.phi[i-1][j][k]+\\\n 3*self.phi[i][j][k])/(2*dx)\n \"\"\" \n # backward\n self.ef[self.ni-1,0:self.nj,0:self.nk,0] = -(self.phi[self.ni-3,0:self.nj,0:self.nk]-\\\n 4*self.phi[self.ni-2,0:self.nj,0:self.nk]+\\\n 3*self.phi[self.ni-1,0:self.nj,0:self.nk])/(2*dx)\n \"\"\"\n else: \n #central\n self.ef[i][j][k][0] = -(self.phi[i+1][j][k] - \\\n self.phi[i-1][j][k])/(2*dx)\n \"\"\" \n #central\n self.ef[1:self.ni-1,0:self.nj,0:self.nk,0] = -(self.phi[2:self.ni,0:self.nj,0:self.nk] - \\\n self.phi[0:self.ni-2,0:self.nj,0:self.nk])/(2*dx)\n\n\n #y-component\n #if j==0:\n \"\"\"\n if j==0:\n self.ef[i][j][k][1] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j+1][k]-\\\n self.phi[i][j+2][k])/(2*dy)\n \n \"\"\"\n self.ef[0:self.ni,0,0:self.nk,1] = -(-3*self.phi[0:self.ni,0,0:self.nk] + \\\n 4*self.phi[0:self.ni,1,0:self.nk]-\\\n self.phi[0:self.ni,2,0:self.nk])/(2*dy)\n #elif j==self.nj-1:\n \"\"\"\n elif j==self.nj-1:\n self.ef[i][j][k][1] = -(self.phi[i][j-2][k] - \\\n 4*self.phi[i][j-1][k] +\\\n 3*self.phi[i][j][k])/(2*dy)\n \n \"\"\"\n self.ef[0:self.ni,self.nj-1,0:self.nk,1] = -(self.phi[0:self.ni,self.nj-3,0:self.nk] - \\\n 4*self.phi[0:self.ni,self.nj-2,0:self.nk] +\\\n 3*self.phi[0:self.ni,self.nj-1,0:self.nk])/(2*dy)\n #else:\n \"\"\"\n else:\n self.ef[i][j][k][1] = -(self.phi[i][j+1][k] - \\\n self.phi[i][j-1][k])/(2*dy)\n\n \"\"\"\n self.ef[0:self.ni,1:self.nj-1,0:self.nk,1] = -(self.phi[0:self.ni,2:self.nj,0:self.nk] - \\\n self.phi[0:self.ni,0:self.nj-2,0:self.nk])/(2*dy)\n\n #z-component\n '''\n if k==0:\n self.ef[i][j][k][2] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j][k+1]-\n self.phi[i][j][k+2])/(2*dz)\n \n '''\n #z-component\n #if k==0:\n self.ef[0:self.ni,0:self.nj,0,2] = -(-3*self.phi[0:self.ni,0:self.nj,0] + \\\n 4*self.phi[0:self.ni,0:self.nj,1]-\n self.phi[0:self.ni,0:self.nj,2])/(2*dz)\n\n \"\"\"\n elif k==self.nk-1:\n self.ef[i][j][k][2] = -(self.phi[i][j][k-2] - \\\n 4*self.phi[i][j][k-1] + \\\n 3*self.phi[i][j][k])/(2*dz)\n \"\"\"\n \n #elif k==self.nk-1:\n self.ef[0:self.ni,0:self.nj,self.nk-1,2] = -(self.phi[0:self.ni,0:self.nj,self.nk-3] - \\\n 4*self.phi[0:self.ni,0:self.nj,self.nk-2] + \\\n 3*self.phi[0:self.ni,0:self.nj,self.nk-1])/(2*dz) \n \"\"\"\n else:\n self.ef[i][j][k][2] = -(self.phi[i][j][k+1] - \\\n self.phi[i][j][k-1])/(2*dz)\n \"\"\"\n #else:\n self.ef[0:self.ni,0:self.nj,1:self.nk-1,2] = -(self.phi[0:self.ni,0:self.nj,2:self.nk] - \\\n self.phi[0:self.ni,0:self.nj,0:self.nk-2])/(2*dz)",
"def _redef_via_predef_eqn(self):\r\n time = self.current_T # + self.d_T\r\n\r\n self.Beta = (self.diff_scale * self.thermal_conductivity) / \\\r\n (self.convect_coeff) \r\n self.Epsilon = self.d_T * self.thermal_conductivity / \\\r\n (self.density * self.heat_capacity)\r\n\r\n # Source term.\r\n def F_func(elem, eta):\r\n x = elem.local_to_global(eta)\r\n F = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n F -= self.Epsilon * self.redef_F_laplacian(x[0], x[1], time)\r\n F += self.redef_dTdt(x[0], x[1], time) * self.d_T\r\n return elem.funcs(eta) * F\r\n\r\n self.vF_vect_vol = et.elems_2_array(self.mesh,\r\n F_func,\r\n self.node_map,\r\n gauss_mult=2) # Use double gp_1D\r\n\r\n # Boundary term.\r\n def f_func(elem, eta):\r\n n = elem.guess_normal_vector_global(eta)\r\n f = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n x = elem.local_to_global(eta)\r\n # Evaluate our boundary term.\r\n f += self.Beta * self.redef_f_norm_grad(x[0], x[1], time, n)\r\n f += self.redef_dTdt(x[0], x[1], time) * self.d_T\r\n return elem.funcs(eta) * f\r\n\r\n self.vf_vect_bound = et.edge_2_array(self.mesh,\r\n \"Boundary\",\r\n f_func,\r\n self.node_map,\r\n gauss_mult=2)",
"def f1d(t,y,float_params,sigmaI): #sigmastep is an array\n \n ## y is Ntot0 ##\n\n # unpack parameters\n Nbar, Nstar, sigma0, nu_kin_mlyperus, DoverdeltaX2 = float_params \n\n # Ntot is passed in, Fqll calculated from Ntot\n Ntot0 = np.ascontiguousarray(y)\n Nqll0 = Nbar - Nstar * np.sin(2*np.pi*(Ntot0))\n\n # Calc surface deposition, dNtot_dt before diffusion\n m = (Nqll0 - (Nbar - Nstar))/(2*Nstar)\n sigmaM = (sigmaI - m * sigma0)/(1+m*sigma0)\n depsurf = nu_kin_mlyperus * sigmaM\n dNtot_dt = depsurf\n\n # Diffusion\n dy = diffuse_1d(Nqll0,DoverdeltaX2)\n dNtot_dt += dy \n\n # Package for output, only values of dNtot\n derivs = dNtot_dt\n return derivs",
"def finite_diff(F, x0, v0, dt, M, K, C, T):\r\n\r\n ### INITIAL PARAMETERS ####\r\n\r\n # defining the number of steps of analysis = Ns\r\n Ns = int(T/dt)+1\r\n # step t0 (initial acceleration)\r\n ngl = np.shape(F)[0] # captures the number of degrees of freedom\r\n\r\n ### MODELLING THE DISPLACEMENTS ###\r\n\r\n x_before = np.zeros((ngl,1))\r\n # matrix that indicates the displacements, in each degree of freedom, along the time of \r\n # duration of analysis. Each column is a time step\r\n x = np.zeros((ngl, Ns))\r\n x[:,0] = x0[:,0]\r\n\r\n ### SOLVING INITIAL STEP ###\r\n\r\n # initial Force F0 is equivalent to the first column of the matrix of load vectors F along time\r\n aux1 = np.zeros((ngl,1))\r\n aux1[:,0] = np.copy(F[:,0])\r\n aux2 = aux1 - np.dot(C,v0) - np.dot(K,x0)\r\n a0 = np.dot(la.inv(M),aux2)\r\n # step t-1 (before initial condition)\r\n x_before = dt*dt*a0/2 - dt*v0 + x0 \r\n # step t+1 (after initial condition)\r\n C1 = M / (dt*dt) + C / (2*dt)\r\n C2 = K - 2*M / (dt*dt)\r\n C3 = M / (dt*dt) - C / (2*dt)\r\n aux3 = aux1 - np.dot(C2, x0) - np.dot(C3, x_before)\r\n x[:,1] = np.dot(la.inv(C1), aux3[:,0])\r\n\r\n ### INTEGRATING ALONG THE DURATION OS ANALYSIS ###\r\n\r\n i = 0\r\n aux4 = np.zeros((ngl,1))\r\n aux5 = np.zeros((ngl,1))\r\n aux6 = np.zeros((ngl,1))\r\n aux7 = np.zeros((ngl,1))\r\n for i in range(1,Ns-1):\r\n aux4[:,0] = np.copy(F[:,i])\r\n aux5[:,0] = np.copy(x[:,i])\r\n aux6[:,0] = np.copy(x[:,i-1])\r\n aux7[:,0] = np.copy(x[:,i+1])\r\n aux7 = np.dot(la.inv(C1), aux4 - np.dot(C2,aux5) - np.dot(C3,aux6))\r\n x[:,i+1] = np.copy(aux7[:,0])\r\n return x",
"def solve_model(init_amounts, times, neighbourhood, params):\n # init_amounts should be an array of length 3*no_cultures.\n growth_func = make_cns_model(params, neighbourhood)\n sol = odeint(growth_func, init_amounts, times)\n return np.maximum(0, sol)",
"def construct_linear_system(self):\n N=self.grid.Ncells()\n Nbc = len(self.dirichlet_bcs)\n self.Ncalc=Ncalc = N - Nbc\n\n # map cells to forced values\n dirichlet = dict( [ (c,v) for c,v,xy in self.dirichlet_bcs])\n\n self.is_calc_c = is_calc_c = np.ones(N,np.bool8)\n for c,v,xy in self.dirichlet_bcs:\n is_calc_c[c] = False\n\n # is_calc_c[self.c_mask] = False\n\n # c_map is indexed by real cell indices, and returns the matrix index\n c_map = self.c_map = np.zeros(N,np.int32)\n self.c_map[is_calc_c] = np.arange(Ncalc)\n\n dzc=self.dzc\n dzf=self.dzf\n area_c=self.area_c\n\n meth='coo' # 'dok'\n if meth == 'dok':\n A=sparse.dok_matrix((Ncalc,Ncalc),np.float64)\n else:\n # construct the matrix from a sequence of indices and values\n ij=[]\n values=[] # successive value for the same i.j will be summed\n \n b = np.zeros(Ncalc,np.float64)\n flux_per_gradient_j = -self.K_j * self.l_j * dzf / self.d_j * self.dt\n\n self.grid.edge_to_cells() # makes sure that edges['cells'] exists.\n \n for j in range(self.grid.Nedges()):\n e = self.grid.edges[j]\n ic1,ic2 = e['cells']\n \n if ic1<0 or ic2<0 or e['deleted']:\n continue # boundary edge, or deleted edge\n \n flux_per_gradient=flux_per_gradient_j[j]\n \n # this is the desired operation:\n # Cdiff[ic1] -= flux_per_gradient / (An[ic1]*dzc) * (C[ic2] - C[ic1])\n # Cdiff[ic2] += flux_per_gradient / (An[ic2]*dzc) * (C[ic2] - C[ic1])\n # Where Cdiff is row, C is col\n\n if is_calc_c[ic1] and is_calc_c[ic2]:\n mic2 = c_map[ic2]\n mic1 = c_map[ic1]\n v1=flux_per_gradient / (area_c[ic1]*dzc[ic1])\n v2=flux_per_gradient / (area_c[ic2]*dzc[ic2])\n \n if meth == 'dok':\n A[mic1,mic2] -= v1\n A[mic1,mic1] += v1\n A[mic2,mic2] += v2\n A[mic2,mic1] -= v2\n else:\n ij.append( (mic1,mic2) ) ; values.append(-v1)\n ij.append( (mic1,mic1) ) ; values.append(v1)\n ij.append( (mic2,mic2) ) ; values.append(v1)\n ij.append( (mic2,mic1) ) ; values.append(-v1)\n \n elif not ( is_calc_c[ic1] or is_calc_c[ic2] ):\n # both are dirichlet, so nothing to do\n pass\n elif not is_calc_c[ic2]:\n mic1 = c_map[ic1]\n v=flux_per_gradient / (self.area_c[ic1]*dzc[ic1])\n if meth == 'dok':\n A[mic1,mic1] += v\n else:\n ij.append( (mic1,mic1) )\n values.append(v)\n\n # roughly\n # A[1,1]*x[1] + A[1,2]*x[2] + ... = b[1]\n # but we already know x[2],\n # A[1,1]*x[1] + ... 
= b[1] - A[1,2]*x[2]\n # so flip the sign, multiply by known dirichlet value, and\n # add to the RHS\n b[mic1] += flux_per_gradient / (area_c[ic1]*dzc[ic1]) * dirichlet[ic2]\n else: # not is_calc_c[c1]\n mic2 = c_map[ic2]\n # A[mic2,mic2] += flux_per_gradient / (area_c[ic2]*dzc[ic2])\n # A[mic2,mic1] -= flux_per_gradient / (area_c[ic2]*dzc[ic2])\n\n # A[mic2,mic2]*x[2] + A[mic2,mic1]*x[1] = b[2]\n # ...\n # A[mic2,mic2]*x[2] - flux_per_gradient / (area_c[ic2]*dzc[ic2])*x[1] = b[2]\n # ...\n # A[mic2,mic2]*x[2] = b[2] + flux_per_gradient / (area_c[ic2]*dzc[ic2])*x[1]\n v=flux_per_gradient / (area_c[ic2]*dzc[ic2])\n if meth == 'dok':\n A[mic2,mic2] += v\n else:\n ij.append( (mic2,mic2) )\n values.append(v)\n b[mic2] += flux_per_gradient / (area_c[ic2]*dzc[ic2]) * dirichlet[ic1]\n\n # Used to test 'is not 0:' but modern python complains\n if isinstance(self.alpha,np.ndarray): \n for c in range(N):\n if self.is_calc_c[c]:\n mic=self.c_map[c]\n v=self.alpha[c]*self.dt\n if meth == 'dok':\n A[mic,mic] -= v\n else:\n ij.append( (mic,mic) )\n values.append(-v)\n\n # Flux boundary conditions:\n for ic,value,xy in self.neumann_bcs:\n mic=c_map[ic]\n # make mass/time into concentration/step\n # arrived at minus sign by trial and error.\n # 2023-08-04: there was a bug here that used ic2 instead of ic.\n b[mic] -= value/(area_c[ic]*dzc[ic]) * self.dt\n\n if meth == 'dok':\n self.A = sparse.coo_matrix(A)\n else:\n ijs=np.array(ij,dtype=np.int32)\n data=np.array(values,dtype=np.float64)\n A=sparse.coo_matrix( (data, (ijs[:,0],ijs[:,1]) ), shape=(Ncalc,Ncalc) )\n self.A=A\n \n # report scale to get a sense of whether dt is too large\n Ascale = A.diagonal().min()\n log.debug(\"Ascale is %s\"%Ascale)\n\n self.b = b",
"def solveDiffusionEq(v, u0, u1, a, b, T, theta, N, M, loadingBar = True):\n \n import scipy.sparse as sp\n \n #h = (b-a)/N\n #tau = T/M\n gamma = T /M /(b-a)**2 * N**2 # = tau/hยฒ\n \n x = np.linspace(a, b, num = N+1)\n t = np.linspace(0, T, num = M+1)\n \n w = np.zeros((N+1, M+1))\n w[:,0] = v(x)\n w[0,:] = u0(t)\n w[N,:] = u1(t)\n \n \"\"\"\n (6.4)\n \n (w[i,j+1] - w[i,j]) = gamma * ((1-theta)*(w[i+1,j] - 2*w[i,j] + w[i-1,j]) + theta*(w[i+1,j+1] - 2*w[i,j+1] + w[i-1,j+1]))\n \"\"\"\n \n A = sp.diags([-gamma * theta * np.ones(N-2), ( 2*gamma * theta +1)*np.ones(N-1), -gamma * theta * np.ones(N-2)], [-1, 0, 1]).toarray()\n B = sp.diags([gamma*(1-theta) * np.ones(N-2), (-2*gamma*(1-theta)+1)*np.ones(N-1), gamma*(1-theta) * np.ones(N-2)], [-1, 0, 1]).toarray()\n \n d = np.zeros((N+1,M))\n d[1] = gamma*((1-theta)*u0(t[:-1]) + theta*u0(t[1:]))\n d[N-1] = gamma*((1-theta)*u1(t[:-1]) + theta*u1(t[1:]))\n \n for j in tqdm(range(M)) if loadingBar else range(M):\n \n #A*w_j+1 = B*w_j + d_j\n \n w[1:N, j+1] = np.linalg.solve(A, B.dot(w[1:N, j]) + d[1:N, j])\n \n return w, x, t",
"def solver(u,f,n=50,m=100,t0=0,t1=1000,dt=.1,nu=1):\n \n u_new=[[u[i][j] for j in range(m)]for i in range(n)]\n loopCounter=t0\n while(loopCounter<=t1):\n for i in xrange(1,n-1):\n for j in xrange(1,m-1):\n u_new[i][j]=u[i][j] + dt * (nu*u[i-1][j] + nu*u[i][j-1] - 4*nu*u[i][j] + nu*u[i][j+1] + nu*u[i+1][j] + f[i][j])\n loopCounter+=dt\n u=[[u_new[i][j] for j in range(m)]for i in range(n)]\n\n return u",
"def potentialSolver5(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if 
(converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged",
"def diffusion(nt, nx, tmax, xmax, nu):\n # Increments\n dt = tmax/(nt-1)\n dx = xmax/(nx-1)\n plate_length = xmax\n max_iter_time = tmax\n\n alpha = nu\n delta_x = dx\n delta_t = (delta_x ** 2)/(4 * alpha)\n \n x = np.zeros(nx)\n t = np.zeros(nt)\n\n #delta_t = (delta_x ** 2)/(4 * alpha)\n gamma = (alpha * delta_t) / (delta_x ** 2)\n\n # Initialize solution: the grid of u(k, i)\n u = np.empty((nx, nt))\n\n # Initial condition everywhere inside the grid\n u_initial = np.random.uniform(low=28.5, high=55.5, size=(nx))\n\n # Boundary conditions\n u_top = 100\n u_bottom = 0.0\n\n # Set the initial condition\n u[:,0] = u_initial\n\n # Set the boundary conditions\n u[(nx-1):,:] = u_top\n u[:1,:] = u_bottom\n\n if dt <= (dx**2)/(2*alpha):\n print(\"you are lucky\")\n else: \n print(\"hmmm\",dt,(dx**2)/(4*alpha))\n for k in range(0, nt-1):\n for i in range(1, nx-1):\n u[i,k + 1] = gamma * (u[i+1][k] + u[i-1][k] - 2*u[i][k]) + u[i][k]\n\n\n # X Loop\n for i in range(0,nx):\n x[i] = i*dx\n # T Loop\n for i in range(0,nt):\n t[i] = i*dt\n return u, x, t",
"def _redef_sp1_vars(self):\r\n\r\n if len(self.fq_list) == 0:\r\n no_rad = True\r\n lst_tmp = np.matrix(np.reshape(self.lst_tmp, \r\n (self.lst_tmp.size, 1)))\r\n else: no_rad = False\r\n # The practically constants...\r\n # Big Epsilon:\r\n if self.cond == True:\r\n self.Epsilon = self.d_T * self.thermal_conductivity\r\n else:\r\n self.Epsilon = (self.diff_scale ** 2) / \\\r\n (3.0 * self.absorb_coeffs[self.rad] ** 2)\r\n # Beta:\r\n if self.cond == True:\r\n self.Beta = (self.diff_scale * self.thermal_conductivity) / \\\r\n (self.convect_coeff)\r\n else:\r\n self.Beta = (1.0 + 3.0 * self.r2) * (2.0 * self.diff_scale) / \\\r\n ((1.0 - 2.0 * self.r1) * (\r\n 3.0 * self.absorb_coeffs[self.rad]))\r\n\r\n # The feild solutions at the last timestep.\r\n # The integral vF:\r\n if self.cond == True:\r\n # The horrifically complicated F:\r\n def F_func_cond(elem, eta):\r\n F = 0.0\r\n Tn = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n F += Tn\r\n for k in range(0, len(self.fq_list)):\r\n vk = self.fq_list[k]\r\n try:\r\n vk_m = self.fq_list[k - 1]\r\n except:\r\n vk_m = self.v0_frequency\r\n absorbtion = self.absorb_coeffs[k]\r\n phi = elem.eval_elem(self.node_map, self.lst_rad[k],\r\n [eta])[0]\r\n inter1 = phi - 4.0 * sconst.pi * \\\r\n self.B_int_function(Tn, self.refr_idx_vol,\r\n vk, vk_m)\r\n inter2 = absorbtion * self.d_T / (self.diff_scale ** 2)\r\n F += inter2 * inter1\r\n return elem.funcs(eta) * F\r\n if not no_rad:\r\n # We're integrating something non-linear for SP1\r\n self.vF_vect_vol = et.elems_2_array(self.mesh,\r\n F_func_cond,\r\n self.node_map)\r\n else:\r\n # Or something easier if we're only looking at heat.\r\n self.vF_vect_vol = np.array(self.uv_vol * lst_tmp).reshape(-1)\r\n else:\r\n def F_func_radiative(elem, eta):\r\n T = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n vk = self.fq_list[self.rad]\r\n try:\r\n vk_minus = self.fq_list[self.rad - 1]\r\n except:\r\n vk_minus = self.v0_frequency\r\n n = self.refr_idx_vol\r\n F = 4.0 * sconst.pi * self.B_int_function(T, n, vk, vk_minus)\r\n return elem.funcs(eta) * F\r\n\r\n self.vF_vect_vol = et.elems_2_array(self.mesh,\r\n F_func_radiative,\r\n self.node_map)\r\n # The path integral vf:\r\n if self.cond == True:\r\n def f_func_cond(elem, eta):\r\n Tb = self.background_temperature\r\n Tn = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n n = self.refr_idx_background\r\n vk = self.v0_frequency\r\n vk_minus = 0\r\n Bb0 = self.B_int_function(Tb, n, vk, vk_minus)\r\n Bn0 = self.B_int_function(Tn, n, vk, vk_minus)\r\n B_coeff = (self.alpha * sconst.pi) / self.convect_coeff\r\n f = Tb + B_coeff * (Bb0 - Bn0)\r\n return elem.funcs(eta) * f\r\n if not no_rad:\r\n self.vf_vect_bound = et.edge_2_array(self.mesh,\r\n \"Boundary\",\r\n f_func_cond,\r\n self.node_map)\r\n else:\r\n try:\r\n self.vf_vect_bound = self.cache_tb_integral_array\r\n except AttributeError:\r\n def elem_functor(elem, eta): return elem.funcs(eta)\r\n self.cache_tb_integral_array = et.edge_2_array(self.mesh,\r\n \"Boundary\",\r\n elem_functor,\r\n self.node_map)\r\n self.cache_tb_integral_array *= self.background_temperature\r\n self.vf_vect_bound = self.cache_tb_integral_array\r\n \r\n else:\r\n # Radiation f = 4*pi*B^{(k)}(T_b, n_g)\r\n def f_func_radiative(elem, eta):\r\n T = self.background_temperature\r\n vk = self.fq_list[self.rad]\r\n try:\r\n vk_minus = self.fq_list[self.rad - 1]\r\n except:\r\n vk_minus = self.v0_frequency\r\n n = self.refr_idx_vol\r\n f = 4 * sconst.pi * self.B_int_function(T, n, vk, vk_minus)\r\n return 
elem.funcs(eta) * f\r\n\r\n self.vf_vect_bound = et.edge_2_array(self.mesh,\r\n \"Boundary\",\r\n f_func_radiative,\r\n self.node_map)\r\n assert (self.vF_vect_vol.size == self.vF_vect_vol.shape[0])\r\n assert (self.vf_vect_bound.size == self.vf_vect_bound.shape[0])\r\n assert (self.vf_vect_bound.shape[0] == \\\r\n self.vF_vect_vol.shape[0])",
"def solve(self,\n notifications = False\n ):\n\n if notifications:\n print('[info]: Solving differential equations for '+self.name+' model. ')\n \n\n \n # getting the time values\n self.days_list = np.linspace(self.tbeg,self.tend,self.npoints)\n\n # calling the odeint method to solve the diff. equations\n self.x = odeint(self.diff_eq,self.x0,self.days_list,args = (self.par,))\n '''\n Its important to note that (par_est,) is the way to define a tuple\n with just one element. When we put (par_est), the parenteses won't\n indicate a tuple\n '''\n \n #setting the variables\n self.confirmed_list = self.x[:,1] + self.x[:,2] + self.x[:,3]\n self.recovered_list = self.x[:,2]\n self.death_list = self.x[:,3]",
"def solve(self,\n notifications = False\n ):\n\n if notifications:\n print('[info]: Solving differential equations for '+self.name+' model. ')\n \n\n \n # getting the time values\n self.days_list = np.linspace(self.tbeg,self.tend,self.npoints)\n\n # calling the odeint method to solve the diff. equations\n self.x = odeint(self.diff_eq,self.x0,self.days_list,args = (self.par,))\n '''\n Its important to note that (par_est,) is the way to define a tuple\n with just one element. When we put (par_est), the parenteses won't\n indicate a tuple\n '''\n \n #setting the variables\n self.confirmed_list = self.x[:,1] + self.x[:,2] + self.x[:,3]\n self.recovered_list = self.x[:,2]\n self.death_list = self.x[:,3]",
"def dual_problem(\n states: list[np.ndarray], probs: list[float] = None, dist_method=\"min-error\"\n) -> float:\n constraints = []\n meas = []\n\n dim_x, _ = states[0].shape\n\n y_var = cvxpy.Variable((dim_x, dim_x), hermitian=True)\n objective = cvxpy.Minimize(cvxpy.trace(cvxpy.real(y_var)))\n\n dim = int(np.log2(dim_x))\n dim_list = [2] * int(np.log2(dim_x))\n sys_list = list(range(1, dim, 2))\n # dim_list = [3, 3]\n\n if dist_method == \"min-error\":\n for i, _ in enumerate(states):\n meas.append(cvxpy.Variable((dim_x, dim_x), PSD=True))\n constraints.append(\n cvxpy.real(y_var - probs[i] * states[i])\n >> partial_transpose(meas[i], sys=sys_list, dim=dim_list)\n )\n\n if dist_method == \"unambiguous\":\n for j, _ in enumerate(states):\n sum_val = 0\n for i, _ in enumerate(states):\n if i != j:\n sum_val += cvxpy.real(cvxpy.Variable()) * probs[i] * states[i]\n meas.append(cvxpy.Variable((dim_x, dim_x), PSD=True))\n constraints.append(\n cvxpy.real(y_var - probs[j] * states[j] + sum_val)\n >> partial_transpose(meas[j], sys=sys_list, dim=dim_list)\n )\n\n meas.append(cvxpy.Variable((dim_x, dim_x), PSD=True))\n constraints.append(\n cvxpy.real(y_var) >> partial_transpose(meas[-1], sys=sys_list, dim=dim_list)\n )\n\n problem = cvxpy.Problem(objective, constraints)\n sol_default = problem.solve()\n\n # print(np.around(y_var.value, decimals=3))\n\n return sol_default",
"def potentialSolver4(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if 
(converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged",
"def potentialSolver3(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if 
(converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged",
"def potentialSolver3(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if 
(converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged",
"def solver_auto_param(u_init, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, eta_step = 0.5, eta_step_tumor = 0.99, ftol = 1e-3, max_iter = 300, verbose = 0, nnls_max_iter=30):\n auto_param_obj_history = []\n auto_param_relaxed_obj_history = []\n \n eta_0 = (1/(2*np.max(B)))*0.5 #Initialize eta_0\n eta = np.array([eta_0/len(H)]*len(H))*0.9\n eta_lin = np.ones(L_lhs.shape[0])*0.01\n \n u, w_0, w, w_lin, obj_history, relaxed_obj_history = solver(u_init, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = ftol, max_iter = max_iter, verbose = verbose, nnls_max_iter=nnls_max_iter)\n # solver(u_init, eta_0, eta, T, H, alpha, gamma, B, D, C, ftol = 1e-3, max_iter = 300, verbose = verbose)\n auto_param_obj_history.append(obj_history)\n auto_param_relaxed_obj_history.append(relaxed_obj_history)\n cnstr = constraints_all(u, H, gamma, D, C, tol = 0.05, verbose = 0)\n cnstr_linear = linear_constraint(u, L_lhs, L_rhs, tol = 0.05)\n \n print('Enforcing Feasibility')\n count = 0\n num_violated = -1\n while (len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear))):\n count += 1\n num_violated_prev = np.copy(num_violated)\n num_violated_oar = len(H) - cnstr['Relaxed'].sum()\n num_violated_lin = L_lhs.shape[0] - np.sum(cnstr_linear)#(1 - int(cnstr_linear))\n num_violated = len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear))#(1 - int(cnstr_linear))\n \n print('Iter ', count, '# of violated constr:', len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear)))\n print(' Linear constraints on u violation:', L_lhs.shape[0] - np.sum(cnstr_linear))\n eta[cnstr['Relaxed'] == False] *= eta_step\n eta_lin[cnstr_linear == False] *= eta_step\n # eta_0 *= eta_step*2\n # eta_lin *= eta_step\n \n if num_violated == num_violated_prev:\n print('Increase enforcement')\n if num_violated_lin > 0:\n eta_lin[cnstr_linear == False] *= eta_step\n # eta_0 *= eta_step*2\n #eta_lin *= eta_step\n if num_violated_oar > 0:\n eta[cnstr['Relaxed'] == False] *= eta_step\n # eta_0 *= eta_step*2\n \n u, w_0, w, w_lin, obj_history, relaxed_obj_history = solver(u, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = ftol, max_iter = max_iter, verbose = verbose, nnls_max_iter=nnls_max_iter)\n # solver(u, eta_0, eta, T, H, alpha, gamma, B, D, C, ftol = ftol, max_iter = max_iter, verbose = verbose)\n auto_param_obj_history.append(obj_history)\n auto_param_relaxed_obj_history.append(relaxed_obj_history)\n cnstr = constraints_all(u, H, gamma, D, C, tol = 0.05, verbose = 0)\n cnstr_linear = linear_constraint(u, L_lhs, L_rhs, tol = 0.05)\n \n print('Enforcing Optimality')\n count = 0\n while not (len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear))):\n # (cnstr['Relaxed'].sum()-len(H)): #If nothing is violated -- enforce optimality!\n count += 1\n print('Opt Iter', count)\n obj_prev = obj_u_opt_N_fixed(u, T, alpha, B)\n u_prev = np.copy(u)\n eta_0 *= eta_step_tumor\n print('Current eta_0:', eta_0)\n if (2*eta_0)**2 <= 1e-80:\n print('zero reached')\n break\n # u, w_0, w, w_lin, obj_history, relaxed_obj_history = solver(u, eta_0, eta, T, H, alpha, gamma, B, D, C, ftol = ftol, max_iter = max_iter, verbose = verbose)\n u, w_0, w, w_lin, obj_history, relaxed_obj_history = solver(u, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = ftol, max_iter = max_iter//2, verbose = verbose, nnls_max_iter=nnls_max_iter)\n auto_param_obj_history.append(obj_history)\n auto_param_relaxed_obj_history.append(relaxed_obj_history)\n \n obj_new = 
obj_u_opt_N_fixed(u, T, alpha, B)\n if (abs(obj_new - obj_prev)/abs(obj_prev) <= 1e-4) or (obj_new > obj_prev): #two consequent iters, two times bc on iter 2 it stops anyway\n print('No improvement, increase enforcement')\n eta_step_tumor *= 0.1\n eta_0 *= eta_step_tumor\n if (2*eta_0)**2 <= 1e-80:\n print('zero reached')\n break\n # break\n \n cnstr = constraints_all(u, H, gamma, D, C, tol = 0.05, verbose = 0)\n cnstr_linear = linear_constraint(u, L_lhs, L_rhs, tol = 0.05)\n print('# of violated constr:', len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear)))#(1 - int(cnstr_linear)))\n \n print('Finding the correct solution:')\n u = u_prev\n eta_0 = eta_0/eta_step_tumor\n \n cnstr = constraints_all(u, H, gamma, D, C, tol = 0.05, verbose = 0)\n cnstr_linear = linear_constraint(u, L_lhs, L_rhs, tol = 0.05)\n print('# of violated constr:', len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear)))\n # print('# of violated constr:', cnstr['Relaxed'].sum()-len(H))\n print(\"OBJJJJJ:\", obj_u_opt_N_fixed(u, T, alpha, B))\n return u, w_0, w, w_lin, eta_0, eta, eta_lin, auto_param_obj_history, auto_param_relaxed_obj_history",
"def solve(self):",
"def solve(self):\n \n # Check if cost is available for both estimators\n if not self.est0.cost_avail or not self.est1.cost_avail:\n self.comp_cost = False\n \n # Initial estimate from the input node\n if self.comp_cost:\n z0, zvar0, cost0 = self.est0.est_init(return_cost=True)\n else:\n z0, zvar0 = self.est0.est_init(return_cost=False)\n cost0 = 0\n self.z0 = z0\n self.zvar0 = zvar0\n self.cost0 = cost0\n \n # Initialize other variables\n self.var_cost0 = 0\n self.var_cost1 = 0\n self.cost = 0\n self.s = np.zeros(self.shape1)\n \n for it in range(self.nit):\n \n # Forward transform to est1\n t0 = time.time()\n rvar1_new = self.A.var_dot(self.zvar0)\n rvar1_rep = common.repeat_axes(rvar1_new,self.shape1,\\\n self.var_axes1,rep=False)\n z1_mult = self.A.dot(self.z0)\n r1_new = z1_mult - rvar1_rep*self.s\n \n # Damping\n if it > 0: \n self.r1 = (1-self.step)*self.r1 + self.step*r1_new\n self.rvar1 = (1-self.step)*self.rvar1 + self.step*rvar1_new\n else:\n self.r1 = r1_new\n self.rvar1 = rvar1_new\n\n # Estimator 1 \n if self.comp_cost: \n z1, zvar1, cost1 = self.est1.est(self.r1, self.rvar1, return_cost=True) \n if not self.map_est:\n cost1 -= self.cost_adjust(self.r1,z1,self.rvar1,zvar1,\\\n self.shape1,self.var_axes1)\n else:\n z1, zvar1 = self.est1.est(self.r1, self.rvar1, return_cost=False) \n cost1 = 0\n self.z1 = z1\n self.zvar1 = zvar1\n self.cost1 = cost1 \n con_new = np.mean(np.abs(z1-z1_mult)**2) \n \n # Reverse nonlinear transform to est 0\n self.s = (self.z1-self.r1)/rvar1_rep\n self.sprec = 1/self.rvar1*(1-self.zvar1/self.rvar1)\n t1 = time.time()\n self.time_est1 = t1-t0\n \n # Reverse linear transform to est 0 \n rvar0_new = 1/self.A.var_dotH(self.sprec)\n rvar0_rep = common.repeat_axes(rvar0_new,self.shape0,\\\n self.var_axes0,rep=False)\n r0_new = self.z0 + rvar0_rep*self.A.dotH(self.s)\n \n # Damping\n if it > 0:\n self.r0 = (1-self.step)*self.r0 + self.step*r0_new\n self.rvar0 = (1-self.step)*self.rvar0 + self.step*rvar0_new\n else:\n self.r0 = r0_new\n self.rvar0 = rvar0_new\n \n \n # Estimator 0\n if self.comp_cost:\n z0, zvar0, cost0 = self.est0.est(self.r0, self.rvar0, return_cost=True)\n if not self.map_est:\n cost0 -= self.cost_adjust(self.r0,z0,self.rvar0,zvar0,\\\n self.shape0,self.var_axes0)\n \n else:\n z0, zvar0 = self.est0.est(self.r0, self.rvar0, return_cost=False)\n cost0 = 0\n self.z0 = z0\n self.zvar0 = zvar0\n self.cost0 = cost0 \n\n \n # Compute total cost and constraint \n cost_new = self.cost0 + self.cost1 \n if not self.map_est:\n cost_new += self.cost_gauss()\n \n # Step size adaptation\n if (self.step_adapt) and (it > 0):\n if (con_new < self.con):\n self.step = np.minimum(1,self.step_inc*self.step)\n else:\n self.step = np.maximum(self.step_min, self.step_dec*self.step)\n self.cost=cost_new\n self.con=con_new\n \n t2 = time.time()\n self.time_est0 = t2-t1\n self.time_iter = t2-t0\n \n # Print progress\n if self.prt_period > 0:\n if (it % self.prt_period == 0):\n if self.comp_cost:\n print(\"it={0:4d} cost={1:12.4e} con={2:12.4e} step={3:12.4e}\".format(\\\n it, self.cost, self.con, self.step))\n else:\n print(\"it={0:4d} con={1:12.4e}\".format(\\\n it, self.con))\n \n # Save history\n self.save_hist()",
"def solver(u_init, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = 1e-3, max_iter = 5000, verbose = 0, nnls_max_iter=30):\n\n # Raise('NotImplementedError: only adjusted the arguments.')\n #Need to incorporate L_lhs into stacked and appropriate w_lin updates, u_update and eta_lin increments\n #precompute the expensive operation:\n lin_penalties = 1/np.sqrt(2*eta_lin)\n eta_T_H_L_stacked = scipy.sparse.vstack([T.multiply(1/np.sqrt(2*eta_0))] + [H[i].multiply(1/np.sqrt(2*eta[i])) for i in range(len(H))] + [L_lhs.multiply(lin_penalties[:,None])])\n #!!!!\n# premultiplied_lhs = eta_T_H_stacked.T.dot(eta_T_H_stacked).toarray()\n #!!!!\n u_prev = u_init + 1\n u = u_init\n count = 0\n obj_history = []\n relaxed_obj_history = [-1, 0.1] #just two initial values to enter the loop\n while np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2]) > ftol and count < max_iter:#np.linalg.norm(u - u_prev, np.inf) > 1e-3 and count < max_iter: #Maybe all of them stop changing\n start = time.time()\n \n u_prev = np.copy(u)\n w_0 = w_0_update(eta_0, u, T, alpha, B) \n w = w_update(u, H, gamma, D, C) \n w_lin = w_lin_update(u, L_lhs, L_rhs)\n# u = u_update(eta_0, eta, w_0, w, eta_T_H_stacked, nnls_max_iter=50)\n #!!!!\n # u = u_update(eta_0, eta, w_0, w, eta_T_H_L_stacked, nnls_max_iter=30)\n u = u_update(eta_0, eta, eta_lin, w_0, w, w_lin, eta_T_H_L_stacked, premultiplied_lhs = None, nnls_max_iter=nnls_max_iter)\n #!!!!\n count += 1 \n if count == 10:\n u_inf = np.copy(u)\n w_0_inf = w_0[:]\n w_inf = w[:]\n w_lin_inf = w_lin[:]\n if count > 10 and np.abs(cur_obj) > 1e+15: #HANDLE THIS BETTER!!!\n print('INFINITY! RETURNING u at the 10-th iteration to enter the feasibility loop')\n return u_inf, w_0_inf, w_inf, w_lin_inf, obj_history, relaxed_obj_history\n \n cur_obj = obj_u_opt_N_fixed(u, T, alpha, B)\n obj_history.append(cur_obj)\n cur_relaxed_obj = relaxed_obj_u_opt_N_fixed(u, w_0, w, w_lin, eta_0, eta, eta_lin, T, H, L_lhs, alpha, B)\n # relaxed_obj_u_opt_N_fixed(u, w_0, w, eta_0, eta, T, H, alpha, B)\n relaxed_obj_history.append(cur_relaxed_obj) \n \n stop = time.time()\n duration = stop-start\n \n if count%1 == 0 and verbose: \n stopping_criterion = np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2])\n print(' iter = {}, stopping criterion:{}, OBJ {}'.format(count, stopping_criterion, cur_obj))\n print(' This iteration took: {}'.format(duration))\n return u, w_0, w, w_lin, obj_history, relaxed_obj_history",
"def build_linear_system(u, dt, dx, D = 3, P = 3,time_diff = 'poly',space_diff = 'poly',lam_t = None,lam_x = None, width_x = None,width_t = None, deg_x = 5,deg_t = None,sigma = 2):\n\n n, m = u.shape\n\n if width_x == None: width_x = n/10\n if width_t == None: width_t = m/10\n if deg_t == None: deg_t = deg_x\n\n # If we're using polynomials to take derviatives, then we toss the data around the edges.\n if time_diff == 'poly': \n m2 = m-2*width_t\n offset_t = width_t\n else: \n m2 = m\n offset_t = 0\n if space_diff == 'poly': \n n2 = n-2*width_x\n offset_x = width_x\n else: \n n2 = n\n offset_x = 0\n\n if lam_t == None: lam_t = 1.0/m\n if lam_x == None: lam_x = 1.0/n\n\n ########################\n # First take the time derivaitve for the left hand side of the equation\n ########################\n ut = np.zeros((n2,m2), dtype=u.dtype)\n\n if time_diff == 'FDconv':\n Usmooth = np.zeros((n,m), dtype=u.dtype)\n # Smooth across x cross-sections\n for j in range(m):\n Usmooth[:,j] = ConvSmoother(u[:,j],width_t,sigma)\n # Now take finite differences\n for i in range(n2):\n ut[i,:] = FiniteDiff(Usmooth[i + offset_x,:],dt,1)\n\n elif time_diff == 'poly':\n T= np.linspace(0,(m-1)*dt,m)\n for i in range(n2):\n ut[i,:] = PolyDiff(u[i+offset_x,:],T,diff=1,width=width_t,deg=deg_t)[:,0]\n\n elif time_diff == 'Tik':\n for i in range(n2):\n ut[i,:] = TikhonovDiff(u[i + offset_x,:], dt, lam_t)\n\n else:\n for i in range(n2):\n ut[i,:] = FiniteDiff(u[i + offset_x,:],dt,1)\n \n ut = np.reshape(ut, (n2*m2,1), order='F')\n\n ########################\n # Now form the rhs one column at a time, and record what each one is\n ########################\n\n u2 = u[offset_x:n-offset_x,offset_t:m-offset_t]\n Theta = np.zeros((n2*m2, (D+1)*(P+1)), dtype=u.dtype)\n ux = np.zeros((n2,m2), dtype=u.dtype)\n rhs_description = ['' for i in range((D+1)*(P+1))]\n\n if space_diff == 'poly': \n Du = {}\n for i in range(m2):\n Du[i] = PolyDiff(u[:,i+offset_t],np.linspace(0,(n-1)*dx,n),diff=D,width=width_x,deg=deg_x)\n if space_diff == 'Fourier': ik = 2*np.pi*1j*np.fft.fftfreq(n, d = dx)\n \n for d in range(D+1):\n\n if d > 0:\n for i in range(m2):\n if space_diff == 'Tik': ux[:,i] = TikhonovDiff(u[:,i+offset_t], dx, lam_x, d=d)\n elif space_diff == 'FDconv':\n Usmooth = ConvSmoother(u[:,i+offset_t],width_x,sigma)\n ux[:,i] = FiniteDiff(Usmooth,dx,d)\n elif space_diff == 'FD': ux[:,i] = FiniteDiff(u[:,i+offset_t],dx,d)\n elif space_diff == 'poly': ux[:,i] = Du[i][:,d-1]\n elif space_diff == 'Fourier': ux[:,i] = np.fft.ifft(ik**d*np.fft.fft(u[:,i]))\n else: ux = np.ones((n2,m2), dtype=u.dtype) \n \n for p in range(P+1):\n Theta[:, d*(P+1)+p] = np.reshape(np.multiply(ux, np.power(u2,p)), (n2*m2), order='F')\n\n if p == 1: rhs_description[d*(P+1)+p] = rhs_description[d*(P+1)+p]+'u'\n elif p>1: rhs_description[d*(P+1)+p] = rhs_description[d*(P+1)+p]+'u^' + str(p)\n if d > 0: rhs_description[d*(P+1)+p] = rhs_description[d*(P+1)+p]+\\\n 'u_{' + ''.join(['x' for _ in range(d)]) + '}'\n\n return ut, Theta, rhs_description",
"def potentialSolver2(self, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged",
"def diffuse_CN(self, dt):\n\n gr = self.grid\n phi = gr.phi\n\n phinew = gr.scratch_array()\n\n alpha = self.k*dt/gr.dx**2\n\n # create the RHS of the matrix\n gr.fill_BCs()\n R = 0.5*self.k*dt*self.lap()\n R = R[gr.ilo:gr.ihi+1]\n R += phi[gr.ilo:gr.ihi+1]\n\n # create the diagonal, d+1 and d-1 parts of the matrix\n d = (1.0 + alpha)*np.ones(gr.nx)\n u = -0.5*alpha*np.ones(gr.nx)\n u[0] = 0.0\n\n l = -0.5*alpha*np.ones(gr.nx)\n l[gr.nx-1] = 0.0\n\n # set the boundary conditions by changing the matrix elements\n\n # homogeneous neumann\n d[0] = 1.0 + 0.5*alpha\n d[gr.nx-1] = 1.0 + 0.5*alpha\n\n # Dirichlet\n #d[0] = 1.0 + 1.5*alpha\n #d[gr.nx-1] = 1.0 + 1.5*alpha\n\n #R[0] += alpha*phi1\n #R[gr.nx-1] += alpha*phi1\n\n # solve\n A = np.matrix([u,d,l])\n phinew[gr.ilo:gr.ihi+1] = linalg.solve_banded((1,1), A, R)\n\n return phinew",
"def solve_inc(self, DU, DF, calcG=True):\n\n nu = len(self.udofs)\n np = len(self.pdofs)\n ndof = len(self.dofs)\n decompose = False\n if calcG: decompose = True\n scheme = self.scheme\n\n if calcG:\n if self.verbose and nu>500: print \" building system...\", ; sys.stdout.flush()\n self.mountG()\n\n # Mount G11.. G22 matrices\n cG = self.G.tocsc()\n self.G11 = cG[:nu , :nu ]\n self.G12 = cG[:nu , nu:]\n self.G21 = cG[ nu:, :nu ]\n self.G22 = cG[ nu:, nu:]\n cG = None # Free memory\n\n # Pick last values for disp, vel and accel\n U_0 = self.U.copy()\n Uv_0 = self.Uv.copy()\n Ua_0 = self.Ua.copy()\n\n # Mount RHS\n self.RHS = self.DF - dot(self.C, Uv_0 + (1.0-gamma)*h*Ua_0) - dot(self.K, U_0 + h*Uv_0 + (0.5-beta)*(h**2.0)*Ua_0) \n\n RHS1 = RHS[:nu]\n Ua2 = DU[nu:]\n\n # Solve linear system\n RHS2 = self.G22*Ua2 #sparse matrix * dense vector\n if nu:\n if self.verbose and nu>500: print \"solving...\", ; sys.stdout.flush()\n if scheme == \"MNR\" and decompose : self.LUsolver = factorized(self.G11)\n if scheme == \"NR\" or scheme == \"FE\": self.LUsolver = factorized(self.G11)\n U1 = scipy.sparse.linalg.spsolve(self.G11, RHS1 - self.G12*Ua2)\n RHS2 += self.G21*Ua1\n\n # updating disp, vel and accel\n self.Uv = Uv_0 + (1.0-gamma)*h*Ua_0 + gamma*h*self.Ua\n self.U = U_0 + h*Uv_0 + (0.5-beta)*(h**2.0)*Ua_0 + (h**2.0)*beta*self.Ua\n \n # calculating reactions\n self.DF = dot(self.M,self.Ua) + dot(self.C,self.Uv) + dot(self.K,self.U)\n for i in range(nu):\n self.F[self.udofs[i].eq_id] = F_bk[self.udofs[i].eq_id]\n\n # Complete vectors\n for i, dof in enumerate(self.udofs): DU[dof.eq_id] = U1[i]\n for i, dof in enumerate(self.pdofs): DF[dof.eq_id] = F2[i]\n\n if self.verbose and nu>500: print \"updating...\" ; sys.stdout.flush()\n DFint = self.update_elems_and_nodes(DU) # Also calculates DFint\n #if self.verbose: print \" done.\"\n\n R = DF - DFint\n return DFint, R",
"def term_1(\n omega1, # vorticity-1\n omega2, # vorticity-2\n omega3, # vorticity-3\n enst, # enstrophy\n nu_sgs, # turbulent viscosity\n h = True): # spatial step size\n #---------------------------------------------------------------------#\n # Setting default values #\n #---------------------------------------------------------------------#\n if h is True:\n h = 2.0*np.pi/64.0\n #---------------------------------------------------------------------#\n # Preallocating space #\n #---------------------------------------------------------------------#\n term = np.zeros((64,64,64))\n #---------------------------------------------------------------------#\n # Enstrophy term #\n #---------------------------------------------------------------------#\n term += np.gradient(\\\n np.gradient(enst, h, edge_order=2)[2], h, edge_order=2)[2]\n term += np.gradient(\\\n np.gradient(enst, h, edge_order=2)[1], h, edge_order=2)[1]\n term += np.gradient(\\\n np.gradient(enst, h, edge_order=2)[0], h, edge_order=2)[0]\n #---------------------------------------------------------------------#\n # Dissipation #\n #---------------------------------------------------------------------#\n omega1_grad = np.gradient(omega1, h, edge_order=2)\n omega2_grad = np.gradient(omega2, h, edge_order=2)\n omega3_grad = np.gradient(omega3, h, edge_order=2)\n term -= np.square(omega1_grad[2])\n term -= np.square(omega1_grad[1])\n term -= np.square(omega1_grad[0])\n term -= np.square(omega2_grad[2])\n term -= np.square(omega2_grad[1])\n term -= np.square(omega2_grad[0])\n term -= np.square(omega3_grad[2])\n term -= np.square(omega3_grad[1])\n term -= np.square(omega3_grad[0])\n #---------------------------------------------------------------------#\n # Applying the subgrid stress #\n #---------------------------------------------------------------------#\n term *= nu_sgs\n\n return term",
"def tsne(x, no_dims=2, perplexity=30.0, max_iter=1000):\n\n # Check inputs\n if isinstance(no_dims, float):\n print(\"Error: array x should have type float.\")\n return -1\n\n (n, d) = x.shape\n\n # ๅจ้\n initial_momentum = 0.5\n final_momentum = 0.8\n eta = 500\n min_gain = 0.01\n # ้ๆบๅๅงๅY\n y = np.random.randn(n, no_dims)\n # dyๆขฏๅบฆ\n dy = np.zeros((n, no_dims))\n # iyๆฏไปไน\n iy = np.zeros((n, no_dims))\n\n gains = np.ones((n, no_dims))\n\n # ๅฏน็งฐๅ\n P = seach_prob(x, 1e-5, perplexity)\n P = P + np.transpose(P)\n P = P / np.sum(P) #pij\n # early exaggeration\n # pi\\j๏ผๆๅๅคธๅคง\n print (\"T-SNE DURING:%s\" % time.clock())\n P = P * 4\n P = np.maximum(P, 1e-12)\n\n # Run iterations\n for iter in range(max_iter):\n # Compute pairwise affinities\n sum_y = np.sum(np.square(y), 1)\n num = 1 / (1 + np.add(np.add(-2 * np.dot(y, y.T), sum_y).T, sum_y))\n num[range(n), range(n)] = 0\n Q = num / np.sum(num) #qij\n Q = np.maximum(Q, 1e-12) #XไธY้ไฝๆฏ่พๅๅ
ถๅคง่
\n\n # Compute gradient\n # np.tile(A,N) ้ๅคๆฐ็ปANๆฌก [1],5 [1,1,1,1,1]\n # pij-qij\n PQ = P - Q\n # ๆขฏๅบฆdy\n for i in range(n):\n dy[i,:] = np.sum(np.tile(PQ[:,i] * num[:,i], (no_dims, 1)).T * (y[i,:] - y), 0)\n\n # Perform the update\n if iter < 20:\n momentum = initial_momentum\n else:\n momentum = final_momentum\n\n gains = (gains + 0.2) * ((dy > 0) != (iy > 0)) + (gains * 0.8) * ((dy > 0) == (iy > 0))\n gains[gains < min_gain] = min_gain\n # ่ฟญไปฃ\n iy = momentum * iy - eta * (gains * dy)\n y = y + iy\n y = y - np.tile(np.mean(y, 0), (n, 1))\n # Compute current value of cost function\\\n if (iter + 1) % 100 == 0:\n C = np.sum(P * np.log(P / Q))\n print(\"Iteration \", (iter + 1), \": error is \", C)\n if (iter+1) != 100:\n ratio = C/oldC\n print(\"ratio \", ratio)\n if ratio >= 0.95:\n break\n oldC = C\n # Stop lying about P-values\n if iter == 100:\n P = P / 4\n print(\"finished training!\")\n return y"
]
| [
"0.6330036",
"0.6144398",
"0.60684615",
"0.6064849",
"0.6052447",
"0.6048306",
"0.6007914",
"0.59658027",
"0.59391546",
"0.5922955",
"0.58881867",
"0.58848006",
"0.58755314",
"0.5860544",
"0.585488",
"0.585488",
"0.5840503",
"0.5813928",
"0.58136034",
"0.58136034",
"0.5794688",
"0.5769201",
"0.5763471",
"0.57501423",
"0.57479715",
"0.57428545",
"0.57406753",
"0.57190615",
"0.57166475",
"0.56623846"
]
| 0.67087233 | 0 |
Return a Euca2ool Object | def __init_euca(self):
if self.euca:
return
self.euca = Euca2ool() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def createCasaTool(mytool):\n if (type(casac.Quantity) != type): # casa 4.x\n myt = mytool()\n else: # casa 3.x\n myt = mytool.create()\n return(myt)",
"def exo2():",
"def Au():\n return load_material(miepy.__path__[0] + \"/materials/au.dat\")",
"def __init__(self):\n self.label = \"CDA Tools\"\n self.alias = \"\"\n\n # List of tool classes associated with this toolbox\n self.tools = [SecondaryCraterRemovalTool]",
"def __make_connection(self):\n return self.euca.make_connection()",
"def GetDataAsObject(self):",
"def __init__(self):\n self.__tModel = TeasModel(\"teasdata/tea.db\")\n self.tView = TeasView()\n self.singleTea = ()\n self.__teaholder = []\n self.__teaColumns = ((\"tea_name\", 1), (\"tea_type\", 2), (\"brand\", 7), (\"temperature\", 3), (\"package\", 6),\n (\"time\", 4), (\"price\", 10), (\"notes\", 5), (\"mood\", 11), (\"on_hand\", 9), (\"buy_again\", 8))\n self.manyTea = self.__tModel.get_teas()\n self.__edited_sect = -1",
"def __init__(self):\r\n self.label = \"Toolbox\"\r\n self.alias = \"Geodesic Densification using arcpy\"\r\n\r\n # List of tool classes associated with this toolbox\r\n self.tools = [GeodesicDensification_arcpy]",
"def to_pycuber(self) -> pycuber.Cube:\n self.soft_align_faces()\n qpos_copy = self.sim.data.qpos.copy()\n\n cubies = []\n\n for i in range(27):\n cubelet_meta = self.cubelet_meta_info[i]\n\n if cubelet_meta[\"type\"] == \"cubelet\":\n mtx = self._cubelet_rotation_matrix(cubelet_meta, qpos_copy)\n\n original_coords = cubelet_meta[\"coords\"]\n # current_coords = (mtx @ cubelet_meta['coords'].astype(float)).round().astype(int)\n\n cubie_desc = {}\n\n for prev_axis, sign in enumerate(original_coords):\n if sign != 0:\n vec = mtx[:, prev_axis] * sign\n new_axis = np.abs(vec).argmax()\n new_sign = vec[new_axis]\n\n color = PYCUBER_REVERSE_COLORS[prev_axis, sign]\n loc = PYCUBER_REVERSE_LOCATIONS[new_axis, new_sign]\n\n cubie_desc[loc] = pycuber.Square(color)\n\n if len(cubie_desc) == 3:\n cubies.append(pycuber.Corner(**cubie_desc))\n elif len(cubie_desc) == 2:\n cubies.append(pycuber.Edge(**cubie_desc))\n if cubelet_meta[\"type\"] == \"driver\":\n original_coords = cubelet_meta[\"coords\"]\n axis = np.abs(original_coords).argmax()\n sign = original_coords[axis]\n\n color = PYCUBER_REVERSE_COLORS[axis, sign]\n loc = PYCUBER_REVERSE_LOCATIONS[axis, sign]\n\n cubie_desc = {loc: pycuber.Square(color)}\n cubies.append(pycuber.Centre(**cubie_desc))\n\n return pycuber.Cube(cubies=cubies)",
"def __init__(self, attck_obj = None, **kwargs):\n\n super(AttckTools, self).__init__(**kwargs)\n self.attck_obj = attck_obj\n\n self.id = self._set_id(kwargs)\n self.name = self._set_attribute(kwargs, 'name')\n self.alias = self._set_attribute(kwargs, 'aliases')\n self.description = self._set_attribute(kwargs, 'description')\n self.reference = self._set_reference(kwargs)\n self.created = self._set_attribute(kwargs, 'created')\n self.modified = self._set_attribute(kwargs, 'modified')\n self.stix = self._set_attribute(kwargs, 'id')\n self.type = self._set_attribute(kwargs, 'type')\n self.wiki = self._set_wiki(kwargs)\n self.contributor = self._set_attribute(kwargs, 'contributor')\n\n self.set_relationships(self.attck_obj)\n\n if AttckTools.__ATTCK_C2_DATASETS is None or AttckTools.__ATTCK_TOOLS_DATASETS is None:\n try:\n data = AttckDatasets().generated_attck_data()\n except:\n raise GeneratedDatasetException('Unable to retrieve generated attack data properties')\n if AttckTools.__ATTCK_C2_DATASETS is None:\n if 'c2_data' in data:\n AttckTools.__ATTCK_C2_DATASETS = data['c2_data']\n if AttckTools.__ATTCK_TOOLS_DATASETS is None:\n if 'tools' in data:\n AttckTools.__ATTCK_TOOLS_DATASETS = data['tools']\n\n self.c2_data = self.__get_c2_dataset()\n self.external_dataset = self.__get_tools_dataset()",
"def __init__(self):\n self.label = \"Toolbox\"\n self.alias = \"\"\n\n # List of tool classes associated with this toolbox\n self.tools = [Offset]",
"def object(self):",
"def __init__(self):\n self.label = \"Create\"\n self.alias = \"\"\n\n # List of tool classes associated with this toolbox\n if core.get_pass():\n self.tools = [Fbound, Roads, Diekdikisi]\n else:\n self.tools = []",
"def get_catalog(self):\n return self",
"def __init__(self):\r\n self.label = \"OVL Tools\"\r\n self.alias = \"\"\r\n\r\n # List of tool classes associated with this toolbox\r\n self.tools = [OVLtoFeature, BatchOVLtoFeature]",
"def __init__(self):\n self.label = \"Toolbox\"\n self.alias = \"\"\n\n # List of tool classes associated with this toolbox\n self.tools = [FilesWithin, UpdateAiracInfo, CalculatePolygonRotationUTM33, CalculatePolygonRotationLCC10E, SetLayoutsNorAirac, SetLayoutsSweAirac, SetLayoutsFinDnkAirac, Export330charts]",
"def __init__(self):\r\n\t\tself.label = \"Toolbox\"\r\n\t\tself.alias = \"\"\r\n\r\n\t\t# List of tool classes associated with this toolbox\r\n\t\tself.tools = [LinkedDataSpatialQuery, LinkedDataPropertyEnrich, MergeBatchNoFunctionalProperty, MergeSingleNoFunctionalProperty, LocationPropertyPath, RelFinder]",
"def createObject(self, *args):\n return _libsbml.CompSBasePlugin_createObject(self, *args)",
"def createBasicObject(self):\n\n\t\treturn self._createBasicObjFunct(self)",
"def ConUACalc(self):\n if hasattr(self,\"con\"): return self.con\n st = self.uacalc_format(\"A\"+str(self.index))\n writefile('tmpalgCon.ua',st)\n os.system('java -classpath '+clspth+'uacalc/classes/ org.uacalc.example.ConUACalc tmpalgCon.ua >tmpoutCon.txt')\n st = readfile('tmpoutCon.txt')\n st = st[st.index(\"[\"):] # remove diagnostic output\n self.con = eval(st)\n return self.con",
"def __init__(self):\n self.label = \"Surface Generation\"\n self.alias = \"far77\"\n\n # List of tool classes associated with this toolbox\n self.tools = [LineToFar77]",
"def __init__(self):\n\n super(aero_csm_component,self).__init__()",
"def tool(self):\n return equipment_module.Equipment(self._get_attr('extraction_tool_id'))",
"def _its_tool_ ( self , typename , name = None , interface = None , createIf = True , parent = None ) :\n if not name :\n t,s,n = typename.rpartition('/')\n if t and s and n :\n typename = t\n name = n\n else : name = typename\n \n p1 = typename.find ( ':PUBLIC' )\n if 0 < p1 and p1 + 6 == len(typename) :\n typename = typename [:p1]\n \n itool = GaudiPython.Bindings.Helper.tool ( self._its , typename , name , parent , createIf ) \n if itool and interface :\n iif = GaudiPython.Bindings.InterfaceCast(interface)( itool )\n if not iif : logger.warning(\"Can't retrieve proepr interface %s for %s\" % ( interface , itool ) )\n return GaudiPython.Bindings.iAlgTool ( itool.name() , iif )\n elif not itool : logger.warning(\"Can't retrieve the tool %s'%s\" % ( typename , name ) )\n \n return GaudiPython.Bindings.iAlgTool ( name , itool )",
"def tool(self):\n if self._tool is None:\n return SE3()\n else:\n return self._tool",
"def makeCompo(self, gen):\n (cmake_text, cmake_vars) = self.additionalLibraries()\n # DSC_libs are needed for datastream ports only\n DSC_libs = \"\"\"${KERNEL_SalomeDSCContainer}\n ${KERNEL_SalomeDSCSuperv}\n ${KERNEL_SalomeDatastream}\n ${KERNEL_SalomeDSCSupervBasic}\n ${KERNEL_CalciumC}\n \"\"\"\n cmake_vars = DSC_libs + cmake_vars\n cxxfile = \"%s.cxx\" % self.name\n hxxfile = \"%s.hxx\" % self.name\n if self.kind == \"exe\":\n exe_opt = 1\n else:\n exe_opt = 0\n ret = { cxxfile:self.makecxx(gen, exe_opt),\n hxxfile:self.makehxx(gen)\n }\n sources = \" \".join(map(os.path.basename,self.sources))\n cmakelist_content = cmake_src_compo_cpp.substitute(\n module = gen.module.name,\n component = self.name,\n componentlib = self.libraryName(),\n includes = self.includes,\n sources = sources,\n libs = cmake_vars,\n find_libs = cmake_text,\n target_properties = self.targetProperties())\n if self.kind == \"exe\":\n exe_file = self.name+\".exe\"\n install_commande = \"\\nINSTALL(PROGRAMS %s DESTINATION ${SALOME_INSTALL_BINS})\\n\" % exe_file\n cmakelist_content = cmakelist_content + install_commande\n ret[exe_file] = exeCPP.substitute(compoexe=self.exe_path)\n pass\n \n ret[\"CMakeLists.txt\"] = cmakelist_content\n \n return ret",
"def get_main_object(tc):\n return Daal(tc)",
"def get(self):\n ue = UE.query.all()\n #print(ue)\n return ue",
"def New(*args, **kargs):\n obj = itkMeshSourcePSUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def getObject(language=None):"
]
| [
"0.5496832",
"0.5192484",
"0.51200885",
"0.50912255",
"0.5090922",
"0.50232905",
"0.500518",
"0.4990415",
"0.49752933",
"0.49743345",
"0.4944218",
"0.49384177",
"0.49251083",
"0.49192965",
"0.48852822",
"0.48561084",
"0.48528242",
"0.48424453",
"0.48358312",
"0.4827221",
"0.4763653",
"0.47515753",
"0.47508153",
"0.47397754",
"0.47382885",
"0.4734873",
"0.471719",
"0.47152767",
"0.47097144",
"0.4701619"
]
| 0.7435938 | 0 |
Validate one or more instance ids | def __validate_instance_id(self, instance_ids):
try:
if instance_ids:
for id in instance_ids:
self.euca.validate_instance_id(id)
except InstanceValidationError:
sys.exit(1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _validate_instance_bundles(instance_bundles, mode):\n for bundle in instance_bundles:\n if mode in ['ssh', 'sftp']:\n if not INSTANCE_ID_RE.match(bundle['instance_id']):\n raise AssertionError('Missing instance_id')",
"def check_validity_of_ids(self, user_inputs, db_ids, message=None):\n self.user_inputs = user_inputs\n self.db_ids = db_ids\n if not self.user_inputs:\n raise GraphQLError(VALIDATOR_RESPONSES['invalid-batch-id'])\n\n is_valid = [usr_input in self.db_ids for usr_input in self.user_inputs]\n\n if not all(is_valid):\n invalid_items = list(\n compress(self.user_inputs, [not item for item in is_valid]))\n if message is None:\n message = VALIDATOR_RESPONSES['invalid-product-id']\n message = message.format(\",\".join(map(str, invalid_items)))\n raise GraphQLError(message)",
"def is_valid_instance_id(version):\n return bool(INSTANCE_ID_RE.match(version))",
"def validate_instance(instance: Any) -> Any:\n attr.validate(instance)",
"def check_id(self, id):",
"def __init__(self, instance_ids: np.ndarray, instance_id: int):\n if (instance_id == -1):\n return\n self.instance_id = int(instance_id)\n self.gt_mask = (instance_ids == instance_id)\n self.instance_count = int((instance_ids == instance_id).sum())",
"def _get_instance_ids(instances):\n instance_ids = []\n for instance in instances:\n instance_ids.append(instance.id)\n return instance_ids",
"def _validate(cls, pid_value):\n blop = re.compile('^[-\\w]+$')\n if not bool(blop.match(pid_value)):\n raise ValidationError(\n 'The ID should contain only letters with numbers or dashes.',\n field_name='id',\n )",
"def get_invalid(cls, instance):\n\n others = [i for i in list(instance.context) if\n i is not instance and\n set(cls.families) & get_families(i)]\n if not others:\n return []\n\n other_ids = defaultdict(list)\n for other in others:\n for _id, members in get_instance_node_ids(other).items():\n other_ids[_id].extend(members)\n\n # Take only the ids with more than one member\n invalid = list()\n ids = get_instance_node_ids(instance)\n for _id, members in ids.iteritems():\n if _id in other_ids:\n cls.log.error(\"ID found on multiple nodes: '%s'\" % members)\n cls.log.debug(\"Clashes with: %s\" % (other_ids[_id],))\n invalid.extend(members)\n\n return invalid",
"def validateId(shortId):\n return shortId in [DockerUtil.getShortId(container) for container in DOCKER_CLIENT.containers.list()]",
"def test_entity_ids(validator) -> None:\n schema = vol.Schema(validator)\n\n options = (\n \"invalid_entity\",\n \"sensor.light,sensor_invalid\",\n [\"invalid_entity\"],\n [\"sensor.light\", \"sensor_invalid\"],\n [\"sensor.light,sensor_invalid\"],\n )\n for value in options:\n with pytest.raises(vol.MultipleInvalid):\n schema(value)\n\n options = ([], [\"sensor.light\"], \"sensor.light\")\n for value in options:\n schema(value)\n\n assert schema(\"sensor.LIGHT, light.kitchen \") == [\"sensor.light\", \"light.kitchen\"]",
"def validate_batch(self, *arg, **kwargs):\n pass",
"def test_id_bad_value(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_event_swimmer_id(val))",
"def limit_instances(sender, instance, created, *args, **kwargs):\n\tif created:\n\t\traise ValidationError(\"There can only be 1 instance of this model.\")",
"def test_duplicate_flavorids_fail(self):\n flavorid = 'flavor1'\n instance_types.create('name one', 256, 1, 120, 200, flavorid)\n self.assertRaises(exception.InstanceTypeIdExists,\n instance_types.create,\n 'name two', 256, 1, 120, 200, flavorid)",
"def validateVfabric(output ,arg_dict, key):\n id = arg_dict[key]\n counter = 0\n for char in id:\n counter += 1\n if re.compile('[0-9]+').match(char[0]) == None:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' = '%s' is not a valid Id. ID should be numeric \" % \n\t\t\t\t(key,id)))\n return None\n if counter > lib.constants._ATTR_ID_LENGHT:\n\t output.completeOutputError(InvalidArgumentCount(descape =\"'%s'='%s' is not a valid Id. \\n ID should be numeric with Length = '%s' \" % (key,id, lib.constants._ATTR_ID_LENGHT)))\n return None\n return arg_dict",
"def test_id_bad_value(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_team_swimmer_id(val))",
"def validate_list_of_ids(data, field='ids',\n max_query=settings.MAX_IDS_PER_QUERY):\n msg = None\n error = False\n make_list = False\n convert_list = False\n if field not in data:\n error = True\n msg = \"IDs must be in an array named {0}\".format(field)\n elif type(data[field]) is not list:\n if type(data[field]) is int:\n make_list = True\n else:\n error = True\n msg = \"IDs must be in an array. Got {0}\".format(type(data[field]))\n elif len(data[field]) > max_query:\n error = True\n msg = \"Maxmium of {0} IDs allowed per query.\".format(max_query)\n elif not all(isinstance(i, int) for i in data[field]):\n try:\n [int(i) for i in data[field]]\n convert_list = True\n except ValueError:\n error = True\n msg = \"IDs must be an array of integers. Characters were found.\"\n\n return {\"has_errors\": error, \"message\": msg, \"make_list\": make_list,\n \"convert_list\": convert_list}",
"def _validate(self, instance, value):",
"def test_is_valid_user_id_valid(self):\n ids = (\n \"NDcyMjY1OTQzMDYyNDEzMzMy\",\n \"NDc1MDczNjI5Mzk5NTQ3OTA0\",\n \"NDY3MjIzMjMwNjUwNzc3NjQx\",\n )\n\n for user_id in ids:\n with self.subTest(user_id=user_id):\n result = TokenRemover.is_valid_user_id(user_id)\n self.assertTrue(result)",
"def validate(self, instance, value):",
"def validate(self, instance, value):",
"def validateID(id):\n\n if re.compile('[0-9]+').match(id) == None:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' is not a valid Id. ID should be numeric with Length = '%s' \" \n\t\t\t% (id, lib.constants._ATTR_ID_LENGHT)))\n return -1\n else:\n # Check for the lenght \n counter = 0\n for char in id:\n counter += 1\n print counter , lib.constants._ATTR_ID_LENGHT\n if counter > lib.constants._ATTR_ID_LENGHT :\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' exceeded the given length i.e Max Length = '%s'\" % \n\t\t\t(id, lib.constants._ATTR_ID_LENGHT)))\n return -1\n else:\n return 0\n return 0",
"def test_duplicate_id(self):\n with self.assertRaises(ValueError):\n REANATemplate(\n workflow_spec={},\n parameters=[\n pd.parameter_declaration('A', index=1),\n pd.parameter_declaration('B'),\n pd.parameter_declaration('C'),\n pd.parameter_declaration('A', index=2),\n pd.parameter_declaration('E', index=1)\n ],\n validate=True\n )",
"def test_reservation_ids_two_instances(self):\n (refs, resv_id) = self.compute_api.create(self.context,\n self.default_flavor,\n image_href=uuids.image_href_id,\n min_count=2, max_count=2)\n self.assertEqual(len(refs), 2)\n self.assertIsNotNone(resv_id)\n for instance in refs:\n self.assertEqual(instance['reservation_id'], resv_id)",
"def _check_valid_ids(self):\n durations = []\n # check all shapes from *.npy header\n for ts_filepath in self.ts_filepaths:\n shape = data.utils.read_npy_array_header(ts_filepath)[0]\n durations += [shape[0]]\n # check durations\n most_common_dura = np.bincount(durations).argmax()\n mask_valid_ts = (durations == most_common_dura)\n non_valid_ids = np.array(self.ids)[~mask_valid_ts]\n if not mask_valid_ts.all():\n warnings.warn(\"Different shapes for sub ID(s): {}\".format(non_valid_ids))\n\n return np.array(self.ids)[mask_valid_ts]",
"def _validate_ids(self, ids):\n if type(ids) == str:\n return self.metanode_to_ids[ids]\n elif isinstance(ids, collections.Iterable):\n # String, assume ids\n if type(ids[0]) == str or type(ids[0]) == np.str_:\n # Ensure all ids belong to metanodes of the same type\n assert len(set([self.id_to_metanode[i] for i in ids])) == 1\n # Sort the ids according to their index in the adj. mats.\n return sorted(ids, key=lambda i: self.nid_to_index[i])\n\n raise ValueError()",
"def check_cloud_ids(self):\n train = set(self.all_cloud_ids['train'])\n val = set(self.all_cloud_ids['val'])\n test = set(self.all_cloud_ids['test'])\n\n assert len(train.intersection(val)) == 0 or self.val_mixed_in_train, \\\n \"Cloud ids must be unique across all the 'train' and 'val' \" \\\n \"stages, unless `val_mixed_in_train=True`\"\n assert len(val.intersection(test)) == 0 or self.test_mixed_in_val, \\\n \"Cloud ids must be unique across all the 'val' and 'test' \" \\\n \"stages, unless `test_mixed_in_val=True`\"",
"def id_is_valid(gal_id, query_id, data):\n return not ((data.cam_idx[query_id] == data.cam_idx[gal_id]) and (data.labels[query_id] == data.labels[gal_id]))",
"def testValidateId(self):\n #create a different person and try to use their id\n self.directory.invokeFactory(type_name=\"FSDPerson\",id=\"def456\",firstName=\"Joe\",lastName=\"Blow\")\n self.failUnless('def456' in self.person.validate_id('def456'))\n #create a different content object and try to use its id\n self.directory.invokeFactory(\"Document\", \"mydoc\")\n self.failUnless('mydoc' in self.person.validate_id('mydoc'))"
]
| [
"0.66104805",
"0.63021135",
"0.6231086",
"0.6120488",
"0.608665",
"0.6085082",
"0.5961724",
"0.5956122",
"0.5913075",
"0.58599216",
"0.5846487",
"0.584379",
"0.58395326",
"0.5822834",
"0.5817616",
"0.57512295",
"0.5728121",
"0.57224673",
"0.57157797",
"0.56912893",
"0.566695",
"0.566695",
"0.5640674",
"0.56383115",
"0.5626213",
"0.5620279",
"0.56157506",
"0.56002086",
"0.55673146",
"0.5561087"
]
| 0.83329064 | 0 |
Validate instance_ids and get reservations deployed | def __get_reservations(self, instance_ids=None):
if instance_ids:
self.__validate_instance_id(instance_ids)
euca_conn = self.__make_connection()
try:
return euca_conn.get_all_instances(instance_ids)
        except Exception as ex:
            self.euca.display_error_and_exit('%s' % ex)
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __get_multi_instances(self, reservations, instance_ids=None, policies=None):\n check_instance_ids = False\n if ( instance_ids and len(instance_ids) > 0 ):\n check_instance_ids = True\n instances = [] \n for reservation in reservations:\n if check_instance_ids:\n for instance in reservation.instances:\n if instance.id in instance_ids:\n instances.append(instance)\n elif policies:\n for instance in reservation.instances:\n if 'typevm' in policies and instance.instance_type == policies['typevm']:\n instances.append(instance) \n elif policies.get('level')==1:\n if self.__compare_types_instances(policies, instance.instance_type.encode(\"latin-1\")):\n instances.append(instance)\n elif policies.get('level') == 0:\n if self.__is_adaptive_instance(self.__get_metrics_adapted(policies), instance.instance_type.encode(\"latin-1\")):\n instances.append(instance)\n else:\n instances=[]\n else:\n instances += reservation.instances\n return instances, len(instances)",
"def reserved_compare(options):\n running_instances = defaultdict(dict)\n reserved_purchases = defaultdict(dict)\n regions = boto.ec2.regions()\n good_regions = [r for r in regions if r.name not in ['us-gov-west-1',\n 'cn-north-1']]\n for region in good_regions:\n if options.trace:\n print \" Scanning region {0}\".format(region.name)\n conn = region.connect()\n filters = {'instance-state-name': 'running'}\n zones = defaultdict(dict)\n\n if options.trace:\n print \" Fetching running instances\"\n reservations = conn.get_all_instances(filters=filters)\n for reservation in reservations:\n for inst in reservation.instances:\n if options.debug:\n print instance_string(inst, options, verbose=True)\n if inst.state != 'running':\n if options.debug:\n print \"Skip {0.id} state {0.state}\".format(inst)\n continue\n if inst.spot_instance_request_id:\n if options.debug:\n print \"Skip {0.id} has spot id {0.spot_instance_request_id}\".format(inst)\n continue\n if 'aws:autoscaling:groupName' in inst.tags:\n if options.debug:\n print \"Skip {0.id} is an autoscale instance\".format(inst)\n continue\n if inst.platform == 'Windows' or inst.platform == 'windows':\n if options.debug:\n print \"Skip {0.id} has platform {0.platform}\".format(inst)\n continue\n if inst.instance_type not in zones[inst.placement]:\n zones[inst.placement][inst.instance_type] = []\n zones[inst.placement][inst.instance_type].append(inst)\n\n if zones:\n running_instances[region.name] = zones\n\n purchased = defaultdict(dict)\n if options.trace:\n print \" Fetching reservations\"\n\n reserved = conn.get_all_reserved_instances()\n for r in reserved:\n if options.debug:\n print reservation_string(r, verbose=True)\n if r.state != 'active':\n continue\n if r.instance_tenancy != 'default':\n print 'WARNING: Non-default tenancy %s: %s' % (r.instance_tenancy, reservation_string(r))\n continue\n if r.instance_type not in purchased[r.availability_zone]:\n purchased[r.availability_zone][r.instance_type] = [r]\n else:\n purchased[r.availability_zone][r.instance_type].append(r)\n\n if purchased:\n reserved_purchases[region.name] = purchased\n\n return check_reservation_use(options, running_instances,\n reserved_purchases)",
"def get_all_instances(self, instance_ids=None, filters=None):\r\n params = {}\r\n if instance_ids:\r\n self.build_list_params(params, instance_ids, 'InstanceId')\r\n if filters:\r\n if 'group-id' in filters:\r\n warnings.warn(\"The group-id filter now requires a security \"\r\n \"group identifier (sg-*) instead of a group \"\r\n \"name. To filter by group name use the \"\r\n \"'group-name' filter instead.\", UserWarning)\r\n self.build_filter_params(params, filters)\r\n return self.get_list('DescribeInstances', params,\r\n [('item', Reservation)], verb='POST')",
"def get_reservations_ids(self, instance_ids=None):\n reservations = self.__get_reservations(instance_ids)\n reservations_ids = []\n for reservation in reservations:\n reservations_ids.append(reservation.id.encode(\"latin-1\"))\n\n return reservations_ids",
"def get_all_reservations(config):\n reservations = []\n region_list = regions(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for region in region_list:\n _logger.info(\"Searching %s\", region)\n cnx = region.connect(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for reservation in cnx.get_all_instances():\n _logger.info(\"Found %s %s\", reservation,\n [str(i.id) for i in reservation.instances])\n reservations.append(reservation)\n return reservations",
"def get_instances(instance_ids):\n\n instances = dict()\n conn = connect_to_region(REGION, aws_access_key_id=KEY_ID, aws_secret_access_key=ACCESS_KEY)\n try:\n reservations = conn.get_all_instances(instance_ids)\n except EC2ResponseError, ex:\n print 'Got exception when calling EC2 for instances (%s): %s' % \\\n (\", \".join(instance_ids), ex.error_message)\n return instances\n\n for r in reservations:\n if len(r.instances) and r.instances[0].id in instance_ids:\n instances[r.instances[0].id] = r.instances[0].tags[\"Name\"]\n\n return instances",
"def _instancelist(self):\n\n rv = []\n self.iname = {}\n for resv in self.conn.get_all_reservations():\n for inst in resv.instances:\n if inst.state != 'terminated':\n name = inst.tags.get('Name',None)\n rv.append([inst.id,inst.state])\n if name is not None:\n rv.append([name,inst.state])\n else:\n rv.append([inst.id+'-needsName',inst.state])\n self.iname[name] = inst.id\n self.iname[inst.id] = inst.id\n return rv",
"def get_ec2_instances(client):\n reservations = client.describe_instances().get(\"Reservations\")\n instances = list(map(lambda x: x.get(\"Instances\"), reservations))\n instances = list(itertools.chain.from_iterable(instances))\n return list(map(lambda x: {\n 'name': next((t['Value'] for t in x.get('Tags', []) if t.get('Key') == 'Name'), 'Unknown'),\n 'id': x.get('InstanceId'),\n 'state': x.get('State'),\n }, instances))",
"def get_ec2_reservations(profile, running_filter):\n try:\n ec2_client = boto3.Session(profile_name=profile).client('ec2')\n except ProfileNotFound:\n print(\"Profile: %s not found\" % profile, file=sys.stderr)\n sys.exit(1)\n filtered_instances = ec2_client.describe_instances(Filters=running_filter)\n return filtered_instances['Reservations']",
"def instances(self):\r\n # It would be more efficient to do this with filters now\r\n # but not all services that implement EC2 API support filters.\r\n instances = []\r\n rs = self.connection.get_all_instances()\r\n for reservation in rs:\r\n uses_group = [g.name for g in reservation.groups if g.name == self.name]\r\n if uses_group:\r\n instances.extend(reservation.instances)\r\n return instances",
"def Get_Running_Instances():\n ec2 = boto3.resource('ec2') \n #call the features resource from the boto3 library\n instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['pending', 'running',]},])\n #filter the instances returned using the state name\n #you can also filter using Tags by adding the filters: \n #[{'Name': 'tag-key', 'Values': ['Role','Name',]}, {'Name': 'tag-value', 'Values': ['*test*', '*TEST*',]},]\n return [instance.id for instance in instances]\n #return a liste with the ids of the instances",
"def __validate_instance_id(self, instance_ids):\n try:\n if instance_ids:\n for id in instance_ids:\n self.euca.validate_instance_id(id)\n except InstanceValidationError:\n sys.exit(1)",
"def run(self):\n ilist = []\n key_filter = filters[self.args['filter_group']]\n for item in self.client.describe_instances()['Reservations']:\n for instance in item['Instances']:\n idict = {}\n for tag in instance['Tags']:\n if not any(t['Key'] == 'Name' for t in instance['Tags']):\n tag['Value'] = 'Unnamed'\n idict['Name'] = tag['Value']\n if tag['Key'] == 'Name':\n if tag['Value'] == \"\":\n tag['Value'] = 'Unnamed'\n idict['Name'] = tag['Value']\n for key in key_filter:\n try:\n if key in ['AvailabilityZone','Tenancy']:\n idict[key] = instance['Placement'][key]\n elif key == 'SecurityGroups':\n sg_list = []\n for sg in instance[key]:\n sg_list.append(sg['GroupId'])\n if self.args['output'] == 'csv':\n sg_string = \" \\n\"\n idict[key] = sg_string.join(sg_list)\n else:\n idict[key] = ','.join(sg_list)\n elif key == 'BlockDeviceMappings':\n devices = []\n for dev in instance[key]:\n devices.append(dev['DeviceName'])\n if self.args['output'] == 'csv':\n dev_string = \" \\n\"\n idict[key] = dev_string.join(devices)\n else:\n idict[key] = ','.join(devices)\n elif key == 'State':\n idict[key] = instance[key]['Name']\n else:\n if instance[key]:\n idict[key] = instance[key]\n except Exception as e:\n idict[key] = 'N/A'\n ilist.append(idict)\n self.template(self.sortList(ilist))",
"def test_non_additive_instance_creation(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n type = 'f1.2xlarge'\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n instances.should.have.length_of(1)\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n instances.should.have.length_of(1)\n\n # There should be one instance total now, across one reservation\n ec2_client = boto3.client('ec2')\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n all_reservations.should.have.length_of(1)\n\n [i for r in all_reservations for i in r['Instances']].should.have.length_of(1)",
"def aws_get_instances_by_id(region, instance_id, raw=True):\n client = boto3.session.Session().client('ec2', region)\n try:\n matching_reservations = client.describe_instances(InstanceIds=[instance_id]).get('Reservations', [])\n except ClientError as exc:\n if exc.response.get('Error', {}).get('Code') != 'InvalidInstanceID.NotFound':\n raise\n return []\n instances = []\n [[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned\n for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]\n return instances",
"def find_instances(\n instance_id=None,\n name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n return_objs=False,\n in_states=None,\n filters=None,\n):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n filter_parameters = {\"filters\": {}}\n\n if instance_id:\n filter_parameters[\"instance_ids\"] = [instance_id]\n\n if name:\n filter_parameters[\"filters\"][\"tag:Name\"] = name\n\n if tags:\n for tag_name, tag_value in tags.items():\n filter_parameters[\"filters\"][\"tag:{}\".format(tag_name)] = tag_value\n\n if filters:\n filter_parameters[\"filters\"].update(filters)\n\n reservations = conn.get_all_reservations(**filter_parameters)\n instances = [i for r in reservations for i in r.instances]\n log.debug(\n \"The filters criteria %s matched the following instances:%s\",\n filter_parameters,\n instances,\n )\n\n if in_states:\n instances = [i for i in instances if i.state in in_states]\n log.debug(\n \"Limiting instance matches to those in the requested states: %s\",\n instances,\n )\n if instances:\n if return_objs:\n return instances\n return [instance.id for instance in instances]\n else:\n return []\n except boto.exception.BotoServerError as exc:\n log.error(exc)\n return []",
"def test_reservation_ids_two_instances(self):\n (refs, resv_id) = self.compute_api.create(self.context,\n self.default_flavor,\n image_href=uuids.image_href_id,\n min_count=2, max_count=2)\n self.assertEqual(len(refs), 2)\n self.assertIsNotNone(resv_id)\n for instance in refs:\n self.assertEqual(instance['reservation_id'], resv_id)",
"def _quota_reservations(session, context, reservations):\n\n # Get the listed reservations\n return model_query(context, models.Reservation,\n read_deleted=\"no\",\n session=session).\\\n filter(models.Reservation.uuid.in_(reservations)).\\\n with_lockmode('update').\\\n all()",
"def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n region_instances.append(instance)\n except boto.exception.EC2ResponseError:\n return []\n return region_instances",
"def _get_instance_ids(instances):\n instance_ids = []\n for instance in instances:\n instance_ids.append(instance.id)\n return instance_ids",
"def monitor_instances(self, instance_ids):\r\n params = {}\r\n self.build_list_params(params, instance_ids, 'InstanceId')\r\n return self.get_list('MonitorInstances', params,\r\n [('item', InstanceInfo)], verb='POST')",
"def fetch_instances(self, ids):\n result = []\n self.log.info(f\"fetch '{len(ids)}' instances\")\n self.log.debug(f\"fetch instance data for ids '{ids}'\")\n try:\n response = self.client.describe_instances(\n InstanceIds=ids\n )\n if 'HTTPStatusCode' in response['ResponseMetadata'] and response['ResponseMetadata']['HTTPStatusCode'] == 200:\n pass\n else:\n raise Exception(f'not able to fetch instacnes with ids: {ids}')\n if len(response['Reservations'][0]['Instances']) == 0:\n raise Exception(f'should retrun at least single insatance data')\n result = []\n for reservation in response[\"Reservations\"]:\n for el in reservation[\"Instances\"]:\n ec2 = EC2Instance.factory(el)\n if ec2.state:\n result.append(ec2)\n else:\n self.log.warn(f'instance \"{ec2.id}\" excluded')\n except Exception as e:\n raise Exception(f'exception when trying to fetch instance data {ids}')\n return sorted(list(result), key=lambda instance: instance.launch_time)",
"def get_wharton_gsr_reservations():\n\n sessionid = get_wharton_sessionid()\n\n if not sessionid:\n return jsonify({'error': 'No Session ID provided.'})\n\n try:\n reservations = wharton.get_reservations(sessionid)\n save_wharton_sessionid()\n return jsonify({'reservations': reservations})\n except APIError as e:\n return jsonify({\"error\": str(e)}), 400",
"def _get_instances(instance_tags, region):\n return ec2_conn[region].get_all_instances(filters={\"tag:Name\": instance_tags})",
"def get_instances_ids(self):\n reservations = self.__get_reservations()\n instances_ids = []\n instances,_ = self.__get_multi_instances(reservations)\n for instance in instances:\n instances_ids.append(instance.id.encode(\"latin-1\"))\n return instances_ids",
"def test_can_create_multiple_instance_tags(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n instances = launch_instances('f1.2xlarge', 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags={'fsimcluster': 'testcluster', 'secondtag': 'secondvalue'})\n instances.shouldnt.be.empty\n\n ids = [i.id for i in instances]\n ids.shouldnt.be.empty\n\n ec2_client = boto3.client('ec2')\n\n paginator = ec2_client.get_paginator('describe_instances')\n\n operation_params = {\n 'InstanceIds': ids\n }\n page_iterator = paginator.paginate(**operation_params)\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n\n tags = {t['Key']:t['Value'] for t in all_reservations[0]['Instances'][0]['Tags']}\n tags.should.have.key('fsimcluster')\n tags['fsimcluster'].should.equal('testcluster')\n tags.should.have.key('secondtag')\n tags['secondtag'].should.equal('secondvalue')",
"def running_instances(hostnames=None):\n\n global api\n\n all_inst = []\n try:\n all_inst = api.get_all_instances()\n except Exception, e:\n logging.error(\"Can't get list of instances (maybe wrong credentials?)\")\n return None\n\n # Resolve IPs\n if hostnames is not None:\n ips = []\n for h in hostnames:\n try:\n ipv4 = gethostbyname(h)\n ips.append(ipv4)\n except Exception:\n # Don't add host if IP address could not be found\n logging.warning(\"Ignoring hostname %s: can't reslove IPv4 address\" % h)\n ips=list(set(ips))\n\n if hostnames is not None:\n logging.debug(\"Input hostnames: %s\" % (','.join(hostnames)))\n logging.debug(\"Input IPs: %s\" % (','.join(ips)))\n else:\n logging.debug(\"No input hostnames given\")\n\n # Add only running instances\n inst = []\n for i in all_inst:\n if i.status(token_id=api.keystone.token_id) == 'running':\n if hostnames is None:\n # Append all\n inst.append(i)\n else:\n found = False\n for ipv4 in ips:\n if i.network_ip(network_name=cf[\"api\"][\"network_name\"]) == ipv4:\n inst.append(i)\n logging.debug(\"Found IP %s corresponding to instance\" % ipv4)\n found = True\n break\n if not found:\n logging.warning(\"Cannot find instance %s in the list of known IPs\" % i.network_ip(network_name=cf[\"api\"][\"network_name\"]))\n\n return inst",
"def get_reservations():\n start_date = request.args.get('start')\n end_date = request.args.get('end')\n\n if start_date is not None and end_date is not None:\n start = parse_datetime(request.json['start'])\n end = parse_datetime(request.json['end'])\n if start is None or end is None:\n abort(400, 'cannot parse start or end date')\n\n reservations = Reservation.query.filter(\n Reservation.end >= start, Reservation.start <= end)\n else:\n reservations = Reservation.query.filter(\n or_(Reservation.start >= datetime.datetime.now(),\n Reservation.end >= datetime.datetime.now()))\n\n reservations = map(lambda x: x.as_dict(), reservations)\n\n return json.dumps(reservations)",
"def reservations(self):\n session_id = plone_session.get_session_id(self.context)\n return db.reservations_by_session(session_id).all()",
"def start_stop_instances(instances, schedule):\n for reservation in instances:\n for instance in reservation.instances:\n region = instance.placement\n if instance.state == 'running' and _get_desired_state(schedule) == 'stop':\n print \"Should stop \" + instance.id + \".\"\n instance.stop()\n elif instance.state == 'stopped' and _get_desired_state(schedule) == 'start':\n print \"Should start \" + instance.id + \".\"\n instance.start()\n else:\n print \"Nothing to do.\""
]
| [
"0.688962",
"0.67898965",
"0.64505816",
"0.6446688",
"0.6443735",
"0.6300012",
"0.6249685",
"0.62233186",
"0.61486757",
"0.6136195",
"0.6116925",
"0.60838884",
"0.60401386",
"0.6009709",
"0.6004771",
"0.5986708",
"0.59733444",
"0.59473145",
"0.5845798",
"0.5832438",
"0.5815837",
"0.5811871",
"0.5792365",
"0.57671523",
"0.5746863",
"0.5746123",
"0.5745353",
"0.5729091",
"0.57234585",
"0.5715515"
]
| 0.7164238 | 0 |
Get the ids of deployed reservations | def get_reservations_ids(self, instance_ids=None):
reservations = self.__get_reservations(instance_ids)
reservations_ids = []
for reservation in reservations:
reservations_ids.append(reservation.id.encode("latin-1"))
return reservations_ids | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_instances_ids(self):\n reservations = self.__get_reservations()\n instances_ids = []\n instances,_ = self.__get_multi_instances(reservations)\n for instance in instances:\n instances_ids.append(instance.id.encode(\"latin-1\"))\n return instances_ids",
"def get_all_reservations(config):\n reservations = []\n region_list = regions(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for region in region_list:\n _logger.info(\"Searching %s\", region)\n cnx = region.connect(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for reservation in cnx.get_all_instances():\n _logger.info(\"Found %s %s\", reservation,\n [str(i.id) for i in reservation.instances])\n reservations.append(reservation)\n return reservations",
"def reservations(self):\n session_id = plone_session.get_session_id(self.context)\n return db.reservations_by_session(session_id).all()",
"def ReserveIds(self, request, global_params=None):\n config = self.GetMethodConfig('ReserveIds')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def _quota_reservations(session, context, reservations):\n\n # Get the listed reservations\n return model_query(context, models.Reservation,\n read_deleted=\"no\",\n session=session).\\\n filter(models.Reservation.uuid.in_(reservations)).\\\n with_lockmode('update').\\\n all()",
"def get_wharton_gsr_reservations():\n\n sessionid = get_wharton_sessionid()\n\n if not sessionid:\n return jsonify({'error': 'No Session ID provided.'})\n\n try:\n reservations = wharton.get_reservations(sessionid)\n save_wharton_sessionid()\n return jsonify({'reservations': reservations})\n except APIError as e:\n return jsonify({\"error\": str(e)}), 400",
"def _instancelist(self):\n\n rv = []\n self.iname = {}\n for resv in self.conn.get_all_reservations():\n for inst in resv.instances:\n if inst.state != 'terminated':\n name = inst.tags.get('Name',None)\n rv.append([inst.id,inst.state])\n if name is not None:\n rv.append([name,inst.state])\n else:\n rv.append([inst.id+'-needsName',inst.state])\n self.iname[name] = inst.id\n self.iname[inst.id] = inst.id\n return rv",
"def reservoirs(self): \n return self._node_reg.reservoirs",
"def get_refresh_ids(self):\n ids = []\n for bucket in self.router.lonely_buckets():\n rid = random.randint(*bucket.range).to_bytes(20, byteorder='big')\n ids.append(rid)\n return ids",
"def get_ec2_reservations(profile, running_filter):\n try:\n ec2_client = boto3.Session(profile_name=profile).client('ec2')\n except ProfileNotFound:\n print(\"Profile: %s not found\" % profile, file=sys.stderr)\n sys.exit(1)\n filtered_instances = ec2_client.describe_instances(Filters=running_filter)\n return filtered_instances['Reservations']",
"def Get_Running_Instances():\n ec2 = boto3.resource('ec2') \n #call the features resource from the boto3 library\n instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['pending', 'running',]},])\n #filter the instances returned using the state name\n #you can also filter using Tags by adding the filters: \n #[{'Name': 'tag-key', 'Values': ['Role','Name',]}, {'Name': 'tag-value', 'Values': ['*test*', '*TEST*',]},]\n return [instance.id for instance in instances]\n #return a liste with the ids of the instances",
"def _get_instance_ids(instances):\n instance_ids = []\n for instance in instances:\n instance_ids.append(instance.id)\n return instance_ids",
"def _get_port_ids(self, name_or_id_list, filters=None):\n ids_list = []\n for name_or_id in name_or_id_list:\n port = self.get_port(name_or_id, filters)\n if not port:\n raise exceptions.ResourceNotFound(\n 'Port {id} not found'.format(id=name_or_id)\n )\n ids_list.append(port['id'])\n return ids_list",
"def getRefreshIDs(self):\n ids = []\n for bucket in self.router.getLonelyBuckets():\n rid = random.randint(*bucket.range).to_bytes(20, byteorder='big')\n ids.append(rid)\n return ids",
"def all_env_ids(self) -> np.ndarray:",
"def __get_reservations(self, instance_ids=None):\n if instance_ids:\n self.__validate_instance_id(instance_ids)\n euca_conn = self.__make_connection()\n try:\n return euca_conn.get_all_instances(instance_ids)\n except:\n euca.display_error_and_exit('%s' % ex)\n return False",
"def getIDs():",
"def _quota_reservations_query(context, reservations):\n return model_query(\n context, models.Reservation,\n read_deleted=\"no\",\n ).filter(\n models.Reservation.uuid.in_(reservations),\n ).with_for_update()",
"def get_ids(self) -> List[str]:",
"def get_reservations():\n start_date = request.args.get('start')\n end_date = request.args.get('end')\n\n if start_date is not None and end_date is not None:\n start = parse_datetime(request.json['start'])\n end = parse_datetime(request.json['end'])\n if start is None or end is None:\n abort(400, 'cannot parse start or end date')\n\n reservations = Reservation.query.filter(\n Reservation.end >= start, Reservation.start <= end)\n else:\n reservations = Reservation.query.filter(\n or_(Reservation.start >= datetime.datetime.now(),\n Reservation.end >= datetime.datetime.now()))\n\n reservations = map(lambda x: x.as_dict(), reservations)\n\n return json.dumps(reservations)",
"def _get_all_app_ids(config, client):\n rv = set()\n total_pages = client.get_published_apps(config.username, 0).json()[\"total_pages\"]\n for current_page in range(total_pages):\n current_page_results = client.get_published_apps(config.username, current_page).json()['results']\n for result in current_page_results:\n rv.add(result['id'])\n return rv",
"def instances(self):\r\n # It would be more efficient to do this with filters now\r\n # but not all services that implement EC2 API support filters.\r\n instances = []\r\n rs = self.connection.get_all_instances()\r\n for reservation in rs:\r\n uses_group = [g.name for g in reservation.groups if g.name == self.name]\r\n if uses_group:\r\n instances.extend(reservation.instances)\r\n return instances",
"def get_elb_instance_ids(elbclient, elbname):\r\n try:\r\n resp = elbclient.describe_load_balancers(LoadBalancerNames=[elbname])\r\n except:\r\n print(ex.message)\r\n return None\r\n return list(map(\r\n lambda x:x['InstanceId'],\r\n resp['LoadBalancerDescriptions'][0]['Instances']\r\n ))",
"def reservation_data(self):\n reservations = []\n\n for reservation in self.reservations():\n resource = utils.get_resource_by_uuid(reservation.resource)\n\n if resource is None:\n log.warn('Invalid UUID %s' % str(reservation.resource))\n continue\n\n resource = resource.getObject()\n\n data = {}\n\n data['title'] = utils.get_resource_title(resource)\n\n timespans = []\n for start, end in reservation.timespans():\n timespans.append(u'โ ' + utils.display_date(start, end))\n\n data['time'] = '<br />'.join(timespans)\n data['quota'] = utils.get_reservation_quota_statement(\n reservation.quota\n ) if reservation.quota > 1 else u''\n\n data['url'] = resource.absolute_url()\n data['remove-url'] = ''.join((\n resource.absolute_url(),\n '/your-reservations?remove=',\n reservation.token.hex\n ))\n reservations.append(data)\n\n return reservations",
"def findTaggedServiceIds(self, name):\n pass;",
"def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n region_instances.append(instance)\n except boto.exception.EC2ResponseError:\n return []\n return region_instances",
"def running_rhel_containers_id(broker):\n containers_info = []\n for container in broker[running_rhel_containers]:\n containers_info.append((container[1], container[2]))\n return containers_info",
"def ids(self):\n\n if len(self._id_ranges) % 2 != 0:\n raise AutoIDException(message=ExceptionsMessage.AutoIDIllegalRanges)\n\n ids = []\n for i in range(int(len(self._id_ranges) / 2)):\n begin = self._id_ranges[i * 2]\n end = self._id_ranges[i * 2 + 1]\n for j in range(begin, end):\n ids.append(j)\n\n return ids",
"def get_scan_ids(self):\n return list(self.scans.keys())",
"def get_ids(self, instances):\n instance_ids = []\n for instance in instances:\n instance_ids.append(instance.id)\n return instance_ids"
]
| [
"0.6664405",
"0.6625137",
"0.6480203",
"0.60759157",
"0.5999064",
"0.5983193",
"0.59796596",
"0.5953766",
"0.5928444",
"0.5895583",
"0.585223",
"0.57362765",
"0.57356167",
"0.5665165",
"0.56505793",
"0.5645751",
"0.5641092",
"0.56102735",
"0.55491316",
"0.55278045",
"0.55257314",
"0.55217797",
"0.5514817",
"0.5512158",
"0.54801553",
"0.5478234",
"0.5445897",
"0.5429656",
"0.5375595",
"0.53688914"
]
| 0.6637255 | 1 |
Compare and return whether an instance type matches the given policies (cpu, ram, disk) | def __compare_types_instances(self, policies, instance_type):
zones = availabilityZones()
types_ins = zones.get_typevm_zones()
if ( types_ins[instance_type]['cpu'] == policies['cpu'] and
types_ins[instance_type]['ram'] == policies['ram'] and
types_ins[instance_type]['disk']== policies['disk'] ):
return 1
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __is_adaptive_instance(self, policies, instance_type):\n zones = availabilityZones()\n typevms = zones.get_typevm_zones()\n if ( typevms[instance_type]['cpu'] >= policies['cpu_min'] and typevms[instance_type]['cpu'] <= policies['cpu_max'] and\n typevms[instance_type]['ram'] >= policies['memory_min'] and typevms[instance_type]['ram'] <= policies['memory_max'] and\n typevms[instance_type]['disk'] >= policies['disk_min'] and typevms[instance_type]['disk'] <= policies['disk_max'] ):\n return True\n return False",
"def __find_adaptive_image(self, policies):\n instances_types = INSTANCE_TYPES;\n if policies['level'] == 1:\n for instance_type in instances_types:\n if self.__compare_types_instances( policies, instance_type ):\n return True, instance_type\n elif policies['level'] == 0:\n for instance_type in instances_types:\n if self.__is_adaptive_instance( self.__get_metrics_adapted(policies), instance_type ):\n return True, instance_type\n else:\n return False, None",
"def reserved_compare(options):\n running_instances = defaultdict(dict)\n reserved_purchases = defaultdict(dict)\n regions = boto.ec2.regions()\n good_regions = [r for r in regions if r.name not in ['us-gov-west-1',\n 'cn-north-1']]\n for region in good_regions:\n if options.trace:\n print \" Scanning region {0}\".format(region.name)\n conn = region.connect()\n filters = {'instance-state-name': 'running'}\n zones = defaultdict(dict)\n\n if options.trace:\n print \" Fetching running instances\"\n reservations = conn.get_all_instances(filters=filters)\n for reservation in reservations:\n for inst in reservation.instances:\n if options.debug:\n print instance_string(inst, options, verbose=True)\n if inst.state != 'running':\n if options.debug:\n print \"Skip {0.id} state {0.state}\".format(inst)\n continue\n if inst.spot_instance_request_id:\n if options.debug:\n print \"Skip {0.id} has spot id {0.spot_instance_request_id}\".format(inst)\n continue\n if 'aws:autoscaling:groupName' in inst.tags:\n if options.debug:\n print \"Skip {0.id} is an autoscale instance\".format(inst)\n continue\n if inst.platform == 'Windows' or inst.platform == 'windows':\n if options.debug:\n print \"Skip {0.id} has platform {0.platform}\".format(inst)\n continue\n if inst.instance_type not in zones[inst.placement]:\n zones[inst.placement][inst.instance_type] = []\n zones[inst.placement][inst.instance_type].append(inst)\n\n if zones:\n running_instances[region.name] = zones\n\n purchased = defaultdict(dict)\n if options.trace:\n print \" Fetching reservations\"\n\n reserved = conn.get_all_reserved_instances()\n for r in reserved:\n if options.debug:\n print reservation_string(r, verbose=True)\n if r.state != 'active':\n continue\n if r.instance_tenancy != 'default':\n print 'WARNING: Non-default tenancy %s: %s' % (r.instance_tenancy, reservation_string(r))\n continue\n if r.instance_type not in purchased[r.availability_zone]:\n purchased[r.availability_zone][r.instance_type] = [r]\n else:\n purchased[r.availability_zone][r.instance_type].append(r)\n\n if purchased:\n reserved_purchases[region.name] = purchased\n\n return check_reservation_use(options, running_instances,\n reserved_purchases)",
"def __run_instances(self, number=1, policies={}):\n try:\n self.euca = Euca2ool('k:n:t:g:d:f:z:',\n ['key=', 'kernel=', 'ramdisk=', 'instance-count=', 'instance-type=',\n 'group=', 'user-data=', 'user-data-file=', 'addressing=', 'availability-zone='])\n except Exception, ex:\n sys.exit(1)\n\n instance_type = policies.get('instance_type') or 'm1.small'\n image_id = policies.get('image_id') or self.__get_image_id()[0]\n min_count = number\n max_count = number\n keyname = 'mykey'\n \n kernel_id = None\n ramdisk_id = None\n group_names = []\n user_data = None\n addressing_type = None\n zone = None\n user_data = None\n \n if image_id:\n euca_conn = self.__make_connection()\n try:\n reservation = euca_conn.run_instances(image_id = image_id,\n min_count = min_count,\n max_count = max_count,\n key_name = keyname,\n security_groups = group_names,\n user_data = user_data,\n addressing_type = addressing_type,\n instance_type = instance_type,\n placement = zone,\n kernel_id = kernel_id,\n ramdisk_id = ramdisk_id)\n except Exception, ex:\n self.euca.display_error_and_exit('error:%s' % ex)\n return reservation\n return False",
"def test_instance_not_overscaled(self) -> None:\n if self.prod_env:\n ec2_name = 'saints-xctf-server-prod-asg'\n else:\n ec2_name = 'saints-xctf-server-dev-asg'\n\n instances = self.get_ec2(ec2_name)\n self.assertTrue(len(instances) < 3)",
"def __verify_policies( self, policies ):\n image_id = ''\n instance_type=''\n if type(policies) == type(str()):\n if self.__is_type_instance( policies ):\n instance_type = policies\n state = True\n elif self.__is_image_id( policies ):\n image_id = policies\n state = True\n else:\n state = False\n else:\n verify, ins_type = self.__find_adaptive_image( policies )\n if verify:\n instance_type = ins_type\n state = True\n\n image_id = image_id or self.__get_image_id()\n instance_type = instance_type or 'm1.small'\n state = state or False\n\n return image_id, instance_type, state",
"def verify(self, arg, choose):\n if not arg:\n print(\"** class name missing **\")\n return 0\n args = arg.split(\" \")\n if args[0] not in self.__classes:\n print(\"** class doesn't exist **\")\n return 0\n if len(args) == 1:\n print(\"** instance id missing **\")\n return 0\n obj = storage.all()\n k = \"{}.{}\".format(args[0], args[1])\n for key, val in obj.items():\n if key == k:\n if choose == 1:\n return val\n if choose == 2:\n return k\n print(\"** no instance found **\")",
"def allowed_instances(context, requested_instances, instance_type):\n project_id = context.project_id\n context = context.elevated()\n requested_cores = requested_instances * instance_type['vcpus']\n requested_ram = requested_instances * instance_type['memory_mb']\n usage = db.instance_data_get_for_project(context, project_id)\n used_instances, used_cores, used_ram = usage\n quota = get_project_quotas(context, project_id)\n allowed_instances = _get_request_allotment(requested_instances,\n used_instances,\n quota['instances'])\n allowed_cores = _get_request_allotment(requested_cores, used_cores,\n quota['cores'])\n allowed_ram = _get_request_allotment(requested_ram, used_ram, quota['ram'])\n allowed_instances = min(allowed_instances,\n allowed_cores // instance_type['vcpus'],\n allowed_ram // instance_type['memory_mb'])\n return min(requested_instances, allowed_instances)",
"def test_instance_running(self) -> None:\n if self.prod_env:\n ec2_name = 'saints-xctf-server-prod-asg'\n else:\n ec2_name = 'saints-xctf-server-dev-asg'\n\n instances = self.get_ec2(ec2_name)\n self.assertTrue(len(instances) > 0)",
"def singularity_exists(self):\n instances = Client.instances(quiet=self.quiet)\n for instance in instances:\n if self.pid in instance.name:\n return True\n return False",
"def is_instance(self,instance):\n\t\tinst_attributes = instance.getAttributes()\n\t\tfor attribute in self.utility.av_counts.keys():\n\t\t\tif attribute not in inst_attributes:\n\t\t\t\treturn False\n\t\t\tif type(inst_attributes[attribute]) == dict:\n\t\t\t\tfor value in self.utility.av_counts[attribute]:\n\t\t\t\t\tif (self.utility.av_counts[attribute][value] / self.utility.count) != 1.0:\n\t\t\t\t\t\treturn False\n\t\t\t\t\tif inst_attributes[attribute] != value:\n\t\t\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\t\tif inst_attributes[attribute] != self.utility.av_counts[attribute]['numerically_valued_attribute'] / self.utility.count:\n\t\t\t\t\t\treturn False\n\t\t\n\t\tfor attribute in instance:\n\t\t\tif attribute not in self.utility.av_counts:\n\t\t\t\treturn False\n\t\t\tif type(inst_attributes[attribute]) == dict:\n\t\t\t\tif inst_attributes[attribute] not in self.utility.av_counts[attribute]:\n\t\t\t\t\treturn False\n\t\t\t\tif ((self.utility.av_counts[attribute][inst_attributes[attribute]] / self.utility.count) != 1.0):\n\t\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\tif len(self.utility.av_counts[attribute].keys()) != 1 or self.utility.av_counts[attribute].get('numerically_valued_attribute', 0) == 0:\n\t\t\t\t\treturn False\n\t\t\n\t\treturn True",
"def __get_multi_instances(self, reservations, instance_ids=None, policies=None):\n check_instance_ids = False\n if ( instance_ids and len(instance_ids) > 0 ):\n check_instance_ids = True\n instances = [] \n for reservation in reservations:\n if check_instance_ids:\n for instance in reservation.instances:\n if instance.id in instance_ids:\n instances.append(instance)\n elif policies:\n for instance in reservation.instances:\n if 'typevm' in policies and instance.instance_type == policies['typevm']:\n instances.append(instance) \n elif policies.get('level')==1:\n if self.__compare_types_instances(policies, instance.instance_type.encode(\"latin-1\")):\n instances.append(instance)\n elif policies.get('level') == 0:\n if self.__is_adaptive_instance(self.__get_metrics_adapted(policies), instance.instance_type.encode(\"latin-1\")):\n instances.append(instance)\n else:\n instances=[]\n else:\n instances += reservation.instances\n return instances, len(instances)",
"def _Exists(self, instance_only: bool = False) -> bool:\n cmd = util.GcloudCommand(self, 'spanner', 'instances', 'describe',\n self.name)\n\n # Do not log error or warning when checking existence.\n _, _, retcode = cmd.Issue(suppress_warning=True, raise_on_failure=False)\n if retcode != 0:\n logging.info('Could not find GCP Spanner instance %s.', self.name)\n return False\n\n if instance_only:\n return True\n\n cmd = util.GcloudCommand(self, 'spanner', 'databases', 'describe',\n self.database)\n cmd.flags['instance'] = self.name\n\n # Do not log error or warning when checking existence.\n _, _, retcode = cmd.Issue(suppress_warning=True, raise_on_failure=False)\n if retcode != 0:\n logging.info('Could not find GCP Spanner database %s.', self.database)\n return False\n\n return True",
"def test_instance_profile_exists(self) -> None:\n self.assertTrue(self.validate_instance_profile('s3-access-role', is_prod=self.prod_env))",
"def instances_availability(self, lastsubmitedinstance, metrics):\n connection = self.connection\n instancesconfig = self.instancesconfigs\n\n cur = connection.cursor()\n harvesters = instancesconfig.keys()\n connection.row_factory = sqlite3.Row\n\n for harvesterid in harvesters:\n error_text = set()\n\n instanceisenable = self.__str_to_bool(instancesconfig[harvesterid]['instanceisenable'])\n del instancesconfig[harvesterid]['instanceisenable']\n ### Instance is enable ###\n if instanceisenable:\n for host in instancesconfig[harvesterid].keys():\n avaibility = []\n if self.__str_to_bool(instancesconfig[harvesterid][host]['hostisenable']):\n ### No submitted worker ###\n timedelta_submitted = timedelta(minutes=30)\n if host != 'none' and host in instancesconfig[harvesterid] \\\n and self.__str_to_bool( instancesconfig[harvesterid][host]['metrics']['lastsubmittedworker']['enable']):\n timedelta_submitted = self.__get_timedelta(\n instancesconfig[harvesterid][host]['metrics']['lastsubmittedworker']['value'])\n if lastsubmitedinstance[harvesterid]['harvesterhost'][host][\n 'harvesterhostmaxtime'] < datetime.utcnow() - timedelta_submitted:\n error = \"Last submitted worker was {0}\".format(\n str(lastsubmitedinstance[harvesterid]['harvesterhost'][host][\n 'harvesterhostmaxtime'])) + '\\n'\n error_text.add(error)\n avaibility.append(0)\n if harvesterid in metrics:\n ### No heartbeat ###\n heartbeattime = metrics[harvesterid][host].keys()[0]\n contacts = instancesconfig[harvesterid][host]['contacts']\n timedelta_heartbeat = self.__get_timedelta(instancesconfig[harvesterid][host]['metrics']['lastheartbeat']['value'])\n if self.__str_to_bool( instancesconfig[harvesterid][host]['metrics']['lastheartbeat']['enable']) and \\\n heartbeattime < datetime.utcnow() - timedelta_heartbeat:\n error = \"Last heartbeat was {0}\".format(\n str(heartbeattime)) + '\\n'\n error_text.add(error)\n avaibility.append(0)\n\n #### Metrics ####\n memory = instancesconfig[harvesterid][host]['memory']\n cpu_warning = instancesconfig[harvesterid][host]['metrics']['cpu']['cpu_warning']\n cpu_critical = instancesconfig[harvesterid][host]['metrics']['cpu']['cpu_critical']\n disk_warning = instancesconfig[harvesterid][host]['metrics']['disk']['disk_warning']\n disk_critical = instancesconfig[harvesterid][host]['metrics']['disk']['disk_critical']\n memory_warning = instancesconfig[harvesterid][host]['metrics']['memory']['memory_warning']\n memory_critical = instancesconfig[harvesterid][host]['metrics']['memory']['memory_critical']\n\n cpu_enable = self.__str_to_bool(\n instancesconfig[harvesterid][host]['metrics']['cpu']['enable'])\n disk_enable = self.__str_to_bool(\n instancesconfig[harvesterid][host]['metrics']['disk']['enable'])\n memory_enable = self.__str_to_bool(\n instancesconfig[harvesterid][host]['metrics']['memory']['enable'])\n\n #### Metrics DB ####\n for metric in metrics[harvesterid][host][heartbeattime]:\n #### CPU ####\n if cpu_enable:\n cpu_pc = int(metric['cpu_pc'])\n if cpu_pc >= cpu_warning:\n avaibility.append(50)\n error = \"Warning! CPU utilization: {0}\".format(\n str(cpu_pc)) + '\\n'\n error_text.add(error)\n elif cpu_pc >= cpu_critical:\n avaibility.append(10)\n error = \"CPU utilization: {0}\".format(\n str(cpu_pc)) + '\\n'\n error_text.add(error)\n #### Memory ####\n if memory_enable:\n if 'memory_pc' in metric:\n memory_pc = int(metric['memory_pc'])\n else:\n memory_pc = int(self.__get_change(metric['rss_mib'], memory))\n if memory_pc >= memory_warning:\n avaibility.append(50)\n error = \"Warning! 
Memory consumption: {0}\".format(\n str(memory_pc)) + '\\n'\n error_text.add(error)\n elif memory_pc >= memory_critical:\n avaibility.append(0)\n error = \"Memory consumption: {0}\".format(\n str(memory_pc)) + '\\n'\n error_text.add(error)\n #### HDD&HDD1 ####\n if disk_enable:\n if 'volume_data_pc' in metric:\n volume_data_pc = int(metric['volume_data_pc'])\n else:\n volume_data_pc = -1\n if volume_data_pc >= disk_warning:\n avaibility.append(50)\n error = \"Warning! Disk utilization: {0}\".format(\n str(volume_data_pc)) + '\\n'\n error_text.add(error)\n elif volume_data_pc >= disk_critical:\n avaibility.append(10)\n error = \"Disk utilization: {0}\".format(\n str(volume_data_pc)) + '\\n'\n error_text.add(error)\n if 'volume_data1_pc' in metric:\n volume_data1_pc = int(metric['volume_data1_pc'])\n if volume_data1_pc >= disk_warning:\n avaibility.append(50)\n error = \"Warning! Disk 1 utilization: {0}\".format(\n str(volume_data1_pc)) + '\\n'\n error_text.add(error)\n elif volume_data1_pc >= disk_critical:\n avaibility.append(10)\n error = \"Disk 1 utilization: {0}\".format(\n str(volume_data1_pc)) + '\\n'\n error_text.add(error)\n try:\n cur.execute(\"insert into INSTANCES values (?,?,?,?,?,?,?,?,?)\",\n (str(harvesterid), str(host),\n str(lastsubmitedinstance[harvesterid]['harvesterhost'][host]['harvesterhostmaxtime']),\n heartbeattime, 1, 0, min(avaibility) if len(avaibility) > 0 else 100, str(contacts), ', '.join(str(e) for e in error_text)))\n connection.commit()\n error_text = set()\n except:\n query = \\\n \"\"\"UPDATE INSTANCES \n SET lastsubmitted = '{0}', active = {1}, availability = {2}, lastheartbeat = '{3}', contacts = '{4}', errorsdesc = '{5}'\n WHERE harvesterid = '{6}' and harvesterhost = '{7}'\n \"\"\".format(str(lastsubmitedinstance[harvesterid]['harvesterhost'][host]['harvesterhostmaxtime']),\n 1, min(avaibility) if len(avaibility) > 0 else 100, heartbeattime, str(contacts), ', '.join(str(e) for e in error_text), str(harvesterid),\n str(host))\n cur.execute(query)\n connection.commit()\n error_text = set()\n else:\n cur.execute(\"DELETE FROM INSTANCES WHERE harvesterid = ?\", [str(harvesterid)])\n connection.commit()",
"def CheckPrereq(self):\n assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)\n self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)\n self.cluster = self.cfg.GetClusterInfo()\n cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]\n\n self.op.disks = self._LookupDiskMods()\n\n assert self.instance is not None, \\\n \"Cannot retrieve locked instance %s\" % self.op.instance_name\n\n self.warn = []\n\n if (self.op.pnode_uuid is not None and\n self.op.pnode_uuid != self.instance.primary_node and\n not self.op.force):\n instance_info = self._GetInstanceInfo(cluster_hvparams)\n\n if instance_info.fail_msg:\n self.warn.append(\"Can't get instance runtime information: %s\" %\n instance_info.fail_msg)\n elif instance_info.payload:\n raise errors.OpPrereqError(\n \"Instance is still running on %s\" %\n self.cfg.GetNodeName(self.instance.primary_node),\n errors.ECODE_STATE)\n pnode_uuid = self.instance.primary_node\n assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)\n\n node_uuids = list(self.cfg.GetInstanceNodes(self.instance.uuid))\n pnode_info = self.cfg.GetNodeInfo(pnode_uuid)\n\n assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)\n group_info = self.cfg.GetNodeGroup(pnode_info.group)\n\n # dictionary with instance information after the modification\n ispec = {}\n\n self._CheckHotplug()\n\n self._PrepareNicCommunication()\n\n # disks processing\n assert not (self.op.disk_template and self.op.disks), \\\n \"Can't modify disk template and apply disk changes at the same time\"\n\n if self.op.disk_template:\n self._PreCheckDiskTemplate(pnode_info)\n\n self._PreCheckDisks(ispec)\n\n self._ProcessHVParams(node_uuids)\n be_old = self._ProcessBeParams()\n\n self._ValidateCpuParams()\n self._ProcessOsParams(node_uuids)\n self._ProcessMem(cluster_hvparams, be_old, pnode_uuid)\n\n # make self.cluster visible in the functions below\n cluster = self.cluster\n\n def _PrepareNicCreate(_, params, private):\n self._PrepareNicModification(params, private, None, None,\n {}, cluster, pnode_uuid)\n return (None, None)\n\n def _PrepareNicAttach(_, __, ___):\n raise errors.OpPrereqError(\"Attach operation is not supported for NICs\",\n errors.ECODE_INVAL)\n\n def _PrepareNicMod(_, nic, params, private):\n self._PrepareNicModification(params, private, nic.ip, nic.network,\n nic.nicparams, cluster, pnode_uuid)\n return None\n\n def _PrepareNicRemove(_, params, __):\n ip = params.ip\n net = params.network\n if net is not None and ip is not None:\n self.cfg.ReleaseIp(net, ip, self.proc.GetECId())\n\n def _PrepareNicDetach(_, __, ___):\n raise errors.OpPrereqError(\"Detach operation is not supported for NICs\",\n errors.ECODE_INVAL)\n\n # Verify NIC changes (operating on copy)\n nics = [nic.Copy() for nic in self.instance.nics]\n ApplyContainerMods(\"NIC\", nics, None, self.nicmod, _PrepareNicCreate,\n _PrepareNicAttach, _PrepareNicMod, _PrepareNicRemove,\n _PrepareNicDetach)\n if len(nics) > constants.MAX_NICS:\n raise errors.OpPrereqError(\"Instance has too many network interfaces\"\n \" (%d), cannot add more\" % constants.MAX_NICS,\n errors.ECODE_STATE)\n\n # Pre-compute NIC changes (necessary to use result in hooks)\n self._nic_chgdesc = []\n if self.nicmod:\n # Operate on copies as this is still in prereq\n nics = [nic.Copy() for nic in self.instance.nics]\n ApplyContainerMods(\"NIC\", nics, self._nic_chgdesc, self.nicmod,\n self._CreateNewNic, None, self._ApplyNicMods,\n self._RemoveNic, None)\n # Verify that NIC names are unique and valid\n 
utils.ValidateDeviceNames(\"NIC\", nics)\n self._new_nics = nics\n ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)\n else:\n self._new_nics = None\n ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)\n\n if not self.op.ignore_ipolicy:\n ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,\n group_info)\n\n # Fill ispec with backend parameters\n ispec[constants.ISPEC_SPINDLE_USE] = \\\n self.be_new.get(constants.BE_SPINDLE_USE, None)\n ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,\n None)\n\n # Copy ispec to verify parameters with min/max values separately\n if self.op.disk_template:\n count = ispec[constants.ISPEC_DISK_COUNT]\n new_disk_types = [self.op.disk_template] * count\n else:\n old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)\n add_disk_count = ispec[constants.ISPEC_DISK_COUNT] - len(old_disks)\n dev_type = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)\n if dev_type == constants.DT_DISKLESS and add_disk_count != 0:\n raise errors.ProgrammerError(\n \"Conversion from diskless instance not possible and should have\"\n \" been caught\")\n\n new_disk_types = ([d.dev_type for d in old_disks] +\n [dev_type] * add_disk_count)\n ispec_max = ispec.copy()\n ispec_max[constants.ISPEC_MEM_SIZE] = \\\n self.be_new.get(constants.BE_MAXMEM, None)\n res_max = ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,\n new_disk_types)\n ispec_min = ispec.copy()\n ispec_min[constants.ISPEC_MEM_SIZE] = \\\n self.be_new.get(constants.BE_MINMEM, None)\n res_min = ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,\n new_disk_types)\n\n if res_max or res_min:\n # FIXME: Improve error message by including information about whether\n # the upper or lower limit of the parameter fails the ipolicy.\n msg = (\"Instance allocation to group %s (%s) violates policy: %s\" %\n (group_info, group_info.name,\n utils.CommaJoin(set(res_max + res_min))))\n raise errors.OpPrereqError(msg, errors.ECODE_INVAL)",
"def servicenow_sspm_performance_monitoring_ip_restriction_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str):\n iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat()\n\n # Name of the property to evaluate against\n evalTarget = \"glide.custom.ip.authenticate.allow\"\n # Get cached props\n sysPropCache = get_servicenow_sys_properties(cache)\n\n # There should not ever be a duplicate system property, use next() and a list comprehension to check if the\n # property we're evaluating is in the list of properties we get from the cache. If it is NOT then set the\n # value as `False` and we can fill in fake values. Not having a property for security hardening is the same\n # as a failed finding with a lot less fan fair\n propFinder = next((sysprop for sysprop in sysPropCache if sysprop[\"name\"] == evalTarget), False)\n # If we cannot find the property set \"NOT_CONFIGURED\" which will fail whatever the value should be\n if propFinder == False:\n propertyValue = \"NOT_CONFIGURED\"\n propDescription = \"\"\n propId = \"\"\n propCreatedOn = \"\"\n propCreatedBy = \"\"\n propUpdatedOn = \"\"\n propUpdatedBy = \"\"\n propScope = \"\"\n assetB64 = None\n else:\n propertyValue = str(propFinder[\"value\"])\n propDescription = str(propFinder[\"description\"]).replace(\"\\n \", \"\")\n propId = str(propFinder[\"sys_id\"])\n propCreatedOn = str(propFinder[\"sys_created_on\"])\n propCreatedBy = str(propFinder[\"sys_created_by\"])\n propUpdatedOn = str(propFinder[\"sys_updated_on\"])\n propUpdatedBy = str(propFinder[\"sys_updated_by\"])\n propScope = str(propFinder[\"sys_scope\"][\"value\"])\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(propFinder,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson) \n # NOTE: This is where the check evaluation happens - in SNOW these may be Bools or Numbers but will come back as Strings\n # always evaluate a failing condition first which should be the OPPOSITE of the SNOW reccomendation as sometimes the values\n # are not a simple Boolean expression\n if propertyValue == (\"\" or \"NOT_CONFIGURED\"):\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"LOW\"},\n \"Confidence\": 99,\n \"Title\": \"[SSPM.Servicenow.AccessControl.15] Instance should configure an IP restriction list to protect performance monitoring from unauthorized access\",\n \"Description\": f\"Servicenow instance {SNOW_INSTANCE_NAME} does not configure an IP restriction list to protect performance monitoring from unauthorized access. Use the 'glide.custom.ip.authenticate.allow' property to enable only a specified comma-separated list or a range of IP addresses access to stats.do, threads.do, and replication.do pages. If this property is not enabled, it is possible to access those types of pages from any IP address. Unnecessary exposure to the target instance on the internet should be restricted with the help of IP access controls functionality. 
Refer to the remediation instructions if this configuration is not intended.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information refer to the Performance monitoring IP restriction (instance security hardening) section of the Servicenow Product Documentation.\",\n \"Url\": \"https://docs.servicenow.com/bundle/utah-platform-security/page/administer/security/reference/performance-monitoring-ip-restriction.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"ServiceNow\",\n \"ProviderType\": \"SaaS\",\n \"ProviderAccountId\": SNOW_INSTANCE_NAME,\n \"AssetRegion\": SNOW_INSTANCE_REGION,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Management & Governance\",\n \"AssetService\": \"System Properties\",\n \"AssetComponent\": \"System Property\"\n },\n \"Resources\": [\n {\n \"Type\": \"ServicenowInstance\",\n \"Id\": f\"{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}\",\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"ServicenowInstance\": SNOW_INSTANCE_NAME,\n \"SysId\": propId,\n \"PropertyName\": evalTarget,\n \"PropertyValue\": propertyValue,\n \"Description\": propDescription,\n \"CreatedBy\": propCreatedBy,\n \"CreatedOn\": propCreatedOn,\n \"UpdatedBy\": propUpdatedBy,\n \"UpdatedOn\": propUpdatedOn,\n \"Scope\": propScope\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.PT-3\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 4 CM-7\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.6.2.2\", \n \"ISO 27001:2013 A.9.1.2\",\n \"ISO 27001:2013 A.9.4.1\",\n \"ISO 27001:2013 A.9.4.4\",\n \"ISO 27001:2013 A.9.4.5\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\",\n \"ISO 27001:2013 A.18.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[SSPM.Servicenow.AccessControl.15] Instance should configure an IP restriction list to protect performance monitoring from unauthorized access\",\n \"Description\": f\"Servicenow instance {SNOW_INSTANCE_NAME} configures an IP restriction list to protect performance monitoring from unauthorized access.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information refer to the Performance monitoring IP restriction (instance security hardening) section of the Servicenow Product Documentation.\",\n \"Url\": \"https://docs.servicenow.com/bundle/utah-platform-security/page/administer/security/reference/performance-monitoring-ip-restriction.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"ServiceNow\",\n \"ProviderType\": \"SaaS\",\n \"ProviderAccountId\": SNOW_INSTANCE_NAME,\n \"AssetRegion\": SNOW_INSTANCE_REGION,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Management & Governance\",\n \"AssetService\": \"System 
Properties\",\n \"AssetComponent\": \"System Property\"\n },\n \"Resources\": [\n {\n \"Type\": \"ServicenowInstance\",\n \"Id\": f\"{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}\",\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"ServicenowInstance\": SNOW_INSTANCE_NAME,\n \"SysId\": propId,\n \"PropertyName\": evalTarget,\n \"PropertyValue\": propertyValue,\n \"Description\": propDescription,\n \"CreatedBy\": propCreatedBy,\n \"CreatedOn\": propCreatedOn,\n \"UpdatedBy\": propUpdatedBy,\n \"UpdatedOn\": propUpdatedOn,\n \"Scope\": propScope\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.PT-3\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 4 CM-7\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.6.2.2\", \n \"ISO 27001:2013 A.9.1.2\",\n \"ISO 27001:2013 A.9.4.1\",\n \"ISO 27001:2013 A.9.4.4\",\n \"ISO 27001:2013 A.9.4.5\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\",\n \"ISO 27001:2013 A.18.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding",
"def test_get_hyperflex_cluster_storage_policy_by_moid(self):\n pass",
"def test_vm_cpu_limitation(self):\n expected_dict = self.calculate_expected_values(\n load_dict=self.load_dict\n )\n assert sla_helpers.load_vm_and_check_the_load(\n load_dict=self.load_dict,\n expected_values=expected_dict\n )",
"def instanceha_deployed():\n if overcloud.has_overcloud():\n return get_overcloud_nodes_running_pcs_resource(\n resource='nova-evacuate')\n else:\n return False",
"def check_process(self, instance, process):\n\n instance = self.get_instance(instance)\n output = \"\"\n try:\n if instance.get('address'):\n username = instance.get('address') + \"@\" + instance.get('credentials').get('username')\n key = instance.get('credentials').get('publickey')\n output = subprocess.check_output([\"ssh\", key, username, 'ps', 'aux', '|', 'grep', process]).decode(\n \"utf-8\")\n else:\n username = 'ubuntu@' + instance.get('credentials').get('EC2_ACCESS_ID')\n key = instance.get('credentials').get('EC2_SECRET_KEY')\n # output = os.popen(\"ls\"+ \" | \" + \"ssh\"+ \" -i \"+ key +\" \"+ username).read()\n output = subprocess.check_output(\n [\"ssh\", '-i', key, username, 'ps', 'aux', '|', 'grep', process]).decode(\"utf-8\")\n return output\n except:\n return \"Faile to access the instance\"",
"def quick_instance(self, name, image, instance_type, env_tag='dev', zone_tag='starwatts', os_tag='debian', sg_id=None,\n private=True, extra_sg_ids=None, extra_tags=None, terminate_on_shutdown=False,\n debug=False):\n # Debug setting\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n\n # Preliminary tests\n try:\n ami = self.get_image(image_id=image)\n except EC2ResponseError:\n logging.error(\"The image {} could not be found. Aborting.\".format(image))\n return\n print(\"Using AMI {} : {}\".format(image, ami.name))\n if len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'running'})) > 0 or \\\n len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'stopped'})) > 0:\n logging.error(\"An instance with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No instance has the same 'name' tag.\")\n if self.keypair_exists(name):\n logging.error(\"A keypair with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No keypair was found with the same name.\")\n if sg_id is None:\n if self.security_group_exists(name=name):\n logging.error(\"A security group with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No security group was found with the same name.\")\n\n # Tags generation\n logging.debug(\"Generating tags to apply.\")\n tags = dict(name=name, os=os_tag, env=env_tag, zone=zone_tag, privacy='true' if private else 'false')\n if extra_tags is not None:\n tags.update(extra_tags)\n print(\"Tags : {}\".format(tags))\n\n # Fetching needed security groups (bastion and zabbix)\n standard_sg = self.get_all_security_groups(groupnames=['standard'])\n if len(standard_sg) != 1:\n logging.error(\"Multiple or no security group was found for the 'bastion' search. Aborting.\")\n return\n standard_sg = standard_sg[0]\n logging.debug(\"The following security group was found for 'standard : {} {}\".format(standard_sg.id,\n standard_sg.description))\n\n # Security group creation\n if sg_id is None:\n sg = self.create_security_group(name, \"SG applied to {} VM\".format(name))\n sg_id = sg.id\n\n sg_ids = [sg_id, standard_sg.id, ]\n # Using the extra security groups if any\n if extra_sg_ids is not None:\n logging.debug(\"Extra security groups to add : {}\".format(extra_sg_ids))\n sg_ids.extend(extra_sg_ids)\n logging.debug(\"Security Groups : {}\".format(sg_ids))\n\n user_data = \"-----BEGIN OUTSCALE SECTION-----\\nprivate_only=true\\n-----END OUTSCALE SECTION-----\" if private else \"\"\n logging.debug(\"Creating keypair.\")\n kp = self.create_key_pair(key_name=name)\n fp = os.path.join(os.path.expanduser('~/.ssh'), '%s.pem' % kp.name)\n with open(fp, 'wb') as fd:\n fd.write(bytes(kp.material, \"UTF-8\"))\n logging.debug(\"Keypair written to ~/.ssh/{}.pem\".format(name))\n\n resa = self.run_instances(image_id=image, key_name=name, security_groups=sg_ids, instance_type=instance_type,\n user_data=user_data,\n instance_initiated_shutdown_behavior='terminate' if terminate_on_shutdown else 'stop')\n inst = resa.instances[0]\n logging.debug(\"Adding tags to the newly created machine.\")\n inst.add_tags(tags)\n return inst",
"def test_cpu_limitation(self):\n expected_dict = self.calculate_expected_values(\n load_dict=self.load_dict\n )\n assert sla_helpers.load_vm_and_check_the_load(\n load_dict=self.load_dict, expected_values=expected_dict\n )",
"def compare_instances(self, inst1, inst2):\n for skey, sdir in zip(self._sort_keys, self._sort_dirs):\n resultflag = 1 if sdir == 'desc' else -1\n if inst1[skey] < inst2[skey]:\n return resultflag\n elif inst1[skey] > inst2[skey]:\n return resultflag * -1\n return 0",
"def _compute_status(self, instance, zone):\n if self.compute_service is None:\n logging.warning('Service unavailable: unable to start GCE VM: %s (%s)',\n instance, zone)\n return\n\n info = self.compute_service.instances().get(\n project=app_identity.get_application_id(),\n instance=instance,\n zone=zone).execute()\n return info[COMPUTE_STATUS]",
"def find_instance_by_type ( ec2_conn, base_name, instance_type ) :\n instance_name = get_instance_name( base_name, instance_type )\n instance_results = ec2_conn.get_only_instances( filters = { \"tag:Name\": [ instance_name ] } )\n if len( instance_results ) > 0 :\n return instance_results[ 0 ]\n\n return None",
"def test_cpu_limitation_without_guest_agent(self):\n expected_dict = self.calculate_expected_values(\n load_dict=self.load_dict\n )\n assert sla_helpers.load_vm_and_check_the_load(\n load_dict=self.load_dict, expected_values=expected_dict\n )",
"def get_num_disks(instance_type):\n disks_by_instance = {\n \"c1.medium\": 1,\n \"c1.xlarge\": 4,\n \"c3.large\": 2,\n \"c3.xlarge\": 2,\n \"c3.2xlarge\": 2,\n \"c3.4xlarge\": 2,\n \"c3.8xlarge\": 2,\n \"c4.large\": 0,\n \"c4.xlarge\": 0,\n \"c4.2xlarge\": 0,\n \"c4.4xlarge\": 0,\n \"c4.8xlarge\": 0,\n \"cc1.4xlarge\": 2,\n \"cc2.8xlarge\": 4,\n \"cg1.4xlarge\": 2,\n \"cr1.8xlarge\": 2,\n \"d2.xlarge\": 3,\n \"d2.2xlarge\": 6,\n \"d2.4xlarge\": 12,\n \"d2.8xlarge\": 24,\n \"g2.2xlarge\": 1,\n \"g2.8xlarge\": 2,\n \"hi1.4xlarge\": 2,\n \"hs1.8xlarge\": 24,\n \"i2.xlarge\": 1,\n \"i2.2xlarge\": 2,\n \"i2.4xlarge\": 4,\n \"i2.8xlarge\": 8,\n \"m1.small\": 1,\n \"m1.medium\": 1,\n \"m1.large\": 2,\n \"m1.xlarge\": 4,\n \"m2.xlarge\": 1,\n \"m2.2xlarge\": 1,\n \"m2.4xlarge\": 2,\n \"m3.medium\": 1,\n \"m3.large\": 1,\n \"m3.xlarge\": 2,\n \"m3.2xlarge\": 2,\n \"r3.large\": 1,\n \"r3.xlarge\": 1,\n \"r3.2xlarge\": 1,\n \"r3.4xlarge\": 1,\n \"r3.8xlarge\": 2,\n \"t1.micro\": 0,\n \"t2.micro\": 0,\n \"t2.small\": 0,\n \"t2.medium\": 0,\n }\n if instance_type in disks_by_instance:\n return disks_by_instance[instance_type]\n else:\n print(\"WARNING: Don't know number of disks on instance type {}; assuming 1\".format(\n instance_type), file=stderr)\n return 1",
"def _match_sizes(self, pi1, pi2, ignore=[]):\n if pi1.available and pi2.available:\n for arg in ('vsz', 'rss', 'data_segment', 'shared_segment',\n 'stack_segment', 'code_segment'):\n if arg in ignore:\n continue\n size1 = getattr(pi1, arg)\n size2 = getattr(pi2, arg)\n if size1 and size2:\n delta = abs(size1 - size2)\n # Allow for a difference of the size of two pages or 5%\n if delta > pi1.pagesize * 2 and delta > size1 * 0.05:\n self.fail(\"%s mismatch: %d != %d\" % (arg, size1, size2))\n if pi1.pagefaults and pi2.pagefaults:\n # If both records report pagefaults compare the reported\n # number. If a pagefault happens after taking the first\n # snapshot and before taking the second the latter will show a\n # higher pagefault number. In that case take another snapshot\n # with the first variant and check it's now reporting a higher\n # number as well. We assume pagefaults statistics are\n # monotonic.\n if pi1.pagefaults < pi2.pagefaults:\n pi1.update()\n if pi1.pagefaults < pi2.pagefaults:\n pf1 = pi1.pagefaults\n pf2 = pi2.pagefaults\n self.fail(\"Pagefault mismatch: %d != %d\" % (pf1, pf2))\n else:\n self.assertEqual(pi1.pagefaults, pi2.pagefaults)\n if pi1.pagesize and pi2.pagesize:\n self.assertEqual(pi1.pagesize, pi2.pagesize)",
"def check_instance(self, class_name, inst_id, stored_objects):\n '''get '<class_name>.id' to FileStorage.__objects key format'''\n instance = \"{}.{}\".format(class_name, inst_id)\n if instance not in stored_objects:\n \"\"\"given id does not exist\"\"\"\n print(\"** no instance found **\")\n instance = False\n return instance"
]
| [
"0.66594005",
"0.5957029",
"0.58745104",
"0.5764576",
"0.57531226",
"0.55842274",
"0.5541506",
"0.5485973",
"0.53717875",
"0.5304936",
"0.5302708",
"0.5281764",
"0.5257215",
"0.522992",
"0.51905346",
"0.51892805",
"0.5180331",
"0.5180251",
"0.5165421",
"0.51497835",
"0.512592",
"0.51108253",
"0.51052296",
"0.50937015",
"0.50872755",
"0.5083444",
"0.50813574",
"0.5080287",
"0.50555944",
"0.5020697"
]
| 0.7060165 | 0 |
Return True if it is possible to adapt the instance_type to the policies | def __is_adaptive_instance(self, policies, instance_type):
zones = availabilityZones()
typevms = zones.get_typevm_zones()
if ( typevms[instance_type]['cpu'] >= policies['cpu_min'] and typevms[instance_type]['cpu'] <= policies['cpu_max'] and
typevms[instance_type]['ram'] >= policies['memory_min'] and typevms[instance_type]['ram'] <= policies['memory_max'] and
typevms[instance_type]['disk'] >= policies['disk_min'] and typevms[instance_type]['disk'] <= policies['disk_max'] ):
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __compare_types_instances(self, policies, instance_type):\n zones = availabilityZones()\n types_ins = zones.get_typevm_zones()\n\n if ( types_ins[instance_type]['cpu'] == policies['cpu'] and\n types_ins[instance_type]['ram'] == policies['ram'] and\n types_ins[instance_type]['disk']== policies['disk'] ):\n return 1\n return 0",
"def __find_adaptive_image(self, policies):\n instances_types = INSTANCE_TYPES;\n if policies['level'] == 1:\n for instance_type in instances_types:\n if self.__compare_types_instances( policies, instance_type ):\n return True, instance_type\n elif policies['level'] == 0:\n for instance_type in instances_types:\n if self.__is_adaptive_instance( self.__get_metrics_adapted(policies), instance_type ):\n return True, instance_type\n else:\n return False, None",
"def adaptability():\n return True",
"def is_acceptable(self):",
"def applies(cls, obj):\n return type(obj) in cls.types",
"def __is_type_instance( self, instance_type ):\n for index, instance in enumerate(INSTANCE_TYPES):\n if instance == instance_type:\n return True\n return False",
"def __instancecheck__(self, instance):\n\n if isinstance(instance, ObjCInstance):\n return bool(instance.isKindOfClass(self))\n else:\n return False",
"def check_type(self):\n return True",
"def __instancecheck__(self, instance):\n\n if isinstance(instance, ObjCInstance):\n return bool(instance.conformsToProtocol(self))\n else:\n return False",
"def HasPerInstancePropertyProviders(self) -> bool:",
"def is_valid_type(type):\n return type in type_to_adapter",
"def __verify_policies( self, policies ):\n image_id = ''\n instance_type=''\n if type(policies) == type(str()):\n if self.__is_type_instance( policies ):\n instance_type = policies\n state = True\n elif self.__is_image_id( policies ):\n image_id = policies\n state = True\n else:\n state = False\n else:\n verify, ins_type = self.__find_adaptive_image( policies )\n if verify:\n instance_type = ins_type\n state = True\n\n image_id = image_id or self.__get_image_id()\n instance_type = instance_type or 'm1.small'\n state = state or False\n\n return image_id, instance_type, state",
"def policy_net(self) -> bool:\n raise NotImplementedError()",
"def verify_type(self, obj):\n return isinstance(obj, self.type_)",
"def needs_unique_instance(type_):\n return type_ in unique_instance_types",
"def _isinstancetype(an_obj):\n if an_obj is None: return False\n if not PY3K:\n return isinstance(an_obj, types.InstanceType)\n typstr = str(type(an_obj))\n # the following logic works, as PyRAF users expect, in both v2 and v3\n return typstr==\"<type 'instance'>\" or \\\n (typstr.startswith(\"<class '\") and ('.' in typstr))",
"def _isinstance(self, instance, raise_error=True):\n\n if isinstance(instance, self.__model__):\n return True\n elif raise_error:\n raise ValueError('{} is not of type {}.'.format(\n instance, self.__model__,\n ))\n else:\n return False",
"def is_my_case(self, type_):\n return (\n isinstance(self.__apply_sequence(type_), self.declaration_class)\n )",
"def is_valid_instance_type(image, instancetype):\n if not is_valid_image(image):\n return False\n\n if instancetype not in AVAILABLE_IMAGES[image]['supported_instances']:\n return False\n\n return True",
"def is_instance(instance, expected_types):\n for expected_type in expected_types:\n if isinstance(instance, expected_type):\n return True\n\n return False",
"def is_instance_of(self, rule, instantiation_map=None):\n\n if len(rule.assumptions) != len(self.assumptions):\n return False\n\n instantiation_map = {} if instantiation_map is None else instantiation_map\n for i in range(len(rule.assumptions)):\n if not InferenceRule._update_instantiation_map(self.assumptions[i], rule.assumptions[i], instantiation_map):\n return False\n\n if not InferenceRule._update_instantiation_map(self.conclusion, rule.conclusion, instantiation_map):\n instantiation_map.clear()\n return False\n\n return True",
"def is_acceptable(self):\n\n return self.signal_type == self.target_signal_type",
"def _check(self, class_):\r\n\r\n if isinstance(class_, (types.FunctionType, types.LambdaType,\r\n types.ClassType, types.InstanceType)):\r\n return False\r\n if not hasattr(class_, '__dict__'):\r\n if not hasattr(class_, '__slots__'):\r\n return False\r\n return True",
"def is_restraining(self, tabtype):\n if self.limit < 1:\n return False\n if tabtype not in self.restraining:\n return False\n for field, _ in self.exprs:\n if field in tabtype._sql_fields:\n return True\n return False",
"def is_applicable(self, context: Any) -> bool:\n pass",
"def isSlotBasedObject(cls, instcls):\n if instcls in restslotattributedict.keys():\n return True\n return False",
"def can_handle(self, rsm_ctx):\n return not rsm_ctx.instance.type",
"def type_valid(self):\n return contain_in_list_equal(self._type_or_ref, PARAM_RES_TYPES)",
"def is_instance(self,instance):\n\t\tinst_attributes = instance.getAttributes()\n\t\tfor attribute in self.utility.av_counts.keys():\n\t\t\tif attribute not in inst_attributes:\n\t\t\t\treturn False\n\t\t\tif type(inst_attributes[attribute]) == dict:\n\t\t\t\tfor value in self.utility.av_counts[attribute]:\n\t\t\t\t\tif (self.utility.av_counts[attribute][value] / self.utility.count) != 1.0:\n\t\t\t\t\t\treturn False\n\t\t\t\t\tif inst_attributes[attribute] != value:\n\t\t\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\t\tif inst_attributes[attribute] != self.utility.av_counts[attribute]['numerically_valued_attribute'] / self.utility.count:\n\t\t\t\t\t\treturn False\n\t\t\n\t\tfor attribute in instance:\n\t\t\tif attribute not in self.utility.av_counts:\n\t\t\t\treturn False\n\t\t\tif type(inst_attributes[attribute]) == dict:\n\t\t\t\tif inst_attributes[attribute] not in self.utility.av_counts[attribute]:\n\t\t\t\t\treturn False\n\t\t\t\tif ((self.utility.av_counts[attribute][inst_attributes[attribute]] / self.utility.count) != 1.0):\n\t\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\tif len(self.utility.av_counts[attribute].keys()) != 1 or self.utility.av_counts[attribute].get('numerically_valued_attribute', 0) == 0:\n\t\t\t\t\treturn False\n\t\t\n\t\treturn True",
"def is_acceptable(self):\n\n return not self.created and self.signal_type == self.target_signal_type"
]
| [
"0.66462946",
"0.6631519",
"0.6285129",
"0.61954254",
"0.61424285",
"0.61359036",
"0.600172",
"0.5945884",
"0.5918538",
"0.59132266",
"0.5857154",
"0.5843552",
"0.58333933",
"0.56642264",
"0.56641996",
"0.56546956",
"0.56274027",
"0.56115484",
"0.56003535",
"0.55908203",
"0.5572916",
"0.55534357",
"0.554317",
"0.5536402",
"0.55170536",
"0.5514275",
"0.55088276",
"0.55081475",
"0.5488198",
"0.54801357"
]
| 0.7656916 | 0 |
Find and return True together with the instance_type that matches the policies | def __find_adaptive_image(self, policies):
        instances_types = INSTANCE_TYPES
if policies['level'] == 1:
for instance_type in instances_types:
if self.__compare_types_instances( policies, instance_type ):
return True, instance_type
elif policies['level'] == 0:
for instance_type in instances_types:
if self.__is_adaptive_instance( self.__get_metrics_adapted(policies), instance_type ):
return True, instance_type
        return False, None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __compare_types_instances(self, policies, instance_type):\n zones = availabilityZones()\n types_ins = zones.get_typevm_zones()\n\n if ( types_ins[instance_type]['cpu'] == policies['cpu'] and\n types_ins[instance_type]['ram'] == policies['ram'] and\n types_ins[instance_type]['disk']== policies['disk'] ):\n return 1\n return 0",
"def __is_adaptive_instance(self, policies, instance_type):\n zones = availabilityZones()\n typevms = zones.get_typevm_zones()\n if ( typevms[instance_type]['cpu'] >= policies['cpu_min'] and typevms[instance_type]['cpu'] <= policies['cpu_max'] and\n typevms[instance_type]['ram'] >= policies['memory_min'] and typevms[instance_type]['ram'] <= policies['memory_max'] and\n typevms[instance_type]['disk'] >= policies['disk_min'] and typevms[instance_type]['disk'] <= policies['disk_max'] ):\n return True\n return False",
"def __is_type_instance( self, instance_type ):\n for index, instance in enumerate(INSTANCE_TYPES):\n if instance == instance_type:\n return True\n return False",
"def __verify_policies( self, policies ):\n image_id = ''\n instance_type=''\n if type(policies) == type(str()):\n if self.__is_type_instance( policies ):\n instance_type = policies\n state = True\n elif self.__is_image_id( policies ):\n image_id = policies\n state = True\n else:\n state = False\n else:\n verify, ins_type = self.__find_adaptive_image( policies )\n if verify:\n instance_type = ins_type\n state = True\n\n image_id = image_id or self.__get_image_id()\n instance_type = instance_type or 'm1.small'\n state = state or False\n\n return image_id, instance_type, state",
"def is_instance(instance, expected_types):\n for expected_type in expected_types:\n if isinstance(instance, expected_type):\n return True\n\n return False",
"def needs_unique_instance(type_):\n return type_ in unique_instance_types",
"def _isinstancetype(an_obj):\n if an_obj is None: return False\n if not PY3K:\n return isinstance(an_obj, types.InstanceType)\n typstr = str(type(an_obj))\n # the following logic works, as PyRAF users expect, in both v2 and v3\n return typstr==\"<type 'instance'>\" or \\\n (typstr.startswith(\"<class '\") and ('.' in typstr))",
"def is_instance(self,instance):\n\t\tinst_attributes = instance.getAttributes()\n\t\tfor attribute in self.utility.av_counts.keys():\n\t\t\tif attribute not in inst_attributes:\n\t\t\t\treturn False\n\t\t\tif type(inst_attributes[attribute]) == dict:\n\t\t\t\tfor value in self.utility.av_counts[attribute]:\n\t\t\t\t\tif (self.utility.av_counts[attribute][value] / self.utility.count) != 1.0:\n\t\t\t\t\t\treturn False\n\t\t\t\t\tif inst_attributes[attribute] != value:\n\t\t\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\t\tif inst_attributes[attribute] != self.utility.av_counts[attribute]['numerically_valued_attribute'] / self.utility.count:\n\t\t\t\t\t\treturn False\n\t\t\n\t\tfor attribute in instance:\n\t\t\tif attribute not in self.utility.av_counts:\n\t\t\t\treturn False\n\t\t\tif type(inst_attributes[attribute]) == dict:\n\t\t\t\tif inst_attributes[attribute] not in self.utility.av_counts[attribute]:\n\t\t\t\t\treturn False\n\t\t\t\tif ((self.utility.av_counts[attribute][inst_attributes[attribute]] / self.utility.count) != 1.0):\n\t\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\tif len(self.utility.av_counts[attribute].keys()) != 1 or self.utility.av_counts[attribute].get('numerically_valued_attribute', 0) == 0:\n\t\t\t\t\treturn False\n\t\t\n\t\treturn True",
"def match(self, cls):\n return isinstance(self, cls)",
"def __instancecheck__(self, instance):\n\n if isinstance(instance, ObjCInstance):\n return bool(instance.isKindOfClass(self))\n else:\n return False",
"def match(self, data_instance: Dict[str, Any]) -> bool:",
"def is_valid_instance_type(image, instancetype):\n if not is_valid_image(image):\n return False\n\n if instancetype not in AVAILABLE_IMAGES[image]['supported_instances']:\n return False\n\n return True",
"def have_this_instance(self, instance):\n for i in self.all_instances:\n if i == instance:\n print(\"YES ITS ME!\")\n return True\n print(\"NO S.B. ELSE\")\n return False",
"def applies(cls, obj):\n return type(obj) in cls.types",
"def instance_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance_type\")",
"def find_instance_by_type ( ec2_conn, base_name, instance_type ) :\n instance_name = get_instance_name( base_name, instance_type )\n instance_results = ec2_conn.get_only_instances( filters = { \"tag:Name\": [ instance_name ] } )\n if len( instance_results ) > 0 :\n return instance_results[ 0 ]\n\n return None",
"def _isinstance(self, instance, raise_error=True):\n\n if isinstance(instance, self.__model__):\n return True\n elif raise_error:\n raise ValueError('{} is not of type {}.'.format(\n instance, self.__model__,\n ))\n else:\n return False",
"def is_my_case(self, type_):\n return (\n isinstance(self.__apply_sequence(type_), self.declaration_class)\n )",
"def verify(self, arg, choose):\n if not arg:\n print(\"** class name missing **\")\n return 0\n args = arg.split(\" \")\n if args[0] not in self.__classes:\n print(\"** class doesn't exist **\")\n return 0\n if len(args) == 1:\n print(\"** instance id missing **\")\n return 0\n obj = storage.all()\n k = \"{}.{}\".format(args[0], args[1])\n for key, val in obj.items():\n if key == k:\n if choose == 1:\n return val\n if choose == 2:\n return k\n print(\"** no instance found **\")",
"def is_instance_of(self, rule, instantiation_map=None):\n\n if len(rule.assumptions) != len(self.assumptions):\n return False\n\n instantiation_map = {} if instantiation_map is None else instantiation_map\n for i in range(len(rule.assumptions)):\n if not InferenceRule._update_instantiation_map(self.assumptions[i], rule.assumptions[i], instantiation_map):\n return False\n\n if not InferenceRule._update_instantiation_map(self.conclusion, rule.conclusion, instantiation_map):\n instantiation_map.clear()\n return False\n\n return True",
"def check_instance(self):\n self.assertIsInstance(self.amenity_1, amenity)\n self.assertIsInstance(self.amenity_2, amenity)",
"def match(cls, kind: 'dsl.Any') -> bool:\n return isinstance(kind, cls)",
"def test_is_instance(self):\n self.assertTrue(isinstance(self.profile, Profile))",
"def __contains__(self, item):\n from Movie import Movie\n from Person import Person\n if isinstance(item, Person):\n for m in flatten(self.data, yieldDictKeys=1, scalar=Movie):\n if item.isSame(m.currentRole):\n return 1\n elif isinstance(item, Movie):\n for m in flatten(self.data, yieldDictKeys=1, scalar=Movie):\n if item.isSame(m):\n return 1\n return 0",
"def test_instance(self):\n self.assertEqual(True, type(self.Test.defined_associations['thing']) is pyperry.association.HasOne)",
"def _get_type_to_one_of():\n\n return {\n 'primitive': Settings._is_in_prim,\n 'list': Settings._is_sublist_in_one_of_lists,\n 'dict': Settings._is_dict_in_one_of_dicts\n }",
"def identify_result(self, record):\n if record.result_class in self.result_iders:\n return self.result_iders[record.result_class](record)\n return False",
"def meets_condition(db_type: str):\n\t\t...",
"def process_entity(entity: dict, language: str) -> bool:\n # Check if entity is human\n claims = entity[\"claims\"]\n if \"P31\" in claims: # Instance of\n # Iterate over instancesOf\n for instance in claims[\"P31\"]:\n mainsnak = instance[\"mainsnak\"]\n if \"datavalue\" in mainsnak:\n if mainsnak[\"datavalue\"][\"type\"] == \"wikibase-entityid\":\n # Verify if value is Q5\n instanceof_value = mainsnak[\"datavalue\"][\"value\"][\"id\"]\n if instanceof_value == \"Q5\":\n print_info(\"Entity is a subject\")\n return process_human_entity(entity, language)\n elif instanceof_value in TOPIC_TABLE:\n print_info(\"Entity is a topic\")\n return process_topic_entity(entity, language)\n print_info(\"Entity is not a subject nor a topic\")\n return True",
"def target_type(self):"
]
| [
"0.6889342",
"0.6853335",
"0.6505152",
"0.59607524",
"0.59246886",
"0.5916552",
"0.57527685",
"0.57044965",
"0.5672798",
"0.56674916",
"0.5656137",
"0.5563012",
"0.5509639",
"0.5452389",
"0.540743",
"0.5406744",
"0.54066825",
"0.54032224",
"0.5387874",
"0.53858787",
"0.53642744",
"0.5342858",
"0.5334729",
"0.53314275",
"0.53246266",
"0.5322161",
"0.5280372",
"0.52685916",
"0.5266592",
"0.52591014"
]
| 0.71402055 | 0 |
Return the ids of all instances | def get_instances_ids(self):
reservations = self.__get_reservations()
instances_ids = []
        instances, _ = self.__get_multi_instances(reservations)
for instance in instances:
instances_ids.append(instance.id.encode("latin-1"))
return instances_ids | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_instance_ids(instances):\n instance_ids = []\n for instance in instances:\n instance_ids.append(instance.id)\n return instance_ids",
"def get_ids(self, instances):\n instance_ids = []\n for instance in instances:\n instance_ids.append(instance.id)\n return instance_ids",
"def list_instance_uuids(self):\n return self.list_instances()",
"def _get_ids_from_hostname(self, hostname):\r\n results = self.list_instances(hostname=hostname, mask=\"id\")\r\n return [result['id'] for result in results]",
"def list_instances(self):\n LOG.debug(\"list_instances\")\n\n instance_ids = []\n bmms = db.bmm_get_all(None)\n for bmm in bmms:\n if not bmm[\"instance_id\"]:\n continue\n instance_ids.append(self._instance_id_to_name(bmm[\"instance_id\"]))\n\n return instance_ids",
"def ids(self):\n return list(self._id_generator())",
"def ids(self):\n return list(self._id_generator())",
"def getIDs():",
"def _instantiated_ids(self):\n return self._identity_map.keys()",
"def get_ids(self):\n return self._ids",
"def ids(self):\n return self._ids",
"def getIDs(self):\n return self.multiengine.getIDs()",
"def instances(self):\n return self.get('instances')",
"def get_ids(self) -> List[str]:",
"def get_asg_instance_ids(self, asg_name):\n instance_ids = []\n # Grab the first item in the list because we're only asking for 1 ASG\n try:\n asg_data = self.asg.describe_auto_scaling_groups(\n AutoScalingGroupNames=[asg_name])['AutoScalingGroups'][0]\n except Exception as e: \n logger.info(e)\n return []\n\n for instance_data in asg_data['Instances']:\n instance_ids.append(instance_data['InstanceId'])\n\n return instance_ids",
"def source_instance_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"source_instance_ids\")",
"def GetInstances(self, bulk=False, reason=None):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n _AppendReason(query, reason)\n\n instances = self._SendRequest(HTTP_GET,\n \"/%s/instances\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return instances\n else:\n return [i[\"id\"] for i in instances]",
"def ids(self):\n return self.obj_to_id.values()",
"def remote_get_ids(self):\n return self.smultiengine.get_ids()",
"def instances(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"instances\")",
"def getinstancelist():\n dbcursor_dict.execute(dbq.get_all_instance_list, )\n db_instance_list = dbcursor_dict.fetchall()\n return db_instance_list",
"def Get_Running_Instances():\n ec2 = boto3.resource('ec2') \n #call the features resource from the boto3 library\n instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['pending', 'running',]},])\n #filter the instances returned using the state name\n #you can also filter using Tags by adding the filters: \n #[{'Name': 'tag-key', 'Values': ['Role','Name',]}, {'Name': 'tag-value', 'Values': ['*test*', '*TEST*',]},]\n return [instance.id for instance in instances]\n #return a liste with the ids of the instances",
"def get_ids(self):\n return [item.id for item in self.items]",
"def ids(self) -> Sequence[str]:\n return pulumi.get(self, \"ids\")",
"def ids(self) -> Sequence[str]:\n return pulumi.get(self, \"ids\")",
"def object_ids(self):\n return self._extract_set('id')",
"def get_elb_instance_ids(elbclient, elbname):\r\n try:\r\n resp = elbclient.describe_load_balancers(LoadBalancerNames=[elbname])\r\n except:\r\n print(ex.message)\r\n return None\r\n return list(map(\r\n lambda x:x['InstanceId'],\r\n resp['LoadBalancerDescriptions'][0]['Instances']\r\n ))",
"def _instancelist(self):\n\n rv = []\n self.iname = {}\n for resv in self.conn.get_all_reservations():\n for inst in resv.instances:\n if inst.state != 'terminated':\n name = inst.tags.get('Name',None)\n rv.append([inst.id,inst.state])\n if name is not None:\n rv.append([name,inst.state])\n else:\n rv.append([inst.id+'-needsName',inst.state])\n self.iname[name] = inst.id\n self.iname[inst.id] = inst.id\n return rv",
"def _getAllRunningInstances(self):\n return self._ec2.get_only_instances(filters={\n 'tag:leader_instance_id': self._instanceId,\n 'instance-state-name': 'running'})",
"def get_instances(cls):\n raise NotImplementedError"
]
| [
"0.8507518",
"0.83568853",
"0.7767982",
"0.7608295",
"0.74277884",
"0.7245206",
"0.7245206",
"0.71949583",
"0.7122765",
"0.7118952",
"0.707014",
"0.70677406",
"0.7018259",
"0.6999699",
"0.6988456",
"0.6983812",
"0.69503725",
"0.6898317",
"0.68574446",
"0.676398",
"0.67505455",
"0.6725945",
"0.6690565",
"0.6661784",
"0.6661784",
"0.6657443",
"0.66432595",
"0.66408646",
"0.6625029",
"0.66210735"
]
| 0.8676883 | 0 |
Return the total number of instances | def get_num_instances(self):
return len( self.get_instances_ids() ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def instance_count(self) -> int:\n return pulumi.get(self, \"instance_count\")",
"def total_size(instance):\n return sum(i.size for i in instance.iter_instances())",
"def instance_count(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"instance_count\")",
"def retrieve_num_instances(service):\n instance_counts = service[\"instance-counts\"]\n return instance_counts[\"healthy-instances\"] + instance_counts[\"unhealthy-instances\"]",
"def instance_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"instance_count\")",
"def instance_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"instance_count\")",
"def get_data_ninstances(self):\n return self.data_ninstances",
"def GetInstanceCount():\n return _gmat_py.GmatBase_GetInstanceCount()",
"def get_count(cls):\n total = 0\n for counter in SimpleCounterShard.objects.all():\n total += counter.count\n return total",
"def get_running_instance_count(body):\n\tbody = dict(body)\n\tconn = autoscale.connect_to_region(region_name='us-west-2',\n\t\t\t\t\t\t\t\taws_access_key_id=body[\"aws_access_key_id\"],\n\t\t\t\t\t\t\t\taws_secret_access_key=body[\"aws_secret_access_key\"])\n\tinstance_status = conn.get_all_autoscaling_instances()\n\trunning_instances = 0\n\tfor item in instance_status:\n\t\tif (item.health_status == 'HEALTHY'):\n\t\t\trunning_instances += 1\n\treturn {\"data\":running_instances}",
"def get_running_instance_count(body):\n\tbody = dict(body)\n\tconn = rds.connect_to_region(region_name='us-west-2',\n\t\t\t\t\t\t\t\taws_access_key_id=body[\"aws_access_key_id\"],\n\t\t\t\t\t\t\t\taws_secret_access_key=body[\"aws_secret_access_key\"])\n\tinstance_status = conn.get_all_dbinstances()\n\trunning_instances = 0\n\tfor item in instance_status:\n\t\tif (item.state_name == 'available'):\n\t\t\trunning_instances += 1\n\treturn {\"data\":running_instances}",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def getInstCount(self):\n return self.instCount",
"def get_running_instance_count(body):\n\tbody = dict(body)\n\tconn = ec2.connect_to_region(region_name='us-west-2',\n\t\t\t\t\t\t\t\taws_access_key_id=body[\"aws_access_key_id\"],\n\t\t\t\t\t\t\t\taws_secret_access_key=body[\"aws_secret_access_key\"])\n\tinstance_status = conn.get_all_instance_status()\n\trunning_instances = 0\n\tfor item in instance_status:\n\t\tif (item.state_name == 'running'):\n\t\t\trunning_instances += 1\n\treturn {\"data\":running_instances}",
"def count(self):\n return self.get_count()",
"def num_train_instances(self):\n raise NotImplementedError()",
"def count(self):\n # TODO not implemented yet\n return 0",
"def count(self):\n return self.size()",
"def count(self) -> int:\n return self.__count",
"def count(self) -> int:\n return pulumi.get(self, \"count\")",
"def count(self):\n\n raise NotImplementedError",
"def _next_n_instances(self): # Could be merged with _stop_iter someday.\n n_instances = self.n_instances\n n_pool = self.n_pool()\n\n n_instances = min(n_instances, n_pool)\n if self.n_papers is not None:\n papers_left = self.n_papers - len(self.train_idx)\n n_instances = min(n_instances, papers_left)\n return n_instances",
"def count(self):\n\n if self.cluster:\n return self.cluster.count()\n\n return super().count()",
"def count(self, cls=None):\n return len(self.all(cls))",
"def do_count(self, *args):\n count = 0\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n else:\n ''' Get a list of specified instances '''\n for key, obj in storage.all().items():\n key = key.split('.')\n if key[0] == args[0]:\n count += 1\n print(count)",
"def get_count(self):\n\n\t\treturn self.__count",
"def count(self):\n return len(self)"
]
| [
"0.85416496",
"0.8332706",
"0.80736727",
"0.7749521",
"0.76511467",
"0.76511467",
"0.75533694",
"0.75408304",
"0.7379059",
"0.7302489",
"0.72858953",
"0.7279924",
"0.7279924",
"0.7279924",
"0.7279924",
"0.7277189",
"0.7227607",
"0.7180085",
"0.7153769",
"0.7115572",
"0.71144503",
"0.70919454",
"0.7089913",
"0.7086975",
"0.70164055",
"0.7016328",
"0.6989249",
"0.6982265",
"0.69759333",
"0.69571364"
]
| 0.84645474 | 1 |
Get one image id | def __get_image_id(self):
return self.__get_multi_images_ids(1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def image_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"image_id\")",
"def image_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"image_id\")",
"def get_image_id(filename):\n del filename\n global GLOBAL_IMG_ID\n GLOBAL_IMG_ID += 1\n return GLOBAL_IMG_ID",
"def image_id(self):\n return self._image_id",
"def image_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"image_id\")",
"def image_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"image_id\")",
"def image_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"image_id\")",
"def image_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"image_id\")",
"def get_image_id(image):\n if not is_valid_image(image):\n return False\n\n return AVAILABLE_IMAGES[image]['imageid']",
"def get_image_id(self, image_name):\n _url = \"http://\" + self.host_ip + \":8774/v2/\" +\\\n self.cloud_admin_info[\"project_id\"] + \"/images/detail\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.cloud_admin_info[\"token_project\"]}\n _body = None\n\n _result = self.request(\"GET\", _url, _headers, _body)\n if _result is None:\n LOG_OBJ.error(\"No response from server while getting images.\")\n return\n if _result.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get image ID Failed with status %s \" %\n _result.status)\n return _result.status\n\n _output = json.loads(_result.data)\n for _images in _output['images']:\n if _images['name'].lower() == image_name.lower():\n LOG_OBJ.info(\"Image Name: %s, Image ID : %s \" %\n (image_name, _images['id']))\n return _images['id']\n LOG_OBJ.error(\"The image: %s is NOT found\" % image_name)",
"def image_id(cls):\n return str(uuid.uuid4())",
"def image_id_at(self, i):\n return i",
"def test_image_id(self):\n result = self.test_client.image_id\n\n assert result == \"1238012\"",
"def find_image(image_name):\n imgs = pyrax.images\n image = imgs.list(name=image_name)[0]\n\n # print image.id\n return image.id",
"def get_picture_id(path):\n\t\tif path is None:\n\t\t\treturn\n\t\tcon = mdb.connect('localhost', 'root', 'sensepass', 'sensecambrowser')\n\t\twith con:\n\t\t\tquery = \"SELECT id from fileuploader_picture WHERE file=%s\" % (path)\n\t\t\tcur = con.cursor()\n\t\t\tcur.execute(query)\n\t\t\tdata = cur.fetchall()\n\t\t\tprint \"len(data)\"\n\t\t\tprint data\n\t\t\tif len(data) > 0:\n\t\t\t\treturn data[0]\n\t\t\treturn None",
"def get_image(id_num):\n return sqldb.get_image(id_num)",
"def image_reference(self, image_id):\n\n info = self.image_info[image_id]\n if info[\"source\"] == \"openimage\":\n return info[\"id\"]\n else:\n super(self.__class__, self).image_reference(image_id)",
"def get_imageId_from_fileName(filename):\n filename = os.path.splitext(filename)[0]\n if filename.isdigit():\n return int(filename)\n return id_iter",
"def base_image_id(self):\n return self._base_image_id",
"def cmd_image_id(client, args):\n image = client.get_image(args.image_id)\n data = image.__dict__\n generate_output({'image': data})",
"def get_image_ref() -> str:\n images_rq = request(\n method=\"GET\", url=app.config[\"IMAGE_REF\"], headers=build_header(),\n )\n if not images_rq.ok:\n HTTPError(f\"Can not get image id for virtual machine: {images_rq.status_code}\")\n\n [image] = images_rq.json()[\"images\"]\n return image[\"id\"]",
"def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info['source'] == self.dataset_name:\n return info['id']\n else:\n super.image_reference(image_id)",
"def _getNewImgId(self):\n\n newImgId = COCO_PLUS.IMG_ID\n COCO_PLUS.IMG_ID += 1\n\n return newImgId",
"def get_imageId_from_fackmask(filename):\n filename = os.path.splitext(filename)[0]\n regex = re.compile(r'\\d+')\n iid = regex.search(filename).group(0)\n image_id = int(iid)\n if filename.isdigit():\n return int(filename)\n return image_id",
"def parse_image_id(image_ref):\n temp = image_ref.rsplit('/')\n #Return the last item, which is the image id\n return temp[len(temp) - 1]",
"def get_imageId_from_fileName(filename, id_iter):\n filename = os.path.splitext(filename)[0]\n if filename.isdigit():\n return int(filename)\n return id_iter",
"def get_image_by_id(id):\n return ImageModel.query.filter(ImageModel.id == id) \\\n .first()",
"def get_image_by_id(id):\n return Image.objects.get(id=id)",
"def getID():",
"def image_by_id(self, id):\n if not id:\n return None\n return next((image for image in self.images() if image['Id'] == id),\n None)"
]
| [
"0.78510964",
"0.78510964",
"0.78145814",
"0.77161556",
"0.7650705",
"0.7650705",
"0.7648366",
"0.7648366",
"0.7603875",
"0.7390626",
"0.73811406",
"0.73773754",
"0.7275588",
"0.72276074",
"0.71651524",
"0.7130926",
"0.7123816",
"0.7108411",
"0.7017009",
"0.7016007",
"0.70152235",
"0.698664",
"0.6963928",
"0.69402105",
"0.69392115",
"0.6863248",
"0.68611664",
"0.67987067",
"0.6790185",
"0.67579186"
]
| 0.814898 | 0 |
Return true if the image_id is registered | def __is_image_id( self, image_id ):
images_ids = self.__get_multi_images_ids()
for id in images_ids:
if image_id == id:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_image(self, tag):\n image_name = self.build_image_name(tag)\n try:\n self.client.images.get_registry_data(image_name)\n return True\n except Exception as ex:\n print('Image {} does not exist: '.format(image_name), str(ex))\n return False",
"def hasImg(img_name):\n try:\n Image.objects.raw({\"_id\": img_name}).first()\n return True\n except pymodm_errors.DoesNotExist:\n return False",
"def is_image_id(id: int, fields: list = []) -> bool:\n try:\n res = requests.get(\n Entities.get_image(\n image_id=id,\n fields=fields if fields != [] else Entities.get_image_fields(),\n ),\n headers={\"Authorization\": f\"OAuth {Client.get_token()}\"},\n )\n return res.status_code == 200\n\n except requests.HTTPError:\n return False",
"def registered(id):\n return True",
"def hasImage(self):\n if self.getImage():\n return True\n return False",
"def hasImage(self):\n return self._image is not None",
"def has_image(self):\n return hasattr(self, \"_image\") and self._image is not None",
"def images_exist(self):\n pass",
"def image_exists(self, id=None, tag=None):\n exists = False\n if id and self.image_by_id(id):\n exists = True\n elif tag and self.image_by_tag(tag):\n exists = True\n\n return exists",
"def test_exists(self):\n self.assertTrue(bool(self.photo))",
"def get_image_id(image):\n if not is_valid_image(image):\n return False\n\n return AVAILABLE_IMAGES[image]['imageid']",
"def test_get_image_id(self):\n self.roses.save_image()\n image_id=Images.get_image_id(self.roses.id)\n self.assertTrue(image_id.id==self.roses.id)",
"def existAndOwner(self, imgId, ownerId):\n\n exists = False\n isOwner = False\n\n try:\n dbLink = self._dbConnection[self._dbName]\n collection = dbLink[self._metacollection]\n\n contain = self._cumulusConnection.get_bucket(self._containerName)\n\n if contain.get_key(imgId) != None:\n exists = True\n #print imgId \n #print ownerId\n aux = collection.find_one({\"_id\": imgId, \"owner\": ownerId})\n if (aux == None):\n isOwner = False\n else:\n isOwner = True\n #print isOwner \n except pymongo.errors.AutoReconnect: #TODO: Study what happens with that. store or not store the file\n self._log.warning(\"Autoreconnected.\")\n except pymongo.errors.ConnectionFailure:\n self._log.error(\"Connection failure\")\n except TypeError as detail:\n self._log.error(\"TypeError in ImgStoreMongo - existAndOwner\")\n except bson.errors.InvalidId:\n self._log.error(\"Error, not a valid ObjectId in ImgStoreMongo - existAndOwner\")\n except:\n self._log.error(\"Error in ImgStorecumulusMongo - existAndOwner. \" + str(sys.exc_info()))\n\n if (exists and isOwner):\n return True\n else:\n return False",
"def is_valid_image(image):\n if image not in AVAILABLE_IMAGES.keys():\n return False\n\n return True",
"def __contains__(self, image: Any) -> bool:\n return isinstance(image, self.native_image_type)",
"def image_is_available(filename):\n # FIXME - Implement!\n file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n return os.path.isfile(file_path)",
"def is_registered(user_id: str) -> bool:\n inventories = get_file(\"inventories\")\n return str(user_id) in inventories",
"def exists_image_in_database(full_image_url):\r\n\r\n logging.debug('exists_image_in_database({})'.format(full_image_url))\r\n\r\n dir_path = os.path.join(os.environ['LOCALAPPDATA'],'WarietyWallpaperImages')\r\n db_file = os.path.join(dir_path,'wariety.db')\r\n conn = sqlite3.connect(db_file)\r\n c = conn.cursor()\r\n\r\n # Select a row\r\n c.execute(\"SELECT id FROM wallpapers WHERE iurl = ?\", (full_image_url,))\r\n\r\n if c.fetchone() is not None:\r\n conn.close()\r\n logging.debug('exists_image_in_database - True')\r\n return True\r\n else:\r\n conn.close()\r\n logging.debug('exists_image_in_database - False')\r\n return False",
"def image_present_check(self):\r\n if not self.master.images: # If no images present in the list\r\n messagebox.showerror(\"Error\", 'No image selected') # Throw up the error messagebox\r\n\r\n else:\r\n return True # If there are images present in the list, then return True value\r",
"def verify_image_id(name: str, img_text: str) -> bool:\r\n fst, snd = name.lower().split()\r\n text = img_text.lower()\r\n return fst in text and snd in text",
"def has_images(self):\n return len(self.images) > 0",
"def test_upload_image(self):\n image = self.mock_master.get_imagelist(self.region1)[0]\n id = self.mock_master.upload_image(self.region2, image)\n found = False\n for i in self.mock_master.get_imagelist(self.region2):\n if i.id == id:\n self.assertEquals(i.region, self.region2.region)\n self.assertEquals(i.name, image.name)\n self.assertEquals(i.checksum, image.checksum)\n found = True\n break\n self.assertTrue(found)",
"def isRegistered(self, cid):\n return (self.__getIDFromCID(cid) is not None)",
"def image_needs_pushing(image):\n d = docker_client()\n try:\n d.images.get_registry_data(image)\n except docker.errors.APIError:\n # image not found on registry, needs pushing\n return True\n else:\n return False",
"def has_avatar(self):\n # Implemented from template for osid.resource.Resource.has_avatar_template\n return bool(self._my_map['avatarId'])",
"def is_image(self, service_name: str) -> bool:\n return False if self.get_from_service(service_name, \"build\") else True",
"def register_image(self, image_id: str, image_path: str) -> RegisterResult:\n # check if image content/id existed, check hash value first,\n # compare image bytes if collision\n stored_hash = self.search_image_hash(image_id)\n try:\n stored_id, stored_path, v_hash = self.search_image_id(image_path)\n except ValueError as err:\n return RegisterResult(code=RegisterCode.DECODE_FAIL, info=str(err))\n except EnvironmentError as err:\n return RegisterResult(code=RegisterCode.READ_FAIL, info=str(err))\n\n # handle conflict\n if stored_id and stored_hash:\n msg = \"Duplicate image: id {} and hash: {}\"\n return RegisterResult(\n code=RegisterCode.DUPLICATE,\n info=msg.format(stored_id, stored_hash)\n )\n\n if stored_id:\n msg = \"Content of ID {} conflict with registered id {}, path: {}\"\n return RegisterResult(\n code=RegisterCode.CONTENT_CONFLICT,\n info=msg.format(image_id, stored_id, stored_path)\n )\n\n if stored_hash:\n msg = \"ID conflict: {}, with content hash: {}\"\n return RegisterResult(\n code=RegisterCode.ID_CONFLICT,\n info=msg.format(image_id, stored_hash)\n )\n\n # Register image into dicationary\n self._id_to_hash[image_id] = v_hash\n if stored_id:\n stored_id.append((image_id, image_path))\n else:\n self._hash_to_id[v_hash] = [(image_id, image_path)]\n\n return RegisterResult(\n code=RegisterCode.SUCCESS,\n info=\"Successfully register id \" + image_id\n )",
"def test_image_id(self):\n result = self.test_client.image_id\n\n assert result == \"1238012\"",
"def is_registered(self) -> bool:\n from arkouda.util import is_registered\n\n if self.registered_name is None:\n return False\n return is_registered(self.registered_name)",
"def has_media(self):\r\n if self.image:\r\n return True\r\n return False"
]
| [
"0.72950876",
"0.71553874",
"0.69728947",
"0.6842381",
"0.67704",
"0.6737884",
"0.67175126",
"0.67107576",
"0.6646041",
"0.64513296",
"0.6427297",
"0.64015955",
"0.6368361",
"0.63507783",
"0.63206196",
"0.6298749",
"0.628413",
"0.62777007",
"0.6260048",
"0.6255963",
"0.6178974",
"0.6163656",
"0.6160452",
"0.6144079",
"0.614003",
"0.613713",
"0.61283636",
"0.6117377",
"0.61097324",
"0.6052452"
]
| 0.7375224 | 0 |
Return true if the instance type exists | def __is_type_instance( self, instance_type ):
for index, instance in enumerate(INSTANCE_TYPES):
if instance == instance_type:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def needs_unique_instance(type_):\n return type_ in unique_instance_types",
"def is_registered(self, type):\n attr = self._type_to_attr(type)\n return getattr(self, attr, None) is not None",
"def _isinstancetype(an_obj):\n if an_obj is None: return False\n if not PY3K:\n return isinstance(an_obj, types.InstanceType)\n typstr = str(type(an_obj))\n # the following logic works, as PyRAF users expect, in both v2 and v3\n return typstr==\"<type 'instance'>\" or \\\n (typstr.startswith(\"<class '\") and ('.' in typstr))",
"def is_type(self, type_name):\n\n return type_name in self._symtab",
"def exists_type(self, type):\n for i in range(1, self.grid_size - 1):\n for j in range(1, self.grid_size - 1):\n obj = self.grid.get(i, j)\n if obj and obj.type == type:\n return True\n return False",
"def exist(self):",
"def check_type(self):\n return True",
"def exists(self):\n return True",
"def exists(self):\n return True",
"def exists(self) -> bool:\n self.connection.describe_activity_type(self.domain.name, self.name, self.version)\n return True",
"def _is_run_type(cls, object_):\n # Do a string comparison instead of using isinstance() to avoid needing\n # to import lyse or other modules with these classes.\n return (type(object_).__name__ in cls._RUN_TYPES)",
"def _is_initvar_instance(typeval: Type) -> bool:\n return isinstance(typeval, InitVar)",
"def is_type(self, typ):\n return typ == self.__class__.__name__",
"def has_name(self, name):\n\t\treturn name in self.classes",
"def has_classname(self):\n return self.unpack_word(0x4A) > 0",
"def has_name(self, name):\n\t\t\treturn name in self.classes",
"def has_type(self, item_type):\n raise NotImplementedError()",
"def has_name(self, name):\n return name in self.classes",
"def __instancecheck__(self, instance):\n\n if isinstance(instance, ObjCInstance):\n return bool(instance.isKindOfClass(self))\n else:\n return False",
"def supports_type_lookup(self):\n return 'supports_type_lookup' in profile.SUPPORTS",
"def is_valid_instance_type(image, instancetype):\n if not is_valid_image(image):\n return False\n\n if instancetype not in AVAILABLE_IMAGES[image]['supported_instances']:\n return False\n\n return True",
"def is_my_case(self, type_):\n return (\n isinstance(self.__apply_sequence(type_), self.declaration_class)\n )",
"def class_is(cls: Class) -> bool:\n pass",
"def use_instance_table(self, name, typename):\n if typename in ['VkInstance', 'VkPhysicalDevice']:\n return True\n # vkSetDebugUtilsObjectNameEXT and vkSetDebugUtilsObjectTagEXT\n # need to be probed from GetInstanceProcAddress due to a loader issue.\n # https://github.com/KhronosGroup/Vulkan-Loader/issues/1109\n # TODO : When loader with fix for issue is widely available, remove this\n # special case.\n if name in ['vkSetDebugUtilsObjectNameEXT', 'vkSetDebugUtilsObjectTagEXT']:\n return True\n return False",
"def is_valid_type(type):\n return type in type_to_adapter",
"def check_model_exists(class_name):\n if path.exists(settings.get('FALAFEL_DIR') + settings.get('MODELS_DIR') + '/' + class_name + '.py'):\n return True\n else:\n return False",
"def is_object(self, name: str) -> bool:\r\n return os.path.exists(self._path_for_pickle(name))",
"def objExists(*args, **kwargs)->bool:\n pass",
"def is_present(cls):\n raise NotImplementedError()",
"def singularity_exists(self):\n instances = Client.instances(quiet=self.quiet)\n for instance in instances:\n if self.pid in instance.name:\n return True\n return False"
]
| [
"0.7270652",
"0.71896994",
"0.702901",
"0.68987685",
"0.6777073",
"0.67513824",
"0.6712662",
"0.66715205",
"0.66715205",
"0.6631957",
"0.66299903",
"0.6596198",
"0.6541169",
"0.6444942",
"0.64414465",
"0.6440417",
"0.64149344",
"0.63910496",
"0.6367319",
"0.6320373",
"0.6311478",
"0.62883586",
"0.62801385",
"0.6273101",
"0.62687755",
"0.62393486",
"0.6238962",
"0.62370074",
"0.6236213",
"0.62260807"
]
| 0.7339385 | 0 |
Get one Image Id, Type of Instance and State according to some policies (requirements) | def __verify_policies( self, policies ):
image_id = ''
        instance_type = ''
        state = False
if type(policies) == type(str()):
if self.__is_type_instance( policies ):
instance_type = policies
state = True
elif self.__is_image_id( policies ):
image_id = policies
state = True
else:
state = False
else:
verify, ins_type = self.__find_adaptive_image( policies )
if verify:
instance_type = ins_type
state = True
image_id = image_id or self.__get_image_id()
instance_type = instance_type or 'm1.small'
state = state or False
return image_id, instance_type, state | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __find_adaptive_image(self, policies):\n instances_types = INSTANCE_TYPES;\n if policies['level'] == 1:\n for instance_type in instances_types:\n if self.__compare_types_instances( policies, instance_type ):\n return True, instance_type\n elif policies['level'] == 0:\n for instance_type in instances_types:\n if self.__is_adaptive_instance( self.__get_metrics_adapted(policies), instance_type ):\n return True, instance_type\n else:\n return False, None",
"def _get_image_id(image_name, instance_profile_arn=None,\n ec2_client=None, region_name=None):\n owners = []\n filters = []\n image_id = image_name\n if not image_name:\n # Amazon has its own Linux distribution that is largely binary\n # compatible with Red Hat Enterprise Linux.\n image_name = 'amzn2-ami-hvm-2.0.????????.?-x86_64-gp2'\n owners = ['amazon']\n filters = [\n {'Name': 'name', 'Values': [image_name]},\n ]\n elif not image_name.startswith('ami-'):\n if not instance_profile_arn:\n raise RuntimeError(\"instance_profile_arn must be defined when\"\\\n \" image_name is not already an id.\")\n look = re.match(r'arn:aws:iam::(\\d+):', instance_profile_arn)\n owners = [look.group(1)]\n filters = [\n {'Name': 'name', 'Values': [image_name]},\n ]\n\n if filters:\n if not ec2_client:\n ec2_client = boto3.client('ec2', region_name=region_name)\n resp = ec2_client.describe_images(Owners=owners, Filters=filters)\n images = sorted(resp['Images'], key=lambda item: item['CreationDate'],\n reverse=True)\n if len(images) > 1:\n LOGGER.warning(\n \"Found more than one image named '%s' in account '%s',\"\\\n \" picking the first one out of %s\",\n image_name, owners,\n [(image['CreationDate'], image['ImageId'])\n for image in images])\n image_id = images[0]['ImageId']\n return image_id",
"def get_instance_image(img, bbox, size_z, size_x, context_amount, img_mean=None):\n cx, cy, w, h = xyxy2cxcywh(bbox)\n wc_z = w + context_amount * (w+h)\n hc_z = h + context_amount * (w+h)\n s_z = np.sqrt(wc_z * hc_z)\n scale_z = size_z / s_z\n d_search = (size_x - size_z) / 2\n pad = d_search / scale_z\n s_x = s_z + 2 * pad\n scale_x = size_x / s_x\n instance_img = crop_and_pad(img, cx, cy, size_x, s_x, img_mean)\n return instance_img, scale_x, s_x",
"def fetch(self,image_type):\n if image_type == self.IMAGE:\n return self.image\n elif image_type == self.FRAME:\n return self.frame\n elif image_type ==self.DIFFERENCE:\n return self.diff_frame\n elif image_type == self.ABS_DIFFERENCE:\n return self.abs_diff_frame\n else:\n print('Error defining frame to be fetched!!!')",
"def state_img(self):\n if self.master.v_state == 'victory':\n return self.vict_img\n elif self.master.v_state == 'draw':\n return self.draw_img\n if self.master.v_state == 'defeat':\n return self.def_img",
"def get_image_output(id: Optional[pulumi.Input[Optional[int]]] = None,\n name: Optional[pulumi.Input[Optional[str]]] = None,\n slug: Optional[pulumi.Input[Optional[str]]] = None,\n source: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetImageResult]:\n ...",
"def _process_instance(self, instance):\n instance_dict = {}\n ins_zone = instance[\"zone\"]\n instance_dict[\"zone\"] = ins_zone[\n ins_zone.index(\"zones/\") + 6:len(ins_zone)]\n instance_dict[\"name\"] = instance[\"name\"]\n instance_dict[\"cloud\"] = self.kind\n instance_dict[\"status\"] = instance[\"status\"]\n instance_dict[\"type\"] = instance[\"cpuPlatform\"]\n instance_dict[\"created\"] = instance[\"creationTimestamp\"]\n instance_dict[\"id\"] = instance[\"id\"]\n instance_dict[\"kind\"] = instance[\"kind\"]\n machineTypeUrl = instance[\"machineType\"]\n instance_dict[\"machineType\"] = machineTypeUrl[machineTypeUrl.index(\n \"machineTypes/\") + 13:len(machineTypeUrl)]\n disks = instance[\"disks\"]\n disk = disks[0]\n instance_dict[\"deviceName\"] = disk[\"deviceName\"]\n instance_dict[\"diskSizeGb\"] = disk[\"diskSizeGb\"]\n licenses = disk[\"licenses\"][0]\n instance_dict[\"sourceImage\"] = licenses[\n licenses.index(\"licenses/\") + 9:len(\n licenses)]\n instance_dict[\"diskType\"] = disk[\"type\"]\n instance_dict[\"mode\"] = disk[\"mode\"]\n instance_dict[\"modified\"] = str(DateTime.now())\n\n # Network access.\n network_config = instance[\"networkInterfaces\"]\n\n if (network_config):\n network_config = network_config[0]\n access_config = network_config[\"accessConfigs\"]\n access_config = access_config[0]\n external_ip = access_config[\"natIP\"]\n instance_dict[\"public_ip\"] = external_ip\n\n return instance_dict",
"def test_aws_service_api_image_get(self):\n pass",
"def getImg(img_name):\n img = Image.objects.raw({\"_id\": img_name}).first()\n in_dict = {}\n in_dict[\"name\"] = img.name\n in_dict[\"b64str\"] = img.b64str\n in_dict[\"imgsize\"] = img.imgsize\n in_dict[\"processed\"] = img.processed\n in_dict[\"timestamp\"] = img.timestamp\n return in_dict",
"def _get_returned_state(self):\n if self.img_type == 'RGB':\n return np.rollaxis(np.array(self.rgb), -1, 0)\n elif self.img_type == 'SEMANTIC':\n return np.rollaxis(np.array(self.semantic), -1, 0)\n else:\n raise Exception(f'There is no {self.img_type} observation type')",
"def get_instance_image_info(task):\n ctx = task.context\n node = task.node\n image_info = {}\n # NOTE(pas-ha) do not report image kernel and ramdisk for\n # local boot or whole disk images so that they are not cached\n if (node.driver_internal_info.get('is_whole_disk_image')\n or deploy_utils.get_boot_option(node) == 'local'):\n return image_info\n root_dir = get_http_boot_dir()\n i_info = node.instance_info\n labels = ('kernel', 'ramdisk')\n d_info = deploy_utils.get_image_instance_info(node)\n if not (i_info.get('kernel') and i_info.get('ramdisk')):\n glance_service = service.GlanceImageService(context=ctx)\n iproperties = glance_service.show(d_info['image_source'])['properties']\n for label in labels:\n i_info[label] = str(iproperties[label + '_id'])\n node.instance_info = i_info\n node.save()\n\n for label in labels:\n image_info[label] = (\n i_info[label],\n os.path.join(root_dir, node.uuid, label)\n )\n\n return image_info",
"def image(self):\n image_id = self.attrs.get('ImageID', self.attrs['Image'])\n if image_id is None:\n return None\n return self.client.images.get(image_id.split(':')[1])",
"def get_instance_ids(temporary_user, config, state, now, tz):\n try:\n data = temporary_user.describe_instances(Filters=[{'Name':'instance-state-name', 'Values': [state]}])\n logger.info(\"The date is : {} , {}\".format(now.strftime(\"%A, %d %B %Y %H:%M:%S\"), tz))\n\n action_required, no_action_required = categorise_instances(data, config, temporary_user)\n return action_required, no_action_required\n except Exception as error:\n logger.info(\"Describing the instances failed with the following error : {}\".format(error))",
"def get_instance_info(inst):\n instance_info = {'id': inst.id,\n 'private_ip': inst.inner_ip_address,\n 'public_ip': inst.public_ip_address,\n 'image_id': inst.image_id,\n 'zone_id': inst.zone_id,\n 'region_id': inst.region_id,\n 'launch_time': inst.creation_time,\n 'instance_type': inst.instance_type,\n 'state': inst.state,\n 'tags': inst.tags,\n # 'groups': dict((group.id, group.name) for group in inst.groups),\n # 'groups': dict((group, group) for group in inst.groups),\n 'vpc_id': inst.vpc_id,\n 'subnet_id': inst.subnet_id,\n 'vpc_private_ip': inst.vpc_private_ip,\n 'eip': inst.eip,\n 'io_optimized': inst.io_optimized\n }\n try:\n bdm_dict = {}\n bdm = getattr(inst, 'block_device_mapping')\n for device_name in bdm.keys():\n bdm_dict[device_name] = {\n 'status': bdm[device_name].status,\n 'volume_id': bdm[device_name].volume_id,\n 'delete_on_termination': bdm[device_name].delete_on_termination\n }\n instance_info['block_device_mapping'] = bdm_dict\n except AttributeError:\n instance_info['block_device_mapping'] = False\n\n return instance_info",
"def image(image_id):\n\n found = False\n img = None\n \n try:\n for img in api.get_all_images():\n if img.id == image_id:\n found = True\n break\n except Exception:\n logging.error(\"Cannot make API connection to retrieve image info!\")\n\n if not found:\n return None\n\n return img",
"def _get_image_metadata(self,\n context: context.RequestContext,\n image_id: Optional[str],\n size: int) -> Optional[dict[str, Any]]:\n\n # Check image existence\n if image_id is None:\n return None\n\n # NOTE(harlowja): this should raise an error if the image does not\n # exist, this is expected as it signals that the image_id is missing.\n image_meta = self.image_service.show(context, image_id)\n\n volume_utils.check_image_metadata(image_meta, size)\n\n return image_meta",
"def get_image_id(image):\n if not is_valid_image(image):\n return False\n\n return AVAILABLE_IMAGES[image]['imageid']",
"def _get_image_status(self, image_id):\n image_status = None\n image = self._get_nova_client().images.get(image_id)\n\n if image is not None:\n image_status = image.status\n\n return image_status",
"def get_gt_img_instances(input_dict):\n instance = input_dict['instances']\n target_h, target_w = input_dict['height'], input_dict['width']\n h, w = instance.image_size\n\n img = input_dict['image'].permute(1, 2, 0).byte().numpy()[:, :, ::-1] # h, w, c, has been resized by mapper\n target_img = cv2.resize(img, dsize=(target_w, target_h)) # resize to ori size\n\n scale_x, scale_y = (target_w / w, target_h / h)\n\n target_instances = Instances((target_h, target_w), **instance.get_fields())\n if target_instances.has('gt_boxes'):\n output_boxes = target_instances.gt_boxes\n output_boxes.scale(scale_x, scale_y)\n output_boxes.clip(target_instances.image_size)\n target_instances = target_instances[output_boxes.nonempty()]\n\n return target_img, target_instances",
"def image_info(img):\n\tprint(img.format)\n\tprint(img.size)\n\tprint(img.mode)",
"def __get_image_id(self):\n return self.__get_multi_images_ids(1)",
"def get_params(image: tensor, scale: List[float], ratio: List[float]) -> Tuple[int, int, int, int]:\n width, height = TF._get_image_size(image)\n area = height * width\n\n log_ratio = torch.log(torch.tensor(ratio))\n for _ in range(10):\n target_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()\n aspect_ratio = torch.exp(\n torch.empty(1).uniform_(log_ratio[0], log_ratio[1])\n ).item()\n\n w = int(round(math.sqrt(target_area * aspect_ratio)))\n h = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if 0 < w <= width and 0 < h <= height:\n i = torch.randint(0, height - h + 1, size=(1,)).item()\n j = torch.randint(0, width - w + 1, size=(1,)).item()\n return i, j, h, w\n\n # Fallback to central crop\n in_ratio = float(width) / float(height)\n if in_ratio < min(ratio):\n w = width\n h = int(round(w / min(ratio)))\n elif in_ratio > max(ratio):\n h = height\n w = int(round(h * max(ratio)))\n else: # whole image\n w = width\n h = height\n i = (height - h) // 2\n j = (width - w) // 2\n return i, j, h, w",
"def get_mapping_actions(image=None, imageId=None, in_digests=[], bundle={}):\n\n if not image or not bundle:\n raise Exception(\"input error\")\n\n if not verify_policy_bundle(bundle=bundle):\n raise Exception(\"input bundle does not conform to bundle schema\")\n\n ret = []\n \n image_infos = []\n\n image_info = anchore_utils.get_all_image_info(image)\n if image_info and image_info not in image_infos:\n image_infos.append(image_info)\n\n for m in bundle['mappings']:\n polname = m['policy_id']\n wlnames = m['whitelist_ids']\n\n for image_info in image_infos:\n #_logger.info(\"IMAGE INFO: \" + str(image_info))\n ii = {}\n ii.update(image_info)\n registry = ii.pop('registry', \"N/A\")\n repo = ii.pop('repo', \"N/A\")\n\n tags = []\n fulltag = ii.pop('fulltag', \"N/A\")\n if fulltag != 'N/A':\n tinfo = anchore_utils.parse_dockerimage_string(fulltag)\n if 'tag' in tinfo and tinfo['tag']:\n tag = tinfo['tag']\n\n for t in [image, fulltag]:\n tinfo = anchore_utils.parse_dockerimage_string(t)\n if 'tag' in tinfo and tinfo['tag'] and tinfo['tag'] not in tags:\n tags.append(tinfo['tag'])\n\n digest = ii.pop('digest', \"N/A\")\n digests = [digest]\n for d in image_info['digests']:\n dinfo = anchore_utils.parse_dockerimage_string(d)\n if 'digest' in dinfo and dinfo['digest']:\n digests.append(dinfo['digest'])\n \n p_ids = []\n p_names = []\n for p in bundle['policies']:\n p_ids.append(p['id'])\n p_names.append(p['name'])\n\n wl_ids = []\n wl_names = []\n for wl in bundle['whitelists']:\n wl_ids.append(wl['id'])\n wl_names.append(wl['name'])\n \n if polname not in p_ids:\n _logger.info(\"policy not in bundle: \" + str(polname))\n continue\n\n skip=False\n for wlname in wlnames:\n if wlname not in wl_ids:\n _logger.info(\"whitelist not in bundle\" + str(wlname))\n skip=True\n if skip:\n continue\n\n mname = m['name']\n mregistry = m['registry']\n mrepo = m['repository']\n if m['image']['type'] == 'tag':\n mtag = m['image']['value']\n mdigest = None\n mimageId = None\n elif m['image']['type'] == 'digest':\n mdigest = m['image']['value']\n mtag = None\n mimageId = None\n elif m['image']['type'] == 'id':\n mimageId = m['image']['value']\n mtag = None\n mdigest = None\n else:\n mtag = mdigest = mimageId = None\n\n if registry == mregistry or mregistry == '*':\n _logger.debug(\"checking mapping for image (\"+str(image_info)+\") match.\")\n\n if repo == mrepo or mrepo == '*':\n doit = False\n matchstring = mname + \": N/A\"\n if tag and (mtag == '*' or mtag == tag or mtag in tags):\n matchstring = mname + \":\" + ','.join([mregistry, mrepo, mtag])\n doit = True\n elif digest and (mdigest == digest or mdigest in in_digests or mdigest in digests):\n matchstring = mname + \":\" + ','.join([mregistry, mrepo, mdigest])\n doit = True\n elif imageId and (mimageId == imageId):\n matchstring = mname + \":\" + ','.join([mregistry, mrepo, mimageId])\n doit = True\n\n matchstring = matchstring.encode('utf8')\n if doit:\n _logger.debug(\"match found for image (\"+str(matchstring)+\")\")\n\n wldata = []\n wldataset = set()\n for wlname in wlnames:\n wldataset = set(list(wldataset) + extract_whitelist_data(bundle, wlname))\n wldata = list(wldataset)\n\n poldata = extract_policy_data(bundle, polname)\n \n wlnames.sort()\n evalstr = ','.join([polname] + wlnames)\n evalhash = hashlib.md5(evalstr).hexdigest()\n ret.append( ( poldata, wldata, polname,wlnames, matchstring, m, evalhash) )\n return(ret)\n else:\n _logger.debug(\"no match found for image (\"+str(image_info)+\") match.\")\n else:\n _logger.debug(\"no match found for 
image (\"+str(image_info)+\") match.\")\n\n return(ret)",
"def get_image(self, size, mode='normal', state='on'):\n raise NotImplementedError",
"def quickie():\n #info = { \"instance_type\": { default = \"t2.micro\", all = [ \"t2.micro\" ] }, \"image_id\" : { default = \"\", all = [] }, \"security_groups\" : { default = [], all = [] }, \"key_name\": { default = \"\", all = [] }}\n client = boto3.client(\"EC2\")\n data = client.describe_images()\n info[\"image_id\"][\"all\"]\n args = {}\n for attr in info:\n print(\"Available values for \"+attr+\":\\n\"+\" \".join(info[attr]))\n default = info[attr][0]\n var = raw_input(\"Choose \"+attr+\"[\"+default+\"]:\")\n if var == \"\":\n var = default\n if re.match(\"^.+\\s\", attr):\n args[attr] = [var]\n else:\n args[attr] = args\n reservation = client.run_instances(**args)",
"def extract_image_with_size_type(size_list: List[Dict], img_type: str) -> Dict:\n return next(filter(lambda x: x[\"type\"] == img_type, size_list), {})",
"def quick_instance(self, name, image, instance_type, env_tag='dev', zone_tag='starwatts', os_tag='debian', sg_id=None,\n private=True, extra_sg_ids=None, extra_tags=None, terminate_on_shutdown=False,\n debug=False):\n # Debug setting\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n\n # Preliminary tests\n try:\n ami = self.get_image(image_id=image)\n except EC2ResponseError:\n logging.error(\"The image {} could not be found. Aborting.\".format(image))\n return\n print(\"Using AMI {} : {}\".format(image, ami.name))\n if len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'running'})) > 0 or \\\n len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'stopped'})) > 0:\n logging.error(\"An instance with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No instance has the same 'name' tag.\")\n if self.keypair_exists(name):\n logging.error(\"A keypair with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No keypair was found with the same name.\")\n if sg_id is None:\n if self.security_group_exists(name=name):\n logging.error(\"A security group with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No security group was found with the same name.\")\n\n # Tags generation\n logging.debug(\"Generating tags to apply.\")\n tags = dict(name=name, os=os_tag, env=env_tag, zone=zone_tag, privacy='true' if private else 'false')\n if extra_tags is not None:\n tags.update(extra_tags)\n print(\"Tags : {}\".format(tags))\n\n # Fetching needed security groups (bastion and zabbix)\n standard_sg = self.get_all_security_groups(groupnames=['standard'])\n if len(standard_sg) != 1:\n logging.error(\"Multiple or no security group was found for the 'bastion' search. Aborting.\")\n return\n standard_sg = standard_sg[0]\n logging.debug(\"The following security group was found for 'standard : {} {}\".format(standard_sg.id,\n standard_sg.description))\n\n # Security group creation\n if sg_id is None:\n sg = self.create_security_group(name, \"SG applied to {} VM\".format(name))\n sg_id = sg.id\n\n sg_ids = [sg_id, standard_sg.id, ]\n # Using the extra security groups if any\n if extra_sg_ids is not None:\n logging.debug(\"Extra security groups to add : {}\".format(extra_sg_ids))\n sg_ids.extend(extra_sg_ids)\n logging.debug(\"Security Groups : {}\".format(sg_ids))\n\n user_data = \"-----BEGIN OUTSCALE SECTION-----\\nprivate_only=true\\n-----END OUTSCALE SECTION-----\" if private else \"\"\n logging.debug(\"Creating keypair.\")\n kp = self.create_key_pair(key_name=name)\n fp = os.path.join(os.path.expanduser('~/.ssh'), '%s.pem' % kp.name)\n with open(fp, 'wb') as fd:\n fd.write(bytes(kp.material, \"UTF-8\"))\n logging.debug(\"Keypair written to ~/.ssh/{}.pem\".format(name))\n\n resa = self.run_instances(image_id=image, key_name=name, security_groups=sg_ids, instance_type=instance_type,\n user_data=user_data,\n instance_initiated_shutdown_behavior='terminate' if terminate_on_shutdown else 'stop')\n inst = resa.instances[0]\n logging.debug(\"Adding tags to the newly created machine.\")\n inst.add_tags(tags)\n return inst",
"def cmd_image_id(client, args):\n image = client.get_image(args.image_id)\n data = image.__dict__\n generate_output({'image': data})",
"def get_image(self):\n logging.debug(\"%s get_image entered\" % str(self.machine_name))\n snapshots = cs.list_snapshots()\n # find the one for this server\n if self.cloudserver:\n server_id = self.cloudserver.id\n else:\n return self.image_id\n\n for snapshot in snapshots:\n img = snapshot.metadata.get(\"instance_uuid\", None)\n # print \"XXX:\", img\n\n if img == server_id:\n print \"Server %s has snapshot %s\" % (server_id, img)\n return img\n\n print \"Server %s has no snapshots\" % (server_id)\n return None",
"def get_image(image_id, user_id):\n\n with image_backend(user_id) as backend:\n return backend.get_image(image_id)"
]
| [
"0.6219611",
"0.57743716",
"0.5763096",
"0.57555014",
"0.5749443",
"0.56019837",
"0.55169725",
"0.54476464",
"0.54437417",
"0.5438611",
"0.54333144",
"0.54257226",
"0.5425297",
"0.54199284",
"0.538957",
"0.5378725",
"0.53651994",
"0.535963",
"0.5358915",
"0.5342345",
"0.5319436",
"0.53056455",
"0.5305303",
"0.52954924",
"0.5287874",
"0.5286858",
"0.5283796",
"0.52745473",
"0.52711934",
"0.52568114"
]
| 0.68623555 | 0 |
Return ids from instances | def get_ids(self, instances):
instance_ids = []
for instance in instances:
instance_ids.append(instance.id)
return instance_ids | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_instance_ids(instances):\n instance_ids = []\n for instance in instances:\n instance_ids.append(instance.id)\n return instance_ids",
"def get_instances_ids(self):\n reservations = self.__get_reservations()\n instances_ids = []\n instances,_ = self.__get_multi_instances(reservations)\n for instance in instances:\n instances_ids.append(instance.id.encode(\"latin-1\"))\n return instances_ids",
"def getIDs():",
"def _get_ids_from_hostname(self, hostname):\r\n results = self.list_instances(hostname=hostname, mask=\"id\")\r\n return [result['id'] for result in results]",
"def _instantiated_ids(self):\n return self._identity_map.keys()",
"def list_instance_uuids(self):\n return self.list_instances()",
"def instances(self):\n return self.get('instances')",
"def get_ids(self) -> List[str]:",
"def list_instances(self):\n LOG.debug(\"list_instances\")\n\n instance_ids = []\n bmms = db.bmm_get_all(None)\n for bmm in bmms:\n if not bmm[\"instance_id\"]:\n continue\n instance_ids.append(self._instance_id_to_name(bmm[\"instance_id\"]))\n\n return instance_ids",
"def ids(self):\n return self.obj_to_id.values()",
"def ids(self):\n return self._ids",
"def ids(self):\n return list(self._id_generator())",
"def ids(self):\n return list(self._id_generator())",
"def get_ids(self):\n return self._ids",
"def _instancelist(self):\n\n rv = []\n self.iname = {}\n for resv in self.conn.get_all_reservations():\n for inst in resv.instances:\n if inst.state != 'terminated':\n name = inst.tags.get('Name',None)\n rv.append([inst.id,inst.state])\n if name is not None:\n rv.append([name,inst.state])\n else:\n rv.append([inst.id+'-needsName',inst.state])\n self.iname[name] = inst.id\n self.iname[inst.id] = inst.id\n return rv",
"def getIDs(self):\n return self.multiengine.getIDs()",
"def get_instances(cls):\n raise NotImplementedError",
"def source_instance_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"source_instance_ids\")",
"def fetch_instances(self, ids):\n result = []\n self.log.info(f\"fetch '{len(ids)}' instances\")\n self.log.debug(f\"fetch instance data for ids '{ids}'\")\n try:\n response = self.client.describe_instances(\n InstanceIds=ids\n )\n if 'HTTPStatusCode' in response['ResponseMetadata'] and response['ResponseMetadata']['HTTPStatusCode'] == 200:\n pass\n else:\n raise Exception(f'not able to fetch instacnes with ids: {ids}')\n if len(response['Reservations'][0]['Instances']) == 0:\n raise Exception(f'should retrun at least single insatance data')\n result = []\n for reservation in response[\"Reservations\"]:\n for el in reservation[\"Instances\"]:\n ec2 = EC2Instance.factory(el)\n if ec2.state:\n result.append(ec2)\n else:\n self.log.warn(f'instance \"{ec2.id}\" excluded')\n except Exception as e:\n raise Exception(f'exception when trying to fetch instance data {ids}')\n return sorted(list(result), key=lambda instance: instance.launch_time)",
"def object_ids(self):\n return self._extract_set('id')",
"def Get_Running_Instances():\n ec2 = boto3.resource('ec2') \n #call the features resource from the boto3 library\n instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['pending', 'running',]},])\n #filter the instances returned using the state name\n #you can also filter using Tags by adding the filters: \n #[{'Name': 'tag-key', 'Values': ['Role','Name',]}, {'Name': 'tag-value', 'Values': ['*test*', '*TEST*',]},]\n return [instance.id for instance in instances]\n #return a liste with the ids of the instances",
"def getinstancelist():\n dbcursor_dict.execute(dbq.get_all_instance_list, )\n db_instance_list = dbcursor_dict.fetchall()\n return db_instance_list",
"def get_ids(self):\n return [item.id for item in self.items]",
"def ids(self):\n return (x[\"_id\"] for x in self.document._meta.collection.find(self.spec, fields = (\"_id\",)))",
"def GetInstances(self, bulk=False, reason=None):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n _AppendReason(query, reason)\n\n instances = self._SendRequest(HTTP_GET,\n \"/%s/instances\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return instances\n else:\n return [i[\"id\"] for i in instances]",
"def get_asg_instance_ids(self, asg_name):\n instance_ids = []\n # Grab the first item in the list because we're only asking for 1 ASG\n try:\n asg_data = self.asg.describe_auto_scaling_groups(\n AutoScalingGroupNames=[asg_name])['AutoScalingGroups'][0]\n except Exception as e: \n logger.info(e)\n return []\n\n for instance_data in asg_data['Instances']:\n instance_ids.append(instance_data['InstanceId'])\n\n return instance_ids",
"def remote_get_ids(self):\n return self.smultiengine.get_ids()",
"def instances(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"instances\")",
"def _get_ids_from_ip(self, ip_address):\r\n try:\r\n # Does it look like an ip address?\r\n socket.inet_aton(ip_address)\r\n except socket.error:\r\n return []\r\n\r\n # Find the VS via ip address. First try public ip, then private\r\n results = self.list_instances(public_ip=ip_address, mask=\"id\")\r\n if results:\r\n return [result['id'] for result in results]\r\n\r\n results = self.list_instances(private_ip=ip_address, mask=\"id\")\r\n if results:\r\n return [result['id'] for result in results]",
"def get_id_of_instance(self, label):\n query = read_query('id/id_of_instance') % label\n response = self._submit_query(query)\n return [elem['id']['value'] for elem in response] if response else []"
]
| [
"0.85830265",
"0.8202876",
"0.77256346",
"0.7455178",
"0.7383215",
"0.73242325",
"0.70874155",
"0.70678616",
"0.70521647",
"0.7050327",
"0.70132405",
"0.69588256",
"0.69588256",
"0.69324195",
"0.6916249",
"0.68825996",
"0.67803013",
"0.6776181",
"0.6759419",
"0.67552614",
"0.67470056",
"0.67446554",
"0.67442596",
"0.6716505",
"0.66603655",
"0.6638525",
"0.66140026",
"0.66003126",
"0.6595246",
"0.6564779"
]
| 0.8575143 | 1 |
Return ips from instances | def get_ips(self, instances):
public_ips = []
for instance in instances:
public_ips.append(instance.public_dns_name)
return public_ips | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_instances(self):\n for server in self.cs.servers.list():\n match = self.cluster_re.match(server.name)\n if match:\n for ip in server.networks['public']:\n if ip.count('.'):\n v4ip = ip\n yield (match.group('role'), v4ip)",
"def get_instances() -> dict:\n url = f\"{app.config['COMPUTE_SERVERS_REF']}/detail\"\n instances_rq = request(\n method=\"GET\", url=url, headers=build_header(), params={\"vm_state\": \"active\"},\n )\n\n if not instances_rq.ok:\n HTTPError(instances_rq.status_code)\n\n answer = {\"servers\": list()}\n for instance in instances_rq.json()[\"servers\"]:\n instance_info = dict(name=instance[\"name\"])\n instance_info[\"ip_addresses\"] = list()\n for network, info in instance[\"addresses\"].items():\n instance_info[\"ip_addresses\"].extend(entry[\"addr\"] for entry in info)\n answer[\"servers\"].append(instance_info)\n\n return answer",
"def _get_public_ips(self, ec2_ids):\n public_ips = []\n\n for ec2_id in ec2_ids:\n while True:\n response = self.ec2_client.describe_instances(InstanceIds=[ec2_id])\n ec2_info = response['Reservations'][0]['Instances'][0]\n if ec2_info['PublicIpAddress']:\n logger.info(\"Public IP for EC2 instance \" + ec2_id + \": \" + ec2_info['PublicIpAddress'])\n public_ips.append(ec2_info['PublicIpAddress'])\n break\n else:\n logger.info(\"Still waiting for Public IP to be configured for \" + ec2_id)\n time.sleep(10)\n\n return public_ips",
"def machine_lookup_all(session, hostname, public_ip = True):\n client = session.client('ec2')\n response = client.describe_instances(Filters=[{\"Name\":\"tag:Name\", \"Values\":[hostname]},\n {\"Name\":\"instance-state-name\", \"Values\":[\"running\"]}])\n\n addresses = []\n items = response['Reservations']\n if len(items) > 0:\n for i in items:\n item = i['Instances'][0]\n if 'PublicIpAddress' in item and public_ip:\n addresses.append(item['PublicIpAddress'])\n elif 'PrivateIpAddress' in item and not public_ip:\n addresses.append(item['PrivateIpAddress'])\n return addresses",
"def list_instances(self):\n nodes = self._driver.list_nodes()\n return [[n.name, n.state, n.public_ips] for n in nodes]",
"def proxy(self):\n result = self.instances(role='stateless-body', format=\"PrivateIpAddress\")\n return result[0][0] if result else None",
"def get_ips(rg_name, vmss_name):\n\n script = \"az vmss list-instance-public-ips --resource-group {rg} --name {vmss} | grep ipAddress\".format(\n rg=rg_name,\n vmss=vmss_name\n )\n run_script(script)",
"def public_ip_addresses(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"public_ip_addresses\")",
"def list_instances(self):\n print '# AWS EC2 instances'\n self.compute.list_instances()",
"def nodes_ips(os_faults_steps):\n nodes = os_faults_steps.get_nodes()\n ip_fqdn = {node.ip: node.fqdn for node in nodes}\n cmd = \"\"\"ip -o a | awk '/scope global/{split($4,ip,\"/\"); print ip[1]}'\"\"\"\n results = os_faults_steps.execute_cmd(nodes, cmd)\n nodes_ips_ = {}\n for node_result in results:\n fqdn = ip_fqdn[node_result.host]\n nodes_ips_[fqdn] = node_result.payload['stdout_lines']\n\n return nodes_ips_",
"def ip_addresses(self) -> pulumi.Output[Sequence['outputs.IpMappingResponse']]:\n return pulumi.get(self, \"ip_addresses\")",
"def running_instances(hostnames=None):\n\n global api\n\n all_inst = []\n try:\n all_inst = api.get_all_instances()\n except Exception, e:\n logging.error(\"Can't get list of instances (maybe wrong credentials?)\")\n return None\n\n # Resolve IPs\n if hostnames is not None:\n ips = []\n for h in hostnames:\n try:\n ipv4 = gethostbyname(h)\n ips.append(ipv4)\n except Exception:\n # Don't add host if IP address could not be found\n logging.warning(\"Ignoring hostname %s: can't reslove IPv4 address\" % h)\n ips=list(set(ips))\n\n if hostnames is not None:\n logging.debug(\"Input hostnames: %s\" % (','.join(hostnames)))\n logging.debug(\"Input IPs: %s\" % (','.join(ips)))\n else:\n logging.debug(\"No input hostnames given\")\n\n # Add only running instances\n inst = []\n for i in all_inst:\n if i.status(token_id=api.keystone.token_id) == 'running':\n if hostnames is None:\n # Append all\n inst.append(i)\n else:\n found = False\n for ipv4 in ips:\n if i.network_ip(network_name=cf[\"api\"][\"network_name\"]) == ipv4:\n inst.append(i)\n logging.debug(\"Found IP %s corresponding to instance\" % ipv4)\n found = True\n break\n if not found:\n logging.warning(\"Cannot find instance %s in the list of known IPs\" % i.network_ip(network_name=cf[\"api\"][\"network_name\"]))\n\n return inst",
"def get_ips():\r\n local_ips = []\r\n public_ips = []\r\n \r\n # list of iface names, 'lo0', 'eth0', etc.\r\n for iface in netifaces.interfaces():\r\n # list of ipv4 addrinfo dicts\r\n ipv4s = netifaces.ifaddresses(iface).get(netifaces.AF_INET, [])\r\n for entry in ipv4s:\r\n addr = entry.get('addr')\r\n #print(\"addr: \" + addr)\r\n if not addr:\r\n continue\r\n if not (iface.startswith('lo') or addr.startswith('127.')):\r\n public_ips.append(addr)\r\n else:\r\n local_ips.append(addr) \r\n return public_ips",
"def perform(self, resource_handler):\n instance_id = ast.literal_eval(self.instance_data['instance_id'])['Id']\n info = resource_handler.cli.inspect_container(container=instance_id)\n ip_addresses = []\n for k, v in info['NetworkSettings']['Networks'].items():\n ip_addresses.append(v['IPAddress'])\n return ip_addresses[0]",
"def perform(self, resource_handler):\n instance_id = ast.literal_eval(self.instance_data['instance_id'])['Id']\n info = resource_handler.cli.inspect_container(container=instance_id)\n ip_addresses = []\n for k, v in info['NetworkSettings']['Networks'].items():\n ip_addresses.append(v['IPAddress'])\n return ip_addresses[0]",
"def ip_addresses(self) -> Sequence['outputs.IpMappingResponse']:\n return pulumi.get(self, \"ip_addresses\")",
"def get_ips(self, oid):\n path = '/servers/%s/ips' % oid\n res = self.client.call(path, 'GET', data='', token=self.manager.identity.token)\n self.logger.debug('List ip addresses for server %s: %s' % \n (oid, truncate(res)))\n return res[0]",
"def _get_instances(instance_tags, region):\n return ec2_conn[region].get_all_instances(filters={\"tag:Name\": instance_tags})",
"def get_ip(self):\n json_scheme = self.gen_def_json_scheme('GetPurchasedIpAddresses')\n json_obj = self.call_method_post(method='GetPurchasedIpAddresses ', json_scheme=json_scheme)\n self.iplist = IpList()\n for ip in json_obj['Value']:\n r = Ip()\n r.ip_addr = ip['Value']\n r.resid = ip['ResourceId']\n r.serverid = ip['ServerId'] if 'None' not in str(ip['ServerId']) else None\n self.iplist.append(r)",
"def discovered_ips(self) -> Sequence[str]:\n return pulumi.get(self, \"discovered_ips\")",
"def instances(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"instances\")",
"def instances(self):\n return self.get('instances')",
"def get_instance_and_ip_list_for_stack_id(self,heatcln,stack_id):\n #Get the instance list for this stack\n resources = heatcln.resources.list(stack_id)\n instance_list = []\n ip_list = []\n \n for resource in resources:\n res_info = resource._info\n \n #Add those resources that are instances\n if res_info['resource_type'] == 'AWS::EC2::Instance':\n instance_list.append(resource)\n if res_info['resource_type'] == 'AWS::EC2::EIPAssociation':\n ip_list.append(resource)\n return instance_list,ip_list",
"def get_ec2_instances(client):\n reservations = client.describe_instances().get(\"Reservations\")\n instances = list(map(lambda x: x.get(\"Instances\"), reservations))\n instances = list(itertools.chain.from_iterable(instances))\n return list(map(lambda x: {\n 'name': next((t['Value'] for t in x.get('Tags', []) if t.get('Key') == 'Name'), 'Unknown'),\n 'id': x.get('InstanceId'),\n 'state': x.get('State'),\n }, instances))",
"def ip_addresses(self):\n try:\n return socket.gethostbyaddr(self.fqdn)[-1]\n except socket.error as _:\n return ['127.0.0.1']",
"def test_ipam_ip_addresses_list(self):\n pass",
"def ip(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n lookup = self.get(\"enable_ip_lookup\", False)\n ip = vmrun.getGuestIPAddress(lookup=lookup)\n if ip:\n puts_err(colored.green(ip))\n else:\n puts_err(colored.red(\"Unknown IP address\"))",
"def dns_server_ips(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"dns_server_ips\")",
"def public_ip_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"public_ip_addresses\")",
"def get_instance_list():\n return parse_list_output(Popen('nova list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])"
]
| [
"0.76113784",
"0.730542",
"0.72717494",
"0.7170812",
"0.7075248",
"0.7043355",
"0.70000243",
"0.68474054",
"0.6840684",
"0.68234193",
"0.6718438",
"0.6659494",
"0.66257316",
"0.6606317",
"0.6606317",
"0.659874",
"0.6588869",
"0.6585919",
"0.6579781",
"0.6567652",
"0.6516815",
"0.64992183",
"0.6487959",
"0.6478165",
"0.64681655",
"0.6445365",
"0.64385426",
"0.64379555",
"0.64373547",
"0.64294875"
]
| 0.82241243 | 0 |
Okta-specific application info retrieved from the URL. | def _app_info(self):
redirect_url = parse.urlparse(self._redirect_url())
if re.search("okta", redirect_url.hostname):
app_info = re.match(
r"^\/app\/(\w+)\/(\w+)\/sso/saml$",
redirect_url.path
)
return app_info.groups(0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_app_info(self):\n pass",
"def _fetch_app_info(app_id):\n try:\n assert len(app_id), \"Empty string\"\n lookup_url = \"https://itunes.apple.com/lookup?id=\"\n target_url = lookup_url + app_id\n if sys.version_info < (3, 5):\n response = urllib2.urlopen(target_url)\n else:\n response = urllib.request.urlopen(target_url)\n data = response.read() # a `bytes` object\n text = data.decode('utf-8')\n app_info = json.loads(text)\n return app_info\n except AssertionError as e:\n print(e)\n sys.exit(\"Exit script with error code %s\" % e)\n except urllib2.URLError as e:\n print(e)\n sys.exit(\"Exit script with error code %s\" % e)\n except urllib.error.URLError as e:\n print(e)\n sys.exit(\"Exit script with error code %s\" % e)\n except urllib2.HTTPError as e:\n print(e)\n sys.exit(\"Exit script with error code %s\" % e)\n\n except:\n e = sys.exc_info()[0]\n print(\"Error: %s\" % e)\n sys.exit(\"Exit script with error code %s\" % e)",
"def _get_app_info(self):\n info_plist = None\n\n for data in self.filelist:\n if re.match(self.info_plist_regex, data.filename):\n info_plist = data\n\n if not info_plist:\n self._raise_ipa_error()\n\n info_plist = self.read(info_plist)\n self.app_info = readPlistFromString(info_plist)\n\n return self.app_info",
"def getAppInfo(self):\n data = self._client.Application.find(self.app_id)\n return data",
"def info ():\n\n info = {\n 'name' : app.config['APPLICATION_NAME'],\n 'short_name' : app.config['APPLICATION_SHORT_NAME'],\n 'main_page_url' : app.config['APPLICATION_MAIN_URL'],\n # 'css_url' : app.config.get ('APPLICATION_CSS_URL', ''),\n 'css' : 'span.smalltext { font-size: smaller }',\n 'supported_langs_query' : [ LANG ],\n }\n return make_json_response (info)",
"def getInfo():",
"def android_app_info(self) -> 'outputs.AndroidAppInfoResponse':\n return pulumi.get(self, \"android_app_info\")",
"def ios_app_info(self) -> 'outputs.IosAppInfoResponse':\n return pulumi.get(self, \"ios_app_info\")",
"def get_application_info( tree ):\n application_name = None\n # most machines store the machine name string in the tag 'ApplicationName'\n for application_name in tree.getroot().iter( 'ApplicationName' ):\n application_name = application_name.text\n break\n # NovaSeq stores the machine name string in the tag 'Application'\n if( application_name == None ):\n for application_name in tree.getroot().iter( 'Application' ):\n application_name = application_name.text\n break\n if( application_name == None ):\n raise ValueError( 'Unable to find Application* element in BCL RunParameters.xml' )\n\n application_version = None\n for application_version in tree.getroot().iter( 'ApplicationVersion' ):\n application_version = application_version.text\n break\n if( application_version == None ):\n raise ValueError( 'ApplicationVersion element missing in BCL RunParameters.xml' )\n\n re_models = '|'.join( application_name_dict.keys() )\n re_pattern = '(%s)' % re_models\n mobj = re.match( re_pattern, application_name )\n if( mobj == None ):\n raise ValueError( 'unrecognized ApplicationName in RunParameters.xml file' )\n instrument_model = application_name_dict[mobj.group( 1 )]\n\n # Distinguish between HiSeq models 3000 and 4000 using Andrew's(?) method.\n # Note: the p5 index orientations differ between these two models.\n if( instrument_model == 'HiSeq' ):\n application_major_version = int(application_version.split('.')[0])\n if application_major_version > 2:\n instrument_model = 'HiSeq4000'\n else:\n instrument_model = 'HiSeq3000'\n\n return( instrument_model, application_version )",
"def get_info(self) -> Optional[Dict[str, Any]]:",
"def info(self):\n return self._fetch_json('/api/info')",
"def info(self):\n resp = requests.get(\"%s/api/info\"%self.urlbase, verify=False)\n return resp.json",
"def _get_app_info_Primary(self):\n return self._Primary_app_info",
"def get_info_of_url(url):\n pass",
"def get_info():\n global PERF_APP\n archs = None\n best_arch = None\n cipher_algos = None\n hash_algos = None\n aead_algos = None\n\n cmd = PERF_APP + ' --print-info'\n\n try:\n res = subprocess.run(cmd, stdout=subprocess.PIPE, \\\n stderr=subprocess.STDOUT, \\\n env=ENVS, shell=True, check=True)\n output = res.stdout.decode('utf-8')\n except subprocess.CalledProcessError as e:\n print(\"Error (\" + str(e.returncode) + \")\")\n print(e.output.decode('utf-8'))\n sys.exit(1)\n\n lines = output.rstrip().split('\\n')\n try:\n for line in lines:\n info = line.split(':')\n if info[0] == 'Supported architectures':\n archs = info[1].split()\n if info[0] == 'Best architecture':\n best_arch = info[1].split()\n if info[0] == 'Supported cipher algorithms':\n cipher_algos = info[1].split()\n if info[0] == 'Supported hash algorithms':\n hash_algos = info[1].split()\n if info[0] == 'Supported aead algorithms':\n aead_algos = info[1].split()\n except:\n print(\"Error parsing --print-info output:\\n\" \\\n \"{}\".format(output), file=sys.stderr)\n\n if archs is None or best_arch is None or cipher_algos is None \\\n or hash_algos is None or aead_algos is None:\n print(\"Error parsing system and app information\", file=sys.stderr)\n sys.exit(1)\n\n return archs, best_arch, cipher_algos, hash_algos, aead_algos",
"def get_info(self):\n url = self._url_for_op('info')\n data= None # This will be a GET request since data is None\n response = self._get_raw_response(self._get_json_headers,\n self._get_json_response, url, data)\n response = json.loads(response)\n self.api_info = response['results']\n return self.api_info",
"def get_system_info(self):\r\n method = self.public_endpoints['system_info']['method']\r\n url = self.base_url + self.public_endpoints['system_info']['url']\r\n req = requests.request(method, url)\r\n res = req.json()\r\n\r\n if res['success'] == True:\r\n return res[\"result\"]\r\n else:\r\n return res",
"def _get_app_info_Secondary(self):\n return self._Secondary_app_info",
"def get_site_info(self, passed_url, options={}):\n uri = self.get_site_info_url(passed_url)\n params = self.get_site_info_query_params(options)\n response = requests.get(uri, params)\n return response.json()",
"def ios_app_info(self) -> Optional[pulumi.Input['IosAppInfoArgs']]:\n return pulumi.get(self, \"ios_app_info\")",
"def probe_api():\n\n info = loads(get(url).text)\n return info",
"def info(self):\n return requests.get(self.info_url + self.pid).json()",
"def get(self):\n app_info = {\n 'developedBy': 'This app was developed by the Melbourne eResearch Group (www.eresearch.unimelb.edu.au) within the School of Computing and Information Systems (https://cis.unimelb.edu.au) at The University of Melbourne (www.unimelb.edu.au). ',\n 'description': 'The app uses artificial intelligence (convolutional neural networks) to identify the age, gender, and emotion of the people.',\n 'contact': 'https://eresearch.unimelb.edu.au',\n 'developedByHTML': '<p>This app was developed by the Melbourne eResearch Group (<a href=\\\"www.eresearch.unimelb.edu.au\\\" target=\\\"_blank\\\">www.eresearch.unimelb.edu.au</a>) within the School of Computing and Information Systems (<a href=\\\"https://cis.unimelb.edu.au\\\" target=\\\"_blank\\\">https://cis.unimelb.edu.au</a>) at The University of Melbourne (<a href=\\\"www.unimelb.edu.au\\\" target=\\\"_blank\\\">www.unimelb.edu.au</a>).</p>',\n 'descriptionHTML': '<p>The app uses artificial intelligence (convolutional neural networks) to identify the age, gender, and emotion of the people.</p>',\n 'contactHTML': '<p>Please contact us at: <a href=\\\"eresearch.unimelb.edu.au\\\" target=\\\"_blank\\\">eresearch.unimelb.edu.au</a></p>'\n }\n\n return send_json_response(app_info, 200)",
"def info(self):\n return self.client.call('GET', self.name + 'info')",
"def get_app_info(self, name):\n with hide(\"output\", \"running\"):\n result = local(\"redis-cli -h {host} -p 6379 -n {db} hgetall {name}\".format(\n host=self.host, name=name, db=REDIS_APPLICATION_DB_NUM), capture=True)\n\n if len(result.stdout) > 0:\n splits = result.stdout.split(\"\\n\")\n fmt_result = dict([(splits[i], splits[i+1])\n for i in range(0, len(splits), 2)])\n pp = pprint.PrettyPrinter(indent=2)\n pp.pprint(fmt_result)\n return fmt_result\n else:\n warn(\"Application \\\"%s\\\" not found\" % name)\n return None",
"def get_info() -> str:\n req = Request(URL + '/info')\n context = ssl._create_unverified_context()\n with urlopen(req, context=context) as response:\n return response.read().decode('utf-8')",
"def info() -> Dict[str, Any]:",
"def info(self):\n path = self._get_path('info')\n \n response = self._GET(path)\n self._set_attrs_to_values(response)\n return response",
"def get_info(self):\n pass",
"def get_info(self):\n pass"
]
| [
"0.691974",
"0.6761501",
"0.66218877",
"0.6582538",
"0.65025103",
"0.6377309",
"0.6356972",
"0.63516927",
"0.6295055",
"0.6275283",
"0.6253936",
"0.61869735",
"0.61593604",
"0.61483043",
"0.6089754",
"0.6008549",
"0.60042727",
"0.60020375",
"0.59736496",
"0.5971184",
"0.5904033",
"0.58767104",
"0.58665955",
"0.5839131",
"0.58340275",
"0.58258533",
"0.58230776",
"0.57979953",
"0.5787618",
"0.5787618"
]
| 0.7358192 | 0 |
Check if MFA is supported. | def _mfa_supported(self):
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_mfa(self) -> None:\n try:\n mfa_form = self._driver.find_element(By.CSS_SELECTOR, \"form[name=form]\", timeout=5)\n self._driver.find_element(By.CSS_SELECTOR, \"input[name=otc]\", timeout=5)\n self._halo.stop()\n mfacode = self._csc.prompt_for(\"MFA Code\")\n self._halo.start(SPINNER_MSGS[\"mfa_send\"])\n self.send_mfa(form=mfa_form, code=mfacode)\n self._halo.start(SPINNER_MSGS[\"token_refresh\"])\n self._driver.dump_cookies()\n except selenium.common.exceptions.TimeoutException:\n pass",
"def check_supported_features(self):",
"def mba_supported():\n return common.MBA_CAP in SYSTEM_CAPS",
"def _is_azureml_available() -> bool:\n if importlib.util.find_spec(\"azureml\") is None:\n return False\n if importlib.util.find_spec(\"azureml.core\") is None:\n return False\n return importlib.util.find_spec(\"azureml.core.run\") is not None",
"async def validate_mfa(self, code):\n mfa_data = {\n \"totp\": code,\n \"mfa_token\": self._mfa_token,\n \"client_time:\": datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n }\n\n # Get auth token\n async with self._client_session.post(\n API_URL + \"authenticate/mfa\", headers=self.headers, timeout=self.api_timeout, data=mfa_data\n ) as resp:\n\n # check for 200 return\n if resp.status != 200:\n raise SenseAuthenticationException(f\"API Return Code: {resp.status}\")\n\n # Build out some common variables\n data = await resp.json()\n self._set_auth_data(data)\n self.set_monitor_id(data[\"monitors\"][0][\"id\"])",
"def mwa_available():\n try:\n urllib2.urlopen(pref('ServerURL'), timeout=1)\n return True\n except urllib2.HTTPError, e:\n if str(e.code) == \"401\":\n return True\n else:\n return False\n except urllib2.URLError as err: \n return False",
"def verify_support():\n ostype, majorrelease, _ = get_os_release_data()\n if ostype not in _supported_os:\n _logger.info('OS type %s is not supported.', ostype)\n return False\n if majorrelease not in _supported_release:\n _logger.info('OS %s %s is not supported', ostype, majorrelease)\n return False\n return True",
"def list_support_required(self):\n\t\treturn self.typemanager.has_lists",
"def supports(self, requirements: typing.List[str]) -> bool:\n # MGA: NYI\n return True",
"def test_valid_upload_modes(self):\n upload_helpers.verify_upload_mode(MODE_DEFAULT)\n upload_helpers.verify_upload_mode(MODE_FAST5)\n upload_helpers.verify_upload_mode(MODE_ASSEMBLIES)",
"def f_supports(self, data):\n return True",
"def is_support(mode: int) -> bool:\n return mode in supported_modes\n pass",
"def is_supported():\n return not isinstance(_the_app, StubApp)",
"def _check_family(self):\n return",
"def available(self):\n return self.access_token is not None",
"def is_token_required(self):\n return any([self.app_id, self._login, self._password])",
"def verify_capabilities(self, capabilities) -> bool:\n _pinfo = self.provider_features()\n not_supported = {} # type: Dict[str, Union[str, List[str]]]\n for key, val in capabilities.items():\n if isinstance(val, str):\n if val not in _pinfo.get(key, \"\"):\n not_supported[key] = val\n elif isinstance(val, bool):\n if not _pinfo.get(key) and val:\n not_supported[key] = \"\"\n elif isinstance(val, list):\n unsup = []\n for v in val:\n if v not in _pinfo.get(key, \"\"):\n unsup.append(v)\n if unsup:\n not_supported[key] = unsup\n if not_supported:\n logger.error(\n \"Server does not support the following features: %s\", not_supported\n )\n return False\n return True",
"def supported():\n return os.path.isfile(OPENCOR)",
"def validateInput(self): \n if (self.options.mexURL and self.options.token): #run module through engine service\n return True\n \n if (self.options.user and self.options.pwd and self.options.root): #run module locally (note: to test module)\n return True\n \n log.debug('Botanicam: Insufficient options or arguments to start this module')\n return False",
"def test_claims_supported_not_set(self):\n request = self.factory.get(self.url)\n\n response = ProviderInfoView.as_view()(request)\n dic = json.loads(response.content.decode('utf-8'))\n self.assertEqual(dic['claims_supported'], [])",
"def supports_auth_method(self, auth_method):\n type_set = set(self.type.split('+'))\n am_set = set(auth_method.split('+'))\n return am_set.issubset(type_set)",
"def is_valid_platform(self):\n\n return 'APPLICATION_NAME' in self",
"def verify(self):\n data = [\"rfc\", \"tel\", \"email\", \"name\", \"use\"]\n state = False\n for item in data:\n if getattr(self, item + \"Input\").text() != \"\":\n state = True\n else:\n return False\n return state",
"def msan_supported(goroot: GoRoot) -> bool:\n if goroot.goos == \"linux\":\n return goroot.goarch in (\"amd64\", \"arm64\")\n elif goroot.goos == \"freebsd\":\n return goroot.goarch == \"amd64\"\n else:\n return False",
"def isSupported(self, *args):\n return _libsbml.SBMLExtension_isSupported(self, *args)",
"def is_supported(self) -> bool:\n if self.builders and self.app.builder.name not in self.builders:\n return False\n if self.formats and self.app.builder.format not in self.formats:\n return False\n\n return True",
"def is_valid(self, user_specific_config: Any, factor: str) -> bool:",
"def test_claims_supported_set(self):\n expected_claims = ['openid', 'email']\n\n request = self.factory.get(self.url)\n\n response = ProviderInfoView.as_view()(request)\n dic = json.loads(response.content.decode('utf-8'))\n self.assertEqual(dic['claims_supported'], expected_claims)",
"def test_validate_media_player_features():\n config = {}\n attrs = {ATTR_SUPPORTED_FEATURES: 20873}\n entity_state = State(\"media_player.demo\", \"on\", attrs)\n assert validate_media_player_features(entity_state, config) is True\n\n config = {FEATURE_ON_OFF: None}\n assert validate_media_player_features(entity_state, config) is True\n\n entity_state = State(\"media_player.demo\", \"on\")\n assert validate_media_player_features(entity_state, config) is False",
"def is_feature_enabled(cls):\r\n return settings.FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS']"
]
| [
"0.6456988",
"0.64207107",
"0.6161599",
"0.6030697",
"0.57195747",
"0.5707503",
"0.5667593",
"0.5590575",
"0.54633266",
"0.54578304",
"0.5440032",
"0.53827685",
"0.53774214",
"0.5354453",
"0.5353557",
"0.53513414",
"0.53478837",
"0.5345963",
"0.53414917",
"0.5340764",
"0.533929",
"0.53220135",
"0.5307755",
"0.5304878",
"0.5302685",
"0.52753526",
"0.52737224",
"0.526645",
"0.5257398",
"0.5247834"
]
| 0.8236154 | 0 |
Return the mirror of the given string. | def mirror_string(the_string):
return the_string + reverse_string(the_string) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mirror(s):\n mir_str = s\n for i in range(1, len(s) + 1):\n mir_str += s[-i]\n return mir_str",
"def string_mirror(text):\n rev_text = text[::-1]\n mirror_text = text + rev_text\n return mirror_text",
"def reverse(string):\n return string[::-1]",
"def reverse_string(s):\n s.reverse()",
"def reverseString(string):\n return string[::-1]",
"def reverseComplement(string):\n rMap = { \"A\":\"T\", \"T\":\"A\", \"C\":\"G\", \"G\":\"C\", \"N\":\"N\"}\n return \"\".join(rMap[i] for i in string[::-1])",
"def reverse_string( str ):\n return str[::-1]",
"def reverse(s):\n return s[::-1]",
"def reverse_string(sen):\n return sen[::-1]",
"def reverseString(s):\n return s[::-1]",
"def reverseString1(self, s):\n for i in range(len(s)//2):\n s[i], s[~i] = s[~i], s[i]",
"def inverse_replacer(my_str:str, a:str, b:str) -> str:\n \n my_str = list(my_str)\n\n for i in range(len(my_str)):\n \n if my_str[i] == a:\n my_str[i] = b\n\n elif my_str[i] == b:\n my_str[i] = a\n \n \n return(''.join(my_str[::-1]))",
"def string_reverser(our_string):\\\\\n\\\n # TODO: Write your solution here\\",
"def reverse_string_2(s):\n s[:] = s[::-1]",
"def string_reverser(our_string):\n\n # TODO: Write your solution here\n\n reversed_string = ''\n\n i = len(our_string) - 1\n\n while i >= 0:\n reversed_string += our_string[i]\n i -= 1\n\n return reversed_string",
"def string_reverse(text):\n rev_text = text[::-1]\n return rev_text",
"def revcom(s):\n def complement(s):\n basecomplement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}\n letters = list(s)\n letters = [basecomplement[base] for base in letters]\n return ''.join(letters)\n return complement(s[::-1])",
"def inverse_captcha_halfway(in_str):\n def get_reference_char(idx):\n return get_value_at_index(idx + int(len(in_str) / 2), in_str)\n\n return _inverse_captcha(in_str, get_reference_char)",
"def reverseString(self, s) -> None:\n i = 0\n j = len(s) - 1\n while i < j:\n temp = s[i]\n s[i] = s[j]\n s[j] = temp\n i += 1\n j -= 1",
"def reverse(word):\n return word[::-1]",
"def reverseString(s):\n for i in range(len(s)//2):\n t = s[i]\n s[i] = s[len(s)-i-1]\n s[len(s)-i-1] = t",
"def reverseString(self, s) -> None:\n # ๋ฐฉ๋ฒ 1\n s.reverse()\n # ๋ฐฉ๋ฒ 2\n # half_len = int(len(s) / 2)\n # for i in range(half_len):\n # temp = s[i]\n # s[i] = s[len(s) - 1 - i]\n # s[len(s) - 1 - i] = temp",
"def word_flipper(our_string):\n\n # TODO: Write your solution here\n # reversed_word = ''\n #\n # i = len(our_string) - 1\n #\n # words = []\n #\n # while i >= 0:\n #\n # if i > 0 and our_string[i] != ' ':\n # reversed_word += our_string[i]\n # i -= 1\n # elif i == 0:\n # reversed_word += our_string[i]\n # words.append(reversed_word)\n # i -= 1\n # else:\n # words.append(reversed_word)\n # reversed_word = ''\n # i -= 1\n #\n # reversed_string = []\n #\n # for i in range(len(words)):\n # reversed_string.append(words.pop())\n #\n # return ' '.join(reversed_string)\n\n # Their solution\n word_list = our_string.split(\" \")\n\n for idx in range(len(word_list)):\n word_list[idx] = word_list[idx][::-1]\n\n return \" \".join(word_list)",
"def reverse_string1(str): #reference\n return \" \".join(str.split()[::-1]) #ๅๅฒๅพๅ็๏ผๆๆๆธ๏ผๅพๅพ้ขๅพๅๅ๏ผ",
"def reverse(self, s):\n return '\\x16%s\\x16' % s",
"def reverse_string2(str):\n s_list = str.split()\n \" \".join(reverse_s(s_list))",
"def reverse_words(string):\n pass # TODO",
"def reverse_string(the_string):\r\n # iterating by index in reverse\r\n # my_rev = \"\"\r\n # for idx in range(len(the_string) - 1, -1, -1):\r\n # my_rev += the_string[idx]\r\n # return my_rev\r\n\r\n # using a while loop\r\n my_rev = \"\"\r\n idx = len(the_string) - 1\r\n while idx >= 0:\r\n my_rev += the_string[idx]\r\n idx -= 1\r\n return my_rev",
"def string_reverser(our_string):\n\n # TODO: Write your solution here\n string = \"\" # O(1)\n for i in range(len(our_string)): # O(n)\n string += our_string[len(our_string) - 1 - i] # O(1)\n return string # O(1)",
"def op_mirror():\n mir = np.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, -1, 0],\n [0, 0, 0, -1]])\n return mir"
]
| [
"0.78554",
"0.7797941",
"0.64188313",
"0.6326632",
"0.627955",
"0.6224376",
"0.62028164",
"0.61798996",
"0.6115949",
"0.609675",
"0.60859007",
"0.6011427",
"0.6008479",
"0.59972316",
"0.595555",
"0.5951928",
"0.59278554",
"0.59032947",
"0.58988315",
"0.5854072",
"0.58286273",
"0.5822741",
"0.58153576",
"0.57874936",
"0.5787095",
"0.5769791",
"0.5751006",
"0.5750722",
"0.5739061",
"0.57316667"
]
| 0.82076615 | 0 |
Define a function called sum_evens, which receives one argument, a list of numbers. Your function should return the sum of all the even numbers in the list. | def sum_evens(the_nums):
my_sum = 0
for num in the_nums:
if num % 2 == 0:
my_sum += num
return my_sum | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sum_evens( n ):\n result = 0\n num = 0\n while num < n + 1:\n result += num\n num += 2\n return result",
"def sum_of_even(numbers):\r\n\r\n\tsum = 0\r\n\tfor i in numbers:\r\n\t\tif (i%2 == 0):\r\n\t\t\tsum += i\r\n\r\n\treturn sum",
"def get_all_evens(nums):\n\n even_nums = []\n\n for num in nums:\n if num % 2 == 0:\n even_nums.append(num)\n \n return even_nums",
"def get_all_evens(nums):\n\n even_nums = []\n\n for num in nums:\n if num % 2 == 0:\n even_nums.append(num)\n\n return even_nums",
"def evens(numbers):\n result = []\n for number in numbers:\n if (number % 2) == 0:\n result.append(number)\n return result",
"def even_number_of_evens(numbers):\n \n #check to see if the list is empty\n if numbers == []:\n return False\n else:\n #set a 'number_of_evens' variable that will be incremented each\n #time an even number is found\n evens = 0\n \n #Iterate over each item and if it's an even number, increment the\n #'evens' variable\n for number in numbers:\n if number % 2 == 0:\n evens += 1\n \n if evens == 0:\n return False\n else:\n return evens % 2 == 0",
"def count_evens(list):\n pass",
"def evens_only(input_list):\n ### Add your code here ###\n even_integer_list = []\n \n for i in input_list:\n if(i%2 ==0):\n even_integer_list.append(int(i))\n\n return even_integer_list",
"def only_evens(xs: list[int]) -> list[int]:\n i: int = 0\n xs_even: list[int] = []\n if len(xs) == 0:\n return []\n else:\n while i < len(xs):\n if xs[i] % 2 == 0:\n xs_even.append(xs[i])\n i += 1\n return xs_even",
"def summedOdds(L):\r\n result = 0\r\n for e in L:\r\n if e % 2 == 1:\r\n result = result + e # or result += e\r\n return result",
"def findEvens(lst):\n evens = []\n for el in lst:\n if el % 2 == 0:\n evens.append(el)\n return evens",
"def summedOdds( L ):\n result = 0\n for element in L:\n if element %2 ==1: #checking if the current elemnt is odd\n result = result + element # if it is odd, add it to the result, or result += e\n return result",
"def count_evens(L):\n result = 0\n for x in L:\n if x%2 == 0:\n result = result + 1\n return result",
"def count_evens(l):\n evens = []\n c = 0\n for number in l:\n if number % 2 == 0:\n c += 1\n return c",
"def only_evens(x: list[int]) -> list[int]:\n i: int = 0\n evens = list()\n while (i < len(x)):\n if x[i] % 2 == 0: \n evens.append(x[i])\n i += 1\n else:\n i += 1\n return evens",
"def summedOdds(L):\n result = 0\n element = 0\n\n for e in L:\n if element%2 != 0:\n result = result + L(element)\n element = element + 1\n\n return result",
"def sum_of_even_fibs(n):\n f = gen_fibs()\n next_fib = next(f)\n sum = 0\n\n while next_fib < n:\n if next_fib % 2 == 0:\n sum += next_fib\n\n next_fib = next(f)\n\n return sum",
"def only_evens(a: list[int]) -> list[int]:\n result: list[int] = list()\n for x in a:\n if x % 2 == 0:\n result.append(x)\n return result",
"def sum_even_fibs(n):\n return sum(filter(even, map(fib, range(1, n+1))))",
"def sum_of_initial_odds(the_nums):\r\n my_sum = 0\r\n for num in the_nums:\r\n if num % 2 == 0:\r\n # as soon as even number found, we're done\r\n return my_sum\r\n else:\r\n # add odd number to accumulator\r\n my_sum += num\r\n # reached end of list (no even numbers in the_nums)\r\n return my_sum",
"def even_fibbonachi_sum(num):\n fibs = fibbonachi(num)\n evenFib = [fib for fib in fibs if (fib % 2 == 0)]\n return sum(evenFib)",
"def sum_of_squared_odd_integers(n):\n\n sum = 0\n\n for i in range(0,n):\n if not is_even(i):\n sum += i*i\n\n return sum",
"def evansPerfectNumbers(n):\n assert n>1\n perfect = []\n for i in range(1,n+1):\n sums = 0\n for j in range(1,i):\n sums += evansMod(i,j)*j\n if sums == i:\n perfect.append(i)\n #print(perfect) #for testing only\n return perfect",
"def sum_of_numbers(numbers):\r\n return sum(numbers)",
"def even_by_odds(digit):\n if '-' in digit:\n digit = digit[1:]\n list_of_numbers = [int(num) for num in digit]\n evens = sum_evens(list_of_numbers)\n odds = sum_odds(list_of_numbers)\n return evens * odds",
"def sum_odd_fib(fib):\n total = 0\n for number in fib:\n if number % 2 == 0:\n total += number\n return total",
"def interleaved_sum(n, odd_term, even_term):\n \"*** YOUR CODE HERE ***\"\n if n == 1:\n return 1\n f = odd_term\n if n % 2 == 0:\n f = even_term\n return f(n) + interleaved_sum(n - 1, odd_term, even_term)",
"def even_odd_sums(seq):\n even = seq[0::2]\n odd = seq[1::2]\n return [sum(even), sum(odd)]",
"def find_an_even(input_list: list):\n\n for integer in input_list:\n if integer % 2 == 0:\n return integer\n raise ValueError",
"def even_only(some_list):\n # This function will take a list of integer and return a list of even number in original list.\n new_list = []\n for d in some_list:\n if d%2 == 0: # if it even add in new list.\n new_list.append(d)\n return new_list"
]
| [
"0.7681991",
"0.75866824",
"0.747706",
"0.7456841",
"0.7318411",
"0.7282693",
"0.7232526",
"0.7115733",
"0.6844247",
"0.6804674",
"0.6795843",
"0.6716187",
"0.67088974",
"0.66193366",
"0.650387",
"0.64708626",
"0.6333387",
"0.63076717",
"0.62996763",
"0.62290937",
"0.6212629",
"0.61988485",
"0.6192746",
"0.61088467",
"0.60879934",
"0.6038599",
"0.60333836",
"0.6008403",
"0.5976674",
"0.59682"
]
| 0.83778375 | 0 |
Execute a shell script. Setting prefix will add the environment variable COLCON_BUNDLE_INSTALL_PREFIX equal to the passed-in value. | def _execute(script, prefix=None, path=None):
path = tempfile.gettempdir() if path is None else path
result = 1
try:
fh = tempfile.NamedTemporaryFile('w', delete=False)
fh.write(script)
fh.close()
print('Executing script below with cwd=%s\n{{{\n%s\n}}}\n' %
(path, script))
try:
os.chmod(fh.name, stat.S_IRWXU)
env = os.environ.copy()
if prefix is not None:
env['COLCON_BUNDLE_INSTALL_PREFIX'] = prefix
result = subprocess.run(
fh.name, cwd=path, env=env, stdout=PIPE, stderr=PIPE,
universal_newlines=True)
if result.stdout is not None:
logger.debug('stdout output: \n' + result.stdout)
if result.stderr is not None:
logger.warn('stderr output: \n' + result.stderr)
except OSError as ex:
print('Execution failed with OSError: %s' % ex)
finally:
if os.path.exists(fh.name):
os.remove(fh.name)
logger.info('Return code was: %s' % result)
return result.returncode == 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _process(self, prefix):\n options = self.options\n script = options.get(prefix, '').strip()\n if script:\n env = _env(options.get(prefix+'-env') or options.get('env') or '')\n shell = (options.get(prefix+'-shell')\n or options.get('shell') or '')\n shell_opts = (options.get(prefix+'-shell-options') or \n options.get('shell-options') or '')\n self.log.info('Executing %s script', prefix)\n self._execute(script, env=env, shell=shell, shell_opts=shell_opts)\n return ()",
"def run_script(prefix, dist, action='post-link'):\n path = join(prefix, 'Scripts' if on_win else 'bin', '.%s-%s.%s' % (\n name_dist(dist),\n action,\n 'bat' if on_win else 'sh'))\n if not isfile(path):\n return True\n if SKIP_SCRIPTS:\n print(\"WARNING: skipping %s script by user request\" % action)\n return True\n\n if on_win:\n try:\n args = [os.environ['COMSPEC'], '/c', path]\n except KeyError:\n return False\n else:\n shell_path = '/bin/sh' if 'bsd' in sys.platform else '/bin/bash'\n args = [shell_path, path]\n\n env = os.environ\n env['PREFIX'] = prefix\n\n import subprocess\n try:\n subprocess.check_call(args, env=env)\n except subprocess.CalledProcessError:\n return False\n return True",
"def run_install_script(this_node, script_name):\n\n this_node.addService(pg.Execute(shell='sh', command='/local/repository/install/' + script_name))",
"def _add_scripts(prefix):\n mapping = {\"MAST_HOME\": prefix}\n if \"Windows\" in platform.system():\n script_dir = os.path.join(INSTALL_DIR, \"files\", \"windows\")\n files = [\n \"mast.bat\",\n \"mast-system.bat\",\n \"mast-accounts.bat\",\n \"mast-backups.bat\",\n \"mast-crypto.bat\",\n \"mast-deployment.bat\",\n \"mast-developer.bat\",\n \"mast-network.bat\",\n \"test-mast.bat\",\n \"mast-version.bat\",\n \"mast-web.bat\",\n \"mastd.bat\",\n \"mast-ssh.bat\",\n \"set-env.bat\",\n ]\n elif \"Linux\" in platform.system():\n script_dir = os.path.join(INSTALL_DIR, \"files\", \"linux\")\n files = [\n \"mast\",\n \"mast-system\",\n \"mast-accounts\",\n \"mast-backups\",\n \"mast-crypto\",\n \"mast-deployment\",\n \"mast-developer\",\n \"mast-network\",\n \"test-mast\",\n \"mast-version\",\n \"mast-web\",\n \"mast-ssh\",\n \"mastd\",\n \"set-env\",\n ]\n\n for f in files:\n dst = os.path.join(prefix, f)\n src = os.path.join(script_dir, f)\n print(\"{} -> {}\".format(src, dst))\n content = render_template_file(src, mapping)\n write_file(dst, content)\n if \"Linux\" in platform.system():\n os.chmod(dst, 0o755)\n\n if \"Windows\" in platform.system():\n # copy python27.dll to site-packages/win32 directory to get around\n # issue when starting mastd\n src = os.path.join(prefix, \"miniconda\", \"python27.dll\")\n dst = os.path.join(\n prefix,\n \"miniconda\",\n \"Lib\",\n \"site-packages\",\n \"win32\",\n \"python27.dll\"\n )\n copyfile(src, dst)\n for filename in [\"pythoncom27.dll\", \"pythoncomloader27.dll\", \"pywintypes27.dll\"]:\n src = os.path.join(\n prefix,\n \"miniconda\",\n \"Lib\",\n \"site-packages\",\n \"pywin32_system32\",\n filename,\n )\n dst = os.path.join(\n prefix,\n \"miniconda\",\n \"Lib\",\n \"site-packages\",\n \"win32\",\n filename,\n )\n copyfile(src, dst)\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"bin\"),\n os.path.join(prefix, \"bin\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"etc\"),\n os.path.join(prefix, \"etc\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"var\"),\n os.path.join(prefix, \"var\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"usrbin\"),\n os.path.join(prefix, \"usrbin\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"tmp\"),\n os.path.join(prefix, \"tmp\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"doc\"),\n os.path.join(prefix, \"doc\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"contrib\"),\n os.path.join(prefix, \"contrib\")\n )",
"def test_exec_prefix(self):\n self.chck_triple('exec_prefix')",
"def _install():\n\tprint \"Preparing to install {} script.\".format(SCRIPT_NAME)\n\t\n\t#make sure there is a place to install the script to.\n\tif not \"SCRIPTS\" in os.environ:\n\t\tprint \"Please set SCRIPTS environment variable.\"\n\t\tsys.exit(1)\n\t\n\tscript_dir = os.environ[\"SCRIPTS\"]\n\t\n\t#check to see if already installed\n\tif _is_already_installed(script_dir):\n\t\tprint \"A version of {} is already installed.\".format(SCRIPT_NAME)\n\t\tprint \"Do you wish to overwrite it? [Y,n]\"\n\t\tif raw_input() != 'Y':\n\t\t\tprint \"Cancelling installation of {}.\".format(SCRIPT_NAME)\n\t\t\tsys.exit(0)\n\t\telse:\n\t\t\tprint \"Overwritting previously installed script {}.\".format(SCRIPT_NAME)\n\t\t\t_uninstall()\n\t\n\t#copy python sources into script directory\n\tnew_dir = os.path.join(script_dir, SCRIPT_NAME)\n\tshutil.copytree(\"src\", new_dir)\n\t\n\t#copy executable and add permissions\n\tfor name in EXEC_NAMES:\n\t\tos.system(\"sudo cp bin/{0} /bin/{0}\".format(name))\n\t\tos.system(\"sudo chmod +x /bin/{}\".format(name))",
"def install_bundle(client_bin, module, bundle_url):\n cmd = CLIENT_KARAF_COMMAND_WITH_ARGS.format(client_bin, PACKAGE_STATE_MAP[\"present\"], bundle_url)\n rc, out, err = module.run_command(cmd)\n\n bundle_id = None\n if rc != 0:\n reason = parse_error(out)\n module.fail_json(msg=reason)\n else:\n install_result = out.split(':')\n bundle_id = install_result[1].strip()\n\n # Parse out to get Bundle id.\n return True, cmd, bundle_id, out, err",
"def create_bootstrap_script(scratch_dir):\n install_script = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"install\")\n shutil.copy(install_script, os.path.join(scratch_dir, \"install\"))",
"def launched():\n if not PREFIX:\n return False\n\n return os.path.realpath(sys.prefix) == os.path.realpath(PREFIX)",
"def test_shell_run_activated(tmp_home, tmp_prefix):\n skip_if_shell_incompat(\"bash\")\n stdout = subprocess.check_output(\n [helpers.get_umamba(), \"shell\", \"-p\", tmp_prefix],\n input=\"echo $PATH\",\n text=True,\n )\n assert str(tmp_prefix) in stdout.split(os.pathsep)[0]",
"def setup_cross():\n if not os.path.exists(cross_prefix):\n docmd(\"mkdir %s\" % cross_prefix)\n epath = os.environ[\"PATH\"]\n set_evar(\"PATH\", \"%s/bin:%s\" % (cross_prefix, epath))",
"def test_config_home_custom_install():\n cache_folder = os.path.join(temp_folder(), \"custom\")\n with environment_append({\"CONAN_USER_HOME\": cache_folder}):\n client = TestClient(cache_folder=cache_folder, cache_autopopulate=False)\n client.save({\"conanfile.py\": GenConanfile()})\n client.run(\"install .\")\n assert \"conanfile.py: Installing package\" in client.out",
"def _define_script_command(command_name,\n parent_shell,\n bootstrap_script,\n container_path,\n scripts_path,\n script):\n script_fragment = \"\\\"{}\\\"\".format(script) if script else \"\"\n parent_shell.define_command(command_name,\n \"python \\\"{bootstrap}\\\" \"\n \"-d \\\"{container}\\\" \"\n \"-r \\\"{scripts}\\\" \"\n \"-s {script}\"\n \"\".format(bootstrap=bootstrap_script,\n container=container_path,\n scripts=scripts_path,\n script=script_fragment))",
"def temp_start():\n\tsh_file_path = data_dir.ROOT_DIR+\"mm_initial.sh\"\n\tcmd = \"sudo %s\" % sh_file_path\n\tsubprocess.Popen(cmd.split(), stdout=subprocess.PIPE).communicate()",
"def install_init_script():\n run('sudo touch %s' % env.init_script)\n run('sudo chown %s %s' % (env.user, env.init_script))\n run('sudo update-rc.d %s defaults' % os.path.basename(env.init_script))\n update_init_script()",
"def install(args):\n scripts = get_console_scripts(args.package)\n for script in scripts:\n src = os.path.join(args.source, script)\n dest = os.path.join(args.destination, script)\n logger.info('symlinking {1} to {0}'.format(dest, src))\n force_symlink(src, dest)",
"def pre_start_script_tmp_sh(tmp_path: Path) -> Path:\n tmp_file = tmp_path / \"prestart.sh\"\n with open(Path(tmp_file), \"x\") as f:\n f.write('echo \"Hello World, from a temporary pre-start shell script\"\\n')\n return Path(tmp_file)",
"def _adjust_shebang(self, script, outfile):\n # Always open the file, but ignore failures in dry-run mode --\n # that way, we'll get accurate feedback if we can read the\n # script.\n try:\n with open(script, \"r\") as stream:\n firstline = stream.readline()\n match = build_scripts.first_line_re.match(firstline)\n if match:\n post_interp = match.group(1) or ''\n log.info(\"copying and adjusting %s -> %s\", script,\n self.build_dir)\n if not self.dry_run:\n with open(outfile, \"w\") as outstream:\n # write script to target file\n outstream.write(\"#!%s%s\\n\" % (self.executable,\n post_interp))\n outstream.write(stream.read())\n return True\n except IOError:\n if not self.dry_run:\n raise\n return False",
"def _pre_deploy_exec(self):\n app.env['JUJU_PROVIDERTYPE'] = model_info(\n juju.get_current_model())['provider-type']\n\n pre_deploy_sh = os.path.join(app.config['spell-dir'],\n 'conjure/steps/00_pre-deploy')\n if os.path.isfile(pre_deploy_sh) \\\n and os.access(pre_deploy_sh, os.X_OK):\n utils.pollinate(app.session_id, 'J001')\n msg = \"Running pre-deployment tasks.\"\n app.log.debug(msg)\n app.ui.set_footer(msg)\n return run(pre_deploy_sh,\n shell=True,\n stdout=PIPE,\n stderr=PIPE,\n env=app.env)\n return json.dumps({'message': 'No pre deploy necessary',\n 'returnCode': 0,\n 'isComplete': True})",
"def test_shell_run_SHELL(tmp_home, tmp_prefix, tmp_env_name, use_prefix, tmp_path):\n skip_if_shell_incompat(\"bash\")\n\n script_path = tmp_path / \"fakeshell.sh\"\n script_path.write_text(\"#!/bin/sh\\nexit 42\")\n script_path.chmod(0o777)\n\n if use_prefix:\n cmd = [helpers.get_umamba(), \"shell\", \"-p\", tmp_prefix]\n else:\n cmd = [helpers.get_umamba(), \"shell\", \"-n\", tmp_env_name]\n\n ret = subprocess.run(cmd, env={**os.environ, \"SHELL\": script_path})\n assert ret.returncode == 42",
"def prepare_sub_script(i):\n\n run_cmd=''\n\n target_os_cfg=i['target_os_cfg']\n\n remote=False\n if target_os_cfg.get('remote','')=='yes': \n remote=True\n\n script_name=i['run_script']\n\n script_path=''\n if 'run_script_uoa' in i and i['run_script_uoa']!='':\n# cm_kernel.print_for_con('')\n# cm_kernel.print_for_con('Preparing path for OS script '+i['run_script_uoa']+' ...')\n\n ii={'cm_run_module_uoa':ini['cfg']['cm_modules']['os.script'],\n 'cm_action':'load',\n 'cm_data_uoa':i['run_script_uoa']}\n r=cm_kernel.access(ii)\n if r['cm_return']>0: return r\n\n script_cfg=r['cm_data_obj']['cfg']\n script_path=r['cm_path']\n\n if 'scripts' not in script_cfg or i['run_script'] not in script_cfg['scripts']:\n return {'cm_return':1, 'cm_error':'can\\'t find script in os.script configuration'}\n\n script_name=script_cfg['scripts'][script_name]\n\n script_name+=target_os_cfg['script_ext']\n\n run_name=script_name\n if script_path!='':\n run_name=os.path.join(script_path, run_name)\n elif 'exec_prefix' in target_os_cfg and target_os_cfg['exec_prefix']!='': \n run_name=target_os_cfg['exec_prefix']+run_name\n\n if target_os_cfg.get('set_executable','')!='':\n p=target_os_cfg['set_executable']+' '+run_name\n x=os.system(p)\n\n run_cmd=''\n if remote and target_os_cfg.get('no_script_execution','')=='yes':\n r=cm_kernel.load_array_from_file({'cm_filename':run_name})\n if r['cm_return']>0: return r\n a=r['cm_array']\n for x in a:\n xx=x.strip()\n if xx!='' and not xx.startswith(target_os_cfg['rem']):\n if run_cmd!='': run_cmd+=target_os_cfg['env_separator']+' '\n run_cmd+=xx\n run_name=''\n else:\n run_cmd=run_name\n\n if i.get('run_cmd','')!='': run_cmd+=' '+i['run_cmd']\n\n if i.get('run_cmd_out1','')!='': run_cmd+=' 1>'+i['run_cmd_out1']\n if i.get('run_cmd_out2','')!='': run_cmd+=' 2>'+i['run_cmd_out2']\n\n\n return {'cm_return':0, 'run_cmd':run_cmd}",
"def install(project_name, compose_file, var_dir, container_dir_to_copy, install_container_command):\n compose_install(project_name, compose_file, var_dir, container_dir_to_copy, install_container_command)",
"def setup_environment(self, spack_env, run_env):\n run_env.prepend_path('PICARD',\n join_path(self.prefix, 'bin', 'picard.jar'))",
"def RunHook(hook, upstream_branch='origin', error_ok=False):\n hook = '%s/%s' % (settings.GetRoot(), hook)\n if not os.path.exists(hook):\n return\n output = cl_settings.RunCommand([hook, upstream_branch], error_ok).strip()\n if output != '':\n print output",
"def install_agent(self, platform_uuid, fileargs):\n rpc = 'platforms.uuid.{}.install'.format(platform_uuid)\n return self.do_rpc(rpc, files=[fileargs])",
"def install_script_stored_on_remote(script_dir, script_name, mode=775, owner='root'):\n full_path = os.path.join(script_dir, script_name)\n\n with cd(script_dir):\n sudo(\"chmod {} {}\".format(mode, script_name))\n sudo(\"chown {} {}\".format(owner, script_name))\n sudo(\"ln -sf {} {}\".format(full_path, env.system_script_dir))",
"def _prepend_remote_shell_script( self, script, remote_path, **put_kwargs ):\n with closing( StringIO( ) ) as out_file:\n with closing( StringIO( ) ) as in_file:\n get( remote_path=remote_path, local_path=in_file )\n in_file.seek( 0 )\n prepend_shell_script( '\\n' + script, in_file, out_file )\n out_file.seek( 0 )\n put( remote_path=remote_path, local_path=out_file, **put_kwargs )",
"def setup(self, **kwargs):\n if self.bash_script:\n src = os.fspath(FILES / self.bash_script)\n dst = os.fspath(self.project_dir / self.bash_script)\n shutil.copy(src, dst)",
"def set_prefix(prefix):\n PLUGINS.set_prefix(prefix)",
"def install():\n src = None\n if len(sys.argv) == 2:\n src = sys.argv[1]\n elif len(sys.argv) > 2:\n print >> sys.stderr, 'USAGE: rbco_nautilusscripts_install [SOURCE_DIR]'\n sys.exit(1)\n\n paths = (\n '~/.gnome2/nautilus-scripts',\n '~/.gnome2/nemo-scripts',\n '~/.config/caja/scripts',\n )\n\n for path in paths:\n print 'Creating in {0} ...'.format(path)\n dest = os.path.expanduser(path)\n link_scripts(dest, src_dir=src)\n print"
]
| [
"0.6353147",
"0.60257393",
"0.56810886",
"0.55134755",
"0.54582494",
"0.5435877",
"0.5353717",
"0.52477705",
"0.51856023",
"0.51831543",
"0.5131652",
"0.50362307",
"0.5002932",
"0.49729705",
"0.49688485",
"0.49680483",
"0.4939706",
"0.49229112",
"0.49186707",
"0.490497",
"0.49013633",
"0.48941785",
"0.48923492",
"0.48626003",
"0.4857058",
"0.48545763",
"0.48542026",
"0.47940385",
"0.47902662",
"0.4790168"
]
| 0.6236007 | 1 |
Instrument a method with the given Test. | def instrumentMethod(test, method_name, c=TestRunner):
unadorned = getattr(c, method_name)
import new
method = new.instancemethod(test.wrap(unadorned), None, c)
setattr(c, method_name, method) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def instrumentMethod(test, method_name, c=TestRunner):\r\n unadorned = getattr(c, method_name)\r\n import new\r\n method = new.instancemethod(test.wrap(unadorned), None, c)\r\n setattr(c, method_name, method)",
"def add_test_method(cls, queue_name_fragment, hdrs, props, send_shim, receive_shim, timeout):\n\n def inner_test_method(self):\n self.run_test(self.sender_addr,\n self.receiver_addr,\n queue_name_fragment,\n self.jms_message_type,\n self.test_values,\n hdrs[1],\n props[1],\n send_shim,\n receive_shim,\n timeout)\n\n inner_test_method.__name__ = 'test.C.%s.%s%s.%s->%s' % (jms_message_type[4:-5], hdrs[0], props[0],\n send_shim.NAME, receive_shim.NAME)\n setattr(cls, inner_test_method.__name__, inner_test_method)",
"def _generate_test_method(self, method_name, function):\n suite(*getattr(self, '_suites', set()))(function)\n setattr(self, method_name, instancemethod(function, self, self.__class__))",
"def add_test_method(cls, queue_name_fragment, hdrs, props, send_shim, receive_shim, timeout):\n\n def inner_test_method(self):\n self.run_test(self.sender_addr,\n self.receiver_addr,\n queue_name_fragment,\n self.jms_message_type,\n self.test_values,\n hdrs[1],\n props[1],\n send_shim,\n receive_shim,\n timeout)\n\n inner_test_method.__name__ = 'test.A.%s.%s%s.%s->%s' % (jms_message_type[4:-5], hdrs[0], props[0],\n send_shim.NAME, receive_shim.NAME)\n setattr(cls, inner_test_method.__name__, inner_test_method)",
"def add_test_method(cls, queue_name_fragment, hdrs, props, send_shim, receive_shim, timeout):\n\n def inner_test_method(self):\n self.run_test(self.sender_addr,\n self.receiver_addr,\n queue_name_fragment,\n self.jms_message_type,\n self.test_values,\n hdrs[1],\n props[1],\n send_shim,\n receive_shim,\n timeout)\n\n inner_test_method.__name__ = 'test.D.%s.%s%s.%s->%s' % (jms_message_type[4:-5], hdrs[0], props[0],\n send_shim.NAME, receive_shim.NAME)\n setattr(cls, inner_test_method.__name__, inner_test_method)",
"def _instrument_class_method(\n self,\n estimator: Type[BaseEstimator],\n method_name: str,\n attributes: Attributes = None,\n ):\n if self._check_instrumented(estimator, method_name):\n logger.debug(\n \"Already instrumented: %s.%s\",\n estimator.__qualname__,\n method_name,\n )\n return\n class_attr = getattr(estimator, method_name)\n delegator = get_delegator(estimator, method_name)\n if isinstance(class_attr, property):\n logger.debug(\n \"Not instrumenting found property: %s.%s\",\n estimator.__qualname__,\n method_name,\n )\n elif delegator is not None:\n implement_span_delegator(delegator)\n else:\n setattr(\n estimator,\n \"_otel_original_\" + method_name,\n (estimator, class_attr),\n )\n setattr(\n estimator,\n method_name,\n implement_span_estimator(class_attr, estimator, attributes),\n )",
"def add_test_method(cls, queue_name_fragment, hdrs, props, send_shim, receive_shim, timeout):\n\n def inner_test_method(self):\n self.run_test(self.sender_addr,\n self.receiver_addr,\n queue_name_fragment,\n self.jms_message_type,\n self.test_values,\n hdrs[1],\n props[1],\n send_shim,\n receive_shim,\n timeout)\n\n inner_test_method.__name__ = 'test.B.%s.%s%s.%s->%s' % (jms_message_type[4:-5], hdrs[0], props[0],\n send_shim.NAME, receive_shim.NAME)\n setattr(cls, inner_test_method.__name__, inner_test_method)",
"def test_method(self, test, another_test, _): # noqa: D213, D407",
"def _instrument_instance_method(\n self,\n estimator: BaseEstimator,\n method_name: str,\n attributes: Attributes = None,\n ):\n if self._check_instrumented(estimator, method_name):\n logger.debug(\n \"Already instrumented: %s.%s\",\n estimator.__class__.__qualname__,\n method_name,\n )\n return\n\n class_attr = getattr(type(estimator), method_name, None)\n if isinstance(class_attr, property):\n logger.debug(\n \"Not instrumenting found property: %s.%s\",\n estimator.__class__.__qualname__,\n method_name,\n )\n else:\n method = getattr(estimator, method_name)\n setattr(\n estimator, \"_otel_original_\" + method_name, (estimator, method)\n )\n setattr(\n estimator,\n method_name,\n implement_span_estimator(method, estimator, attributes),\n )",
"def hook(cls, cov, method_name):\n method = getattr(cov, method_name)\n hook = cls(method)\n setattr(cov, method_name, hook.wrapper)\n return hook",
"def TestMethodBody(run_method_name, run_dargs):\n return lambda self: getattr(self, run_method_name)(**run_dargs)",
"def test(self, test):\n\n self._test = test",
"def test(self, test):\n\n self._test = test",
"def addTest(self, test):\r\n self.tests.append(test)\r\n return",
"def _mock_function(self, obj, func):\n setattr(obj, func.__name__, MethodType(func, self.breaker))",
"def _mock_function(self, obj, func):\n setattr(obj, func.__name__, MethodType(func, self.breaker))",
"def instrument_func(func):\n old_index = current_index\n\n func.__code__ = patch_code(func.__code__, True, True)\n _reserve_counters(current_index - old_index)\n\n return func",
"def decorator(func):\n\n setattr(func, mock_method.attr_name, name)\n return func",
"def test(func):\n register_tests(func, [func.__name__])",
"def run(self, test_method) -> unittest.TestResult:\n # TestCase supports getting a runner for an individual method by name\n test = self._test_case(test_method.__name__)\n result = unittest.TestResult()\n test_runner = threading.Thread(target=test.run, args=(result,))\n test_runner.daemon = True\n test_runner.start()\n\n test_runner.join(self.timeout)\n\n # if the test is still running, report a failure\n if test_runner.is_alive():\n # create a fake exception so we can use that for the failure\n try:\n raise TimeoutError(\n f\"Test {repr(test)} took longer than {self.timeout} seconds\"\n )\n except TimeoutError:\n info = sys.exc_info()\n result.addFailure(test, info)\n\n self._results[test_method] = result\n return result",
"def add_instrument(self, mount, instrument):\n pass",
"def inner_test():\n pass",
"def inner_test():\n pass",
"def run(self, test, env):\n\n raise NotImplementedError",
"def test_method(self, test, another_test, z, _, x=1, y=2, _private_arg=1): # noqa: D213, D407",
"def istest(func):\n func.__test__ = True\n return func",
"def identify_method(self, func):",
"def decorator(test_method):\n\n @wraps(test_method)\n def wrapper(self):\n \"\"\" Parse file then run test. \"\"\"\n parser = Gerber(ignore_unknown=False)\n self.design = parser.parse(path.join(DIR, filename))\n test_method(self)\n\n return wrapper",
"def __call__(self, example):\n return self.test(example)",
"def test(coverage):\n print('success')\n pass"
]
| [
"0.8075311",
"0.5744945",
"0.57412565",
"0.57279474",
"0.57251406",
"0.56970775",
"0.5630941",
"0.5606898",
"0.55533004",
"0.5506733",
"0.54564655",
"0.5387677",
"0.5387677",
"0.53375614",
"0.521765",
"0.521765",
"0.51903296",
"0.51803833",
"0.5178082",
"0.51614606",
"0.5117643",
"0.51030827",
"0.51030827",
"0.50961363",
"0.5093407",
"0.5090495",
"0.5061234",
"0.50480103",
"0.5005201",
"0.5004791"
]
| 0.8082962 | 0 |
Prepares to execute the task that this module is designed to do. Preparations include loading and running the NLP model and retrieving the information needed from the NLP output and the user to execute the task. | def prepare(self, nlp_model_names, text, sender):
if self.nlp_model is None:
# Replace "NLP_MODEL" with the name of the NLP models which this module should use.
self.nlp_model = spacy.load(nlp_model_names["NLP_MODEL"])
to, when, body = self.nlp(text)
self.description = None
return self.prepare_processed(to, when, body, sender) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _preparation_workflow(self):\n self._validate_environment()\n self._validate_parameters()\n self._update_verbosity()",
"def setup(self) -> None:\n self.logger.info(\"ML Train task: setup method called.\")",
"def launch(self, force_prep=False):\n #TODO process upploaded csv\n assert self.ready()\n self.launch_progress = 0\n self.set_status(\"Gathering data\")\n if 'O' not in set(self.labels.dict.values()):\n self.add_labels({max(list(self.labels.dict.keys()))+1:'O'})\n\n processed_file_path = os.path.join(DATASETS_PATH, self.dataset_uuid, 'processed.csv')\n bert_file_path = os.path.join(DATASETS_PATH, self.dataset_uuid, 'processed.bert')\n elmo_file_path = os.path.join(DATASETS_PATH, self.dataset_uuid, 'processed.elmo')\n nlp_file_path = os.path.join(DATASETS_PATH, self.dataset_uuid, 'processed.nlp')\n sbert_file_path = os.path.join(DATASETS_PATH, self.dataset_uuid, 'processed.sbert')\n if os.path.exists(processed_file_path) and not force_prep:\n df = pd.read_csv(processed_file_path)\n if 'span_label' in df.columns:\n df['span_label']=df['span_label'].apply(eval)\n # Let's say loading the file is ~half the launch time\n # (if the file already exists)\n self.total = 2\n self.update(1)\n else:\n datafiles = [os.path.join(DATASETS_PATH, self.dataset_uuid, d) \\\n for d in os.listdir(os.path.join(DATASETS_PATH, self.dataset_uuid))]\n df = concat_dataset(datafiles)\n # expand total to account for time it takes to initialize the model\n self.total = len(df)*(1.1) \n self.set_status(\"Preprocessing data\")\n df = self.process_data(df, processed_file_path)\n\n # load list of 'allennlp.data.instance's. allennlp.data.instance can store true labels and tag info internally.\n if os.path.exists(nlp_file_path) and not force_prep:\n with open(nlp_file_path, 'rb') as f:\n sentences = pickle.load(f)\n else:\n #TODO define a universal reader for certain format\n # reader = RestaurantsDatasetReader()\n # data = reader.read(processed_file_path)\n #TODO handle when aux files do not exist\n pass\n bert_emb = np.load(bert_file_path, allow_pickle=True)\n elmo_emb = np.load(elmo_file_path, allow_pickle=True)\n sbert_emb = np.load(sbert_file_path, allow_pickle=True)\n for s, b, e, sb in zip(sentences, bert_emb, elmo_emb, sbert_emb):\n s.fields['bert'] = b\n s.fields['sbert'] = sb\n s.fields['elmo'] = e\n\n df['bert'] = bert_emb\n df['sbert'] = [sb for sb in sbert_emb]\n df['elmo'] = elmo_emb\n df['text_nlp'] = sentences\n\n columns_to_drop = list(\n set(df.columns).intersection(set(['span_label','file','label'])))\n df = df.drop(columns=columns_to_drop).reset_index()\n # since df['text_nlp'] contains true label info, drop 'labels' column.\n columns_to_drop = list(set(df.columns).difference(set(['index', 'Unnamed: 0', 'text', 'labels', 'split', 'bert', 'sbert',\n 'elmo', 'text_nlp'])))\n if len(columns_to_drop) > 0:\n df = df.drop(columns=columns_to_drop)\n df_train = df[df['split']=='train']\n df_dev = df[df['split'] == 'dev']\n df_valid = df[df['split'] == 'valid']\n df_test = df[df['split'] == 'test']\n\n self.text_inv_dict = dict(\n zip(list(df['text']),list(df.index))\n )\n\n # TODO split heldout set if necessary\n # for now, passing empty df as heldout set\n df_heldout = df_test\n\n self.emb_dict = Embeddings(df)\n\n self.set_status(\"Initializing modeler\")\n self.modeler = Modeler(df_train, df_dev, df_valid, df_test, df_heldout, self.labels, emb_dict=self.emb_dict)\n\n self.launch_progress = 1.0\n self.set_status(\"Finished\")\n return self.modeler",
"def test_pyt_preprocess_train(self):\n # Second, check that the model will train\n defaults = parser_defaults.copy()\n defaults['datatype'] = 'train'\n defaults['pytorch_preprocess'] = True\n str_output, _, _ = testing_utils.train_model(defaults)\n self.assertTrue(\n solved_task(str_output),\n 'Teacher could not teach seq2seq with preprocessed obs, output: {}'\n .format(str_output)\n )",
"def prepare(self):\n self.parse_template()\n self.build_argparser()\n self.parse_arguments()\n self.render_template()\n self.update_relation()",
"def __init__(self) -> None:\n\n self.config = TbSETConfig()\n self.session = PromptSession()\n self.commands = WordCompleter([\n \"train\",\n \"translate\"\n ])\n\n # Check if saved model is present for inference\n saved_path = self.config.TRN_HYPERP[\"save_path\"]\n\n if saved_path and tf.saved_model.contains_saved_model(saved_path):\n print(\"INFO: Trained model found. It will be used for inference.\\n\")\n self.saved_translator = tf.saved_model.load(saved_path)\n else:\n print(\"INFO: Trained model not found. Please train the model before making inference.\\n\")\n self.saved_translator = None",
"def _preprocess(self):\n self.data['sentences'] = self.data['text'].apply(self._tokenize_sent)\n self.data['nouns'] = self.data['sentences'].apply(self._get_nouns)\n # self._get_frequent_features()\n # self._compactness_pruning()\n # self._redundancy_pruning()\n # self._get_features()\n self._extract_opinions()",
"def prepare(self):\n\n inp = self.config.get('predict.input', 'X:0')\n out = self.config.get('predict.output', 'output:0')\n\n # set input placeholder\n self.input = self.graph.get_tensor_by_name(inp)\n\n # set output operation\n if isinstance(out, str) or isinstance(out, unicode):\n self.output = {out: self.graph.get_tensor_by_name(out)}\n self.output_alone = True\n elif isinstance(out, list):\n self.output = {o: self.graph.get_tensor_by_name(o) for o in out}\n self.output_alone = False\n else:\n raise LoaderException('incorrect predict.output type')",
"def task_process(args):\n if args.mode == 'change model':\n for i in range(config.center_len):\n h, w = config.center_list[i][0], config.center_list[i][1]\n os.system('rm -rf ctpn_change_{}x{}.onnx'.format(h, w))\n for i in range(config.center_len):\n h, w = config.center_list[i][0], config.center_list[i][1]\n os.system('{} change_model.py --input_path={}/ctpn_{}x{}.onnx --output_path={}/ctpn_change_{}x{}.onnx' \\\n .format(args.interpreter, args.src_dir, h, w,args.res_dir, h, w)) \n if args.mode == 'preprocess':\n for i in range(config.center_len):\n os.system('mkdir -p {}_{}x{}'.format(args.res_dir, config.center_list[i][0], config.center_list[i][1]))\n os.system('{} ctpn_preprocess.py --src_dir={} --save_path={}' \\\n .format(args.interpreter, args.src_dir, args.res_dir))\n if args.mode == 'ais_infer':\n fps_all = 0\n os.system('mkdir -p {}/inf_output'.format(args.res_dir))\n for i in range(config.center_len):\n h, w = config.center_list[i][0], config.center_list[i][1]\n\n os.system('{} --model={} --input={}_{}x{} --dymHW {},{} --device {} --batchsize={} --output={}/inf_output' \\\n .format(args.interpreter, args.om_path, args.src_dir ,h , w, h, w,args.device, args.batch_size, args.res_dir))\n\n sumary_path = glob.glob('{}/inf_output/*ary.json'.format(args.res_dir))[0]\n with open(sumary_path, 'r') as f:\n output = json.load(f)\n throughput = output['throughput'] \n fps_all = fps_all + throughput * config.center_count[i]\n os.system('rm -f {}'.format(sumary_path))\n os.system('mv {}/inf_output/*/*.bin {}'.format(args.res_dir, args.res_dir))\n os.system('rm {}/inf_output -rf'.format(args.res_dir))\n fps_all = fps_all / config.imgs_len\n print(\"====performance data====\")\n print('CTPN bs{} models fps:{}'.format(args.batch_size, fps_all))",
"def __init__(self , model_file_name ):\n logging.set_verbosity(logging.ERROR)\n with TheTFGraph.as_default():\n with TheTFSession.as_default():\n self.model = keras.models.load_model( model_file_name + \".hdf5\" , compile=False )\n JSON = json.load( open(model_file_name + \".json\" ) )\n self.all_sites = list(JSON['all_sites'])\n self.all_errors = list(JSON['all_errors'])\n self.all_actions = list(JSON['all_actions'])\n self.IsBinary = bool(JSON['IsBinary'])\n self.TiersOnly = bool(JSON['TiersOnly'])\n self.Task = Task({} , \"TaskLoader\" , self)\n self.Name = model_file_name.split('/')[-1]\n self.ModelID = int( JSON['model'] )\n self.InputTrainingDataID = int( JSON['trainingdata'])\n\n self.Prediction = Prediction.Prediction( self.ModelID , self.InputTrainingDataID )",
"def _execute_after_reading(self):\n # Auxiliary parameters object for the CheckAndPepareModelProcess\n params = KratosMultiphysics.Parameters(\"{}\")\n params.AddValue(\"computing_model_part_name\",self.settings[\"computing_model_part_name\"])\n params.AddValue(\"problem_domain_sub_model_part_list\",self.settings[\"problem_domain_sub_model_part_list\"])\n params.AddValue(\"processes_sub_model_part_list\",self.settings[\"processes_sub_model_part_list\"])\n # Assign mesh entities from domain and process sub model parts to the computing model part.\n import check_and_prepare_model_process_structural\n check_and_prepare_model_process_structural.CheckAndPrepareModelProcess(self.main_model_part, params).Execute()\n\n # Import constitutive laws.\n materials_imported = self.import_constitutive_laws()\n if materials_imported:\n self.print_on_rank_zero(\"::[MechanicalSolver]:: \", \"Constitutive law was successfully imported.\")\n else:\n self.print_on_rank_zero(\"::[MechanicalSolver]:: \", \"Constitutive law was not imported.\")",
"def process(self):\n self.apply_language_filter()\n self.load_feature_rates()\n self.compute_feature_properties()\n self.remove_unwanted_features()\n if self.pruned:\n self.messages.append(\"\"\"[DEPENDENCY] Model %s: Pruned trees are implemented in the BEAST package \"BEASTlabs\".\"\"\" % self.name)",
"def handle(self, args, unknown):\n\n settings = Dict2Obj(**runpy.run_path(\"%s/%s\" % (os.getcwd(), 'settings.py')))\n project_name = os.getcwd().split('/')[-1]\n extra_args = self.parse_unknown(unknown)\n Train(project_name=project_name, settings=settings, args=args, **extra_args).begin()",
"def prepare_for_model_run(self, model_time):\n\n pass",
"def _manual_setup(self):\n # If self.cache is None, then all caching should be skipped\n if self.name_to_index_dict is None:\n self.name_to_index_dict = {name:i for i, name in enumerate(self.input_fields)}\n self.task_label_index = self.name_to_index_dict[self.task_name]\n if self.cache is not None:\n self.setup_cache()",
"def main():\n\n ############################ variable settings #################################\n parser = argparse.ArgumentParser(description='Run Subtask C of GermEval 2017 Using Pre-Trained Language Model.')\n parser.add_argument('--seed', type=int, default=42, help='Random seed.')\n parser.add_argument('--lang_model', type=str, default='bert-base-german-dbmdz-uncased', help='The pre-trained language model.')\n parser.add_argument('--epochs', type=int, default=4, help='Number of epochs for training.')\n parser.add_argument('--lr', type=float, default=5e-5, help='The learning rate.')\n parser.add_argument('--max_len', type=int, default=256, help='The maximum sequence length of the input text.')\n parser.add_argument('--batch_size', type=int, default=32, help='Your train set batch size.')\n parser.add_argument('--df_path', type=str, default='./data/', help='The data directory.') \n parser.add_argument('--train_data', type=str, default='train_df_cat.tsv', help='The filename of the input train data.')\n parser.add_argument('--dev_data', type=str, default='dev_df_cat.tsv', help='The filename of the input development data.')\n parser.add_argument('--test_data1', type=str, default='test_syn_df_cat.tsv', help='The filename of the first input test data (synchronic).')\n parser.add_argument('--test_data2', type=str, default='test_dia_df_cat.tsv', help='The filename of the second input test data (diachronic).')\n parser.add_argument('--output_path', type=str, default='./output/subtaskC/', help='The output directory of the model and predictions.')\n parser.add_argument(\"--train\", default=True, action=\"store_true\", help=\"Flag for training.\")\n parser.add_argument(\"--save_prediction\", default=False, action=\"store_true\", help=\"Flag for saving predictions.\")\n parser.add_argument(\"--save_cr\", default=False, action=\"store_true\", help=\"Flag for saving confusion matrix.\")\n parser.add_argument(\"--exclude_general\", default=False, action=\"store_true\", help=\"Flag for excluding category Allgemein.\")\n parser.add_argument(\"--exclude_neutral\", default=False, action=\"store_true\", help=\"Flag for excluding neutral polarity.\")\n parser.add_argument(\"--exclude_general_neutral\", default=False, action=\"store_true\", help=\"Flag for excluding category Allgemein:neutral.\")\n args = parser.parse_args()\n ################################################################################\n set_all_seeds(args.seed)\n device, n_gpu = initialize_device_settings(use_cuda=True)\n \n # Load data\n train_df = pd.read_csv(args.df_path + args.train_data, delimiter = '\\t')\n dev_df = pd.read_csv(args.df_path + args.dev_data, delimiter = '\\t')\n test_syn_df = pd.read_csv(args.df_path + args.test_data1, delimiter = '\\t')\n test_dia_df = pd.read_csv(args.df_path + args.test_data2, delimiter = '\\t')\n \n # Create a tokenizer\n lower_case = False\n if args.lang_model[-7:] == \"uncased\":\n lower_case = True\n\n if args.lang_model[:4] == \"bert\":\n model_class = \"BERT\"\n tokenizer = BertTokenizer.from_pretrained(args.lang_model, do_lower_case=lower_case, max_length=args.max_len)\n \n if args.lang_model[:10] == \"distilbert\":\n model_class = \"DistilBERT\"\n tokenizer = DistilBertTokenizer.from_pretrained(args.lang_model, do_lower_case=lower_case, max_length=args.max_len)\n \n\n # get training features\n cats = train_df.columns[5:]\n end = \"full\"\n # exclude categories if required\n if (args.exclude_general):\n cats = [i for i in list(cats) if \"Allgemein\" not in i]\n end = \"excl_gen\"\n if 
(args.exclude_neutral):\n cats = [i for i in list(cats) if \"neutral\" not in i]\n end = \"excl_neu\"\n if (args.exclude_general_neutral):\n cats = [i for i in list(cats) if \"Allgemein:neutral\" not in i]\n end = \"excl_genneu\"\n \n num_labels = len(list(cats))\n\n # create one hot labels\n train_df['one_hot_labels'] = list(train_df[list(cats)].values)\n dev_df['one_hot_labels'] = list(dev_df[list(cats)].values)\n test_syn_df['one_hot_labels'] = list(test_syn_df[list(cats)].values)\n test_dia_df['one_hot_labels'] = list(test_dia_df[list(cats)].values)\n\n # retrieve sentences and labels\n df = pd.concat([train_df, dev_df])\n sentences = df.text.values\n labels = list(df.one_hot_labels.values) \n\n sentences_syn = test_syn_df.text.values\n labels_syn = list(test_syn_df.one_hot_labels.values)\n\n sentences_dia = test_dia_df.text.values\n labels_dia = list(test_dia_df.one_hot_labels.values)\n \n print(\"number of categories:\", len(list(cats)))\n\n # Tokenize all of the sentences and map the tokens to their word IDs. \n input_ids = [tokenizer.encode(sent, add_special_tokens=True, truncation=True, \n max_length=args.max_len) for sent in sentences]\n input_ids = pad_sequences(input_ids, maxlen=args.max_len, dtype=\"long\", \n value=0.0, truncating=\"post\", padding=\"post\")\n # Create attention masks\n attention_masks = [[int(token_id > 0) for token_id in sent] for sent in input_ids]\n \n # synchronic test data\n input_ids_syn = [tokenizer.encode(sent, add_special_tokens=True, truncation=True) for sent in sentences_syn]\n input_ids_syn = pad_sequences(input_ids_syn, maxlen=args.max_len, dtype=\"long\", \n value=0.0, truncating=\"post\", padding=\"post\")\n attention_masks_syn = [[int(token_id > 0) for token_id in sent] for sent in input_ids_syn]\n \n # diachronic test data\n input_ids_dia = [tokenizer.encode(sent, add_special_tokens=True, truncation=True) for sent in sentences_dia]\n input_ids_dia = pad_sequences(input_ids_dia, maxlen=args.max_len, dtype=\"long\", \n value=0.0, truncating=\"post\", padding=\"post\")\n attention_masks_dia = [[int(token_id > 0) for token_id in sent] for sent in input_ids_dia]\n\n # split train, dev\n train_inputs, train_labels, dev_inputs, dev_labels, train_masks, dev_masks = split_train_dev(\n train_df, dev_df, attention_masks, input_ids, labels)\n \n # transform to torch tensor\n train_inputs = torch.tensor(train_inputs)\n dev_inputs = torch.tensor(dev_inputs)\n\n train_labels = torch.tensor(train_labels)\n dev_labels = torch.tensor(dev_labels)\n\n train_masks = torch.tensor(train_masks)\n dev_masks = torch.tensor(dev_masks)\n\n test_syn_inputs = torch.tensor(input_ids_syn)\n test_syn_masks = torch.tensor(attention_masks_syn)\n test_syn_labels = torch.tensor(labels_syn)\n\n test_dia_inputs = torch.tensor(input_ids_dia)\n test_dia_masks = torch.tensor(attention_masks_dia)\n test_dia_labels = torch.tensor(labels_dia)\n\n # Create the DataLoader\n train_dataloader = create_dataloader(train_inputs, train_masks, \n train_labels, args.batch_size, train = True)\n\n dev_dataloader = create_dataloader(dev_inputs, dev_masks, \n dev_labels, args.batch_size, train = False)\n\n test_syn_dataloader = create_dataloader(test_syn_inputs, test_syn_masks, \n test_syn_labels, args.batch_size, \n train = False)\n\n test_dia_dataloader = create_dataloader(test_dia_inputs, test_dia_masks, \n test_dia_labels, args.batch_size, \n train = False)\n\n # Create model\n if args.train:\n if model_class == \"BERT\":\n config = BertConfig.from_pretrained(args.lang_model, 
num_labels=num_labels) \n config.hidden_dropout_prob = 0.1 \n model = BertForSequenceClassification.from_pretrained(\n args.lang_model,\n num_labels = num_labels,\n output_attentions = False,\n output_hidden_states = False\n )\n\n if model_class == \"DistilBERT\":\n config = DistilBertConfig.from_pretrained(args.lang_model, num_labels=num_labels) \n config.hidden_dropout_prob = 0.1 \n model = DistilBertForSequenceClassification.from_pretrained(\n args.lang_model,\n num_labels = num_labels,\n output_attentions = False,\n output_hidden_states = False\n )\n model.cuda()\n\n\n # Create an optimizer\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.0}\n ]\n optimizer = AdamW(\n optimizer_grouped_parameters,\n lr=args.lr,\n eps = 1e-8\n )\n # Total number of training steps = number of batches * number of epochs\n total_steps = len(train_dataloader) * args.epochs\n # Create the learning rate scheduler\n scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=0,\n num_training_steps=total_steps\n )\n \n # train model\n # Main Loop\n print(\"=================== Train ================\")\n print(\"##### Language Model:\", args.lang_model, \",\", \"learning rate:\", args.lr)\n print()\n\n track_time = time.time()\n # trange is a tqdm wrapper around the normal python range\n for epoch in trange(args.epochs, desc=\"Epoch\"):\n print(\"Epoch: %4i\"%epoch, dt.datetime.now())\n\n model, optimizer, scheduler, tr_loss = train_multilabel(\n train_dataloader=train_dataloader, \n model=model, \n device=device, \n optimizer=optimizer, \n scheduler=scheduler, \n num_labels=num_labels\n )\n # EVALUATION: TRAIN SET\n pred_bools_train, true_bools_train, f1_train = eval_multilabel(\n train_dataloader, model=model, device=device)\n print(\"TRAIN: micro F1 %.3f\"%(f1_train))\n \n # EVALUATION: DEV SET\n pred_bools_dev, true_bools_dev, f1_dev = eval_multilabel(\n dev_dataloader, model=model, device=device)\n print(\"EVAL: micro F1 %.3f\"%(f1_dev))\n \n\n print(\" Training and validation took in total: {:}\".format(format_time(time.time()-track_time)))\n\n # EVALUATION: TEST SYN SET\n pred_bools_syn, true_bools_syn, f1_test_syn = eval_multilabel(\n test_syn_dataloader, model=model, device=device)\n print(\"TEST SYN: micro F1 %.4f\"%(f1_test_syn))\n\n # classification report\n clf_report_syn = classification_report(true_bools_syn, pred_bools_syn, target_names=cats, digits=3)\n print(clf_report_syn)\n\n\n # EVALUATION: TEST DIA SET\n pred_bools_dia, true_bools_dia, f1_test_dia = eval_multilabel(\n test_dia_dataloader, model=model, device=device\n )\n print(\"TEST DIA: micro F1 %.4f\"%(f1_test_dia))\n\n # classification report\n clf_report_dia = classification_report(true_bools_dia, pred_bools_dia, target_names=cats, digits=3)\n print(clf_report_dia)\n \n if args.save_cr:\n pickle.dump(clf_report_syn, open(args.output_path+'clf_report_'+args.lang_model+'_test_syn_'+str(num_labels)+end+'.txt','wb'))\n pickle.dump(clf_report_dia, open(args.output_path+'clf_report_'+args.lang_model+'_test_dia_'+str(num_labels)+end+'.txt','wb'))\n\n\n if args.save_prediction:\n test_syn_df[\"category_pred\"] = pred_bools_syn\n test_dia_df[\"category_pred\"] = pred_bools_dia\n 
test_syn_df.category_pred.to_csv(args.output_path+args.lang_model+'_test_syn_'+str(num_labels)+end+\".tsv\", \n sep=\"\\t\", index = False, header = True, encoding = \"utf-8-sig\")\n test_dia_df.category_pred.to_csv(args.output_path+args.lang_model+'_test_dia_'+str(num_labels)+end+\".tsv\", \n sep=\"\\t\", index = False, header = True, encoding = \"utf-8-sig\")",
"def main(self):\n\n env = self.env\n\n # Initial setup\n self.parse_args()\n self.find_taskfile()\n\n if self.taskfile is None:\n env.abort(\"Unable to find {0}\".format(self.cmdline.file))\n\n # Set command line NAME=VALUE variables before loading the file\n (tasks, params) = self.get_tasks_params()\n env.update(**params)\n\n env[\"_TOP_\"] = os.path.dirname(self.taskfile)\n env[\"_ABSTOP_\"] = os.path.abspath(env[\"_TOP_\"])\n env[\"_CWD_\"] = os.path.abspath(self.cwd)\n env._load(self.taskfile)\n\n # Print tasks list if requested\n if self.cmdline.list:\n for name in env._tasks:\n if not name.startswith(\"_\"):\n env.outputln(name)\n env.exit()\n\n # Print var/task help if requested\n if self.cmdline.varhelp:\n self.show_varhelp()\n\n if self.cmdline.taskhelp:\n names = sorted(set([i[0] for i in tasks]))\n self.show_taskhelp(names)\n\n if self.cmdline.taskhelp or self.cmdline.varhelp:\n env.exit()\n\n # Execute the requested tasks setting task specific variables\n for (task, params) in tasks:\n env.calltask(task, **params)",
"def main(_):\n # Set FLAGS defaults.\n words = FLAGS.words\n if FLAGS.vocab_size == -1:\n FLAGS.__setattr__(\"vocab_size\", word_default_vocab_size if words else char_default_vocab_size)\n if FLAGS.num_samples == -1:\n FLAGS.__setattr__(\"num_samples\", word_default_num_samples if words else char_default_num_samples)\n if FLAGS.tensorboard_logdir is None:\n FLAGS.__setattr__(\"tensorboard_logdir\", FLAGS.train_dir)\n\n if FLAGS.words:\n data_utils._START_VOCAB = data_utils.START_VOCAB_WORD\n\n # Check compatibility with word2vec file\n if FLAGS.word_embeddings:\n # For now, assume the embedding size is 300. If variable, reprogram.\n print(\"Setting LSTM size to 300 to conform to the word2vec file\")\n FLAGS.__setattr__(\"size\", 300)\n\n # Start task according to flags.\n if FLAGS.self_test:\n self_test()\n elif FLAGS.decode:\n decode()\n else:\n train_distributed() if FLAGS.distributed else train_not_distributed()",
"def main():\n # process CLI arguments\n argparser = argparse.ArgumentParser(description=\"\"\"Script for classifying\ntweets according to their sentiment polarity\"\"\")\n\n subparsers = argparser.add_subparsers(help=\"type of operation to perform\", dest = \"mode\")\n # training options\n tr_parser = subparsers.add_parser(TRAIN, help = \"train the model\")\n tr_parser.add_argument(\"-d\", \"--dev-set\", help = \"development set\",\n type = argparse.FileType('r'))\n tr_parser.add_argument(\"-l\", \"--lexicon\", help = \"sentiment lexicon to use for sampling\",\n type = str, action = \"append\", default = [])\n _add_cmn_options(tr_parser)\n # testing options\n test_parser = subparsers.add_parser(TEST, help = \"test the model\")\n test_parser.add_argument(\"-d\", \"--debug\", help = \"output debug information\", \\\n action = \"store_true\")\n test_parser.add_argument(\"-v\", \"--verbose\", help = \"output scores along with predicted labels\",\n action = \"store_true\")\n test_parser.add_argument(\"--scikit\", help = \"use supervised scikit classifier istead of deep\",\n action = \"store_true\")\n _add_cmn_options(test_parser)\n # evaluation options (train and test at the same time)\n ev_parser = subparsers.add_parser(EVALUATE, help = \"evaluate trained model\")\n _add_cmn_options(ev_parser)\n ev_parser.add_argument(\"-v\", \"--verbose\", help = \"output errors along with evaluation\",\n action = \"store_true\")\n args = argparser.parse_args()\n # perform the requied action\n if args.mode == TRAIN:\n classifier = SentimentClassifier(a_path = None)\n if args.dev_set is None:\n dev_set = None\n else:\n dev_set = _read_dataset([args.dev_set])\n lexica = [_read_lexicon(ilex) for ilex in args.lexicon]\n pos, pos_re, neg, neg_re = _merge_lexica(lexica)\n classifier.train(_read_dataset(args.files), a_path=args.model,\n a_dev_set=dev_set, a_pos_re=pos_re, a_pos=pos,\n a_neg_re=neg_re, a_neg=neg)\n elif args.mode == TEST:\n # load model from default location\n y = \"\"; score = 0.\n if args.model:\n classifier = SentimentClassifier(args.model)\n else:\n classifier = SentimentClassifier()\n for ifile in args.files:\n for ifields in iterlines(ifile, TEST_TOPIC_IDX):\n if args.debug:\n classifier.debug(list(ifields[TXT_IDX]))\n else:\n y, score = classifier.predict(list(ifields[TXT_IDX]))\n if args.verbose:\n ifields.append(str(score))\n ifields.append(y)\n print(TAB.join(ifields))\n else:\n raise NotImplementedError\n # for ifile in a_files:\n # macro_MAE, micro_MAE = evaluate(classify(classifier, ifile), args.verbose, lambda x: x)\n # print(\"{:20s}{:.7}\".format(\"Macro-averaged MAE:\", macro_MAE), file = sys.stderr)\n # print(\"{:20s}{:.7}\".format(\"Micro-averaged MAE:\", micro_MAE), file = sys.stderr)\n return 0",
"def _initialise_run(self) -> None:",
"def __init__(self):\n super(PreProcess, self).__init__()",
"def preprocess_main():",
"def setup_task(self, *args, **kwargs):\n pass",
"def setup():\n load_from_pretrain = args.pretrained is not None and os.path.exists(args.pretrained)\n\n if load_from_pretrain and not args.finetune:\n cfg_folder = args.pretrained\n cfg_from_file(os.path.join(cfg_folder, \"config.yaml\"), reset_model_spec=False)\n cfg.RL_MODEL_SPEC = os.path.join(cfg_folder, cfg.RL_MODEL_SPEC.split(\"/\")[-1])\n dt_string = args.pretrained.split(\"/\")[-1]\n \n else:\n if args.fix_output_time is None:\n dt_string = datetime.datetime.now().strftime(\"%d_%m_%Y_%H:%M:%S\")\n else:\n dt_string = args.fix_output_time\n\n model_output_dir = os.path.join(cfg.OUTPUT_DIR, dt_string)\n print(\"Output will be saved to `{:s}`\".format(model_output_dir))\n new_output_dir = not os.path.exists(model_output_dir) and not args.test\n print(\"Using config:\")\n pprint.pprint(cfg)\n net_dict = make_nets_opts_schedulers(cfg.RL_MODEL_SPEC, cfg.RL_TRAIN)\n print(\"Output will be saved to `{:s}`\".format(model_output_dir))\n return net_dict, dt_string",
"def prepare_learning(self):\n print 'Separating inputs and outputs...'\n self.inputs, self.outputs = extract_samples(self.matches,\n self.input_features,\n self.output_feature)\n\n print 'Normalizing data...'\n self.normalizer, self.inputs = normalize(self.inputs)\n\n print 'Separating train and test sets...'\n self.train_inputs, self.train_outputs, self.test_inputs, self.test_outputs = split_samples(self.inputs, self.outputs)\n\n print 'Building neural network...'\n self.network = buildNetwork(len(self.input_features),\n 2 * len(self.input_features),\n 1,\n outclass=SigmoidLayer,\n bias=True)\n\n print 'Building and filling pybrain train set object...'\n self.train_set = ClassificationDataSet(len(self.input_features))\n\n for i, input_line in enumerate(self.train_inputs):\n self.train_set.addSample(self.train_inputs[i],\n [self.train_outputs[i] - 1])\n\n self.trainer = BackpropTrainer(self.network, dataset=self.train_set,\n momentum=0.5, weightdecay=0.0)\n\n self.train_set.assignClasses()",
"def setup(self):\n super(__class__, self).setup()\n # construct command line call\n setup_script = '%s/tfMRI.py' % \\\n os.environ['ABCDTASKPREPDIR']\n arg1 = self.kwargs['path']\n arg2 = self.kwargs['sourcedata_root']\n arg3 = self.kwargs['subject']\n arg4 = self.kwargs['session']\n anat_metadata = self.config.get_bids('t1w_metadata')\n # get make/software information\n make = anat_metadata['Manufacturer']\n if make == 'GE':\n reg = re.compile(r'.*(DV2[56]).*')\n software_version = reg.match(anat_metadata[\n 'SoftwareVersions']).group(1)\n else:\n software_version = 'NA'\n cmd = ' '.join((setup_script, arg1, arg2, arg3, arg4, make,\n software_version))\n print(cmd)\n\n log_dir = self._get_log_dir()\n out_log = os.path.join(log_dir, self.__class__.__name__ + '_setup.out')\n err_log = os.path.join(log_dir, self.__class__.__name__ + '_setup.err')\n result = self.call(cmd, out_log, err_log)",
"def initialize(self, args):\n # You must parse model_config. JSON string is not parsed here\n self.model_config = json.loads(args['model_config'])\n print(\"model_config:\", self.model_config)\n\n self.input_names = []\n for input_config in self.model_config[\"input\"]:\n self.input_names.append(input_config[\"name\"])\n print(\"postprocess input names:\", self.input_names)\n\n self.output_names = []\n self.output_dtype = []\n for output_config in self.model_config[\"output\"]:\n self.output_names.append(output_config[\"name\"])\n dtype = pb_utils.triton_string_to_numpy(output_config[\"data_type\"])\n self.output_dtype.append(dtype)\n print(\"postprocess output names:\", self.output_names)\n self.postprocessor = fd.vision.ocr.DBDetectorPostprocessor()\n self.cls_preprocessor = fd.vision.ocr.ClassifierPreprocessor()\n self.rec_preprocessor = fd.vision.ocr.RecognizerPreprocessor()\n self.cls_threshold = 0.9",
"def run_workflow(EMBEDDING_BASE_PATH):\n train_tweets_path, val_tweets_path, test_tweets_path, image_dataset = run_pre_workflow()\n\n input_images, train_tweets, val_tweets, test_tweets, glove_embeddings = replica_catalog(train_tweets_path, val_tweets_path, test_tweets_path, image_dataset, EMBEDDING_BASE_PATH)\n\n preprocess_tweets, preprocess_images, train_resnet, hpo_train_resnet, train_bilstm, hpo_train_bilstm, resnet_inference, bilstm_inference, late_fusion = transformation_catalog()\n \n sites_catalog()\n\n pegasus_properties()\n \n wf = Workflow('Crisis_Computing_Workflow')\n\n # --------------------------------------------------- TEXT PIPELINE ------------------------------------------------------ \n\n # Job 1: Preprocess tweets\n preprocessed_train_tweets = File('preprocessed_train_tweets.csv')\n preprocessed_val_tweets = File('preprocessed_val_tweets.csv')\n preprocessed_test_tweets = File('preprocessed_test_tweets.csv')\n \n job_preprocess_tweets = [Job(preprocess_tweets) for i in range(3)]\n job_preprocess_tweets[0].add_inputs(train_tweets)\n job_preprocess_tweets[0].add_outputs(preprocessed_train_tweets)\n job_preprocess_tweets[0].add_args('--filename', 'train_tweets.csv')\n \n job_preprocess_tweets[1].add_inputs(val_tweets)\n job_preprocess_tweets[1].add_outputs(preprocessed_val_tweets)\n job_preprocess_tweets[1].add_args('--filename', 'val_tweets.csv')\n \n job_preprocess_tweets[2].add_inputs(test_tweets)\n job_preprocess_tweets[2].add_outputs(preprocessed_test_tweets)\n job_preprocess_tweets[2].add_args('--filename', 'test_tweets.csv')\n\n\n # Job 2: HPO Bi-LSTM\n bilstm_best_params = File('best_bilstm_hpo_params.txt')\n\n job_hpo_train_bilstm = Job(hpo_train_bilstm)\\\n .add_inputs(glove_embeddings, preprocessed_train_tweets, preprocessed_val_tweets, preprocessed_test_tweets)\\\n .add_outputs(bilstm_best_params)\\\n .add_args('--trials', BILSTM_NUM_TRIALS)\n\n\n # Job 3: Train Bi-LSTM using best parameters from HPO study and output loss and accuracy curves\n trained_bilstm_model = File('bilstm_final_model.h5') \n bilstm_loss_curve = File('Loss_curve_bilstm.png')\n bilstm_accuracy_curve = File('Accuracy_curve_bilstm.png')\n\n\n job_train_bilstm = Job(train_bilstm)\\\n .add_inputs(glove_embeddings, preprocessed_train_tweets, preprocessed_val_tweets, preprocessed_test_tweets, bilstm_best_params)\\\n .add_outputs(bilstm_loss_curve, bilstm_accuracy_curve, trained_bilstm_model)\\\n\n\n # Job 4: Run inference on best Bi-LSTM model to produce output on test dataset along with confusion matrix\n bilstm_train_output_prob = File('bilstm_train_output.csv')\n bilstm_test_output_prob = File('bilstm_test_output.csv')\n bilstm_confusion_matrix = File('bilstm_confusion_matrix.png')\n\n job_bilstm_inference = Job(bilstm_inference)\\\n .add_inputs(preprocessed_train_tweets, preprocessed_val_tweets, preprocessed_test_tweets, trained_bilstm_model)\\\n .add_outputs(bilstm_train_output_prob, bilstm_test_output_prob, bilstm_confusion_matrix)\n\n\n # --------------------------------------------------- IMAGE PIPELINE ------------------------------------------------------ \n\n \n # Job 1: Preprocess images\n prefix = \"resized_\"\n job_preprocess_images = [Job(preprocess_images) for i in range(NUM_WORKERS)]\n resized_images = split_preprocess_jobs(job_preprocess_images, input_images, prefix)\n\n # Job 2: HPO ResNet-50\n resnet_best_params = File('best_resnet_hpo_params.txt')\n\n job_hpo_train_resnet = Job(hpo_train_resnet)\\\n .add_inputs(*resized_images)\\\n .add_args('--trials', 
RESNET_NUM_TRIALS)\\\n .add_outputs(resnet_best_params)\\\n .add_profiles(Namespace.PEGASUS, key=\"maxwalltime\", value=MAXTIMEWALL)\n\n\n # Job 3: Train ResNet-50 using best parameters from HPO study and output loss and accuracy curves\n trained_resnet_model = File('resnet_final_model.pth')\n resnet_loss_curve = File('Loss_curve_resnet.png')\n resnet_accuracy_curve = File('Accuracy_curve_resnet.png')\n\n job_train_resnet = Job(train_resnet)\\\n .add_inputs(*resized_images, resnet_best_params)\\\n .add_outputs(resnet_loss_curve, resnet_accuracy_curve, trained_resnet_model)\\\n .add_profiles(Namespace.PEGASUS, key=\"maxwalltime\", value=MAXTIMEWALL)\n\n\n # Job 4: Run inference on best ResNet-50 model to produce output on test dataset along with confusion matrix\n resnet_train_output_prob = File('resnet_train_output.csv')\n resnet_confusion_matrix = File('resnet_confusion_matrix.png')\n resnet_test_output_prob = File('resnet_test_output.csv') \n\n job_resnet_inference = Job(resnet_inference)\\\n .add_inputs(*resized_images, trained_resnet_model)\\\n .add_outputs(resnet_train_output_prob, resnet_test_output_prob, resnet_confusion_matrix)\n\n \n \n # --------------------------------------------------- LATE FUSION ------------------------------------------------------ \n\n # Job 1: Late Fusion\n confusion_matrix_MPC = File('late_fusion_MPC.png')\n confusion_matrix_LR = File('late_fusion_LR.png')\n confusion_matrix_MLP = File('late_fusion_MLP.png')\n report_MLP = File('late_fusion_MLP.csv')\n report_MPC = File('late_fusion_MPC.csv')\n report_LR = File('late_fusion_LR.csv')\n\n job_late_fusion = Job(late_fusion)\\\n .add_inputs(resnet_train_output_prob, resnet_test_output_prob, bilstm_train_output_prob, bilstm_test_output_prob)\\\n .add_outputs(confusion_matrix_MPC, confusion_matrix_LR, confusion_matrix_MLP, report_MLP, report_MPC, report_LR)\n\n wf.add_jobs(*job_preprocess_tweets, *job_preprocess_images, job_bilstm_inference, job_hpo_train_bilstm, job_train_bilstm, job_hpo_train_resnet, job_train_resnet, job_resnet_inference, job_late_fusion)\n\n try:\n wf.plan(submit=False, sites=[\"donut\"], output_sites=[\"donut\"], dir=\"submit\")\n #wf.wait()\n #wf.statistics()\n except PegasusClientError as e:\n print(e.output)\n \n #plot_workflow_graph(wf)\n \n return",
"def finetuning_single(phase,token2id_dict,id2embedding_dict,inference,dataloaders,model,optimizer,device,weighted_sampling,criterion,classification,auxiliary_loss=False,attn_loss=False,epoch_count=None,new_task_epochs=None,trial=None,goal='IC',save_path_dir=None): #b/c it is single, models_list contains one model only\n running_loss = 0.0\n \n# outputs_list = []\n# representations_list = []\n# labels_list = []\n# modality_list = []\n# indices_list = []\n# task_names_list = []\n# attn_coefs_list = []\n# sentence_lens_list = []\n# class_labels_list = []\n# class_predictions_list = []\n \n \"\"\" Initialize Dictionaries to Store Results \"\"\" \n outputs_dict = dict()\n representations_dict = dict()\n attn_coefs_dict = dict()\n labels_dict = dict()\n sentence_lens_dict = dict()\n class_labels_dict = dict()\n class_predictions_dict = dict()\n epoch_bleu = dict()\n epoch_rouge = dict()\n epoch_meteor = dict()\n\n for dest_lang in token2id_dict.keys():\n outputs_dict[dest_lang] = list()\n attn_coefs_dict[dest_lang] = list()\n representations_dict[dest_lang] = list()\n labels_dict[dest_lang] = list()\n sentence_lens_dict[dest_lang] = list()\n class_labels_dict[dest_lang] = list()\n class_predictions_dict[dest_lang] = list()\n epoch_bleu[dest_lang] = 0\n epoch_rouge[dest_lang] = 0\n epoch_meteor[dest_lang] = 0\n\n batch_num = 0\n batch = 0\n #class label is that in IC setting, but class label is answer in VQA setting\n for inputs, text_indices, sentence_lens, class_labels, languages, document_level_text_indices, document_level_sentence_lens in tqdm(dataloaders[phase]):\n \"\"\" Weaning Off of Teacher Forcing in a Linear Manner \"\"\"\n #sampling_prob = (0.4/30000)*(batch+1)*(epoch_count+1)\n #uniform_value = np.random.uniform(0,1)\n #sampling = True if uniform_value < sampling_prob else False\n sampling = False\n batch += 1\n \"\"\" Send Data to Device \"\"\"\n inputs = inputs.to(device)\n class_labels = class_labels.to(device)\n #print(text_indices)\n with torch.set_grad_enabled('train1' in phase):# and inference == False): #('train' in phase and inference == False)\n \"\"\" Image Captioning Path \"\"\"\n if goal == 'IC':\n \"\"\" Perform Forward Pass i.e. 
Encoder and Decoder \"\"\"\n current_labels_dict = dict() #text\n# current_class_labels_dict = dict()\n# current_class_predictions_dict = dict()\n current_outputs_dict = dict()\n# current_attn_coefs_dict = dict()\n# current_representations_dict = dict()\n total_loss = 0\n for (dest_lang,current_text_indices),current_sentence_lens in zip(text_indices.items(),sentence_lens.values()): #, sorted_indices, attn_coefs, class_predictions\n outputs, representations = model(inputs,current_text_indices,current_sentence_lens,token2id_dict[dest_lang],id2embedding_dict[dest_lang],dest_lang,phase,sampling,device) #outputs is B x S x Words\n \"\"\" Convert Text Indices/Targets to Tensor \"\"\"\n current_text_indices = current_text_indices.to(device) #torch.tensor(current_text_indices,device=device)\n \"\"\" Remove '/START' Index from Target Indices \"\"\"\n #current_text_indices = current_text_indices[:,1:] # B x (S-1)\n if phase == 'train1':\n attn_coefs = 5\n class_predictions = 6\n loss = calculate_IC_loss(criterion,outputs,current_text_indices[:,1:],class_predictions,class_labels,attn_coefs,auxiliary_loss,attn_loss)\n total_loss = total_loss + loss\n \"\"\" Average Loss if This is Final Loss Collected \"\"\"\n if dest_lang == list(text_indices.keys())[-1]:\n loss = total_loss / len(text_indices)\n \"\"\" Store Results \"\"\"\n current_labels_dict[dest_lang] = current_text_indices[:,1:].cpu().detach().numpy()\n# current_class_labels_dict[dest_lang] = class_labels\n# current_class_predictions_dict[dest_lang] = class_predictions\n current_outputs_dict[dest_lang] = outputs.cpu().detach().numpy() #text\n# current_attn_coefs_dict[dest_lang] = attn_coefs\n# current_representations_dict[dest_lang] = representations\n #\"\"\" Detach Outputs and Attn Coefs To Avoid Memory Leakage \"\"\"\n #outputs = outputs.detach()\n #attn_coefs = attn_coefs.detach()\n current_text_indices.detach()\n elif goal == 'VQA':\n \"\"\" Perform Forward Pass and Get Answers \"\"\"\n outputs, representations, attn_coefs, class_predictions = model(inputs,text_indices,sentence_lens,id2embedding_dict,phase,device)\n \"\"\" Calculate MSE Loss \"\"\"\n #criterion = nn.MSELoss()\n #class_labels = class_labels.type(torch.float)\n \"\"\" Calculate CrossEntropyLoss \"\"\"\n criterion = nn.CrossEntropyLoss()\n class_labels = class_labels.type(torch.long)\n #print(outputs,outputs.shape)\n loss = criterion(outputs,class_labels)\n elif goal == 'Supervised': #encoder supervised pre-training\n h, representations, class_predictions = model(inputs)#,text_indices,sentence_lens,id2embedding_dict,phase,device)\n criterion = nn.CrossEntropyLoss()\n class_labels = class_labels.type(torch.long)\n loss = criterion(class_predictions,class_labels)\n elif goal == 'Text_Supervised':\n #h, class_predictions = model.supervised_forward(text_indices,sentence_lens,token2id_dict,id2embedding_dict,phase,device)\n criterion = nn.CrossEntropyLoss()\n class_labels = class_labels.type(torch.long)\n current_class_labels_dict = dict()\n current_class_predictions_dict = dict()\n# current_representations_dict = dict()\n total_loss = 0\n for (dest_lang,current_text_indices),current_sentence_lens in zip(text_indices.items(),sentence_lens.values()):\n class_predictions = model.supervised_forward(current_text_indices,current_sentence_lens,token2id_dict[dest_lang],id2embedding_dict[dest_lang],phase,device)\n loss = criterion(class_predictions,class_labels)\n total_loss = total_loss + loss\n \"\"\" Average Loss if This is Final Loss Collected \"\"\"\n if dest_lang == 
list(text_indices.keys())[-1]:\n loss = total_loss / len(text_indices)\n\n current_class_labels_dict[dest_lang] = class_labels.cpu().detach().numpy()\n current_class_predictions_dict[dest_lang] = class_predictions.cpu().detach().numpy()\n# current_representations_dict[dest_lang] = h\n #loss = criterion(class_predictions,class_labels)\n #print(loss)\n elif goal == 'Language_Change_Detection':\n criterion = nn.BCEWithLogitsLoss()\n class_labels = class_labels.type(torch.long)\n current_class_labels_dict = dict()\n current_class_predictions_dict = dict()\n# current_representations_dict = dict()\n total_loss = 0\n for (dest_lang,current_text_indices),current_sentence_lens in zip(text_indices.items(),sentence_lens.values()):\n \"\"\" Forward Pass \"\"\"\n replacement_predictions, replacement_labels = model.language_change_detection_forward(current_text_indices,current_sentence_lens,token2id_dict,id2embedding_dict,dest_lang,phase,device)\n #replacement_labels = replacement_labels.type(torch.float) #needed for BCELoss\n \"\"\" Instance-Wise Loss Because Each Sentence is of a Different Length \"\"\"\n loss = 0\n for i,(replacement_prediction,replacement_label) in enumerate(zip(replacement_predictions,replacement_labels)):\n current_loss = criterion(replacement_prediction,replacement_label)\n loss = loss + current_loss\n if i == len(replacement_predictions)-1:\n loss = loss / len(replacement_predictions)\n #loss = torch.mean(torch.tensor([criterion(replacement_prediction,replacement_label) for replacement_prediction,replacement_label in zip(replacement_predictions,replacement_labels)]))\n total_loss = total_loss + loss\n \"\"\" Average Loss if This is Final Loss Collected \"\"\"\n if dest_lang == list(text_indices.keys())[-1]:\n loss = total_loss / len(text_indices)\n \n \"\"\" Store Representations and Labels \"\"\"\n current_class_predictions_dict[dest_lang] = [predictions.cpu().detach().numpy() for predictions in replacement_predictions]\n current_class_labels_dict[dest_lang] = [labels.cpu().detach().numpy() for labels in replacement_labels]\n# current_representations_dict[dest_lang] = h \n elif goal == 'Language_Detection':\n criterion = nn.CrossEntropyLoss(ignore_index=0)\n class_labels = class_labels.type(torch.long)\n current_class_labels_dict = dict()\n current_class_predictions_dict = dict()\n# current_representations_dict = dict()\n total_loss = 0\n for (dest_lang,current_text_indices),current_sentence_lens in zip(text_indices.items(),sentence_lens.values()):\n \"\"\" Forward Pass \"\"\"\n replacement_predictions, replacement_labels = model.language_detection_forward(current_text_indices,current_sentence_lens,token2id_dict,id2embedding_dict,dest_lang,phase,device)\n #replacement_labels = replacement_labels.type(torch.long) #needed for CrossEntropyLoss\n \"\"\" Instance-Wise Loss Because Each Sentence is of a Different Length \"\"\"\n# loss = 0\n# for i,(replacement_prediction,replacement_label) in enumerate(zip(replacement_predictions,replacement_labels)):\n# replacement_label = replacement_label.type(torch.long)\n# current_loss = criterion(replacement_prediction,replacement_label)\n# loss = loss + current_loss\n# if i == len(replacement_predictions)-1:\n# loss = loss / len(replacement_predictions)\n #print(replacement_predictions.shape,replacement_labels.shape)\n loss = criterion(replacement_predictions.permute(0,2,1),replacement_labels)\n #print(loss)\n total_loss = total_loss + loss\n #print(dest_lang,total_loss)\n \"\"\" Average Loss if This is Final Loss Collected \"\"\"\n if 
dest_lang == list(text_indices.keys())[-1]:\n loss = total_loss / len(text_indices)\n \n \"\"\" Store Representations and Labels \"\"\"\n current_class_predictions_dict[dest_lang] = [predictions.cpu().detach().numpy() for predictions in replacement_predictions]\n current_class_labels_dict[dest_lang] = [labels.cpu().detach().numpy() for labels in replacement_labels]\n# current_representations_dict[dest_lang] = h\n elif goal == 'MLM':\n criterion = nn.CrossEntropyLoss(reduction='none')\n# current_labels_dict = dict() #text\n# current_outputs_dict = dict()\n total_loss = 0\n for (dest_lang,current_text_indices),current_sentence_lens in zip(text_indices.items(),sentence_lens.values()): #, sorted_indices, attn_coefs, class_predictions\n outputs, replacement_predictions = model.MLM_forward(current_text_indices,current_sentence_lens,token2id_dict,id2embedding_dict,dest_lang,phase,device) #outputs is B x S x Words\n \"\"\" Convert Text Indices/Targets to Tensor \"\"\"\n current_text_indices = current_text_indices.to(device) #torch.tensor(current_text_indices,device=device)\n \"\"\" Remove '/START' Index from Target Indices \"\"\"\n current_text_indices = current_text_indices[:,1:] # B x (S-1)\n \"\"\" Obtain Applicable Loss Locations (i.e., Where Token Was Masked) \"\"\"\n token_loss_mask = torch.where(replacement_predictions == 1,torch.tensor(1,device=device),torch.tensor(0,device=device)).type(torch.bool)\n #print(outputs.shape)\n #if phase == 'train1':\n \"\"\" Obtain Each Token's Loss \"\"\"\n token_loss = criterion(outputs.permute(0,2,1),current_text_indices)\n \"\"\" Retrieve Only Relevant Losses (Masked) \"\"\"\n loss = torch.mean(token_loss.masked_select(token_loss_mask))\n \"\"\" Aggregate Loss Across Languages \"\"\"\n total_loss = total_loss + loss\n \"\"\" Average Loss if This is Final Loss Collected \"\"\"\n if dest_lang == list(text_indices.keys())[-1]:\n loss = total_loss / len(text_indices)\n \n del current_text_indices\n del token_loss\n del token_loss_mask\n# \"\"\" Store Results \"\"\"\n# current_labels_dict[dest_lang] = current_text_indices.cpu().detach().numpy()\n# current_outputs_dict[dest_lang] = outputs.cpu().detach().numpy() #text\n elif goal == 'ELECTRA':\n generator_criterion = nn.CrossEntropyLoss(reduction='none')\n discriminator_criterion = nn.BCEWithLogitsLoss(reduction='none')\n# current_labels_dict = dict() #text\n# current_outputs_dict = dict()\n total_loss = 0\n for (dest_lang,current_text_indices),current_sentence_lens in zip(text_indices.items(),sentence_lens.values()): #, sorted_indices, attn_coefs, class_predictions\n \"\"\" Convert Text Indices/Targets to Tensor \"\"\"\n current_text_indices = current_text_indices.to(device) #torch.tensor(current_text_indices,device=device)\n \"\"\" Perform Forward Pass Through ELECTRA \"\"\"\n generator_outputs, generator_labels, discriminator_outputs, discriminator_labels = model.ELECTRA_forward(current_text_indices,current_sentence_lens,token2id_dict,id2embedding_dict,dest_lang,phase,sampling,device) #outputs is B x S x Words\n \"\"\" Remove '/START' Index from Target Indices \"\"\"\n current_text_indices = current_text_indices[:,1:] # B x (S-1)\n \"\"\" Generator Loss Mask (i.e., Only Consider Originally Masked Tokens ) \"\"\"\n generator_token_loss_mask = torch.where(generator_labels == 1,torch.tensor(1,device=device),torch.tensor(0,device=device)).type(torch.bool)\n \"\"\" Discrimiantor Loss Mask (i.e., Do Not Consider Padded Regions ) \"\"\"\n discriminator_labels = discriminator_labels.view_as(discriminator_outputs) 
\n discriminator_token_loss_mask = torch.ones_like(discriminator_labels)\n for i,sentence_len in zip(range(discriminator_token_loss_mask.shape[0]),current_sentence_lens):\n discriminator_token_loss_mask[i,sentence_len:] = 0\n \n #if phase == 'train1':\n \"\"\" Obtain Each Generator Token's Loss \"\"\"\n generator_token_loss = generator_criterion(generator_outputs.permute(0,2,1),current_text_indices) # B x S\n #print(generator_token_loss.shape,generator_token_loss_mask.shape)\n \"\"\" Retrieve Only Relevant Loss (Masked) \"\"\"\n generator_loss = torch.mean(generator_token_loss.masked_select(generator_token_loss_mask)) #scalar\n \n \"\"\" Obtain Each Discriminator Token's Loss \"\"\" \n discriminator_token_loss = discriminator_criterion(discriminator_outputs,discriminator_labels) # B x S\n #print(discriminator_token_loss.shape,discriminator_token_loss_mask.shape)\n \"\"\" Retrieve Only Relevant Loss (Masked) \"\"\"\n discriminator_loss = torch.mean(discriminator_token_loss.masked_select(discriminator_token_loss_mask.type(torch.bool))) #scalar\n \n #print(generator_loss,discriminator_loss)\n \"\"\" Aggregate Loss Across Languages \"\"\"\n total_loss = total_loss + generator_loss + discriminator_loss\n \"\"\" Average Loss if This is Final Loss Collected \"\"\"\n if dest_lang == list(text_indices.keys())[-1]:\n loss = total_loss / len(text_indices)\n \"\"\" Store Results \"\"\"\n# current_labels_dict[dest_lang] = discriminator_labels.cpu().detach().numpy()\n# current_outputs_dict[dest_lang] = discriminator_outputs.cpu().detach().numpy() #text\n elif goal == 'MARGE':\n# current_labels_dict = dict() #text\n# current_outputs_dict = dict()\n #total_loss = 0\n #for (dest_lang,current_text_indices),current_sentence_lens,current_languages in zip(text_indices.items(),sentence_lens.values(),languages.values()): #, sorted_indices, attn_coefs, class_predictions\n \"\"\" Randomly Choose Target Lang for This Mini-Batch \"\"\"\n #lang_list = list(text_indices.keys())\n #target_lang = random.sample(lang_list,1).item()\n #target_lang = 'de' #option to change based on dataset (MUST CHANGE IN PAD COLLATE)\n outputs, target_lang = model(text_indices,sentence_lens,languages,document_level_text_indices,document_level_sentence_lens,token2id_dict,id2embedding_dict,phase,device)\n \"\"\" Convert Text Indices/Targets to Tensor \"\"\"\n current_text_indices = text_indices[target_lang].to(device) #torch.tensor(current_text_indices,device=device)\n \"\"\" Remove '/START' Index from Target Indices \"\"\"\n current_text_indices = current_text_indices[:,1:] # B x (S-1)\n #if phase == 'train1':\n \"\"\" Obtain Each Token's Loss \"\"\"\n loss = criterion(outputs.permute(0,2,1),current_text_indices)\n #print(loss)\n #\"\"\" Aggregate Loss Across Languages \"\"\"\n #total_loss = total_loss + loss\n #\"\"\" Average Loss if This is Final Loss Collected \"\"\"\n #if dest_lang == list(text_indices.keys())[-1]:\n # loss = total_loss / len(text_indices)\n# print(loss)\n# \"\"\" Store Results \"\"\"\n# current_labels_dict[target_lang] = current_text_indices.cpu().detach().numpy()\n# current_outputs_dict[target_lang] = outputs.cpu().detach().numpy() #text\n \n\n \"\"\" Backpropagation and Update Step \"\"\"\n if phase == 'train1': #only perform backprop for train1 phase \n loss.backward()\n \n \"\"\" Network Parameters \"\"\"\n if isinstance(optimizer,tuple):\n optimizer[0].step()\n \"\"\" Task-Instance Parameters \"\"\"\n optimizer[1].step() \n optimizer[0].zero_grad()\n optimizer[1].zero_grad()\n else:\n optimizer.step()\n 
optimizer.zero_grad()\n \n \"\"\" Calculate Metrics \"\"\"\n if goal == 'IC':\n if phase == 'train1':\n running_loss += loss.item() * inputs.shape[0]\n elif goal == 'VQA':\n running_loss += loss.item() * inputs.shape[0] \n elif goal in ['Supervised','Text_Supervised','Language_Change_Detection','Language_Detection','MLM','ELECTRA','MARGE']:\n running_loss += loss.item() * inputs.shape[0] \n \n# \"\"\" These Need to be Language Specific \"\"\"\n \n if goal in ['IC']:\n batch_bleu = calculate_bleu_score(current_outputs_dict,current_labels_dict,token2id_dict)\n batch_rouge = calculate_rouge_score(current_outputs_dict,current_labels_dict,token2id_dict)\n batch_meteor = calculate_meteor_score(current_outputs_dict,current_labels_dict,token2id_dict) \n \n for dest_lang in batch_bleu.keys():\n epoch_bleu[dest_lang] = epoch_bleu[dest_lang] + (1/batch)*(batch_bleu[dest_lang] - epoch_bleu[dest_lang])\n epoch_rouge[dest_lang] = epoch_rouge[dest_lang] + (1/batch)*(batch_rouge[dest_lang] - epoch_rouge[dest_lang])\n epoch_meteor[dest_lang] = epoch_meteor[dest_lang] + (1/batch)*(batch_meteor[dest_lang] - epoch_meteor[dest_lang])\n \n if phase in ['val']:\n for dest_lang in text_indices.keys():\n predicted_sentences = convert_predicted_ids_to_sentences(current_outputs_dict[dest_lang],token2id_dict[dest_lang],dest_lang)\n target_sentences = convert_target_ids_to_sentences(current_labels_dict[dest_lang],token2id_dict[dest_lang],dest_lang)\n outputs_dict[dest_lang].extend(predicted_sentences)\n labels_dict[dest_lang].extend(target_sentences)\n \n elif goal in ['Language_Change_Detection','Language_Detection']:\n for dest_lang in text_indices.keys():\n if goal in ['Language_Change_Detection','Language_Detection']:\n \"\"\" Store Batch Data in The Dictionaries \"\"\"\n class_labels_dict[dest_lang].extend(current_class_labels_dict[dest_lang]) #.cpu().detach().numpy())\n class_predictions_dict[dest_lang].extend(current_class_predictions_dict[dest_lang]) #.cpu().detach().numpy())\n \n# elif goal in ['Text_Supervised']:\n## current_class_labels = current_class_labels_dict[dest_lang]\n## current_class_predictions = current_class_predictions_dict[dest_lang]\n## current_class_labels = current_class_labels.cpu().detach().numpy()\n## current_class_predictions = current_class_predictions.cpu().detach().numpy()\n# \n# \"\"\" Store Batch Data in The Dictionaries \"\"\"\n# #sentence_lens_dict[dest_lang].extend(current_sentence_lens)\n# class_labels_dict[dest_lang].extend(current_class_labels_dict[dest_lang]) #.cpu().detach().numpy())\n# class_predictions_dict[dest_lang].extend(current_class_predictions_dict[dest_lang]) #.cpu().detach().numpy())\n#\n# elif goal in ['MARGE']:\n# labels_dict[target_lang].extend(current_labels_dict[target_lang]) #.cpu().detach().numpy())\n# outputs_dict[target_lang].extend(current_outputs_dict[target_lang]) #.cpu().detach().numpy())\n# break # because only one target language per minibatch \n# if goal not in ['Supervised','Text_Supervised','Language_Change_Detection','Language_Detection']:\n## if current_labels_dict[dest_lang].data.dtype != torch.long:\n## current_labels_dict[dest_lang].data = current_labels_dict[dest_lang].data.type(torch.long)\n# \n## current_text_indices = current_labels_dict[dest_lang]\n## current_outputs = current_outputs_dict[dest_lang]\n## current_attn_coefs = current_attn_coefs_dict[dest_lang]\n## current_representations = current_representations_dict[dest_lang]\n# \"\"\" Store Batch Data in The Dictionaries \"\"\" \n# 
labels_dict[dest_lang].extend(current_labels_dict[dest_lang]) #.cpu().detach().numpy())\n# outputs_dict[dest_lang].extend(current_outputs_dict[dest_lang]) #.cpu().detach().numpy())\n## attn_coefs_dict[dest_lang].extend(current_attn_coefs.cpu().detach().numpy())\n## representations_dict[dest_lang].extend(current_representations.cpu().detach().numpy())\n## elif goal in ['Text_Supervised']:\n## current_representations = current_representations_dict[dest_lang]\n## representations_dict[dest_lang].extend(current_representations.squeeze().cpu().detach().numpy()) \n## else:\n## current_representations = current_representations_dict[dest_lang]\n## if goal in ['Language_Change_Detection','Language_Detection']:\n## current_representations = [representations.cpu().detach().numpy() for representations in current_representations]\n## else:\n## current_representations = current_representations.cpu().detach().numpy()\n## representations_dict[dest_lang].extend(current_representations) \n# \n## modality_list.append(modality)\n## indices_list.append(indices)\n## task_names_list.append(task_names)\n \n batch_num += 1\n #if batch_num == 2:\n # break\n \n #outputs_list, labels_list, modality_list, indices_list, task_names_list, pids_list = flatten_arrays(outputs_list,labels_list,modality_list,indices_list,task_names_list,pids_list)\n if goal == 'IC':\n if phase == 'train1':\n epoch_loss = running_loss / len(dataloaders[phase].dataset)\n else:\n epoch_loss = 0 #filler\n elif goal in ['VQA','Supervised','Text_Supervised','Language_Change_Detection','Language_Detection','MLM','ELECTRA','MARGE']:\n epoch_loss = running_loss / len(dataloaders[phase].dataset) \n \n \"\"\" Removed Recently \"\"\"\n #representations_list = np.concatenate(representations_list)\n \n if goal == 'IC':\n \"\"\" BLEU Score Evaluation \"\"\"\n# epoch_bleu = calculate_bleu_score(outputs_dict,labels_dict,token2id_dict)\n# epoch_rouge = calculate_rouge_score(outputs_dict,labels_dict,token2id_dict)\n# epoch_meteor = calculate_meteor_score(outputs_dict,labels_dict,token2id_dict) \n return epoch_loss, epoch_bleu, epoch_rouge, epoch_meteor, outputs_dict, labels_dict #, modality_list, indices_list, task_names_list, class_labels_list, attn_coefs_list, sentence_lens_list\n elif goal == 'VQA':\n \"\"\" Accuracy of Answers \"\"\"\n epoch_acc = calculate_answer_accuracy(outputs_dict,class_labels_dict)\n return epoch_loss, epoch_acc #representations_list, labels_list #, modality_list, indices_list, task_names_list, class_labels_list, attn_coefs_list, sentence_lens_list\n elif goal in ['Supervised','Text_Supervised','Language_Change_Detection','Language_Detection']:\n if goal in ['Language_Change_Detection','Language_Detection']:\n epoch_acc = calculate_language_detection_accuracy(class_predictions_dict,class_labels_dict,goal)\n else:\n \"\"\" Accuracy of Answers \"\"\"\n epoch_acc = calculate_answer_accuracy(class_predictions_dict,class_labels_dict)\n return epoch_loss, epoch_acc #representations_list, labels_list #, modality_list, indices_list, task_names_list, class_labels_list, attn_coefs_list, sentence_lens_list\n elif goal in ['MLM','ELECTRA','MARGE']:\n return epoch_loss#, outputs_dict, labels_dict #representations_list, labels_list #, modality_list, indices_list, task_names_list, class_labels_list, attn_coefs_list, sentence_lens_list",
"def run():\n print('*-----------------------------------*')\n print('Running main.py ...')\n model = MLPModel(CFG, name='tfds_tryout')\n print('* Model defined')\n model.load_data(method='tfds')\n print('* Data Loaded')\n print(model.datasetinfo)\n model.build()\n model.train()\n model.evaluate()\n model.save()"
]
| [
"0.66875076",
"0.6421311",
"0.6281637",
"0.62222123",
"0.6220328",
"0.6153359",
"0.5987517",
"0.595677",
"0.5844441",
"0.5797753",
"0.5791133",
"0.5779024",
"0.57548994",
"0.5751336",
"0.5750355",
"0.5739375",
"0.57367283",
"0.5709741",
"0.57093525",
"0.57036644",
"0.5699484",
"0.5698264",
"0.56973785",
"0.5692546",
"0.56592506",
"0.56415397",
"0.56208974",
"0.5617668",
"0.56118965",
"0.55975723"
]
| 0.6535937 | 1 |
Run the NLP module on the input. Group and return the NLP labels and entities. | def nlp(self, text):
# Runs the NLP model on the input.
doc = self.nlp_model(text)
to = []
when = []
body = []
# Group the labels into variables.
for token in doc:
if token.dep_ == "TO":
to.append(token.text)
elif token.dep_ == "WHEN":
when.append(token.text)
elif token.dep_ == "BODY":
body.append(token.text)
log.debug("%s %s", token.text, token.dep_)
# Get the time entity from the NLP model.
time = datetime.now()
if len(when) == 0:
time = time + timedelta(seconds=5)
else:
time = tc.parse_time(when)
_body = " ".join(body)
return (to, time, _body) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_text(model_name: str, text: str) -> spacy.tokens.Doc:\r\n nlp = load_model(model_name)\r\n return nlp(text)",
"def main(model=None, output_dir=None, n_iter=20):\n if model is not None:\n nlp = spacy.load(model) # load existing spaCy model\n print(\"Loaded model '%s'\" % model)\n else:\n nlp = spacy.blank('en') # create blank Language class\n print(\"Created blank 'en' model\")\n\n # create the built-in pipeline components and add them to the pipeline\n # nlp.create_pipe works for built-ins that are registered with spaCy\n if 'ner' not in nlp.pipe_names:\n ner = nlp.create_pipe('ner')\n nlp.add_pipe(ner, last=True)\n # otherwise, get it so we can add labels\n else:\n ner = nlp.get_pipe('ner')\n\n # add labels\n for _, annotations in TRAIN_DATA:\n for ent in annotations.get('entities'):\n ner.add_label(str(ent[2]))\n\n # get names of other pipes to disable them during training\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']\n with nlp.disable_pipes(*other_pipes): # only train NER\n optimizer = nlp.begin_training()\n for itn in range(n_iter):\n random.shuffle(TRAIN_DATA)\n losses = {}\n for text, annotations in TRAIN_DATA:\n nlp.update(\n [text], # batch of texts\n [annotations], # batch of annotations\n drop=0.5, # dropout - make it harder to memorise data\n sgd=optimizer, # callable to update weights\n losses=losses)\n print(losses)\n \n # test the trained model\n for text, _ in TRAIN_DATA:\n doc = nlp(text)\n print('Entities', [(ent.text, ent.label_) for ent in doc.ents])\n print('Tokens', [(t.text, t.ent_type_, t.ent_iob) for t in doc])\n \n # save model to output directory\n if output_dir is not None:\n print(output_dir)\n output_dir = Path(output_dir)\n if not output_dir.exists():\n output_dir.mkdir()\n nlp.to_disk(output_dir)\n print(\"Saved model to\", output_dir)\n\n # test the saved model\n print(\"Loading from\", output_dir)\n nlp2 = spacy.load(output_dir)\n for text, _ in TRAIN_DATA:\n doc = nlp2(text)\n print('Entities', [(ent.text, ent.label_) for ent in doc.ents])\n print('Tokens', [(t.text, t.ent_type_, t.ent_iob) for t in doc])",
"def google_nlp_entities(\n input,\n input_type=\"html\",\n result_type=\"all\",\n limit=10,\n invalid_types=[\"OTHER\", \"NUMBER\", \"DATE\"],\n):\n\n def get_type(type):\n return client.enums.Entity.Type(d.type).name\n\n if not input:\n print(\"No input content found.\")\n return None\n\n if input_type == \"html\":\n doc_type = language.enums.Document.Type.HTML\n else:\n doc_type = language.enums.Document.Type.PLAIN_TEXT\n\n document = types.Document(content=input, type=doc_type)\n\n features = {\"extract_entities\": True}\n\n try:\n response = client.annotate_text(\n document=document, features=features, timeout=20\n )\n except Exception as e:\n print(\"Error with language API: \", re.sub(r\"\\(.*$\", \"\", str(e)))\n return []\n\n used = []\n results = []\n for d in response.entities:\n\n if limit and len(results) >= limit:\n break\n\n if get_type(d.type) not in invalid_types and d.name not in used:\n\n data = {\n \"name\": d.name,\n \"type\": client.enums.Entity.Type(d.type).name,\n \"salience\": d.salience,\n }\n if result_type is \"wikipedia\":\n if \"wikipedia_url\" in d.metadata:\n data[\"wikipedia\"] = d.metadata[\"wikipedia_url\"]\n results.append(data)\n else:\n results.append(data)\n\n used.append(d.name)\n\n return results",
"def process_input(fname,onlynugget,onlyarg):\n content=utils.readFileEncode(fname,'utf8')\n lines = content.split('\\n')[:-1]\n sentences=[]\n labels=[]\n sent=[]\n label=[]\n for i in range(len(lines)):\n if len(lines[i])>3:\n words=lines[i].split('\\t')\n word={'originalText':words[0],'offset':int(words[1])}\n sent.append(word)\n if onlynugget:\n if words[2] in NuggetList10:\n label.append(words[2]) \n else:\n label.append('O')\n elif onlyarg:\n if words[2] in ArgumentList:\n\n if 'Software' in words[2]:\n label.append(words[2][0:2]+'System')\n else:\n label.append(words[2])\n else:\n label.append('O')\n else:\n if len(sent)>0 and len(label)>0: \n sentences.append(sent)\n labels.append(label) \n sent=[]\n label=[]\n elif len(sent)==0 and i < len(lines)-1:\n sentences.append([])\n labels.append([])\n \n return sentences,labels",
"def main(model=None, output_dir=None, n_iter=10):\n if model is not None:\n nlp = spacy.load(model) # load existing spaCy model\n print(\"Loaded model '%s'\" % model)\n else:\n nlp = spacy.blank('en') # create blank Language class\n print(\"Created blank 'en' model\")\n\n # add the parser to the pipeline if it doesn't exist\n # nlp.create_pipe works for built-ins that are registered with spaCy\n if 'parser' not in nlp.pipe_names:\n parser = nlp.create_pipe('parser')\n nlp.add_pipe(parser, first=True)\n # otherwise, get it, so we can add labels to it\n else:\n parser = nlp.get_pipe('parser')\n\n # add labels to the parser\n for _, annotations in TRAIN_DATA:\n for dep in annotations.get('deps', []):\n parser.add_label(dep)\n\n # get names of other pipes to disable them during training\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'parser']\n with nlp.disable_pipes(*other_pipes): # only train parser\n optimizer = nlp.begin_training()\n for itn in range(n_iter):\n random.shuffle(TRAIN_DATA)\n losses = {}\n for text, annotations in TRAIN_DATA:\n nlp.update([text], [annotations], sgd=optimizer, losses=losses)\n print(losses)\n\n # test the trained model\n test_text = \"It was back in 2007 that hip-hop bible XXL launched its first ever Freshman Class, a list of ten up-and-coming artists poised to change the rap game for good. The last decade has seen more than a hundred stars spotlighted as part of the list and its accompanying annual cover feature, but this year features a history-making entry: Stefflon Don. The talented star has already built a strong reputation for herself in the UK; her unique blend of hard-hitting raps and smooth, dancehall beats has galvanized the scene, earning her critical acclaim and a series of impressive chart positions. Now, she seems ready to achieve the unthinkable: global stardom. Earlier this year, her infectious hit โHurtinโ Meโ โ featuring former XXL Freshman French Montana โ ascended the Billboard charts, peaking at no. 7 and confirming her US fanbase; but could she truly become the first artist to crack the US? And, more importantly, why has it taken so long for UK rappers to achieve Stateside success?\"\n doc = nlp(test_text)\n print('Dependencies', [(t.text, t.dep_, t.head.text) for t in doc])\n sentence_spans = list(doc.sents)\n displacy.serve(sentence_spans, style='dep')\n\n # save model to output directory\n if output_dir is not None:\n output_dir = Path(output_dir)\n if not output_dir.exists():\n output_dir.mkdir()\n nlp.to_disk(output_dir)\n print(\"Saved model to\", output_dir)\n\n # test the saved model\n print(\"Loading from\", output_dir)\n nlp2 = spacy.load(output_dir)\n doc = nlp2(test_text)\n print('Dependencies', [(t.text, t.dep_, t.head.text) for t in doc])",
"def test_model():\n test_text = \"what is the price of jug?\"\n model = spacy.load(\"../model/custom_ner_model\")\n doc = model(test_text)\n for ent in doc.ents:\n print(ent.text, ent.start_char, ent.end_char, ent.label_)",
"def corenlp(limit):\n # limit should be the int of sentences to extract from database or 'all' to extract all\n modeller = Modeller(limit=limit)\n modeller.get_wiki_data()\n strings,y_vals = zip(*modeller.data)\n print('Total strings:',len(y_vals))\n print('People:',sum(y_vals))\n print('Other entities:',len(y_vals) - sum(y_vals))\n\n correct_person = 0\n correct_non_person = 0\n false_positive = 0\n false_negative = 0\n\n print('\\nPutting strings through CoreNLP')\n start_time_corenlp = time.time()\n for counter,(string,y) in enumerate(modeller.data):\n # pass string to stanford ner\n ner = stanford_ner(string)\n # check if ner indicates there is a person in the string\n person = False\n for word in ner:\n if word['ner'] == 'PERSON':\n person = True\n break\n if person:\n if y:\n correct_person += 1\n else:\n false_positive += 1\n else:\n if y:\n false_negative += 1\n else:\n correct_non_person += 1\n print('Strings put through CoreNLP in:',round(time.time()-start_time_corenlp),'seconds')\n\n\n precision = correct_person/(correct_person+false_positive)\n recall = correct_person/(correct_person+false_negative)\n f1_score = 2*(precision*recall)/(precision+recall)\n print('Precision:',round(precision,2),'\\nRecall:',round(recall,2),'\\nF1 score:',round(f1_score,2))\n # compute precision, recall, f1_score",
"def label_notes(all_notes_lines):\n# nf = nemo.core.NeuralModuleFactory(backend=nemo.core.Backend.PyTorch, log_dir=None)\n #note_line_queries = notes.split('\\n')\n #note_line_queries = ['pt arrived obtunded not answering questions responding to voice and sternal rub speaking in garbled voice pupils unequal left 3mm and right 2mm brisk bilaterally trauma sicu MD aware currently recieving keppra IV finished dilantin gtt due for level at 08a EEG today LSCTA on 3LNC sats 100 % SBP 90 s to 100 s HR NSR no ectopy 60 s NS @ 75cc continuous +BS no stools rec d lactulose at OSH to recieve PR q4h abd soft non-tender non-distended foley in place draining adequate amt clear yellow urine skin intact left 20G x2 WNL wife Name NI']\n\n# labels_dict = get_vocab(LABELS_DICT)\n# pretrained_bert_model = nemo_nlp.nm.trainables.get_huggingface_model(\n# bert_config=BERT_CONFIG, pretrained_model_name=PRETRAINED_MODEL_NAME\n# )\n\n# tokenizer = nemo.collections.nlp.data.tokenizers.get_tokenizer(\n# tokenizer_name=TOKENIZER,\n# pretrained_model_name=PRETRAINED_MODEL_NAME,\n# tokenizer_model=TOKENIZER_MODEL,\n# )\n# hidden_size = pretrained_bert_model.hidden_size\n\n load_datalayer_begin_time = time.time()\n data_layer = nemo_nlp.nm.data_layers.BertTokenClassificationInferDataLayer(\n queries=all_notes_lines, tokenizer=tokenizer, max_seq_length=MAX_SEQ_LENGTH, batch_size=2000\n )\n load_datalayer_end_time = time.time()\n\n classifier = TokenClassifier(hidden_size=hidden_size, num_classes=len(labels_dict))\n\n input_ids, input_type_ids, input_mask, _, subtokens_mask = data_layer()\n \n load_hidden_states_begin_time = time.time()\n hidden_states = pretrained_bert_model(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)\n load_hidden_states_end_time = time.time()\n load_logits_begin_time = time.time()\n logits = classifier(hidden_states=hidden_states)\n load_logits_end_time = time.time()\n\n ###########################################################################\n\n # Instantiate an optimizer to perform `infer` action\n infer_begin_time = time.time()\n evaluated_tensors = nf.infer(tensors=[logits, subtokens_mask], checkpoint_dir=CHECKPOINT_DIR)\n infer_end_time = time.time()\n\n logits, subtokens_mask = [concatenate(tensors) for tensors in evaluated_tensors]\n\n preds = np.argmax(logits, axis=2) \n all_notes_labeled_lines = []\n\n for i, query in enumerate(all_notes_lines):\n logging.info(f'Query: {query}')\n\n pred = preds[i][subtokens_mask[i] > 0.5]\n words = query.strip().split()\n\n #replaced with logic below instead of raising an error:\n '''\n if len(pred) != len(words):\n logging.info('Preds length: ' + str(len(preds[i])))\n logging.info('subtokens_mask length: ' + str(len(subtokens_mask[i])))\n logging.info('Pred length: ' + str(len(pred)))\n logging.info('words length: ' + str(len(words)))\n logging.info('Preds: ' + str(preds.tolist()))\n logging.info('subtokens_mask: ' + str(subtokens_mask[i]))\n logging.info('Pred:' + str(pred.tolist()))\n logging.info('words:' + str(words))\n\n labeled_note = '__Prediction/Word Mismatch__ pred length: ' + str(len(pred)) + ', words length: ' + str(len(words))\n break\n #raise ValueError('Pred and words must be of the same length')\n \n output = ''\n for j, w in enumerate(words):\n output += w\n label = labels_dict[pred[j]]\n if label != NONE_LABEL:\n label = add_brackets(label)\n output += label\n output += ' '\n labeled_note += '\\n' + output.strip()\n logging.info(f'Combined: {output.strip()}')\n\n '''\n\n if len(pred) == len(words):\n 
output = ''\n for j, w in enumerate(words):\n output += w\n label = labels_dict[pred[j]]\n if label != NONE_LABEL:\n label = add_brackets(label)\n output += label\n output += ' '\n all_notes_labeled_lines.append(output.strip())\n logging.info(f'Combined: {output.strip()}')\n else:\n all_notes_labeled_lines.append(query)\n pred_length = str(len(pred))\n word_length = str(len(words))\n logging.info(f'__Prediction/Word Length Mismatch__ pred length: {pred_length}, words length: {word_length}')\n logging.info(f'{query}')\n \n\n print(str(load_datalayer_end_time-load_datalayer_begin_time)+' seconds to load the datalayer')\n print(str(load_hidden_states_end_time-load_hidden_states_begin_time)+' seconds to load hidden states')\n print(str(load_logits_end_time-load_logits_begin_time)+' seconds to load logits')\n print(str(infer_end_time-infer_begin_time)+' seconds to run inference')\n\n return all_notes_labeled_lines",
"def construct_NLP_model(self, df=None):\n import review_processing as rp\n # get words\n if df is not None:\n nitems = df.shape[0]\n col_names = df.columns.values\n if self.review_col_name not in col_names or \\\n self.sentiment_col_name not in col_names:\n sys.exit('construct_NL_model: The name {0}/{1} cannot be found'.\n format(self.review_col_name, self.sentiment_col_name))\n review_list = df[self.review_col_name].values.tolist()\n meaningful_words = map(self.review_to_meaningful_words,\n review_list)\n # Get training sentiment values\n self.sentiment = df[self.sentiment_col_name].values\n\n else:\n if self.training_file_name is None:\n sys.exit('construct_NLP_model: traning file name does not '\n 'exist')\n else:\n suffix = os.path.splitext(self.training_file_name)[1][1:]\n if suffix == 'csv':\n df = pd.read_csv(self.training_file_name)\n if self.review_col_name not in col_names or \\\n self.sentiment_col_name not in col_names::\n sys.exit('construct_NL_model: The name {0}/{1} cannot '\n ' be found'.format(self.review_col_name,\n self.sentiment_col_name))\n nitems = df.shape[0]\n review_list = df[review_col_name].values.tolist()\n meaningful_words = map(self.review_to_meaningful_words,\n review_list)\n elif suffix == 'json':\n data_dict_list = rp.load_data(self.training_file_name)\n if self.review_col_name not in data_dict_list.keys():\n sys.exit('construct_NL_model: The name {0} cannot be '\n 'found'.format(review_col_name))\n review_list = map(lambda x: x[review_col_name],\n data_dict_list)\n meaningful_words = map(self.review_to_meaningful_words,\n review_list)\n else:\n sys.exit('construct_NLP_model: file type not supported '\n 'yet!')\n\n # Training process of Bag of Worlds\n if self.NLP_model == 'BagofWords':\n print('construct_NLP_model: Creating bag of words...')\n self.vectorizer = CountVectorizer(analyzer='word',\n tokenizer=None,\n preprocessor=None,\n stop_words=None,\n max_features=self.maxfeature)\n self.train_data_features = vectorizer.fit_transform(\n meaningful_words)\n self.train_data_features = train_data_features.toarray()\n\n # vocab = vectorizer.get_feature_names()\n # dist = np.sum(train_data_features, axis=0)\n # for tag, count in zip(vocab, dist):\n # print(count, tag)\n\n else:\n sys.exit('construct_NLP_model: NLP_model type not supported yet!')",
"def entity_recognition(text: str) -> spacy:\n nlp = spacy.load('en_core_web_sm')\n document = nlp(text)\n return document",
"def get_entities_dict(p_str):\n nlp = en_core_web_sm.load()\n doc = nlp(p_str)\n entities = {}\n relevant_keywords = []\n list_of_types = ['NORP', 'ORG', 'GPE', 'LAW', 'LANGUAGE']\n for X in doc.ents:\n if not(X.label_ in entities):\n entities[X.label_] = []\n entities[X.label_].append(X.text)\n if X.label_ in list_of_types:\n relevant_keywords.append(X.text)\n print(entities)\n print(\"HERE\")\n print(relevant_keywords)\n return entities, relevant_keywords",
"def nlp_parse(self, input):\n resp = {}\n resp['type'] = 'nomatch'\n VDB_set = {}\n WP_set = {}\n tagset = self.build_tagset(input)\n resp['words'] = self.build_keywords(tagset)\n w = resp['words']\n\n if not w:\n if constants.DEBUG:\n log.debug(\"No words: \" + str(resp))\n return resp\n\n # store nouns\n NN_set = set(w.get('NN', []))\n\n # matches a request for a list\n if 'list' in NN_set \\\n or 'List' in w.get('NNP', []):\n resp['count'] = w.get('CD', [constants.LIST_COUNT])[0]\n resp['type'] = 'show-list'\n if set(['serving', 'serve']) & set(w.get('VBG', [])):\n resp['meal'] = (NN_set & constants.MEALS_SET).pop()\n if 'in' in w.get('IN', []):\n resp['zone'] = w.get('NNP', [None])[0]\n if 'close' in w.get('VBD', []) \\\n or 'close' in w.get('JJ', []) \\\n or 'close' in NN_set:\n resp['distance'] = True\n return resp\n\n # finds neighborhood\n for word in tagset:\n if word[1] == 'VBD':\n VDB_set = word[0]\n for word in tagset:\n if word[1] == 'WP':\n WP_set = word[0]\n if 'neighborhood' in VDB_set and 'what' in WP_set:\n if w.get('NNP', [None])[0]: \n r_name = w.get('NNP', [None])[0]\n else :\n return resp\n\n r_name = w.get('NNP', [None])[0] \n resp['restaurant'] = r_name\n resp['type'] = 'name-zone'\n return resp\n\n # matches \"how expensive it is\" and \"is it expensive\"\n if 'expensive' in w.get('JJ', ()):\n if w.get('NNP', [None])[0]: \n r_name = w.get('NNP', [None])[0]\n else :\n return resp\n\n r_name = w.get('NNP', [None])[0] \n resp['restaurant'] = r_name\n resp['type'] = 'name-price'\n return resp\n\n if 'between' in w.get('IN', ()) \\\n or 'price' in NN_set:\n price_range = w.get('CD', ())\n\n # price between a and b\n # require at least 2 numerals\n if len(price_range) >= 2:\n resp['min'] = min(map(int, price_range))\n resp['max'] = max(map(int, price_range))\n resp['type'] = 'list-price-range'\n return resp\n\n # price of exactly a\n if len(price_range) > 0:\n price_range = w.get('CD', ())\n resp['price'] = min(price_range)\n resp['type'] = 'list-price-single'\n return resp\n\n\n # need to merge NN and JJ for this step\n w['NNJJ'] = NN_set | set(w.get('JJ', []))\n meal = constants.MEALS_SET & w['NNJJ']\n if meal:\n resp['type'] = 'list-meal-single'\n resp['meal'] = meal.copy().pop()\n return resp\n\n # matches a quality list\n if 'quality' in NN_set and \\\n (constants.QUALITIES & w['NNJJ']) and \\\n (set(['food', 'service']) & w['NNJJ']):\n resp['degree'] = (constants.QUALITIES \\\n & w['NNJJ']).pop()\n resp['type'] = 'list-quality-' + \\\n (set(['food', 'service']) & w['NNJJ']).pop()\n return resp\n\n # matches a phone number request\n if NN_set & constants.PHONE_KEYWORDS:\n r_name = w.get('NNP', [None])[0] or \\\n w['NN'][-1]\n\n resp['restaurant'] = r_name\n resp['type'] = 'name-phone'\n return resp\n\n # matches a single meal request\n if NN_set & constants.MEALS_SET:\n r_name = w.get('NNP', [None])[0] or \\\n w['NN'][-1]\n\n resp['restaurant'] = r_name\n resp['type'] = 'name-meal'\n resp['meal'] = word.lower()\n return resp\n\n # matches a request for an address\n if 'address' in NN_set:\n r_name = w.get('NNP', [None])[0] or \\\n w['NN'][-1]\n resp['restaurant'] = r_name\n resp['type'] = 'name-location'\n return resp\n\n # matches a restaurant in neighborhood\n if 'in' in w.get('IN', []) and \\\n NN_set & constants.NAME_KEYWORDS:\n r_name = w.get('NNP', [None])[0]\n if not r_name:\n for kw in reversed(w['NN']):\n if kw not in constants.NAME_KEYWORDS:\n r_name = kw\n break\n if r_name:\n resp['type'] = 'random-city'\n resp['city'] = string.capitalize(r_name)\n 
return resp\n\n # matches a request for a cuisine type\n if NN_set & constants.NAME_KEYWORDS:\n r_name = w.get('NNP', [None])[0]\n if not r_name:\n for kw in reversed(w['NN']):\n if kw not in constants.NAME_KEYWORDS:\n r_name = kw\n break\n if r_name:\n resp['type'] = 'random-cuisine'\n resp['cuisine'] = string.capitalize(r_name)\n return resp\n\n # merge all numerals together for list-mode\n w['CDLS'] = set(w.get('CD', []) + w.get('LS', []))\n if w['CDLS']:\n w_copy = w['CDLS'].copy()\n while w_copy:\n try:\n resp['listitem'] = int(w_copy.pop())\n resp['type'] = 'single-listitem'\n return resp\n except:\n pass\n\n # distance / how far\n if ('far' in w.get('RB', [])\n and 'how' in w.get('WRB', [])\n ) or ('distance' in NN_set):\n r = w.get('NNP', [None])[0]\n if r:\n resp['type'] = 'name-distance'\n resp['restaurant'] = string.capitalize(r)\n return resp\n\n if constants.DEBUG:\n log.debug(resp)\n return resp",
"def predict(cls, input):\n clf = cls.get_model() \n\n input.to_csv(data_dir + 'vdok_predction_src_file.csv')\n\n q = qa_serializer_lang_selector(data_dir)\n q.serialize_record('vdok_predction_src_file.csv', task_name)\n q.select_lang([1], task_name).to_csv(data_dir + data_file, encoding= 'latin1')\n\n pipeline=['pos', 'lemma', 'synset', 'hype', 'hypo']\n\n bnlqd = fex_basic_nlp(data_file, data_dir)\n bnlqd.nlp_run(pipeline[0])\n bnlqd.nlp_run(pipeline[1])\n bnlqd.df_ac_lemma.to_csv(data_dir + 'Lemma-' + data_file, encoding= 'latin1')\n bnlqd.nlp_run(pipeline[2])\n bnlqd.df_ac_synset.to_csv(data_dir + 'Synset-' + data_file , encoding= 'latin1')\n bnlqd.nlp_run(pipeline[3])\n bnlqd.df_ac_hypernyms.to_csv(data_dir + 'Hypernyms-' + data_file, encoding= 'latin1')\n bnlqd.nlp_run(pipeline[4])\n bnlqd.df_ac_hyponyms.to_csv(data_dir + 'Hyponyms-' + data_file, encoding= 'latin1')\n\n bnlpd = fex_basic_nlp(def_file, data_dir, task_name)\n bnlpd.nlp_run(pipeline[0])\n bnlpd.nlp_run(pipeline[1])\n bnlpd.df_ac_lemma.to_csv(data_dir + 'Lemma-P-' + data_file, encoding= 'latin1')\n \n btgqd = bi_trigram(data_file, data_dir)\n btgqd.nlp_run(r'bigram')\n btgqd.nlp_run(r'trigram') \n\n stop_words_d = cls.remove_non_extracted_stop_word(bnlqd.df_ac_lemma, stop_words)\n\n oanc_shelve = oanc_resource + 'ANC-all-lemma-04262014.db'\n oalqd = odi_oanc_lemma_frequency(data_file, oanc_shelve, None, data_dir, stop_words_d) \n oalqd.oanc_lemma_frequency('Lemma-' + data_file, 'Student_Question_Index', 'Pre_Col_Name')\n \n stop_words_hy_d = cls.remove_non_extracted_stop_word(bnlqd.df_ac_lemma, stop_words_hy)\n\n ovlqd = odi_overlapping(data_file, def_file, data_dir, stop_words_d)\n ovlqd.count_overlapping('Lemma-' + data_file, 'Student_Question_Index',\n 'Pre_Col_Name', 'Question_ID', 'Question_ID_Sec',\n 'Lemma-P-' + data_file, 'Question_ID', 'Question_ID_Sec')\n ovlqd.count_overlapping_synset('Synset-' + data_file)\n ovlqd.count_overlapping_hypernyms('Hypernyms-' + data_file, stop_words_hy_d)\n ovlqd.count_overlapping_hyponyms('Hyponyms-' + data_file, stop_words_hy_d)\n\n df_ac_pmi_dist_bigram = cls.bi_trigram_pmi_distribution(pmi_bigram_file, data_dir, \n bnlqd.num_clm_in, btgqd.df_ac_bigram, 'bigram')\n df_ac_pmi_dist_trigram = cls.bi_trigram_pmi_distribution(pmi_trigram_file, data_dir, \n bnlqd.num_clm_in, btgqd.df_ac_trigram, 'Trigram')\n\n df_ac_aggregate = cls.aggregate_plim(bnlqd, oalqd, ovlqd, df_ac_pmi_dist_bigram, df_ac_pmi_dist_trigram,\n bnlpd, specific_count_lemmas, stop_words_pos, task_name)\n df_ac_aggregate.to_csv(data_dir + 'vdok_predction_Aggregate_plim.csv', encoding= 'latin1')\n df_ac_aggregate_item_level = cls.aggregate_item_level_plim(df_ac_aggregate, oalqd.stem_option_name_clm, \n task_name)\n df_ac_aggregate_item_level.to_csv(data_dir + 'vdok_predction_Key_Stem_Passage_Aggregate_plim.csv',\n encoding= 'latin1')\n\n rfrpod = tmv_RF_classify('Independent_Variable_w_Label-Def.csv', data_dir)\n rfrpod.load_data('vdok_predction_Key_Stem_Passage_Aggregate_plim.csv', True, drop_vars, dependent_var)\n clf.perform_prediction(rfrpod.df_ac_modeling_values)\n return clf.df_ac_classified",
"def parseSentences(jobidsentences):\n\n jobid, docs, Config = jobidsentences\n\n #start stanford server, we need to find an open port through guessing\n maxtries = 12\n tries=0\n err=[]\n while tries <maxtries:\n try:\n np.random.seed()\n jobid = np.random.randint(0, 2000)\n nlp = StanfordCoreNLP(Config.parserPath, port=8000+(jobid%2000), memory='8g', timeout=500000) #https://github.com/Lynten/stanford-corenlp\n maxtries = 0\n print(\"Starting DepParse\", jobid)\n except IOError as e:\n err=e\n tries += 1\n\n wmap = {}\n #wcou={} #word counts\n compounds = [] #of lemmatized words\n newdocs = []\n useNLTK = not \"nlp\" in locals() # check if StanfordCoreParser could be used, if not use NLTK lemmatizer\n if useNLTK:\n print(\"StanfordCoreNLP parser not found or ioport in use - We automatically try another;\", \"Message \",err, \" Jobid\",jobid)\n # from nltk.stem import WordNetLemmatizer\n # lemmatizer=WordNetLemmatizer()\n props = {'annotators': 'tokenize, ssplit, lemma, depparse', 'pipelineLanguage': 'en', 'outputFormat': 'json'} #options for parsing\n failed=0\n for i, (docid, d) in enumerate(docs):\n if i%10 == 9: print(docid, jobid)\n if useNLTK:\n words=tt.docSpacePunctuation(d).split(\" \")\n for w in words:\n lem=tt.changeWord(w) #lem = lemmatizer.lemmatize(w)\n if not len(lem): lem=w\n addWord(wmap, w, lem)\n newdocs.append((docid, words))\n else: #Use StanfordCoreParser\n docTokens = []\n parseRes = nlp.annotate(d, properties=props)\n try: var = json.loads(parseRes)\n except json.decoder.JSONDecodeError as e:\n print(\" Not parsed\", e, str(d)[:30].replace(\"\\n\", \"\"), str(parseRes)[:30].replace(\"\\n\", \"\"))\n failed += 1\n newdocs.append((docid, docTokens))\n continue\n\n for s in var[\"sentences\"]:\n csent = []\n currcomp = []\n mapTow = {}\n for i, b in enumerate(s[\"enhancedPlusPlusDependencies\"]):\n tok = s[\"tokens\"][b[\"dependent\"]-1][\"word\"]\n lem = s[\"tokens\"][b[\"dependent\"]-1][\"lemma\"]\n #print(\"t,l\",tok,lem,b[\"dep\"],b[\"dependent\"])\n if b[\"dep\"] == \"compound\": #if part of compound\n # compounds should be pure words, Stanford parser often creates clutter words like \"Section_1\" or so\n if len(tok) > 1 and tok.isalpha(): #note this skips non-alpha words!\n currcomp.append((tok, lem)) #tok also ok, but leads to some redundant words => communication skill, communication skills\n iEnd = b['governor']\n mapTow[b[\"dependent\"]] = \"\"\n elif len(currcomp) > 0 and b['dependent'] == iEnd: #last word of compound\n rawcomp = \" \".join([x[0] for x in currcomp]) #create compounds (except last word)\n comp = \" \".join([x[1] for x in currcomp])\n if len(tok) > 1 and tok.isalpha(): #last word is alpha => add it\n rawcomp += \" \" + tok\n comp += \" \" + lem\n else: addWord(wmap, tok, lem) #add last word as new word if non-alpha => not really needed\n if len(comp.split()) > 1: #if compound\n comp = comp.lower() #all lemmas are lower case\n compounds.append(comp)\n addWord(wmap, rawcomp, comp)\n # wcou[tok] = wcou.get(rawcomp, 0) + 1\n currcomp = []\n mapTow[b[\"dependent\"]] = rawcomp\n elif not (b[\"dep\"] == \"punct\" or (lem in tt.setStopWords and not tok == \"IT\" ) or (len(tok) == 1 and not tok in [\"R\", \"C\"])): #a single word / no compound\n #wcou[tok]=wcou.get(tok,0)+1\n addWord(wmap, tok, lem)\n\n for i, t in enumerate(s[\"tokens\"]): #add all tokens (single words/compounds)\n if i+1 in mapTow:\n if len(mapTow[i+1]) > 0: csent.append(mapTow[i+1])\n else:\n if \"-lrb-\" in t[\"word\"].lower(): csent.append(\"(\") #left bracket\n elif \"-rrb-\" 
in t[\"word\"].lower(): csent.append(\")\") #right brackt\n else: csent.append(t[\"word\"])\n #print(\"wmap\", wmap)\n docTokens.append(\" \".join(csent))\n newdocs.append((docid, docTokens))\n if not useNLTK: nlp.close()\n print(\" Parse errors\", failed, \"out of\", len(docs))\n\n return compounds, wmap, newdocs #,wcou",
"def process_text(text):\n return [token.text for token in nlp(text) if not token.is_stop]",
"def main():\n\n st.title(\"Spacy-Streamlit NLP App\")\n models = ['ja_core_news_md', 'ja_core_news_sm']\n \n# 3ใคใฎใขใใซใฏใตใคใบใ็ฐใชใฃใฆใใ\n# lg โ> large\n# md โ> medium\n# sm โ> small\n# core โ> ใขใใซใฎ่ฝๅใจใใฆๆฌกใฎใฟในใฏใ่กใใใใจใ็คบใ: general-purpose model with vocabulary, syntax, entities and word vectors\n# news โ> ใใฅใผใน่จไบใฎใใผใฟใงๅญฆ็ฟใใใใใจใ็คบใ\n\n choiced_model = st.sidebar.selectbox(\"Model\",models)\n\n nlp = load_model(choiced_model)\n\n menu = [\"MAIN\", \"Home\",\"NER\"]\n choice = st.sidebar.selectbox(\"Menu\",menu)\n \n if choice == \"MAIN\":\n st.subheader(\"visualize\")\n raw_text = st.text_area(\"Your Text\",\"Enter Text Here\")\n if st.button(\"Visualize\"):\n docx = nlp(raw_text)\n spacy_streamlit.visualize(models, docx)\n \n if choice == \"Home\":\n st.subheader(\"Tokenization\")\n raw_text = st.text_area(\"Your Text\",\"Enter Text Here\")\n docx = nlp(raw_text)\n if st.button(\"Tokenize\"):\n spacy_streamlit.visualize_tokens(docx,attrs=['text','pos_','dep_','ent_type_'])\n\n elif choice == \"NER\":\n st.subheader(\"Named Entity Recognition\")\n raw_text = st.text_area(\"Your Text\",\"Enter Text Here\")\n docx = nlp(raw_text)\n spacy_streamlit.visualize_ner(docx,labels=nlp.get_pipe('ner').labels)",
"def query_preprocess(input_pack: DataPack):\n sentence = input_pack.get_single(Sentence)\n\n relations = defaultdict(dict)\n text_mention_mapping = {}\n\n # get all srl relations\n for link in input_pack.get(PredicateLink, sentence):\n verb = link.get_parent()\n verb_text = verb.text\n argument = link.get_child()\n argument_text = argument.text\n\n text_mention_mapping[verb_text] = verb\n text_mention_mapping[argument_text] = argument\n relations[verb_text][link.arg_type] = argument_text\n\n arg0, arg1, predicate = None, None, None\n for verb_text, entity in relations.items():\n arg0, arg1, predicate = collect_mentions(text_mention_mapping, entity, verb_text)\n if not arg0 and not arg1:\n continue\n else:\n break\n\n if not arg0 and not arg1:\n raise Exception('AllenNLP SRL cannot extract the two arguments or the '\n 'predicate in your query, please check our examples '\n 'or rephrase your question')\n\n verb_lemma, is_answer_arg0 = None, None\n\n # check pos tag and lemma for tokens\n for j, token in enumerate(input_pack.get(entry_type=Token,\n range_annotation=sentence,\n components=['forte_wrapper.nltk.nltk_processors.NLTKWordTokenizer']\n )):\n # find WH words\n if token.pos in {\"WP\", \"WP$\", \"WRB\", \"WDT\"}:\n if arg0.begin <= token.begin and arg0.end >= token.end:\n is_answer_arg0 = True\n elif arg1.begin <= token.begin and arg1.end >= token.end:\n is_answer_arg0 = False\n\n # find verb lemma\n if token.text == predicate.text:\n verb_lemma = token.lemma\n\n return sentence, arg0.text if arg0 else '', arg1.text if arg1 else '', \\\n predicate.text, verb_lemma, is_answer_arg0",
"def main ():\n\n\tfio = fileIo('input.txt')\n text = fio.getInput()\n\n\tp = re.compile(r'#?\\d[\\s\\.]?[\\s]?')\n\tout = filter(None, p.split(text))\n\ti = 0\n\tlistOfLists = []\n\t\n\n\tfor s in out:\n\t\ti += 1\n\t\ttext = nltk.word_tokenize(s)\n\t\tpos = nltk.pos_tag(text)\n\t\tpattern = \"NP: {<DT>?<JJ>*<NN>}\"\n\t\tNPChunker = nltk.RegexpParser(pattern)\n\t\tresult = NPChunker.parse(pos)\n\t\tlistOfLists.append( result )\n\n\tprint \"Noun Count:\\n\" + str(countNouns( listOfLists ))\n\tprint \"Verb Count:\\n\" + str(countVerbs( listOfLists ))\n\tprint \"Adjective Count:\\n\" + str(countAdjectives( listOfLists ))",
"def prepare(self, nlp_model_names, text, sender):\n if self.nlp_model is None:\n # Replace \"NLP_MODEL\" with the name of the NLP models which this module should use.\n self.nlp_model = spacy.load(nlp_model_names[\"NLP_MODEL\"])\n to, when, body = self.nlp(text)\n self.description = None\n return self.prepare_processed(to, when, body, sender)",
"def process_sentence_pos_tags(input_file, group_tags):\n\n print('Reading file and POS tagging...')\n if input_file is not None:\n f = open(input_file, 'r', encoding='utf-8', errors='ignore')\n sentences = nltk.sent_tokenize(f.read())\n sentence_tag_tokens = [nltk.pos_tag(nltk.word_tokenize(\n sentence, language='english'), lang='eng')\n for sentence in sentences]\n else:\n sentence_tag_tokens = nltk.corpus.treebank.tagged_sents()[0:1000]\n\n sentences = []\n tags = []\n for pos_tags in sentence_tag_tokens:\n sentence_tmp = ''\n pos_tags_tmp = []\n for word, tag in pos_tags:\n sentence_tmp += word + ' '\n # Group tags\n if group_tags:\n # Preprocess tags\n if re.match('VB.*$', tag): # Group all verbs\n tag = 'VB'\n elif re.match('JJ.*$', tag): # Group all adjectives\n tag = 'JJ'\n elif re.match('NN$|NNS$', tag): # Group all nouns\n tag = 'NN'\n elif re.match('NNP$|NNPS$', tag): # Group all proper nouns\n tag = 'NNP'\n elif re.match('RB.*$', tag): # Group all adverbs\n tag = 'RB'\n\n if tag in concepts:\n pass\n else:\n tag = 'OTHER'\n pos_tags_tmp.append((word, tag))\n pos_tags_tmp.append((' ', 'SPACE'))\n sentences.append(sentence_tmp)\n tags.append(pos_tags_tmp)\n print('...completed.')\n return sentences, tags",
"def generate_entities(self, data):\r\n\t\t# create an empty dictionary to hold entities\r\n\t\tent_dic = {}\r\n\r\n\t\tfor row in data.itertuples():\r\n\t\t\t# feed nlp the first line's set of keywords\r\n\t\t\tdoc = self.nlp(row.keywords)\t\r\n\t\t\t# begin iterating through the nlp's entities\r\n\t\t\tfor ent in doc.ents:\r\n\r\n\t\t\t\t# For each entity, check if the label exists in 'ent_dic'.\r\n\t\t\t\t# If it does, append the entity into the key, value pair.\r\n\t\t\t\t# If it doesn't, create a new key, value pair\r\n\t\t\t\tkey = str(ent.label_) + ''\r\n\t\t\t\tif ent.label_ in ent_dic:\r\n\t\t\t\t\tent_dic[key].append(str(ent)) if not str(ent) in ent_dic[key] else print(f'The entity: {ent} is already in the array')\r\n\t\t\t\telse: \r\n\t\t\t\t\tent_dic[key] = [str(ent)]\r\n\r\n\t\t# return the dictionary of entities\r\n\t\treturn ent_dic",
"def entities_text(text):\n client = language.LanguageServiceClient()\n\n # Instantiates a plain text document.\n document = types.Document(\n content=text,\n type=enums.Document.Type.PLAIN_TEXT)\n\n # Detects entities in the document. You can also analyze HTML with:\n # document.type == enums.Document.Type.HTML\n entities = client.analyze_entities(document).entities\n\n # entity types from enums.Entity.Type\n entity_type = ('UNKNOWN', 'PERSON', 'LOCATION', 'ORGANIZATION',\n 'EVENT', 'WORK_OF_ART', 'CONSUMER_GOOD', 'OTHER')\n \n ingredients = []\n #out = \"\"\n products = [] \n wegmans = WegmansClass()\n\n for entity in entities:\n ingredients.append(entity.name)\n #out += entity.name + '\\n'\n weg_sku = wegmans.GetSKUs(entity.name)\n\n prod = []\n\n for x in weg_sku:\n r = wegmans.GetProduct(x)\n prod.append(r)\n #out += r.name + \", \" + str(r.price) + \", \" + str(r.image) + str(r.velocity) + '#'\n\n products.append(prod)\n\n #print('=' * 20)\n #print(u'{:<16}: {}'.format('name', entity.name))\n #print(u'{:<16}: {}'.format('type', entity_type[entity.type]))\n #print(u'{:<16}: {}'.format('metadata', entity.metadata))\n #print(u'{:<16}: {}'.format('salience', entity.salience))\n #print(u'{:<16}: {}'.format('wikipedia_url',\n # entity.metadata.get('wikipedia_url', '-')))\n\n #for x in ingredients:\n # print(x + '\\n')\n\n #for x in products:\n # for y in x:\n # print(y.name)\n # print(y.price)\n # print(y.velocity)\n # print(y.image)\n # print('\\t')\n\n js = json.dumps(products, default=lambda o: o.__dict__)\n \n return jsonify({\"results\": json.loads(js)})",
"def main(self, data):\n\t\ttokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\", do_lower_case=True)\n\t\teval_features = self.get_features(data, self.labels, tokenizer, self.max_seq_length)\n\t\tlabel, prob = self.predict(eval_features)\n\t\treturn label, prob",
"def word_level_prediction(model_dir: str, ner_training_output_dir: str, ner_data_dir: str):\n\n output_dir = 'gs://ekaba-assets/{}/{}/{}'.format(model_dir, ner_training_output_dir, ner_data_dir)\n ner_data_dir_path = 'gs://ekaba-assets/datasets/NER/{}'.format(ner_data_dir)\n\n try:\n run('python biobert/biocodes/ner_detoknize.py --token_test_path={}/token_test.txt ' \\\n '--label_test_path={}/label_test.txt --answer_path={}/test.tsv --output_dir={} '.format(\n output_dir, output_dir, ner_data_dir_path, output_dir\n ))\n except exceptions.UnexpectedExit:\n print('Cannot do NER word level prediction')\n\n try:\n if not os.path.exists('{}'.format(ner_training_output_dir)):\n os.makedirs('{}'.format(ner_training_output_dir))\n\n run('gsutil cp gs://ekaba-assets/{}/{}/{}/NER_result_conll.txt {}'.format(\n model_dir, ner_training_output_dir, ner_data_dir, ner_training_output_dir))\n\n run('perl biobert/biocodes/conlleval.pl < {}/NER_result_conll.txt'.format(ner_training_output_dir))\n except exceptions.UnexpectedExit:\n print('Cannot do NER word level prediction - perl biocodes')",
"def get_features(self, para, label_list, tokenizer, max_seq_length):\n\t\tlabel_map = {label : i for i, label in enumerate(label_list)}\n# self.reverse_label_map = {v: k for k, v in label_map.items()}\n\t\tguid = \"%s-%s\" % (\"test\", 1)\n\t\ttext_a = para[\"model_answer\"]\n\t\ttext_b = para[\"candidate_answer\"]\n\t\tlabel = label_list[0]\n\t\texample = InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)\n\t\t\n\t\ttokens_a = tokenizer.tokenize(example.text_a)\n\n\t\ttokens_b = tokenizer.tokenize(example.text_b)\n\t\tif example.text_b:\n\t\t\ttokens_b = tokenizer.tokenize(example.text_b)\n\t\t\t# Modifies `tokens_a` and `tokens_b` in place so that the total\n\t\t\t# length is less than the specified length.\n\t\t\t# Account for [CLS], [SEP], [SEP] with \"- 3\"\n\t\t\tself._truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n\t\telse:\n\t\t\t# Account for [CLS] and [SEP] with \"- 2\"\n\t\t\tif len(tokens_a) > max_seq_length - 2:\n\t\t\t\ttokens_a = tokens_a[:(max_seq_length - 2)]\n\n\t\t# The convention in BERT is:\n\t\t# (a) For sequence pairs:\n\t\t# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n\t\t# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n\t\t# (b) For single sequences:\n\t\t# tokens: [CLS] the dog is hairy . [SEP]\n\t\t# type_ids: 0 0 0 0 0 0 0\n\t\t#\n\t\t# Where \"type_ids\" are used to indicate whether this is the first\n\t\t# sequence or the second sequence. The embedding vectors for `type=0` and\n\t\t# `type=1` were learned during pre-training and are added to the wordpiece\n\t\t# embedding vector (and position vector). This is not *strictly* necessary\n\t\t# since the [SEP] token unambigiously separates the sequences, but it makes\n\t\t# it easier for the model to learn the concept of sequences.\n\t\t#\n\t\t# For classification tasks, the first vector (corresponding to [CLS]) is\n\t\t# used as as the \"sentence vector\". Note that this only makes sense because\n\t\t# the entire model is fine-tuned.\n\t\ttokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n\t\tsegment_ids = [0] * len(tokens)\n\n\t\tif tokens_b:\n\t\t\ttokens += tokens_b + [\"[SEP]\"]\n\t\t\tsegment_ids += [1] * (len(tokens_b) + 1)\n\n\t\tinput_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n\t\t# The mask has 1 for real tokens and 0 for padding tokens. Only real\n\t\t# tokens are attended to.\n\t\tinput_mask = [1] * len(input_ids)\n\n\t\t# Zero-pad up to the sequence length.\n\t\tpadding = [0] * (max_seq_length - len(input_ids))\n\t\tinput_ids += padding\n\t\tinput_mask += padding\n\t\tsegment_ids += padding\n\n\t\tassert len(input_ids) == max_seq_length\n\t\tassert len(input_mask) == max_seq_length\n\t\tassert len(segment_ids) == max_seq_length\n\t\tlabel_id = label_map[example.label]\n# print(\"*** Example ***\")\n# print(\"guid: %s\" % (example.guid))\n# print(\"tokens: %s\" % \" \".join(\n# [str(x) for x in tokens]))\n# print(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n# print(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n# print(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n\n\t\t\n\t\treturn InputFeatures(input_ids=input_ids,\n\t\t\t\t\t\t\t input_mask=input_mask,\n\t\t\t\t\t\t\t segment_ids=segment_ids,\n\t\t\t\t\t\t\t label_id=label_id)",
"def _preprocess(self, txt_seq):\n input = []\n label = []\n punc = \" \"\n for token in txt_seq.split():\n if token in self.punc2id:\n punc = token\n else:\n input.append(self.word2id.get(token, self.word2id[\"<UNK>\"]))\n label.append(self.punc2id[punc])\n punc = \" \"\n input.append(self.word2id[\"<END>\"])\n label.append(self.punc2id[punc])\n input = torch.LongTensor(input)\n label = torch.LongTensor(label)\n # input = np.array(input)\n # label = np.array(label)\n return input, label",
"def sentence_parse(list_of_posts): \n for parsedPosts in nlp.pipe(line_review(list_of_posts)):\n for sent in parsedPosts.sents:\n yield str(sent)",
"def training(training_data, iterations):\n nlp = spacy.blank('en') # create blank Language class\n # create the built-in pipeline components and add them to the pipeline\n # nlp.create_pipe works for built-ins that are registered with spaCy\n if 'ner' not in nlp.pipe_names:\n ner = nlp.create_pipe('ner')\n nlp.add_pipe(ner, last=True)\n\n # add labels\n for _, annotations in training_data:\n for ent in annotations.get('entities'):\n ner.add_label(ent[2])\n\n # get names of other pipes to disable them during training\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']\n with nlp.disable_pipes(*other_pipes): # only train NER\n optimizer = nlp.begin_training()\n for itn in range(iterations):\n print(\"Starting iteration \" + str(itn))\n random.shuffle(TRAIN_DATA)\n losses = {}\n for text, annotations in training_data:\n nlp.update(\n [text], # batch of texts\n [annotations], # batch of annotations\n drop=0.2, # dropout - make it harder to memorise data\n sgd=optimizer, # callable to update weights\n losses=losses)\n print(losses)\n return nlp",
"def test_text_classifier_get_labels(self):\n pass",
"def perform_NER(self,text):\n X_test = []\n documents = [text]\n sequences = tokenize_fa(documents)\n word_sequences = []\n for seq in sequences:\n features_seq = []\n labels_seq = []\n sentence = []\n for i in range(0, len(seq)):\n features_seq.append(self.word2features(seq, i))\n labels_seq.append(self.word2labels(seq[i]))\n sentence.append(seq[i][0])\n X_test.append(features_seq)\n word_sequences.append(sentence)\n y_pred = [self.crf_model.tag(xseq) for xseq in X_test]\n #y_pred = self.crf_model.tag(X_test)\n final_sequences = []\n for i in range(0,len(y_pred)):\n sentence = []\n for j in range(0,len(y_pred[i])):\n sentence.append((word_sequences[i][j],y_pred[i][j]))\n final_sequences.append(sentence)\n return final_sequences"
]
| [
"0.6044815",
"0.60106623",
"0.5951502",
"0.59280443",
"0.58727735",
"0.582777",
"0.5826666",
"0.57033956",
"0.56903803",
"0.5683299",
"0.56206745",
"0.56038415",
"0.56035453",
"0.55429745",
"0.55383086",
"0.553127",
"0.55225134",
"0.5516551",
"0.5504731",
"0.5500881",
"0.5488051",
"0.54480016",
"0.54469293",
"0.54066676",
"0.5380213",
"0.5370099",
"0.5358817",
"0.5355973",
"0.53515595",
"0.53401864"
]
| 0.66848516 | 0 |
Returns an array of all pixels' rgb values in a surface | def get_pixels(surface):
pixels = []
for y in range(surface.get_height()):
for x in range(surface.get_width()):
pixels.append(surface.get_at((x,y))[:3])
return pixels | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rgb(self):\n return [self.__r, self.__g, self.__b]",
"def getPixels(self):\n\t\treturn self.strip.ledsColorBuffer",
"def get_rgb_light():\n return list(light.rgb())",
"def _colored_img_to_arr(image, verbose=False):\n height, width = image.size\n arr = np.array(image.getdata())\n arr = arr.reshape(3, height, width)\n r = arr[0]\n g = arr[1]\n b = arr[2]\n return r, g, b",
"def getState(game):\n pixels = pygame.surfarray.array3d(game.screen)[:]\n pixels = np.array([pixels], dtype=float)\n\n # Here we will preprocess the pixel data\n bitsize = game.screen.get_bitsize() / 4\n pixels *= 1 / 2**bitsize # Normalize to [0..1]\n\n return pixels",
"def rgb_image(self):\n z3 = self.z[:,:,newaxis]\n return z3 * self.c",
"def rgb(self):\n return (self.r, self.g, self.b)",
"def get_colors(self):\n x = np.linspace(0, 1, self.length)\n y = x**self.gamma\n\n value = np.linspace(0, 1, len(self.colors))\n r = np.interp(y, value, self.colors[:,0])\n g = np.interp(y, value, self.colors[:,1])\n b = np.interp(y, value, self.colors[:,2])\n\n return np.dstack((r, g, b)).reshape(len(r), 3).astype(np.uint8)",
"def get_all_rgb_values(self):\n\n rgb_values = []\n response = self._table.scan()\n for item in response['Items']:\n rgb_values.append(self._convert_rgb_string_to_tuple(item['rgb_values']))\n\n return rgb_values",
"def GetRGBArray(self, p_int):\n ...",
"def get_rgbColorArray(self, ledIndex, count):\n # buff\n res = []\n # idx\n # r\n # g\n # b\n\n buff = self._download(\"rgb.bin?typ=0&pos=\" + str(int(3*ledIndex)) + \"&len=\" + str(int(3*count)))\n del res[:]\n\n idx = 0\n while idx < count:\n r = YGetByte(buff, 3*idx)\n g = YGetByte(buff, 3*idx+1)\n b = YGetByte(buff, 3*idx+2)\n res.append(r*65536+g*256+b)\n idx = idx + 1\n\n return res",
"def render(self):\n\n pixels = [\n [Color() for _ in range(self.width)] for _ in range(self.height)]\n\n for y in range(self.height):\n for x in range(self.width):\n ray_direction = Point(x, y) - self.camera\n ray = Ray(self.camera, ray_direction)\n pixels[y][x] = self._trace_ray(ray)\n\n return pixels",
"def rgb(self):\n return (self.red, self.green, self.blue)",
"def get_pixel_list(img):\n orig_shape = img.shape # Remember the original shape of the img.\n # Store the img as a x by z array (z being the length of the colour space)\n # Essentially just a list of pixels.\n\n if len(img.shape) == 3:\n img = img.reshape(img.shape[0] * img.shape[1], img.shape[2])\n elif len(img.shape) == 2:\n img = img.reshape(img.shape[0] * img.shape[1],)\n return orig_shape, img",
"def RGB255(RGB):\r\n return [round((RGB[0]*255), 2),\r\n round((RGB[1]*255), 2),\r\n round((RGB[2]*255), 2)]",
"def get_pixels(self):\n\n # pygame board needs to be initialized the first time\n if not self.board:\n self.setup_display(render_gui=False)\n\n self.draw_window(draw_leaderboard=False)\n pixels = pygame.surfarray.array3d(self.window)\n return np.moveaxis(pixels, 1, 0)",
"def surface_indices(self):\n return self._unique_surface_indices(self.surf_mesh)",
"def get_player_colors() -> List[Tuple[float, float, float]]:\n return PLAYER_COLORS",
"def _rgb(x, y, z):\n rgb = np.array([x, y, z]).T\n rgb -= rgb.min(0)\n rgb /= np.maximum(rgb.max(0), 1e-16) # avoid div by zero\n return rgb",
"def pix_to_rvals(cube, pixel):\n return cube.rvals[pixel[1], pixel[0]] * cube.dist",
"def colors(self):\n\t\treturn [(0, 30, 255),(0, 30, 120)]",
"def read_colormap(name):\n\n path = get_demo_file(name + '.c3g')\n\n out = []\n with open(path, 'r') as file:\n for line in file:\n if 'rgb(' not in line:\n continue\n line = line.split('(')[-1].split(')')[0]\n out.append([float(n) for n in line.split(',')])\n\n return np.asarray(out).astype(float) / 256.",
"def rgb_bytes(self):\n return (\n int(self.red * 255),\n int(self.green * 255),\n int(self.blue * 255),\n )",
"def _get_region_color(self, region):\n return [\n x / 255 for x in self._get_from_structure(region, \"rgb_triplet\")\n ]",
"def get_rgbColorArrayAtPowerOn(self, ledIndex, count):\n # buff\n res = []\n # idx\n # r\n # g\n # b\n\n buff = self._download(\"rgb.bin?typ=4&pos=\" + str(int(3*ledIndex)) + \"&len=\" + str(int(3*count)))\n del res[:]\n\n idx = 0\n while idx < count:\n r = YGetByte(buff, 3*idx)\n g = YGetByte(buff, 3*idx+1)\n b = YGetByte(buff, 3*idx+2)\n res.append(r*65536+g*256+b)\n idx = idx + 1\n\n return res",
"def surfaces(self):\n surfaces = []\n for i in range(1000):\n surface = self.surfaceInfo(i)\n if surface is not None:\n surfaces.append(surface)\n else:\n break\n\n return surfaces",
"def generate_lut(self):\n r,g,b=(Numeric.zeros(256),Numeric.zeros(256),Numeric.zeros(256))\n for i in Numeric.arange(256):\n r_,g_,b_=self.colfct(i/255.0) # these are from [0,1]\n r[i],g[i],b[i]=int(255*r_),int(255*g_),int(255*b_)\n return r,g,b",
"def getColors(num, rgb=False):\n cmapRGB = get_cmap('nipy_spectral', num)\n c = [cmapRGB(i)[:-1] for i in range(num)][1:]\n if rgb:\n return c\n # the negative of the first color\n c2 = [tuple(arr((1, 1, 1)) - arr(color)) for color in c]\n c = ['#%02x%02x%02x' % tuple(int(255 * color[i]) for i in range(len(color))) for color in c]\n c2 = ['#%02x%02x%02x' % tuple(int(255 * color[i]) for i in range(len(color))) for color in c2]\n return c, c2",
"def get_rgb(self, img, r, g, b):\r\n\r\n # Get specific bands of hyperspectral image\r\n red_channel = img[:, :, r]\r\n green_channel = img[:, :, g]\r\n blue_channel = img[:, :, b]\r\n\r\n img = np.stack((red_channel, green_channel, blue_channel), axis=2)\r\n img = img.astype('float32')\r\n return img",
"def surfacePairs(self):\n return self._els"
]
| [
"0.70980895",
"0.66557866",
"0.66320956",
"0.6622494",
"0.6541926",
"0.65298855",
"0.6529636",
"0.6522719",
"0.6466939",
"0.64495635",
"0.6425906",
"0.63456154",
"0.63314754",
"0.6304527",
"0.62431765",
"0.6228581",
"0.61598295",
"0.610881",
"0.61073124",
"0.60987216",
"0.6046213",
"0.6043861",
"0.60247356",
"0.6021844",
"0.60079193",
"0.5996096",
"0.59960157",
"0.5971086",
"0.59410214",
"0.59386635"
]
| 0.7694175 | 0 |
image_location is a file path to the image. First we load the surface, turn it into a small 8x8 greyscale copy, get the average rgb from that, and create our hash from the average rgbs | 
def image_hash(image_location):
image = pygame.image.load(image_location)
grey = greyscale(image)
avg = average_image_value(grey)
bitstring = ''
for pixels in get_pixels(grey):
if pixels[0] < avg:
bitstring += '1'
else: bitstring += '0'
hash = int(bitstring, 2).__format__('016x').upper()
return hash | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_average_hash(image_path):\n\n return imagehash.average_hash(Image.open(image_path))",
"def _calc_hash(self) -> None:\n self.image = Image.open(self.path)\n self.image = self.image.convert(\"L\")\n self.image = self.image.resize((self.width, self.height), Image.ANTIALIAS)\n lpixels = list(self.image.getdata())\n self.hash = \"0b\"\n for i, pixel in enumerate(lpixels):\n if (i + 1) % self.width == 0 and i != 0:\n continue\n if pixel < lpixels[i + 1]:\n self.hash += \"1\"\n continue\n self.hash += \"0\"\n self.hash_hex = DHash.bin2hex(self.hash)",
"def pHash(imgfile):\n #ๅ ่ฝฝๅนถ่ฐๆดๅพ็ไธบ32x32็ฐๅบฆๅพ็\n img=cv.imread(imgfile, cv.IMREAD_GRAYSCALE) \n img=cv.resize(img,(64,64),interpolation=cv.INTER_CUBIC)\n\n #ๅๅปบไบ็ปดๅ่กจ\n h, w = img.shape[:2]\n vis0 = np.zeros((h,w), np.float32)\n vis0[:h,:w] = img #ๅกซๅ
ๆฐๆฎ\n\n #ไบ็ปดDctๅๆข\n vis1 = cv.dct(cv.dct(vis0))\n #cv.SaveImage('a.jpg',cv.fromarray(vis0)) #ไฟๅญๅพ็\n vis1.resize(32,32)\n\n #ๆไบ็ปดlistๅๆไธ็ปดlist\n img_list=list(itertools.chain.from_iterable(vis1.tolist()))\n #่ฎก็ฎๅๅผ\n avg = sum(img_list)*1./len(img_list)\n avg_list = ['0' if i<avg else '1' for i in img_list]\n\n #ๅพๅฐๅๅธๅผ\n return ''.join(['%x' % int(''.join(avg_list[x:x+4]),2) for x in range(0,32*32,4)])",
"def computeAverageHash(img, length = 64):\n if length == 64:\n img_resize = cv2.resize(img, (8, 8))\n elif length == 32:\n img_resize = cv2.resize(img, (6, 6))\n avg = np.mean(img_resize)\n img_hash = np.where(img_resize > avg, 1, 0)\n hash = \"\".join(map(str, img_hash.flatten()[0:length]))\n return hash",
"def check_image_color(image):\n\n def check_color(i, j, k):\n \"\"\" Function used only for DEBUGGING\"\"\"\n img.show()\n image = Image.new(\"RGB\", (200, 200), (int(Y), int(Y), int(Y)))\n image.show()\n image = Image.new(\"RGB\", (200, 200), (int(i), int(j), int(k)))\n image.show()\n\n if not os.path.isfile(image):\n return \"Image not found\"\n\n def calculate_bgr(data):\n average_color_per_row = numpy.average(data, axis=0)\n average_color = numpy.average(average_color_per_row, axis=0)\n return tuple(average_color)\n\n def calculate_y(r, g, b):\n alpha = 0.299\n betta = 0.587\n gamma = 0.114\n return alpha * r + betta * g + gamma * b\n\n # split the image for four squares calucate averate pixel for them and take higest value\n # blure image and save to /Library/Caches as com.apple.desktop.admin.png\n # in case using blur tool --> blur = cv2.blur(img,(5,5))\n try:\n img_cv_data = cv2.imread(image)\n B, G, R = calculate_bgr(img_cv_data)\n Y = calculate_y(B, G, R)\n height, width = img_cv_data.shape[:2]\n except Exception as err:\n print(f\"[ERROR] {err} with image: {image}\")\n return \"Error parsing image\"\n\n # image detection\n if Y < 72.0:\n _type = \"dark\"\n elif Y >= 73.0 and Y <= 108.0:\n _type = \"evening\"\n else:\n _type = \"light\"\n\n return _type",
"def sort(self):\n\n img_files = os.listdir(self.path)\n\n img_list = {}\n\n for img_file in img_files:\n filename = os.path.join(self.path, img_file)\n\n try:\n img = Image.open(filename)\n except:\n continue\n\n print \"Analyzing %s\" % img_file\n\n points = self.points(img.size[0], img.size[1])\n key = \"\"\n for point in points:\n\n # Get the average color for each point\n ave_points = self.diamond_points(point[0], point[1])\n red = 0\n green = 0\n blue = 0\n for ave_point in ave_points:\n try:\n rgb = img.getpixel(ave_point)\n red += rgb[0]\n green += rgb[1]\n blue += rgb[2]\n except IndexError:\n pass\n red /= len(ave_points)\n green /= len(ave_points)\n blue /= len(ave_points)\n\n # Bitdepths:\n # 12 bit - 4096 colors, range 0-F, divide by 16\n # 9 bit - 512 colors, range 0-7, divide by 32\n # 6 bit - 64 colors, range 0-3, divide by 64\n # 3 bit - 8 colors, range 0-1, divide by 128\n\n if self.num_colors == 8:\n div = 128\n elif self.num_colors == 64:\n div = 64\n elif self.num_colors == 512:\n div = 32\n elif self.num_colors == 4096:\n div = 16\n else:\n self.usage()\n\n # Lower the bitdepth\n red = int(red / div)\n green = int(green / div)\n blue = int(blue / div)\n\n # Add to the key\n key += \"%x%x%x\" % (red, green, blue)\n\n # Add the key if needed\n if key not in img_list:\n img_list[key] = []\n\n # Add the file to the list\n img_list[key].append(img_file)\n\n # Go through and rename the files, based on the img_list dictionary\n # and the prefix\n num = 1\n for img in sorted(img_list.iterkeys()):\n for filename in sorted(img_list[img]):\n name, ext = os.path.splitext(filename)\n new_filename = \"%s%04d%s\" % (self.prefix, num, ext)\n full_filename = os.path.join(self.path, filename)\n full_new_filename = os.path.join(self.path, new_filename)\n if os.path.isfile(full_new_filename):\n print \"File %s exists - aborting!\" % full_new_filename\n return\n\n os.rename(full_filename, full_new_filename)\n print \"Renamed %s to %s.\" % (filename, new_filename)\n num += 1",
"def readImageFileRGB(imageFileLocation):\n \n sourceImage = io.imread(imageFileLocation)\n \n return sourceImage",
"def compute_ahash(im):\n return imagehash.average_hash(ensure_pil(im))",
"def read_file(filename):\n\tprint(\"Beginning file read...\")\n\tmy_image = cv2.imread(filename)\n\treturn computer_hash(my_image)",
"def current_average_luma(camera):\n camera.capture('/home/pi/Desktop/image1.jpg')#camera take picture\n img = Image.open(\"/home/pi/Desktop/image1.jpg\") #opens image\n \n luma=0 #sum of the lumenance of each pixels\n pixels = img.width*img.height #number of pixels\n \n for x in range(img.width):\n for y in range(img.height):\n (r, g, b) = img.getpixel((x,y))#get colour touple \n luma += (0.2126*r + 0.7152*g + 0.0722*b) #calculate luma of RGB data, then add to total\n #END for\n #END for\n \n img.close()#ensure to properly close the image\n return luma/pixels #return average of all pixels",
"def compareTo(self,imagefullpath):\n exc = ExtractColor2(self.k)\n bgrcolor = exc.getColorBGR(imagefullpath)\n\n score = 0\n for i in range(self.k):\n score += np.linalg.norm(bgrcolor[i] - self._ref_BGRcolor[i])/(np.sqrt(255*255*3))\n score /= self.k\n return 1 - score",
"def compute_textures(\n src_image,\n src_profile,\n dst_dir,\n kind='simple',\n x_radius=5,\n y_radius=5,\n x_offset=1,\n y_offset=1,\n image_min=0,\n image_max=255,\n nb_bins=64,\n prefix='',\n):\n # Write input image to disk\n profile = src_profile.copy()\n profile.update(dtype='uint8', transform=None, count=1)\n tmp_dir = tempfile.TemporaryDirectory()\n tmp_image = os.path.join(tmp_dir.name, 'image.tif')\n with rasterio.open(tmp_image, 'w', **profile) as dst:\n dst.write(src_image.astype(np.uint8), 1)\n\n # Run OTB command\n tmp_glcm = os.path.join(tmp_dir.name, 'glcm.tif')\n subprocess.run([\n 'otbcli_HaralickTextureExtraction', '-in', tmp_image,\n '-parameters.xrad', str(x_radius), '-parameters.yrad', str(y_radius),\n '-parameters.xoff', str(x_offset), '-parameters.yoff', str(y_offset),\n '-parameters.min', str(image_min), '-parameters.max', str(image_max),\n '-parameters.nbbin', str(nb_bins), '-texture', kind,\n '-out', tmp_glcm, 'double'\n ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n\n # Save each texture in an individual GeoTIFF\n os.makedirs(dst_dir, exist_ok=True)\n with rasterio.open(tmp_glcm) as src:\n\n for i, texture in enumerate(TEXTURES[kind]):\n \n profile = src.profile\n img = src.read(i+1).astype(np.float64)\n\n # Linear rescale and convert to UINT16\n img = np.interp(img, (img.min(), img.max()), (0, 65535))\n img = img.astype(np.uint16)\n profile.update(dtype=img.dtype.name)\n\n # Save as 1-band compressed GeoTIFF\n profile.update(compression='LZW', count=1, transform=None)\n filename = f'{prefix}{texture}_{x_radius*2+1}x{y_radius*2+1}.tif'\n output_file = os.path.join(dst_dir, filename)\n with rasterio.open(output_file, 'w', **profile) as dst:\n dst.write(img, 1)\n\n tmp_dir.cleanup()\n return",
"def compute_phash(im):\n return imagehash.phash(ensure_pil(im))",
"def search_image_hash(self, image_id: str) -> int:\n return self._id_to_hash.get(image_id)",
"def verify_image(self, image_location):\n checksum = self._hash_algo.hexdigest()\n LOG.debug('Verifying image at %(image_location)s against '\n '%(algo_name)s checksum %(checksum)s',\n {'image_location': image_location,\n 'algo_name': self._hash_algo.name,\n 'checksum': checksum})\n if checksum != self._expected_hash_value:\n error_msg = errors.ImageChecksumError.details_str.format(\n image_location, self._image_info['id'],\n self._expected_hash_value, checksum)\n LOG.error(error_msg)\n raise errors.ImageChecksumError(image_location,\n self._image_info['id'],\n self._expected_hash_value,\n checksum)",
"def record(self, new_image):\r\n\r\n hash = dhash.dhash_int(Image.fromarray(new_image), HASH_SIZE)\r\n if hash not in self.existing_dhashes:\r\n self.existing_dhashes.append(hash)\r\n write_to = self.output_dir / (str(hash) + \".png\")\r\n print(\"Writing image to \", write_to)\r\n cv2.imwrite(str(write_to), new_image)",
"def compress_color_data(self):\n avg_rgb_vals_dict = {} # dictionary of average color coordinates\n for label in self.labels_list:\n try:\n avg_rgb = np.mean(\n np.mean(np.mean(self.jzazbz_dict[label], axis=0), axis=0), axis=0\n )\n avg_rgb_vals_dict[label] = avg_rgb\n except Exception as exc:\n self.log.error(exc)\n self.log.error(label + \" failed\")\n self.avg_rgb_vals_dict = avg_rgb_vals_dict\n\n jzazbz_dict_simp = {}\n for label in self.labels_list:\n avg_jzazbz = np.mean(self.jzazbz_dist_dict[label], axis=0)\n jzazbz_dict_simp[label] = avg_jzazbz\n self.jzazbz_dict_simp = jzazbz_dict_simp",
"def get_image(self):\n if self._image is None:\n image_data = np.load(self.image_file)\n if not isinstance(image_data, np.ndarray):\n image_data = image_data['arr_0']\n self.meta_data = ImageWrapper.load_metadata(self.image_file+\".meta\")\n exposure_time = self.meta_data['exposure_time_us'] * 1e-6\n dark_level = float(self.meta_data['black_level'])\n # saturation_mask = image_data.max(axis=2) >= 4094\n image_data = np.clip((image_data.astype(np.float32) - dark_level),\n a_min=0.0, a_max=None) / exposure_time\n if self.original_vignetting is not None:\n image_data = image_data / self.original_vignetting\n if self.crop is not None:\n image_data = image_data[\n self.crop[1,0]:self.crop[1,1],\n self.crop[0,0]:self.crop[0,1]\n ]\n # saturation_mask = saturation_mask[\n # self.crop[1,0]:self.crop[1,1],\n # self.crop[0,0]:self.crop[0,1]\n # ]\n if self.down_sample is not None:\n image_data = cv2.resize(\n image_data,\n dsize=None,\n fx=1./self.down_sample,\n fy=1./self.down_sample,\n interpolation=cv2.INTER_AREA\n )\n # saturation_mask = cv2.resize(\n # saturation_mask,\n # dsize=None,\n # fx=1./self.down_sample,\n # fy=1./self.down_sample,\n # interpolation=cv2.INTER_AREA\n # )\n if self.reup_sample is not None:\n image_data = cv2.resize(\n image_data,\n dsize=None,\n fx=self.reup_sample,\n fy=self.reup_sample,\n interpolation=cv2.INTER_CUBIC\n )\n # saturation_mask = cv2.resize(\n # saturation_mask,\n # dsize=None,\n # fx=self.reup_sample,\n # fy=self.reup_sample,\n # interpolation=cv2.INTER_CUBIC\n # )\n image = torch.tensor(np.transpose(image_data, (2,0,1)), dtype=torch.float32, device=self.device)\n # saturation_mask = torch.tensor(saturation_mask, dtype=torch.float32, device=self.device)\n if not self.lazy:\n self._image = image\n # self._saturation_mask = saturation_mask\n else:\n image = self._image\n # saturation_mask = self._saturation_mask\n\n return image#, saturation_mask",
"def diff_image_color(image_path0, image_path1):\n image0 = Image.open(image_path0)\n #color_image0 = get_histogram(image0)\n color_image0 = image0.histogram()\n cut_color_image0 = cut_histogram_min(color_image0)\n image1 = Image.open(image_path1)\n color_image1 = image1.histogram()\n #color_image1 = get_histogram(image1)\n cut_color_image1 = cut_histogram_min(color_image1)\n color_difference = bhattacharyya(color_image0, color_image1)\n return color_difference",
"def stain_image(image, num_stains, color):",
"def rgb_image(self):\n z3 = self.z[:,:,newaxis]\n return z3 * self.c",
"def process_image(overviews, db_graph, input_filename, color, out_raster_srs):\n if verbose > 0:\n print(\"~~~process_image\")\n input_image = gdal.Open(input_filename)\n stem = Path(input_filename).stem\n if not(\"dataSet\" in overviews):\n overviews['dataSet'] = {}\n overviews['dataSet']['boundingBox'] = {}\n overviews['dataSet']['limits'] = {}\n\n tile_limits = get_tile_limits(input_filename)\n\n if not(\"LowerCorner\" in overviews['dataSet']['boundingBox']):\n overviews['dataSet']['boundingBox'] = tile_limits\n else:\n if tile_limits['LowerCorner'][0] < overviews['dataSet']['boundingBox']['LowerCorner'][0]:\n overviews['dataSet']['boundingBox']['LowerCorner'][0] = tile_limits['LowerCorner'][0]\n if tile_limits['LowerCorner'][1] < overviews['dataSet']['boundingBox']['LowerCorner'][1]:\n overviews['dataSet']['boundingBox']['LowerCorner'][1] = tile_limits['LowerCorner'][1]\n if tile_limits['UpperCorner'][0] > overviews['dataSet']['boundingBox']['UpperCorner'][0]:\n overviews['dataSet']['boundingBox']['UpperCorner'][0] = tile_limits['UpperCorner'][0]\n if tile_limits['UpperCorner'][1] > overviews['dataSet']['boundingBox']['UpperCorner'][1]:\n overviews['dataSet']['boundingBox']['UpperCorner'][1] = tile_limits['UpperCorner'][1]\n\n # for z in tiles:\n for tile_z in range(overviews['level']['min'], overviews['level']['max'] + 1):\n print('Niveau de zoom : ', tile_z)\n\n resolution = overviews['resolution'] * 2 ** (overviews['level']['max'] - tile_z)\n\n MinTileCol = \\\n math.floor(round((tile_limits['LowerCorner'][0] - overviews['crs']['boundingBox']['xmin'])/(resolution*overviews['tileSize']['width']),8))\n MinTileRow = \\\n math.floor(round((overviews['crs']['boundingBox']['ymax']-tile_limits['UpperCorner'][1])/(resolution*overviews['tileSize']['height']),8))\n MaxTileCol = \\\n math.ceil(round((tile_limits['UpperCorner'][0] - overviews['crs']['boundingBox']['xmin'])/(resolution*overviews['tileSize']['width']),8)) - 1\n MaxTileRow = \\\n math.ceil(round((overviews['crs']['boundingBox']['ymax']-tile_limits['LowerCorner'][1])/(resolution*overviews['tileSize']['height']),8)) - 1\n\n if not( str(tile_z) in overviews['dataSet']['limits'] ):\n overviews['dataSet']['limits'][str(tile_z)] = {\n 'MinTileCol': MinTileCol,\n 'MinTileRow': MinTileRow,\n 'MaxTileCol': MaxTileCol,\n 'MaxTileRow': MaxTileRow,\n }\n\n else:\n if MinTileCol < overviews['dataSet']['limits'][str(tile_z)]['MinTileCol']:\n overviews['dataSet']['limits'][str(tile_z)]['MinTileCol'] = MinTileCol\n if MinTileRow < overviews['dataSet']['limits'][str(tile_z)]['MinTileRow']:\n overviews['dataSet']['limits'][str(tile_z)]['MinTileRow'] = MinTileRow\n if MaxTileCol > overviews['dataSet']['limits'][str(tile_z)]['MaxTileCol']:\n overviews['dataSet']['limits'][str(tile_z)]['MaxTileCol'] = MaxTileCol\n if MaxTileRow > overviews['dataSet']['limits'][str(tile_z)]['MaxTileRow']:\n overviews['dataSet']['limits'][str(tile_z)]['MaxTileRow'] = MaxTileRow\n\n for tile_x in range(MinTileCol, MaxTileCol + 1): \n for tile_y in range(MinTileRow, MaxTileRow + 1):\n # on cree une image 3 canaux pour la tuile\n opi = create_blank_tile(overviews, {'x': tile_x, 'y': tile_y, 'resolution': resolution}, 3, out_raster_srs)\n # on reech l'OPI dans cette image\n gdal.Warp(opi, input_image)\n # si necessaire on cree le dossier de la tuile\n tile_dir = args.cache+'/'+str(tile_z)+'/'+str(tile_y)+'/'+str(tile_x)\n Path(tile_dir).mkdir(parents=True, exist_ok=True)\n # on export en jpeg (todo: gerer le niveau de Q)\n 
PNG_DRIVER.CreateCopy(tile_dir+\"/\"+stem+\".png\", opi)\n # on cree une image mono canal pour la tuile\n mask = create_blank_tile(overviews, {'x': tile_x, 'y': tile_y, 'resolution': resolution}, 3, out_raster_srs)\n # on rasterise la partie du graphe qui concerne ce cliche\n gdal.Rasterize(mask, db_graph,\n SQLStatement='select geom from ' + args.table + ' where cliche = \\''+stem+'\\' ')\n img_mask = mask.GetRasterBand(1).ReadAsArray()\n # si le mask est vide, on a termine\n val_max = np.amax(img_mask)\n if val_max > 0:\n # on cree le graphe et l'ortho\n ortho = create_blank_tile(overviews, {'x': tile_x, 'y': tile_y, 'resolution': resolution}, 3, out_raster_srs)\n graph = create_blank_tile(overviews, {'x': tile_x, 'y': tile_y, 'resolution': resolution}, 3, out_raster_srs)\n if Path(tile_dir+\"/ortho.png\").is_file():\n existing_ortho = gdal.Open(tile_dir+\"/ortho.png\")\n existing_graph = gdal.Open(tile_dir+\"/graph.png\")\n else:\n existing_ortho = False\n existing_graph = False\n for i in range(3):\n opi_i = opi.GetRasterBand(i+1).ReadAsArray()\n if existing_ortho:\n ortho_i = existing_ortho.GetRasterBand(i+1).ReadAsArray()\n else:\n ortho_i = ortho.GetRasterBand(i+1).ReadAsArray()\n opi_i[(img_mask == 0)] = 0\n ortho_i[(img_mask != 0)] = 0\n ortho.GetRasterBand(i+1).WriteArray(np.add(opi_i, ortho_i))\n if existing_graph:\n graph_i = existing_graph.GetRasterBand(i+1).ReadAsArray()\n else:\n graph_i = graph.GetRasterBand(i+1).ReadAsArray()\n graph_i[(img_mask != 0)] = color[i]\n graph.GetRasterBand(i+1).WriteArray(graph_i)\n existing_ortho = None\n existing_graph = None\n PNG_DRIVER.CreateCopy(tile_dir+\"/ortho.png\", ortho)\n PNG_DRIVER.CreateCopy(tile_dir+\"/graph.png\", graph)",
"def test_hue(self):\n thispath = os.path.dirname(__file__)\n impath = os.path.join(\"test\", \"737.jpg\")\n impath2 = os.path.join(\"test\", \"738.jpg\")\n \n img = cv2.imread(os.path.join(thispath, impath))\n img2 = cv2.imread(os.path.join(thispath, impath2))\n colorextr = ColorFeatureExtracter(img)\n colorextr2 = ColorFeatureExtracter(img2)\n print(colorextr.CompareFeatures(colorextr2.ComputeFeatures(),colorextr.ComputeFeatures()))\n # ... and then evaluate the output",
"def rebin_image(self):\r\n\r\n # bin the image down to smaller size by combining groups of bins\r\n\r\n print('Rebinning image')\r\n\r\n sh = self.imageData.shape\r\n\r\n if self.binsize > 1 or self.zbinsize > 1:\r\n\r\n nredx = int(sh[1]/self.binsize)\r\n\r\n nredy = int(sh[2]/self.binsize)\r\n\r\n nredz = int(self.imageData.shape[0]/self.zbinsize)\r\n print('nredx,nredy,nredz: ',[nredx,nredy,nredz])\r\n\r\n self.imageData = self.bin_ndarray(self.imageData, new_shape=(nredz, nredx, nredy), operation='mean')\r\n\r\n if nredz > 1:\r\n\r\n beforeFrames = self.nFrames\r\n\r\n self.nFrames = self.imageData.shape[0]\r\n\r\n self.framerate = self.nFrames/(self.nrepetitions*self.period)\r\n\r\n self.times = np.arange(0, self.nFrames/self.framerate, 1.0/self.framerate)\r\n\r\n print(' Image Rebinned')\r\n\r\n self.print_image_info()",
"def _process_img_rgb(self, sensor_data):\n img = np.array(sensor_data.raw_data).reshape((self.img_y, self.img_x, 4))\n img = img[:, :, :3] # sensor is actualy rgba, we dont need alpha values\n self.rgb = img # need to scale rgb values to be {0,1}",
"def compute_img(self):\r\n self.load_img()\r\n self.check_shape()\r\n self.convert_img()\r\n self.img_computed = True",
"def test_RGB_mode():\n\n model = Instafilter(\"Lo-Fi\")\n\n f_image = __local__ / \"Normal.jpg\"\n\n img1 = model(f_image)\n img2 = model(f_image, is_RGB=True)\n\n diff = (img1 - img2).sum()\n\n assert abs(diff) > 0",
"def set_avg_rgb_group(poly_cstr, lyrsql, updatesql, rast_cstr):\n ds = gdal.Open(rast_cstr)\n georef = ds.GetGeoTransform()\n rgb = ds.ReadAsArray()\n assert rgb.shape[0] == 3\n img_shape = rgb.shape[1:]\n extent = get_extent(georef, img_shape)\n LOG.info(\"Extent: %s\", extent)\n vec_ds, lyr = open(poly_cstr, layersql=lyrsql, extent=extent, open_for_update=True)\n ldefn = lyr.GetLayerDefn()\n int_attr_name = ldefn.GetFieldDefn(0).name\n id_attr_name = ldefn.GetFieldDefn(1).name\n mask = just_burn_layer(lyr, georef, img_shape, attr=int_attr_name, dtype=np.int32, all_touched=False)\n LOG.info(\"Done burning - setting attr in %d features\", lyr.GetFeatureCount())\n n_ok = 0\n for n, feat in enumerate(lyr):\n if n % 100 == 0:\n LOG.info(\"Done: %d, ok: %d\", n, n_ok)\n int_id = feat[int_attr_name]\n group_id = feat[id_attr_name]\n I, J = np.where(mask == int_id)\n if I.size > 0:\n n_ok += 1\n r = int(round(np.sqrt(((rgb[0, I, J].astype(np.float64) ** 2).mean()))))\n g = int(round(np.sqrt(((rgb[1, I, J].astype(np.float64) ** 2).mean()))))\n b = int(round(np.sqrt(((rgb[2, I, J].astype(np.float64) ** 2).mean()))))\n if n_ok % 100 == 0:\n LOG.info(\"size: %d, sq-mean was %d, while raw mean red is: %.1f\",\n I.size, r, rgb[0, I, J].astype(np.float64).mean())\n rgb_str = '{},{},{}'.format(r, g, b)\n vec_ds.ExecuteSQL(updatesql.format(rgb_str, group_id))",
"def hash_images(default_pack_dir, resource_patches_dir):\n\n # used for printing completion state during long-running task\n file_total = sum(len(files) for _, _, files in os.walk(default_pack_dir))\n file_count = 0\n file_checkpoint = 0\n\n # for each image in the original pack, list all textures in the resource pack\n image_hashes = defaultdict(list) # [image_hash] => [resource_pack_paths]\n for file_dir, _, file_names in os.walk(default_pack_dir):\n for file_name in file_names:\n\n file_count += 1\n if file_count / file_total > file_checkpoint:\n print(f\"Detection status: {file_checkpoint:.0%}\")\n file_checkpoint += .05\n\n # .mcmeta files are also ported when the associated .png is ported\n if not file_name.endswith(\".png\"):\n continue\n\n file_path = os.path.join(file_dir, file_name)\n relative_path = file_path.replace(default_pack_dir, \"\")\n\n # don't include minecraft in ports\n if get_domain(relative_path) == \"minecraft\":\n continue\n\n # in most cases this will only contain one, as mod patches don't typically overwrite each other\n patch_names = get_patch_names(resource_patches_dir, relative_path)\n\n # skip textures that have no patch that overwrites it\n if not patch_names:\n continue\n\n # skip transparent images. This check is done last because it is relatively heavy\n try:\n if np.array(Image.open(file_path).convert('RGBA').split()[-1]).sum() == 0:\n # print(f\"skipping transparent file: {file_path}\")\n continue\n except:\n pass\n\n # add this image/patch data to the [image_hash] => [resource_patch_paths] mapping\n with open(file_path, 'rb') as image_file:\n image_hash = hashlib.md5(image_file.read()).hexdigest()\n patch_paths = (os.path.join(patch_name, *relative_path.split(os.sep)) for patch_name in patch_names)\n image_hashes[image_hash].extend(patch_paths)\n\n return image_hashes",
"def _calculate_square_metric(self, binary_image, transform, size):\n center = transform.trans\n angle = transform.rot\n\n # Create cache key\n key = (angle, (center.x, center.y))\n\n # If this x,y,angle combination has been seen before, retrieve the previous cached result\n if key in self.metric_cache:\n brightness = self.metric_cache[key]\n\n else:\n rotated = binary_image.rotate(angle, center)\n brightness = rotated.calculate_brightness(center, size, size)\n\n # Store in dictionary\n self.metric_cache[key] = brightness\n self.count += 1\n\n return brightness"
]
| [
"0.6444688",
"0.6177515",
"0.5947265",
"0.5923501",
"0.583346",
"0.56014127",
"0.55459803",
"0.5524121",
"0.5515747",
"0.5444249",
"0.54428154",
"0.54388857",
"0.5426759",
"0.5384609",
"0.5372793",
"0.5368804",
"0.53539616",
"0.5330574",
"0.53144616",
"0.5249488",
"0.5230912",
"0.52305144",
"0.522202",
"0.5207771",
"0.51803666",
"0.51772285",
"0.5171639",
"0.51570386",
"0.51543945",
"0.513198"
]
| 0.7241937 | 0 |
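The image-handling entries above include a checksum-verification routine (the verify_image snippet). As a rough standalone illustration of that pattern — a sketch under assumptions, not code taken from any of these records — a file can be streamed through a configurable hash from the standard library and compared against an expected digest; the ChecksumMismatch exception below is a hypothetical stand-in for a project-specific error type.

import hashlib

class ChecksumMismatch(Exception):
    """Hypothetical stand-in for a project-specific checksum error."""

def verify_checksum(path, expected_hex, algo_name="sha256", chunk_size=1 << 20):
    # Stream the file through the chosen hash so large images never sit fully in memory.
    digest = hashlib.new(algo_name)
    with open(path, "rb") as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b""):
            digest.update(chunk)
    actual_hex = digest.hexdigest()
    if actual_hex != expected_hex:
        raise ChecksumMismatch(
            f"{path}: expected {algo_name} digest {expected_hex}, got {actual_hex}"
        )
    return actual_hex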
Download songs from YouTube, for use in our database | def download_songs(playlist_url):
command_string = 'youtube-dl -x --audio-format wav --postprocessor-args "-ar 44100 -ac 1" --output "Songs/%(' \
'title)s_%(id)s.%(ext)s" ' + \
playlist_url
args = shlex.split(command_string)
subprocess.call(args) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def download(self, ctx, *, song):\n try:\n with youtube_dl.YoutubeDL(ytdl_download_format_options) as ydl:\n if \"https://www.youtube.com/\" in song:\n download = ydl.extract_info(song, True)\n else:\n infosearched = ydl.extract_info(\n \"ytsearch:\"+song, False)\n download = ydl.extract_info(\n infosearched['entries'][0]['webpage_url'], True)\n filename = ydl.prepare_filename(download)\n embed = discord.Embed(\n title=\"Your download is ready\", description=\"Please wait a moment while the file is beeing uploaded\")\n await ctx.send(embed=embed, delete_after=30)\n await ctx.send(file=discord.File(filename))\n os.remove(filename)\n except (youtube_dl.utils.ExtractorError, youtube_dl.utils.DownloadError):\n embed = discord.Embed(title=\"Song couldn't be downloaded\", description=(\"Song:\"+song))\n await ctx.send(embed=embed)",
"def download_from_youtube():\n linkinput = input(\"Enter the url you want to download: \")\n youtube_object = Youtube(linkinput)\n youtube_object.youtube()",
"def download_song(song):\n ydl.download([song])\n print(\"Finished downloading and converting: \" + song)",
"def download_all_videos(self, dl_limit=10):\r\n counter = dl_limit\r\n self.video_link_title_keylist = self.video_link_title_dict.keys()\r\n music = []\r\n for title in self.video_link_title_keylist:\r\n try:\r\n title = title.encode('ascii')\r\n # print 'downloading title with counter: ', counter\r\n if not counter:\r\n return random.choice(music) #some margin for randomness, first result isnt always accurate, (gets slower...)\r\n print 'downloading title: ', title\r\n\r\n self.add_result(\"Dowloaded_Song\", title)\r\n\r\n path = self.download_video(self.video_link_title_dict[title], title)\r\n music.append(path)\r\n counter = counter - 1\r\n except:\r\n print \"illegal characters in youtube name\" + title + \"\\n trying next result\"",
"async def youtube(self, ctx, *args):\n if not args:\n await ctx.send(\"usage: `>youtube [search string]`\")\n return\n search_string = \" \".join(args)\n search_string = urllib.parse.urlencode({'search_query': search_string})\n response = requests.get('http://www.youtube.com/results?' + search_string + \"&hl=en_US&app=desktop\")\n if response.status_code == 200:\n search_results = re.findall('href=\\\\\"\\\\/watch\\\\?v=(.{11})', response.content.decode())\n try:\n first_result_url = 'http://www.youtube.com/watch?v=' + search_results[0]\n except IndexError:\n with open('downloads/yt_dump.txt', 'w') as f:\n f.write(response.content.decode())\n #print(response.is_redirect)\n return await ctx.send(\"Found nothing!\")\n await ctx.send(first_result_url)\n self.logger.info(misolog.format_log(ctx, f\"{first_result_url}\"))\n else:\n await ctx.send(\"Error: status code \" + str(response.status_code))\n self.logger.info(misolog.format_log(ctx, f\"error{response.status_code}\"))",
"def download_audio_from_youtube(youtube_link: str):\r\n with st.spinner(\"Extracting audio from Youtube...\"):\r\n try:\r\n a = pytube.YouTube(youtube_link).streams.first().download('files/','video_for_audio') # Download video from youtube\r\n b = ffmpg.ffmpeg_extract_audio('files/video_for_audio.mp4','files/audio.mp3') # extract sound and save as mp3\r\n os.remove('files/video_for_audio.mp4') # remove unecessary video\r\n # Release the process from the downloaded files\r\n del a, b\r\n st.success(\"Sound was extracted successfully from the youtube video!\")\r\n except:\r\n st.error(\"Unexpected error has occured, please try again!\")",
"def download_songs(**kwargs):\n for url in kwargs[\"songs\"][\"urls\"]:\n log.debug(\"Downloading to %s\", url[\"save_path\"])\n reference_file = DOWNLOAD_LIST\n track_db = write_tracks(reference_file, kwargs[\"songs\"])\n os.rename(reference_file, kwargs[\"output_dir\"] + \"/\" + reference_file)\n reference_file = str(kwargs[\"output_dir\"]) + \"/\" + reference_file\n kwargs[\"reference_file\"] = reference_file\n kwargs[\"track_db\"] = track_db\n if kwargs[\"multi_core\"] > 1:\n multicore_find_and_download_songs(kwargs)\n else:\n find_and_download_songs(kwargs)\n os.remove(reference_file)",
"def download_audio(self, link: str):\n\t\t# Download file to specified directory\n\t\toptions = {'outtmpl': 'data/music/%(title)s.%(ext)s'}\n\t\tyt_dl = youtube_dl.YoutubeDL(options)\n\t\tlink_info = yt_dl.extract_info(link, download=True) # Video Download\n\t\ttitle = link_info['title']\n\t\t# Find file in downloaded directory\n\t\tsource = None\n\t\tfor file in os.listdir(\"data/music/\"):\n\t\t\tfile_title, file_ext = file.title().split(\".\")\n\t\t\tif self.caseless_equals(title, file_title):\n\t\t\t\tsource = f\"data/music/{title}.{file_ext.lower()}\"\n\t\t\t\tbreak # End loop\n\t\t# Add audio source to playlist\n\t\tself.playlist.append({\n\t\t\t\"title\": title,\n\t\t\t\"link\": link,\n\t\t\t\"source\": source,\n\t\t\t\"loop\": False\n\t\t})",
"def download_track(self, track = None, url = None):\n # check that track doesn't exist\n if url == None or track == None:\n return\n\n print \"Retrieving the name of the track.\"\n filename = self.get_track_filename(url)\n\n print \"Filename found: \" + filename\n \n if (filename, track.user[\"username\"]) in self.past_songs_db_data or \\\n (filename, \"\") in self.past_songs_db_data or \\\n os.path.isfile(filename): \n print \"File exists\"\n else:\n print \"Downloading\"\n filename = wget.download(url)\n self.set_track_metadata(track, filename, url)\n mp3_name = filename[:-4] + \".mp3\"\n\n # Save filename for future reference\n self.past_songs_db.write(filename + \"\\n\")\n self.past_songs_db_data.append((filename, track.user[\"username\"]))\n \n if not filename.endswith(\".mp3\"):\n self.past_songs_db.write(mp3_name + \"\\n\")\n self.past_songs_db_data.append((mp3_name, track.user[\"username\"]))\n \n print",
"def download_songs(songs, download_directory, format_string, skip_mp3,\n keep_playlist_order=False, no_overwrites=False, file_name_f=default_filename):\n overwrites = not no_overwrites\n log.debug(f\"Downloading to {download_directory}\")\n for song in songs:\n query = f\"{song.get('artist')} - {song.get('name')} Lyrics\".replace(\":\", \"\").replace(\"\\\"\", \"\")\n download_archive = path.join(download_directory, 'downloaded_songs.txt')\n\n file_name = file_name_f(song)\n file_path = path.join(download_directory, file_name)\n\n outtmpl = f\"{file_path}.%(ext)s\"\n ydl_opts = {\n 'format': format_string,\n 'download_archive': download_archive,\n 'outtmpl': outtmpl,\n 'default_search': 'ytsearch',\n 'noplaylist': True,\n 'postprocessor_args': ['-metadata', 'title=' + song.get('name'),\n '-metadata', 'artist=' + song.get('artist'),\n '-metadata', 'album=' + song.get('album')]\n }\n if not skip_mp3:\n mp3_postprocess_opts = {\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }\n ydl_opts['postprocessors'] = [mp3_postprocess_opts.copy()]\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n try:\n ydl.download([query])\n except Exception as e:\n log.debug(e)\n print('Failed to download: {}, please ensure YouTubeDL is up-to-date. '.format(query))\n continue\n\n if not skip_mp3:\n mp3filename = f\"{file_path}.mp3\"\n mp3file_path = path.join(mp3filename)\n if overwrites or not path.exists(mp3file_path):\n try:\n song_file = MP3(mp3file_path, ID3=EasyID3)\n except mutagen.MutagenError as e:\n log.debug(e)\n print('Failed to download: {}, please ensure YouTubeDL is up-to-date. '.format(query))\n continue\n song_file['date'] = song.get('year')\n if keep_playlist_order:\n song_file['tracknumber'] = str(song.get('playlist_num'))\n else:\n song_file['tracknumber'] = str(song.get('num')) + '/' + str(song.get('num_tracks'))\n song_file['genre'] = song.get('genre')\n song_file.save()\n song_file = MP3(mp3filename, ID3=ID3)\n cover = song.get('cover')\n if cover is not None:\n if cover.lower().startswith('http'):\n req = urllib.request.Request(cover)\n else:\n raise ValueError from None\n with urllib.request.urlopen(req) as resp: # nosec\n song_file.tags['APIC'] = APIC(\n encoding=3,\n mime='image/jpeg',\n type=3, desc=u'Cover',\n data=resp.read()\n )\n song_file.save()\n else:\n print('File {} already exists, we do not overwrite it '.format(mp3filename))",
"def download_videos(download_limit=6):\n videos = []\n for fname in os.listdir('yt_api_data'):\n videos += load_video_data(fname)\n vids_downloaded = 0\n excluded_vids = get_excluded_videos()\n for video_id, title in videos:\n if download_limit != 'all' and vids_downloaded == download_limit:\n break\n title = title.replace(' ', '_')\n mkv_path = \"videos/\" + title + \".mkv\"\n mp4_path = \"videos/\" + title + \".mp4\"\n download_fpath = \"videos/\" + title\n if not check_excluded_list(excluded_vids, title) and not os.path.isfile(mkv_path) and not os.path.isfile(mp4_path):\n print(colored(str(vids_downloaded + 1) + \": \", \"yellow\") + colored(video_id + \" downloading: \" + download_fpath, \"green\"))\n command_prefix = \"youtube-dl -o \" + download_fpath\n if video_id[0] == '-': \n os.system(command_prefix + \" -- \" + video_id)\n else:\n os.system(command_prefix + \" \" + video_id)\n vids_downloaded += 1\n else:\n print(colored(\"skipping download: \" + title + \"with youtube_id: \" + video_id, \"yellow\"))",
"def get_videos_in_playlist(self):\n\n self.ydl = youtube_dl.YoutubeDL()\n # uses the youtube_dl as a context manager\n with self.ydl:\n self.result = self.ydl.extract_info(\n self.url, extra_info={'listformats': True}, download=False)\n for video in (self. result['entries']):\n video_id = video['id']\n self. url = f'https://www.youtube.com/watch?v={video_id}'\n self. show_formats()",
"def download_whole(no_interval):\n print(os.getcwd())\n SAVE_PATH = 'tmp'\n ydl_opts = {\"nocheckcertificate\": True, \"noplaylist\": True,\n 'outtmpl': f'{SAVE_PATH}/%(title)s.%(ext)s'}\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n for video in range(len(no_interval)):\n try:\n ydl.download([no_interval[video]])\n except youtube_dl.utils.ExtractorError or youtube_dl.utils.DownloadError:\n print(f\"Couldn't download {no_interval[video]}\")\n continue",
"def youtube_download(url, output_dir='.', merge=True, info_only=False):\n \n id = match1(url, r'youtu.be/([^/]+)') or parse_query_param(url, 'v')\n assert id\n \n youtube_download_by_id(id, title=None, output_dir=output_dir, merge=merge, info_only=info_only)",
"def download_all(self):\r\n download_path = os.path.join(self.download_path, self.username)\r\n already_downloaded = []\r\n successful_downloads = []\r\n failed_downloads = []\r\n if not os.path.exists(download_path):\r\n os.makedirs(download_path)\r\n elif not os.path.isdir(download_path):\r\n raise NotADirectoryError(\"Download path is not a directory: \" + download_path)\r\n elif self.skip_downloaded:\r\n for item in os.listdir(download_path):\r\n file_path = str(os.path.join(download_path, item))\r\n if os.path.isfile(file_path):\r\n parsed_file = self._parse_file_name(os.path.basename(file_path))\r\n if parsed_file is not None:\r\n already_downloaded.append(parsed_file[\"id\"])\r\n for index, item in enumerate(self.videos):\r\n # Don't download it if the user has set that option, and the tiktok already exists on the disk\r\n if item[\"id\"] in already_downloaded:\r\n logger.info(\"Already downloaded video with id: \" + item[\"id\"])\r\n continue\r\n file_name = self._format_file_name(item[\"createTime\"], item[\"id\"])\r\n file_path = os.path.join(download_path, file_name)\r\n logger.info(\"Downloading video: \" + file_name + \" (\" + str(index + 1) + \"/\" + str(len(self.videos)) + \")\")\r\n video_url = self._format_video_url(item)\r\n success = self.download_video(file_path, video_url, item[\"createTime\"])\r\n if success:\r\n successful_downloads.append(video_url)\r\n else:\r\n failed_downloads.append(video_url)\r\n sleep_time = random.uniform(self.sleep_min, self.sleep_max)\r\n logger.info(\"Sleeping for: \" + str(sleep_time) + \" seconds\")\r\n sleep(sleep_time)\r\n logger.info(\"Processed all {} videos\".format(self.video_count))\r\n logger.debug(\"Fallback counter: \" + str(self.fallback_counter))\r\n logger.debug(\"YouTube-dl DownloadError counter: \" + str(self.fallback_counter))\r\n logger.debug(\"Other error counter: \" + str(self.other_error_counter))\r\n return {\"successful_downloads\": successful_downloads,\r\n \"failed_downloads\": failed_downloads,\r\n \"skipped_downloads\": already_downloaded}",
"async def play(self, ctx, url):\n server_id = ctx.message.server.id\n requester = ctx.message.author\n #refuse command if we don't know which voice channel to join\n if not self.in_voice(server_id) and not requester.voice.voice_channel:\n await ctx.bot.send_message(ctx.message.channel, \"Dude, get in voice first.\")\n return\n #warn user that the bot won't jump channels while playing\n if self.in_voice(server_id) and not self.user_in_channel(server_id, requester):\n vcname = self.get_server_dict(server_id)['voice'].channel.name\n await ctx.bot.send_message(ctx.message.channel, \"I'm already playing in {}. Get in.\".format(vcname))\n return\n #create ytdl instance\n #set quiet: True if needed\n await ctx.bot.send_typing(ctx.message.channel)\n ytdl = YoutubeDL(self._default_options)\n try:\n info = ytdl.extract_info(url, download=False)\n except DownloadError:\n #url was bullshit\n search_kw = ctx.message.content[5:]\n info = await self._find(ctx.bot, search_kw)\n if not info:\n #no hits\n await ctx.bot.send_message(ctx.message.channel, \"No media found.\")\n if 'entries' in info:\n #it's a playlist\n #just grab the first item\n info = info['entries'][0]\n #at this point info['url'] should point to our preferred format\n download_url = info['url']\n #get media attributes\n title = info.get('title')\n duration = ''\n if info.get('is_live'):\n duration = 'LIVE'\n else:\n seconds = info.get('duration')\n if seconds:\n duration = str(datetime.timedelta(seconds=seconds))\n nick = self.get_nick(requester)\n #add to queue\n self.enqueue(server_id, download_url, title, duration, nick)\n await ctx.bot.send_message(ctx.message.channel, self.format_song_display('+', title, duration, nick))\n #join user's voice channel unless already in voice\n if not self.in_voice(server_id):\n await self._join(ctx.bot, server_id, requester.voice.voice_channel)\n #start playback unless already playing\n if not self.is_playing(server_id):\n await self._play(ctx.bot, server_id)",
"def download(target_url):\n program_location = sys.executable\n program_name = \"youtube-dl.exe\"\n # Define arguments. see this url for help\n # https://github.com/rg3/youtube-dl\n ignore_errors = \"-i\"\n safe_filenames = \"--restrict-filenames\"\n output_arg = \"-o\"\n output_template = \"download\\%(uploader)s\\%(playlist)s\\%(title)s-%(id)s.%(ext)s\"\n command = [program_name, ignore_errors, safe_filenames, output_arg, output_template, target_url]\n result = subprocess.call(command)\n print \"Command result: \", result",
"def fetch_song_data(url):\r\n response = requests.get(url)\r\n return response.text",
"def do_downloads(filename1=\"og\", filename2=\"lyrical\", video_id=DEFALT_VIDEO_ID):\n original_video_url = youtube_id_to_url(video_id)\n download_from_url(original_video_url, filename1)\n lyrics_video_url = get_lyrics_url(original_video_url)\n download_from_url(lyrics_video_url, filename2)\n\n return filename1, filename2",
"async def youtube(self, ctx, *search):\n voice = discord.utils.get(self.bot.voice_clients, guild=ctx.guild)\n\n if not (ctx.author.voice or voice):\n await ctx.message.add_reaction('\\U0001F615')\n await ctx.send(\"Not in a voice channel.\")\n return\n \n YDL_OPTS = {'default_search': 'auto', 'format': 'bestaudio',\n 'noplaylist': 'True'}\n FFMPEG_OPTS = {'before_options': '-reconnect 1 -reconnect_streamed 1 '\n '-reconnect_delay_max 5',\n 'options': '-vn'}\n \n with youtube_dl.YoutubeDL(YDL_OPTS) as ydl:\n try:\n info = ydl.extract_info(' '.join(search), download=False)\n except youtube_dl.utils.DownloadError:\n await ctx.message.add_reaction('\\U0001F615');\n await ctx.send(f\"Couldn't stream that sound.\")\n return\n\n if 'entries' in info:\n if info['entries']:\n info = info['entries'][0]\n else:\n await ctx.send(f\"No results found for `{search}`.\")\n return\n \n if not voice:\n await ctx.message.add_reaction('\\U0001F615')\n await self.join(ctx)\n\n voice = discord.utils.get(self.bot.voice_clients, guild=ctx.guild)\n \n if voice:\n if voice.is_playing():\n voice.stop()\n \n URL = info['formats'][0]['url']\n title = info['title']\n voice.play(discord.FFmpegPCMAudio(executable=os.environ['FFMPEG_PATH'],\n source=URL, **FFMPEG_OPTS))\n \n await ctx.send(f\"Playing `{title}`.\")",
"def find_and_download_songs(kwargs):\n sponsorblock_postprocessor = []\n reference_file = kwargs[\"reference_file\"]\n files = {}\n with open(reference_file, \"r\", encoding=\"utf-8\") as file:\n for line in file:\n temp = line.split(\";\")\n name, artist, album, i = (\n temp[0],\n temp[1],\n temp[4],\n int(temp[-1].replace(\"\\n\", \"\")),\n )\n\n query = f\"{artist} - {name} Lyrics\".replace(\":\", \"\").replace('\"', \"\")\n print(f\"Initiating download for {query}.\")\n\n file_name = kwargs[\"file_name_f\"](\n name=name, artist=artist, track_num=kwargs[\"track_db\"][i].get(\"playlist_num\")\n )\n\n if kwargs[\"use_sponsorblock\"][0].lower() == \"y\":\n sponsorblock_postprocessor = [\n {\n \"key\": \"SponsorBlock\",\n \"categories\": [\"skip_non_music_sections\"],\n },\n {\n \"key\": \"ModifyChapters\",\n \"remove_sponsor_segments\": [\"music_offtopic\"],\n \"force_keyframes\": True,\n },\n ]\n save_path = kwargs[\"track_db\"][i][\"save_path\"]\n file_path = path.join(save_path, file_name)\n\n mp3file_path = f\"{file_path}.mp3\"\n\n if save_path not in files:\n path_files = set()\n files[save_path] = path_files\n else:\n path_files = files[save_path]\n\n path_files.add(f\"{file_name}.mp3\")\n\n if (\n kwargs[\"no_overwrites\"]\n and not kwargs[\"skip_mp3\"]\n and path.exists(mp3file_path)\n ):\n print(f\"File {mp3file_path} already exists, we do not overwrite it \")\n continue\n\n outtmpl = f\"{file_path}.%(ext)s\"\n ydl_opts = {\n \"proxy\": kwargs.get(\"proxy\"),\n \"default_search\": \"ytsearch\",\n \"format\": \"bestaudio/best\",\n \"outtmpl\": outtmpl,\n \"postprocessors\": sponsorblock_postprocessor,\n \"noplaylist\": True,\n \"no_color\": False,\n \"postprocessor_args\": [\n \"-metadata\",\n \"title=\" + name,\n \"-metadata\",\n \"artist=\" + artist,\n \"-metadata\",\n \"album=\" + album,\n ],\n }\n if not kwargs[\"skip_mp3\"]:\n mp3_postprocess_opts = {\n \"key\": \"FFmpegExtractAudio\",\n \"preferredcodec\": \"mp3\",\n \"preferredquality\": \"192\",\n }\n ydl_opts[\"postprocessors\"].append(mp3_postprocess_opts.copy())\n with yt_dlp.YoutubeDL(ydl_opts) as ydl:\n try:\n ydl.download([query])\n except Exception as e: # skipcq: PYL-W0703\n log.debug(e)\n print(f\"Failed to download {name}, make sure yt_dlp is up to date\")\n if not kwargs[\"skip_mp3\"]:\n set_tags(temp, mp3file_path, kwargs)\n if kwargs[\"remove_trailing_tracks\"] == \"y\":\n for save_path in files:\n for f in os.listdir(save_path):\n if f not in files[save_path]:\n print(f\"File {f} is not in the playlist anymore, we delete it\")\n os.remove(path.join(save_path, f))",
"def get_liked_videos(self):\n request = self.youtube_client.playlists().list(\n part=\"snippet\",\n mine=True\n )\n playlistid_ = 0\n response = request.execute()\n for item in response[\"items\"]:\n if item[\"snippet\"][\"title\"] == self.nameOfPlaylist:\n playlistid_ = item[\"id\"]\n if playlistid_ == 0:\n raise\n request2 = self.youtube_client.playlistItems().list(\n part=\"snippet\",\n playlistId=playlistid_,\n maxResults=\"50\"\n )\n response = request2.execute()\n nextToken = response.get('nextPageToken')\n while('nextPageToken' in response):\n nextpage = self.youtube_client.playlistItems().list(\n part=\"snippet\",\n playlistId=playlistid_,\n maxResults=\"50\",\n pageToken=nextToken\n ).execute()\n response['items'] += nextpage['items']\n if 'nextPageToken' not in nextpage:\n response.pop('nextPageToken',None)\n else:\n nextToken = nextpage['nextPageToken']\n for item in response[\"items\"]:\n video_title = item[\"snippet\"][\"title\"]\n youtube_url = f\"https://www.youtube.com/watch?v={item['snippet']['resourceId']['videoId']}\"\n\n #use youtube_dl to collect the song name & artist name\n try:\n video = youtube_dl.YoutubeDL({}).extract_info(youtube_url,download=False)\n except:\n continue\n song_name= video[\"track\"]\n artist = video[\"artist\"]\n\n #save all important info\n songUri= self.get_spotify_uri(song_name,artist)\n if songUri != None:\n self.all_song_info[video_title] ={\n \"youtube_url\": youtube_url,\n \"song_name\": song_name,\n \"artist\": artist,\n\n #add the uri, easy to get song to put into playlist\n \"spotify_uri\": self.get_spotify_uri(song_name,artist)\n }",
"def download_video_data(self):\n\n def scrape_url(url):\n \"\"\"Scrape the video list, youtube_dl does all the heavy lifting\"\"\"\n ydl_opts = {\n \"ignoreerrors\": True, # Skip private and unavaliable videos\n }\n\n ydl = youtube_dl.YoutubeDL(ydl_opts)\n\n with ydl:\n result_ydl = ydl.extract_info(\n url,\n download=False # No download needed, only the info\n )\n\n logger.debug('Url scraped {}', url)\n if 'entries' in result_ydl:\n # It's a playlist or a list of videos\n return result_ydl['entries']\n # Just a video\n return [result_ydl]\n\n youtube_list = sum((scrape_url(url) for url in self.youtube_lists), [])\n for youtube_video_data in youtube_list:\n if youtube_video_data: # Valid video\n self.youtube_videos.append(\n Video.from_youtube(\n video_data=youtube_video_data, event=self))\n else:\n logger.warning('Null youtube video')",
"def download_by_link(link: str, videoid: str) -> [str, str]:\n\t# set youtube_dl arguments \n\tydl_opts = {\n\t\t'quiet': False, # don't write in output\n\t\t'no_warnings': True, # write warnings in output\n\t\t'format': \"bestaudio/best\", # download best audio quality\n\t\t'format': 'mp4', # setup format webm\n\t\t'outtmpl': '%(name)s' + str(videoid) + '.%(ext)s', # setup output name \n\t\t'postprocessor': [{ # dk how this need work, but if this not setup audio didn't download\n\t\t\t'key': \"FFmpegExtractAudioPP\",\n\t\t\t'preferredquality': \"512\",\n\t\t }],\n\t}\n\t# start download audio\n\twith youtube_dl.YoutubeDL(ydl_opts) as ydl:\n\t\tdata = ydl.extract_info(link) # exctrat info about audio\n\tfake_name = \"NA\" + str(videoid)\n\t# TODO: think about this query \n\t# refactoring title \n\ttitle = data.pop('title')\n\ttitle = re.sub(r'[^\\w]', ' ', title)\n\ttitle = translate(title)\n\ttitle = title.replace(' ', '_')\n\treturn fake_name, title",
"def podcast_download(self):\r\n warnings.filterwarnings(\"ignore\", category=UnicodeWarning)\r\n now = datetime.datetime.now()\r\n\r\n for podcast_file in self.podcast_list:\r\n published, name, link, title = podcast_file\r\n if self.podcast_list != []:\r\n line_file = (published + ';' + title + ';' + name + ';' + link).encode(\"utf-8\") \r\n if line_file in open(self.download_log).read():\r\n pass\r\n else:\r\n title = unicodedata.normalize('NFKD', title).encode('ascii', 'ignore')\r\n download_folder = os.path.join('downloads', title)\r\n if not os.path.exists(download_folder): \r\n os.makedirs(download_folder)\r\n try:\r\n published = str(parser.parse(published))[:10]\r\n except IOError as error:\r\n print 'Error' + (error) + ': File - ' + str(title)\r\n download_folder = os.path.join(download_folder, published)\r\n if not os.path.exists(download_folder): \r\n os.makedirs(download_folder)\r\n namefile_unicode = link[link.rfind('/')+1:]\r\n namefile_str = unicodedata.normalize('NFKD', \r\n namefile_unicode).encode('ascii', 'ignore')\r\n namefile_str = namefile_str.decode('utf-8', 'ignore').encode(\"utf-8\")\r\n if '.mp3' in namefile_str:\r\n len_name = namefile_str.index('.mp3')\r\n elif '.MP3' in namefile_str:\r\n len_name = namefile_str.index('.MP3')\r\n namefile_str = namefile_str[:len_name + 4]\r\n fileoutput = os.path.join(download_folder, namefile_str)\r\n name = unicodedata.normalize('NFKD', name).encode('ascii', 'ignore')\r\n print str(published) + '; ' + name\r\n ## downlink\r\n download_file(link, fileoutput) \r\n ## tagging\r\n mp3_tagging(fileoutput, podcast_file)\r\n ## write log\r\n write_file(self.download_log, line_file)\r\n end = datetime.datetime.now()\r\n print '\\r' + 'Download Time = ' + str(end-now) + '\\r'\r\n return None",
"def download_song(url, filename):\n page = requests.get(url, headers=HEADERS)\n if page.status_code == 200: # OK\n with open(filename, 'w') as outf:\n outf.write(page.text)\n else:\n print(f'download failed with status code {page.status_code}!')",
"def download_cdn_videos(filenames,sub_urls,handout_urls,video_urls, target_dir):\n \"\"\" using a simple file downloader \"\"\"\n for i, v in enumerate(video_urls):\n filename_prefix = str(i+1).zfill(2) + '-'\n #original_filename = v.rsplit('/', 1)[1]\n video_filename = filename_prefix + filenames[i] + '.mp4'\n sub_filename = filename_prefix + filenames[i] + '.srt'\n handout_filename = filename_prefix + filenames[i] + '.srt'\n video_path = os.path.join(target_dir, video_filename)\n sub_path = os.path.join(target_dir, sub_filename)\n handout_path = os.path.join(target_dir, handout_filename)\n #print('[debug] GET %s' % v)\n print('[download] Destination: %s' % video_path)\n v = quote(v,safe=\":/\")\n if len(v) != YOUTUBE_VIDEO_ID_LENGTH:\n req = Request(v) \n try:\n video = urlopen(v)\n fileSize = int(video.headers['content-length'])\n finish = False\n existSize = 0\n if os.path.exists(video_path):\n output = open(video_path,\"ab\")\n existSize = os.path.getsize(video_path)\n #If the file exists, then only download the remainder\n if existSize < fileSize:\n #print(\"[debug] bytes range is: %s-%s\" % (existSize,fileSize))\n req.headers[\"Range\"]= \"bytes=%s-%s\" % (existSize,fileSize)\n video = urlopen(req)\n else:\n finish = True\n else:\n output = open(video_path,\"wb\")\n if finish == False:\n file_size_dl = existSize\n block_sz = 262144\n while True:\n buffer = video.read(block_sz)\n if not buffer:\n break\n \n file_size_dl += len(buffer)\n output.write(buffer)\n status = r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl * 100. / fileSize)\n status = status + chr(8)*(len(status)+1)\n sys.stdout.write(status)\n sys.stdout.flush()\n \n output.close()\n\n except URLError as e:\n print(\"[warning]error: %r when downloading %s\" % (e.reason,v) )\n\n else:\n download_youtube_video(v,video_path)\n \n if sub_urls[i] != \"\":\n #print('[debug] GET %s' % BASE_URL+sub_urls[i])\n if not os.path.exists(sub_path):\n subs_string = edx_get_subtitle(sub_urls[i], headers)\n if subs_string:\n print('[info] Writing edX subtitles: %s' % sub_path)\n open(os.path.join(os.getcwd(), sub_path),\n 'wb+').write(subs_string.encode('utf-8'))\n\n if handout_urls[i] != \"\":\n #print('[debug] GET %s' % BASE_URL+sub_urls[i])\n if not os.path.exists(handout_path):\n handout_content = urlopen(BASE_URL+handout_urls[i]).read()\n if handout_content:\n print('[info] Writing handout: %s' % handout_path)\n open(os.path.join(os.getcwd(), handout_path),\n 'wb+').write(handout_content)\n #srtfile = urlopen(BASE_URL+sub_urls[i])\n #output = open(srt_path,'wb')\n #output.write(srtfile.read())\n #output.close()",
"def download(idd, path):\n print(f'[{script}]: Downloading YT video \"{idd}\"...') if verbosity >= 1 else None\n\n try:\n yt = pytube.YouTube(\"https://www.youtube.com/watch?v=\" + idd)\n stream = yt.streams.filter(progressive=True).first()\n stream.download(path, filename=idd)\n except Exception:\n print(f'[{script}]: Failed download of YT video \"{idd}\".')\n return None\n\n data = {\n \"idd\": idd,\n \"abr\": stream.abr,\n \"acodec\": stream.audio_codec,\n \"bitrate\": stream.bitrate,\n \"codecs\": stream.codecs,\n \"fps\": stream.fps,\n \"mime\": stream.mime_type,\n \"res\": stream.resolution,\n \"vcodec\": stream.video_codec,\n \"size\": stream._filesize,\n \"frames\": stream.fps * yt.length,\n }\n\n file_path = path + \"/\" + data[\"idd\"] + \".mp4\"\n print(\n f'[{script}]: Download successful. Saved to \"{file_path}\".'\n ) if verbosity >= 2 else None\n return data",
"def youtube_dl_latest(args=None):\n args = parse_youtube_dl_arguments(args=args)\n download_videos(channels_file=args.channels_file, hierarchy=args.hierarchy)",
"def extract_data():\n args = arguments()\n\n if args.list is not None:\n songs = utility.get_songs(args.list)\n logger.debug(str(songs))\n if len(songs) != 0:\n logger.info(\"Downloading songs in {}\".format(args.list))\n for song_name in songs:\n logger.debug(song_name)\n args.SONG_NAME = [song_name]\n main(args)\n else:\n logger.info(\"{}: is empty\".format(args.list))\n elif args.SONG_NAME and yt.is_playlist(args.SONG_NAME[0]):\n logger.info(\"Youtube playlist passed...extracting!\")\n songs, playlist_name = yt.get_playlist(\n args.SONG_NAME[0],\n args.proxy,\n args.pl_start,\n args.pl_end,\n args.pl_items\n )\n\n # Check if data is actually returned\n if songs is None:\n logger.error(\"Couldn't extract playlist data!\")\n\n logger.info(\"Playlist: {}\".format(playlist_name))\n logger.info(\"{} songs found\".format(len(songs)))\n\n # Iterate and work on the data.\n url_base = \"https://www.youtube.com/watch?v=\"\n for song in songs:\n args.url = url_base + song[\"url\"]\n\n # Try to pass the title as well, if it's not there\n # that will be handled by ytmdl\n try:\n args.SONG_NAME = [stringutils.remove_yt_words(song[\"title\"])]\n except KeyError:\n pass\n\n main(args)\n else:\n main(args)"
]
| [
"0.72204304",
"0.70007014",
"0.69705606",
"0.6968855",
"0.6955529",
"0.6739802",
"0.6696157",
"0.66440094",
"0.66403437",
"0.6592989",
"0.6551917",
"0.65344065",
"0.6519165",
"0.65043634",
"0.6503763",
"0.6496321",
"0.647971",
"0.64418465",
"0.64357823",
"0.6430905",
"0.6421194",
"0.63948905",
"0.6382434",
"0.63330364",
"0.6325426",
"0.63007814",
"0.62788105",
"0.6239511",
"0.6224253",
"0.6209334"
]
| 0.74034494 | 0 |
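The record above shells out to youtube-dl to pull playlist audio as 44.1 kHz mono WAV. A minimal sketch of the same idea through the yt-dlp Python API — assuming the yt-dlp package and FFmpeg are available; the option keys follow the ones used in the surrounding entries, and the output template mirrors the record's Songs/%(title)s_%(id)s.%(ext)s pattern:

import yt_dlp  # assumes the yt-dlp package is installed and FFmpeg is on PATH

def download_playlist_audio(playlist_url, out_dir="Songs"):
    # Best available audio, extracted to WAV and resampled to 44.1 kHz mono via FFmpeg.
    options = {
        "format": "bestaudio/best",
        "outtmpl": f"{out_dir}/%(title)s_%(id)s.%(ext)s",
        "postprocessors": [{"key": "FFmpegExtractAudio", "preferredcodec": "wav"}],
        "postprocessor_args": ["-ar", "44100", "-ac", "1"],
    }
    with yt_dlp.YoutubeDL(options) as ydl:
        ydl.download([playlist_url])

Keeping the conversion inside the downloader's postprocessor chain avoids the extra shlex/subprocess layer used in the record's shell-command approach.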
Prunes the dictionary containing the words in the message down to the 15 most interesting tokens, ranked by the size of their deviation from the "neutral" value of 0.5. To do this, the function first assigns a spam probability to each word by referencing an established dataset of words and their spam probabilities. If a word is not found in the dataset, it is assigned a fixed default value. | def find_interesting_tokens(test_corpus_words, word_spam_chance_dict):
# Assign probabilities to words in the test corpus based on established dataset of words and spam probabilities.
test_corpus_word_probability_dict = {}
for each_word in test_corpus_words:
# If dataset contains that word, assign previously calculated spam probability.
if each_word.lower() in word_spam_chance_dict.keys():
test_corpus_word_probability_dict[each_word.lower()] = word_spam_chance_dict[each_word.lower()]
# If dataset doesn't contain that word, assign specified spam probability value.
else:
test_corpus_word_probability_dict[each_word.lower()] = 0.4
print("\ncontents of test corpus word probability dictionary: " + str(test_corpus_word_probability_dict))
############################################
# If more than 15 tokens, prune to the most "interesting" 15.
# Determine the 15 tokens with the largest deviation from neutral 0.5.
normalized_word_spam_chance = {}
for key, value in test_corpus_word_probability_dict.items():
# Prevent normalized values = 0.0.
if value == 0.5:
normalized_word_spam_chance[key] = abs(0.51 - value)
else:
normalized_word_spam_chance[key] = abs(0.5 - value)
print("\nnormalized word spam chances: " + str(normalized_word_spam_chance))
############################################
# Sort dictionary so that largest deviations are at the front.
    sorted_dict = sorted(normalized_word_spam_chance.items(), key=lambda item: item[1], reverse=True)
print("\nsorted normalized word spam chances: " + str(sorted_dict))
# Slice so only first 15 key-value pairs are left.
slice_dict = islice(sorted_dict, interesting_tokens_threshold_value)
# Convert to dictionary as islice returns an iterator.
first15 = {}
for each in slice_dict:
first15[each[0]] = each[1]
print("\nfirst 15 tokens with normalized keys and values: " + str(first15))
############################################
# Un-normalize and return to original values by assigning original values.
first15_unnormalized = {}
for key, value in first15.items():
first15_unnormalized[key] = test_corpus_word_probability_dict[key]
print("first 15 tokens un-normalized keys and values: " + str(first15_unnormalized))
return first15_unnormalized | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def individual_word_spam_chance(spam_words_dict, non_spam_words_dict, threshold):\n\n # Combine unique spam and non-spam words into one dictionary with their associated combined occurrences.\n combined_spam_nonspam_word_occurrences = {}\n\n for key, value in spam_words_dict.items():\n if key not in combined_spam_nonspam_word_occurrences:\n combined_spam_nonspam_word_occurrences[key] = value\n else:\n combined_spam_nonspam_word_occurrences[key] += value\n print(\"\\nlower-case only spam word occurrences as part of one combined dictionary: \"\n + str(combined_spam_nonspam_word_occurrences))\n\n for key, value in non_spam_words_dict.items():\n if key not in combined_spam_nonspam_word_occurrences:\n combined_spam_nonspam_word_occurrences[key] = value\n else:\n combined_spam_nonspam_word_occurrences[key] += value\n print(\"lower-case only spam and non-spam word occurrences as part of one combined dictionary: \"\n + str(combined_spam_nonspam_word_occurrences))\n\n ############################################\n\n # Iterate through all spam and non-spam words in combined dictionary and calculate spam probability for each word.\n words_spam_chance = {}\n\n for key, value in combined_spam_nonspam_word_occurrences.items():\n\n # If word is not found in non-spam dictionary set value to 0, otherwise set to value found * 2.\n if key not in non_spam_words_dict:\n good_occurrences = 0\n else:\n good_occurrences = 2 * value\n\n # If word is not found in spam dictionary set value to 0, otherwise set to that value found.\n if key not in spam_words_dict:\n bad_occurrences = 0\n else:\n bad_occurrences = value\n\n # Statistical algorithm to calculate the associated probability for each word.\n # Note to self: don't be an idiot and forget a parentheses messing up your order of operations.\n if good_occurrences + bad_occurrences > threshold:\n probability = max(0.01, min(0.99, min(1.0, bad_occurrences / number_bad_message) /\n (min(1.0, good_occurrences / number_good_messages) +\n min(1.0, bad_occurrences / number_bad_message))))\n else:\n probability = 0.0\n\n # Store to dictionary each word and their associated probability.\n words_spam_chance[key] = probability\n\n # Return our dictionary of stored words spam probabilities.\n return words_spam_chance",
"def message_spam_chance(word_probabilities_dict):\n\n # Remove the keys from the dictionary and store only the associated values.\n word_probability_values = sorted(word_probabilities_dict.values())\n print(\"\\nword spam probability values only, keys removed: \" + str(word_probability_values))\n\n # Calculate the product of all individual word probabilities.\n product_of_probabilities = 1.0\n for each_probability in word_probability_values:\n product_of_probabilities *= each_probability\n print(\"product of individual values: \" + str(product_of_probabilities))\n\n # Determine the complement value of all individual word probabilities.\n word_probability_complement_values = []\n for each_probability in word_probability_values:\n complement = 1.00 - each_probability\n word_probability_complement_values.append(complement)\n print(\"word spam complement probability values: \" + str(word_probability_complement_values))\n\n # Calculate the product of all complement probabilities.\n product_of_complement_probabilities = 1.0\n for each_complement_probability in word_probability_complement_values:\n product_of_complement_probabilities *= each_complement_probability\n print(\"product of complement values: \" + str(product_of_complement_probabilities))\n\n spam_message_probability = product_of_probabilities / \\\n (product_of_probabilities + product_of_complement_probabilities)\n print(\"final probability message is spam: \" + str(spam_message_probability))\n\n return spam_message_probability",
"def naive_bayes_predict(spam_ratio, words, spamicity, sms):\n res = set(sms.split())\n\n product = 1\n for word in res:\n if word in words:\n heur = spamicity[words[word]]\n product *= heur\n \n is_spam = spam_ratio * product\n # print(is_spam)\n return is_spam",
"def filter_dataset(self):\n articles = list(self.data.keys())\n for article in articles:\n if self.levels[0] not in self.data[article].keys() or \\\n self.levels[1] not in self.data[article].keys():\n del self.data[article]\n continue\n for level in self.data[article].keys():\n self.data[article][level] = [sent for sent in self.data[article][level]\n if len(sent[\"strings\"]) <= self.max_words]\n if len(self.data[article][level]) > self.max_sents:\n random.shuffle(self.data[article][level])\n self.data[article][level] = self.data[article][level][:self.max_sents]",
"def spam_prob(comment, word_spamrisk_map):\n sc = tokenize(comment)\n l = sanitize(sc)\n cost = dict()\n for word in l: \n if not word in word_spamrisk_map:\n cost[word] = 0.4\n else:\n cost[word] = abs(0.5 - word_spamrisk_map[word])\n\n sort_cost_list = sorted(cost.items(), key=lambda x: -x[1])[:15]\n\n return reduce(operator.mul, [i[1] / (i[1] + reduce(operator.mul, [1 - i[1] for i in sort_cost_list])) for i in sort_cost_list])",
"def classify_message(message_words, ham_l, spam_l):\n data_ham_words, data_spam_words = train_function(ham_l, spam_l)\n message_unique_words = set(message_words)\n message_ham_words, message_spam_words = [], []\n for word in message_unique_words:\n if word in data_ham_words:\n message_ham_words.append(word)\n if word in data_spam_words:\n message_spam_words.append(word)\n probability_ham = ((len(ham_l)) / (len(ham_l) + len(spam_l))) + get_message_probability(message_ham_words, data_ham_words)\n probability_spam = ((len(spam_l)) / (len(ham_l) + len(spam_l))) + get_message_probability(message_spam_words, data_spam_words)\n print(probability_ham, probability_spam)\n if probability_ham > probability_spam:\n return \"This letter is ham.\"\n else:\n return \"This letter is spam.\"",
"def naive_bayes_predict_bis(spam_ratio, words, spamicity, spamicity_no, spamicity_inv, product_word_dic, sms):\n res = set(sms.split())\n\n product_word_mess = 1\n for word in res:\n if word in words:\n heur = spamicity[words[word]]\n if heur > 0.8 or heur < 0.2:\n if heur == 0:\n heur = 1\n # print(word + \" \" + str(heur))\n product_word_mess *= ( heur ) * ( 1 / ( spamicity_inv[words[word]] ))\n # product_word_mess *= heur\n # print(product_word_dic)\n is_spam = spam_ratio * product_word_mess * product_word_dic \n # print(is_spam)\n return is_spam",
"def classify_spam(sms):\n return naive_bayes_predict(spam_ratio, words, spamicity, sms) > seuil",
"def process_dict(text, frequency_threshold):\n\n # Trying to load previous unique_words (pickle file)\n UNIQUE_WORDS_PICKLE = \"unique_words_with_frequency_\" + str(frequency_threshold) + \".pickle\"\n \n unique_words = None\n if os.path.isfile(UNIQUE_WORDS_PICKLE):\n try:\n with open(UNIQUE_WORDS_PICKLE, 'r') as f:\n unique_words = pickle.load(f)\n except:\n os.remove(UNIQUE_WORDS_PICKLE)\n unique_words = None\n\n if (type(unique_words) == list):\n return unique_words\n\n\n WORD_COUNT_PICKLE = \"word_count.pickle\"\n WORD_COUNT = 253855\n\n print(\"Processing dictionary. This will take a while.\")\n\n # Trying to load previous word_count (pickle file)\n word_count = None\n if os.path.isfile(WORD_COUNT_PICKLE):\n try:\n with open(WORD_COUNT_PICKLE, 'r') as f:\n word_count = pickle.load(f)\n if len(word_count) != WORD_COUNT:\n os.remove(WORD_COUNT_PICKLE)\n word_count = None\n except:\n raise\n os.remove(WORD_COUNT_PICKLE)\n word_count = None\n\n # count words\n if word_count == None:\n print(\"Pickle file not found. Counting word occurence...\")\n\n # grab all the words\n words = text.split(\" \")\n\n # counting word occurence\n word_count = dict(Counter(words).most_common())\n \n # saving word count for future reuse\n with open(WORD_COUNT_PICKLE, 'w') as f:\n pickle.dump(word_count, f)\n print(\"Word count saved for future reuse.\")\n \n # making sure we have the correct count loaded\n assert(type(word_count) == dict)\n assert(len(word_count) == WORD_COUNT)\n\n # remove the duplicates and single-character words.\n unique_words = [w for w in word_count.keys() if len(w) > 1]\n vocab_size = len(unique_words)\n print(\"Vocab size:\", vocab_size)\n\n # remove words with frequency lower than 1%\n unique_words = [word for word in unique_words if float(word_count[word]) / vocab_size > frequency_threshold]\n print(\"Vocab size (>%.3f%% frequency): %d\" % ((frequency_threshold * 100), len(unique_words)))\n\n unique_words.sort(key=lambda word: len(word), reverse=True)\n unique_words.append('a')\n unique_words.append('i')\n\n # save unique words for future reuse\n with open(UNIQUE_WORDS_PICKLE, 'w') as f:\n pickle.dump(unique_words, f)\n print(\"unique_words saved for future reuse.\")\n\n return unique_words",
"def ngram_detection(self, min_topic_count=5, min_text_id_count=4):\n\n for text_id, text in self.texts.items():\n # single-word topics act a bit different (no zips or comprehensions)\n # store data in self.topics, not zip_grams\n for word in text['doc']:\n word_lemma = word.text.lower() if word.lemma_ == '-PRON-' else word.lemma_\n\n if {word.text}.intersection(self.punct) or {word.lemma_}.intersection(self.stop_words):\n continue\n\n if not (word.pos in self.nouns or word.ent_type in self.entities):\n continue\n\n if word_lemma in self.topics:\n self.topics[word_lemma][\"count\"] += 1\n self.topics[word_lemma][\"textIDs\"] |= {text_id}\n self.topics[word_lemma][\"verbatims\"] |= {word.text.lower()}\n else:\n self.topics[word_lemma] = {\"name\": word_lemma,\n \"count\": 1,\n \"textIDs\": {text_id},\n \"verbatims\": {word.text.lower()},\n \"subtopics\": {}}\n\n # Populate self.ngrams and self.topics\n for text_id, text in self.texts.items():\n doc = text['doc']\n\n # Find pentagrams - ngrams with 5 words\n for ngram in zip(doc, doc[1:], doc[2:], doc[3:], doc[4:]):\n self._ngram_counter(ngram, 5, text_id, doc)\n\n # Find pentagrams - ngrams with 4 words\n for ngram in zip(doc, doc[1:], doc[2:], doc[3:]):\n self._ngram_counter(ngram, 4, text_id, doc)\n\n for ngram in zip(doc, doc[1:], doc[2:]):\n self._ngram_counter(ngram, 3, text_id, doc)\n\n for ngram in zip(doc, doc[1:]):\n self._ngram_counter(ngram, 2, text_id, doc)\n\n\n # Add text_id_count (the number of texts that the topic occurs in; so a topic might occur 50 times,\n # but it's only mentioned in 3 different texts, we'd show 3.\n for _, topic in self.topics.items():\n topic['textIDCount'] = len(topic['textIDs'])\n for _, ngram in self.ngrams.items():\n ngram['textIDCount'] = len(ngram['textIDs'])\n\n # Eliminate rarely occurring topics and ngrams.\n self.topics = {k: v for k, v in self.topics.items() if\n v['textIDCount'] >= min_text_id_count and v['count'] >= min_topic_count}\n self.ngrams = {k: v for k, v in self.ngrams.items() if\n v['textIDCount'] >= min_text_id_count}\n\n # Loop through each ngram pair: outer loop is all ngrams, inner loop is all ngrams\n for ngram_lemma, ngram in self.ngrams.items():\n for ngram_plus_lemma, ngram_plus in self.ngrams.items():\n # only stay in this loop if the inner ngram is one word longer than the outer loop and if the\n # inner loop lemma contains the outer group lemma (avoid partial word matches like man in woman)\n # r'\\b' + ngram_lemma + r'\\b' --> does the ngram lemma fit in ngram_plus lemma (\\b is word boundary)\n if ngram['n'] + 1 != ngram_plus['n']:\n continue\n\n if not re.search(r'\\b' + ngram_lemma + r'\\b', ngram_plus_lemma):\n continue\n\n # Is the absolute count of occurrences and the count of text_id occurrences both big enough to use it\n # instead of the other loop?\n if ngram_plus['count'] + 3 >= ngram['count'] and ngram_plus['textIDCount'] + 3 >= ngram['textIDCount']:\n # TODO: Is this the right action (deleting shorter, but not much more explanatory) phrase?\n # TODO: Is this enough? Or will I end up double explaining things sometimes?\n ngram['count'] = -1\n\n # Eliminate newly demoted items\n self.ngrams = {ngram_lemma: ngram for ngram_lemma, ngram in self.ngrams.items() if ngram['count'] > 0}",
"def naive_bayes_train(sms_file):\n dic, list1, list2 = tokenize_and_split_bis(sms_file)\n nbr_words = len(list1) + len(list2)\n spam_ratio = len(list1) / nbr_words\n document = list1 + list2\n\n nbr_spam = 0\n for line in list1:\n for word in line:\n nbr_spam += 1\n \n nbr_ham = 0\n for line in list2:\n for word in line:\n nbr_ham += 1\n \n nbr_words = nbr_ham + nbr_spam\n sms_ratio_list = compute_frequencies(nbr_words, document)\n spam_ratio_list = compute_frequencies(nbr_words, list1)\n spamicity = [0. for i in range(nbr_words)]\n\n # print(nbr_words)\n\n for i in range(nbr_words):\n if sms_ratio_list[i] != 0:\n spamicity[i] = spam_ratio_list[i] / sms_ratio_list[i]\n\n return spam_ratio, dic, spamicity",
"def train():\n num_spam=0 \n num_ham=0\n spam_words=()\n ham_words=()\n pullData = open(\"labels\", \"r\").read()\n dataArray= pullData.split('\\n')\n #print(dataArray)\n dataArrayTrain=dataArray[0:21300] #opens training set from folder 000-070\n \n for eachLine in dataArrayTrain:\n kind,file = eachLine.split(' ')\n file=file.strip('../') \n #print(kind)\n #print(file)\n \n fileDir = os.path.dirname(os.path.realpath('__file__'))\n filepath = os.path.join(fileDir,file) \n print(filepath)\n email=\"\"\n fh = open(filepath, encoding=\"ascii\", errors=\"ignore\")\n for line in fh:\n email += line\n fh.close()\n email= email.lower()\n #print(email)\n email_words = processText(contentEmail(email))\n #print(email_words)\n email_words = tuple(list(set(email_words))) #converted it into a set to avoid repetition of words in every email\n #print(email_words)\n if (kind == \"spam\"):\n num_spam+=1 #counts how many spam emails\n spam_words= spam_words + tuple(email_words) #adds every word to a spam tuple\n\n elif (kind==\"ham\"):\n num_ham+=1 #counts how many ham emails\n ham_words= ham_words + tuple(email_words) #adds every word to a ham tuple\n\n spam_words= tuple(spam_words)\n ham_words= tuple(ham_words)\n\n \n count_spam = collections.Counter(spam_words) #counts how many times a words appears in all spam emails\n count_ham = collections.Counter(ham_words) #counts how many times a words appears in all ham emails\n total_count = (count_spam + count_ham).most_common(5000) #adds the total occurences of the words and gets top 5000\n #print(total_count)\n #print(num_ham, num_spam)\n\n top_words = []\n for everyword in total_count:\n top_words.append(everyword[0])\n for everyword in list(count_spam):\n if everyword not in top_words:\n del count_spam[everyword] #deletes words in spam emails not included in top 5000\n for everyword in list(count_ham):\n if everyword not in top_words:\n del count_ham[everyword] #deletes words in ham emails not included in top 5000\n #print(words, count_ham, count_spam)\n\n file_encoder = open(\"top_word_count.txt\", \"w+\", encoding = 'utf-8', errors = 'ignore')\n file_encoder.write(\"HERE ARE YOUR TOP 5000 WORDS: \"+\"\\n\"+str(total_count)+\"\\n\"+\"\\n\"+\"SPAM WORDS: \"+\"\\n\"+str(count_spam)+\"\\n\"+\"\\n\"+\"HAM WORDS: \"+\"\\n\"+str(count_ham))\n file_encoder.close()\n print(\"Counting and getting top 5000 words successful!\")\n probabilityGet(num_spam, num_ham, count_spam, count_ham)",
"def naive_bayes_train_bis(sms_file):\n dic, list1, list2 = tokenize_and_split_bis(sms_file)\n nbr_words = len(list1) + len(list2)\n spam_ratio = len(list1) / nbr_words\n document = list1 + list2\n\n nbr_spam = 0\n for line in list1:\n for word in line:\n nbr_spam += 1\n \n nbr_ham = 0\n for line in list2:\n for word in line:\n nbr_ham += 1\n \n nbr_words = nbr_ham + nbr_spam\n sms_ratio_list = compute_frequencies(nbr_words, document)\n spam_ratio_list = compute_frequencies(nbr_words, list1)\n spamicity = [0. for i in range(nbr_words)]\n # print(sms_ratio_list)\n # print(spam_ratio_list)\n spamicity_no = [0. for i in range(nbr_words)]\n spamicity_inv = [0. for i in range(nbr_words)]\n\n product_word_dic = 1\n for i in range(nbr_words):\n if sms_ratio_list[i] != 0:\n spamicity[i] = ((spam_ratio_list[i]) / sms_ratio_list[i])\n spamicity_no[i] = 1 - ((spam_ratio_list[i]) / sms_ratio_list[i])\n spamicity_inv[i] = ((1 - (spam_ratio_list[i])) / (1 - sms_ratio_list[i]))\n # print(spamicity_inv[i])\n # if spamicity_inv[i] != 0 :\n product_word_dic *= spamicity_inv[i]\n \n return spam_ratio, dic, spamicity, spamicity_no, spamicity_inv, product_word_dic",
"def add_unknown_words(word_vecs, vocab, min_df=5000, k=300):\n for word in vocab:\n if word not in word_vecs and vocab[word] >= min_df:\n #print word\n word_vecs[word] = np.random.uniform(-0.25,0.25,k)",
"def unigram_model(list_of_words, unigram_count, N=count_token()):\n d = pd.read_csv(unigram_count)\n proba_dict = {list_of_words[i]: (d[el].values[0] / float(N)) if el in d.columns.values else 0.0 for i, el in enumerate(list_of_words) }\n return proba_dict",
"def classify(texts: List[str], params: Any) -> List[str]:\n\n alpha = 1\n token_probs_pos = params[\"token_probs_pos\"]\n token_probs_neg = params[\"token_probs_neg\"]\n all_words = params[\"all_words\"]\n M = len(all_words)\n cnt_pos_docs = params[\"cnt_pos_docs\"]\n cnt_neg_docs = params[\"cnt_neg_docs\"]\n\n sum_len_neg = params[\"sum_len_neg\"]\n sum_len_pos = params[\"sum_len_pos\"]\n pos_dict = params[\"pos_dict\"]\n neg_dict = params[\"neg_dict\"]\n\n\n test_texts = preprocessing(texts)\n test_tokenized_texts = text_to_tokens(test_texts)\n \n res = []\n log_pos_probablity = 0\n log_neg_probablity = 0\n i = 0\n for text in test_tokenized_texts:\n if (i % 5000 == 0):\n print(\"Classified\", i, \"texts\")\n i += 1\n log_pos_probablity = log(cnt_pos_docs)\n log_neg_probablity = log(cnt_neg_docs)\n for token in text:\n if (token_probs_pos[token] == 0):\n token_probs_pos[token] = alpha / (alpha * M + sum_len_pos)\n else:\n log_pos_probablity += log(token_probs_pos[token])\n if (token_probs_neg[token] == 0):\n token_probs_neg[token] = alpha / (alpha * M + sum_len_neg)\n else:\n log_neg_probablity += log(token_probs_neg[token])\n if (log_neg_probablity > log_pos_probablity):\n res.append(\"neg\")\n #for token in text:\n # all_words.add(token)\n # M = len(all_words)\n # neg_dict[token] += text[token]\n # sum_len_neg += text[token]\n # token_probs_neg[token] = (alpha + neg_dict[token]) / (alpha * M + sum_len_neg)\n\n else:\n res.append(\"pos\")\n #for token in text:\n # all_words.add(token)\n # M = len(all_words)\n # pos_dict[token] += text[token]\n # sum_len_pos += text[token]\n # token_probs_pos[token] = (alpha + pos_dict[token]) / (alpha * M + sum_len_pos)\n\n\n \n print('Predicted labels counts:')\n print(count_labels(res))\n return res",
"def keep_word(word, num_words, count_dict):\r\n try:\r\n z_wi = float(count_dict[word]) / (num_words)\r\n part1 = math.sqrt((z_wi / 0.001)) + 1\r\n part2 = 0.001 / z_wi\r\n total_prob = part1 * part2\r\n return random.uniform(0, 1) < total_prob\r\n except:\r\n return False",
"def senti_wordnet(self, tokens):\n\n positive_score, negative_score = 0.0, 0.0\n positive_unigram_words, negative_unigram_words = 0, 0\n\n # loop through the bigrams \n for token in tokens:\n if token in self.senti_wordnet_map:\n if self.senti_wordnet_map[token] >= 0:\n positive_score += self.senti_wordnet_map[token]\n positive_unigram_words += 1\n else:\n negative_score += self.senti_wordnet_map[token]\n negative_unigram_words += 1\n\n return {\n \"senti_wordnet_positive_score\": positive_score, \n \"senti_wordnet_negative_score\": negative_score, \n \"senti_wordnet_postive_words\": positive_unigram_words, \n \"senti_wordnet_negative_words\": negative_unigram_words\n }",
"def sentiment140_unigrams(self, tokens):\n positive_score, negative_score = 0.0, 0.0\n positive_unigram_words, negative_unigram_words = 0, 0\n\n # loop through the bigrams\n for token in tokens:\n if token in self.sentiment140_unigrams_map:\n if self.sentiment140_unigrams_map[token] >= 0:\n positive_score += self.sentiment140_unigrams_map[token]\n positive_unigram_words += 1\n else:\n negative_score += self.sentiment140_unigrams_map[token]\n negative_unigram_words += 1\n return {\n \"sentiment140_positive_unigram_score\": positive_score, \n \"sentiment140_negative_unigram_score\": negative_score, \n \"sentiment140_postive_unigram_words\": positive_unigram_words, \n \"sentiment140_negative_unigram_words\": negative_unigram_words\n }",
"def preprocessing():\n english_dictionary = nltk.corpus.brown.words()\n slang_vocab = pickle.load(open('vocab_pattern_match_with_freq.pkl', 'rb'))\n\n normalize_english_dict = len(english_dictionary)\n normalize_slang_vocab = 0\n for w, n in slang_vocab.items():\n normalize_slang_vocab += n\n\n words = {}\n for w, n in Counter(english_dictionary).items():\n words[w] = n/normalize_english_dict\n \n for w, n in slang_vocab.items():\n if w not in words:\n words[w] = 0.\n words[w] += n/normalize_slang_vocab\n\n words_by_freq = [w for w,_ in sorted(words.items(), key=lambda x: x[1], reverse=True)]\n\n # Build a cost dictionary, assuming Zipf's law and cost = -math.log(probability).\n #words = open(\"words_by_frequency.txt\").read().split()\n wordcost = dict((k, log((i+1)*log(len(words_by_freq)))) for i,k in enumerate(words_by_freq))\n maxword = max(len(x) for x in words_by_freq)\n return wordcost,maxword",
"def add_unknown_words(self, word_vecs, vocab, min_df=3, k=300):\n for word in vocab:\n if word not in word_vecs and vocab[word] >= min_df:\n word_vecs[word] = np.random.uniform(-0.25,0.25,k)",
"def probability(self, words):\n prob = 1\n for w in words: # Loop through words\n if w not in self.mdl.index: # Not in tokens\n return 0\n else: # Get prob\n prob *= self.mdl.loc[w] \n return prob",
"def probability(self, words):\n prob = 1\n for w in words: # Loop through words\n if w not in self.mdl.index: # Not in tokens\n return 0\n else: # Get prob\n prob *= self.mdl.loc[w] \n return prob",
"def positive_word(tweets, positives):\n\n wordcount = {}\n\n positive_words = set(positives)\n\n # Makes a dictionary of all positive words to be able to store the appearances\n for i in positives:\n wordcount[i] = 0\n\n for tweet in tweets:\n for word in tweet:\n if word in positive_words:\n wordcount[word] += 1\n\n # Sorts the dictionary so the first 5 words are the top used words\n items = wordcount.items()\n sorted_dic = sorted(items, key=lambda wordcount: wordcount[1], reverse=True)\n print(\"\\nTrump's top 5 most used positive words:\")\n for i in range(5):\n print(\" \" + sorted_dic[i][0] + \" \" + str(sorted_dic[i][1]))",
"def add_unknown_words(word_vecs, vocab, min_df=1, k=100):\n for word in vocab:\n if word not in word_vecs and vocab[word] >= min_df:\n # print(\"************************\")\n # print(word)\n # print(\"************************\")\n word_vecs[word] = np.random.uniform(-0.25,0.25,k)",
"def preprocess(self, documents):\n\n # A dict storing the frequency of each word\n word_freq = {}\n\n # Iterate for each document\n for doc in documents:\n # Split the document into a list of words and iterate on it\n for w in extract_words(doc):\n # Update word frequencies\n '''YOUR CODE HERE'''\n if w not in word_freq.keys():\n word_freq[w] = 1\n else:\n word_freq[w] += 1\n\n ''' END CODE FOR THIS LOOP '''\n\n\n # A set of words with frequency less than 'self.min_freq'\n remove_words = set()\n\n # Check frequency of each word and add to 'remove_words'\n # if it's frequency is below self.min_freq\n\n ''' YOUR CODE HERE '''\n for w in word_freq.keys():\n if word_freq[w] < self.min_freq:\n remove_words.add(w)\n\n # Delete the words in 'remove_words' from 'word_freq'\n for w in remove_words:\n del word_freq[w]\n\n # Fill 'self.word_to_idx' and 'self.idx_to_word' for\n # each word in 'word_freq' (dicts are explained above)\n\n i = 0\n for w in word_freq.keys():\n self.word_to_idx[w] = i\n self.idx_to_word[i] = w \n i += 1\n\n ''' END YOUR CODE HERE '''",
"def normalize(self):\n for key in self.corpus.keys():\n sum_count = 0\n words = []\n counts = []\n for k, v in self.corpus[key].items():\n sum_count += v\n words.append(k)\n counts.append(v)\n prob = [float(count)/sum_count for count in counts]\n\n self.corpus[key] = [words, prob]",
"def score(self, sentence):\n # TODO your code here\n\n # initialize count with trained data\n unigram_count = self.count.copy()\n N = self.total\n\n # make a new key for UNK, add-one later\n for token in sentence:\n if token not in unigram_count:\n unigram_count[token] = 0\n\n # calcutate lopP(<s>) + logP(w1) + logP(w2) + ...\n score = 0.0 # P(<s>) = 1\n V = len(unigram_count) # the number of vocab including UNK\n for word in sentence:\n prob = float((unigram_count[word] + 1) / (N + V)) # c(w) + 1 / N + V\n score += math.log(prob)\n\n return score",
"def analyze(self, word_count_thresh):",
"def add_unknown_words(word_vecs, vocab, min_df=10, k=300):\n for word in vocab:\n if word not in word_vecs and vocab[word] >= min_df:\n word_vecs[word] = np.random.uniform(-0.25, 0.25, k)"
]
| [
"0.6841508",
"0.6254475",
"0.59997356",
"0.58946764",
"0.58677113",
"0.5823959",
"0.58210856",
"0.5756859",
"0.566003",
"0.56481004",
"0.5628697",
"0.56022924",
"0.55878425",
"0.55792934",
"0.5560454",
"0.5552837",
"0.55461967",
"0.55404204",
"0.5540015",
"0.55317634",
"0.5510022",
"0.54924065",
"0.54924065",
"0.5450364",
"0.5445059",
"0.54329515",
"0.5432226",
"0.5426278",
"0.5423517",
"0.53949827"
]
| 0.6931594 | 0 |
Determines the final probability of the message being spam. | def message_spam_chance(word_probabilities_dict):
# Discard the dictionary keys and keep only the per-word spam probabilities, sorted ascending.
word_probability_values = sorted(word_probabilities_dict.values())
print("\nword spam probability values only, keys removed: " + str(word_probability_values))
# Calculate the product of all individual word probabilities.
product_of_probabilities = 1.0
for each_probability in word_probability_values:
product_of_probabilities *= each_probability
print("product of individual values: " + str(product_of_probabilities))
# Determine the complement value of all individual word probabilities.
word_probability_complement_values = []
for each_probability in word_probability_values:
complement = 1.00 - each_probability
word_probability_complement_values.append(complement)
print("word spam complement probability values: " + str(word_probability_complement_values))
# Calculate the product of all complement probabilities.
product_of_complement_probabilities = 1.0
for each_complement_probability in word_probability_complement_values:
product_of_complement_probabilities *= each_complement_probability
print("product of complement values: " + str(product_of_complement_probabilities))
spam_message_probability = product_of_probabilities / \
(product_of_probabilities + product_of_complement_probabilities)
print("final probability message is spam: " + str(spam_message_probability))
return spam_message_probability | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_probability(self, unseen_mail):\n unseen_mail = self.__handle_unseen_mail_unknown_words(unseen_mail)\n \n start_token = ' '.join([NGramModel.START_SENTENCE_TOKEN]*(self.__n-1));\n sentences = unseen_mail.replace(NGramModel.START_SENTENCE_TOKEN, start_token).split(NGramModel.END_SENTENCE_TOKEN)\n \n log_probability = 0;\n for sentence in sentences:\n if len(sentence.strip()) > 0:\n word_list = sentence.split()\n word_list.append(NGramModel.END_SENTENCE_TOKEN)\n \n for ngram in self.__generate_n_grams(word_list, self.__n):\n probability = self.__smoother.calculate_probability(self, ' '.join(ngram))\n if probability == 0:\n return 0\n log_probability += math.log10(probability)\n return log_probability",
"def spamProba(message, wordProba):\n messageWords = uf.tokenizeMessage(message)\n logProbSpam = logProbHam = 0.0\n# probSpam = probHam = 1\n \n for word in wordProba.keys():\n \n if word in messageWords:\n logProbSpam += math.log(wordProba[word][0])\n logProbHam += math.log(wordProba[word][1])\n# probSpam = probSpam*wordProba[word][0]\n# probHam = probHam*wordProba[word][1]\n \n else:\n logProbSpam += math.log(1 - wordProba[word][0])\n logProbHam += math.log(1 - wordProba[word][1])\n# probSpam = probSpam*(1-wordProba[word][0])\n# probHam = probHam*(1-wordProba[word][1])\n \n# probSpam = math.exp(logProbSpam)\n# probHam = math.exp(logProbHam)\n \n return logProbSpam - logProbHam",
"def calculate_probability(self):\n return 0",
"def raw_bigram_probability(self, bigram):\n\n assert len(bigram) == 2\n unigram = bigram[:1]\n count_unigram = self.unigramcounts[unigram]\n count_bigram = self.bigramcounts[bigram]\n if count_unigram == 0 :\n return 1 / self.total_sentence_count\n else :\n return (count_bigram / count_unigram)",
"def raw_bigram_probability(self, bigram):\n if bigram == (\"START\", \"START\"):\n return 1/2\n num = self.bigramcounts[bigram]\n den = self.unigramcounts[bigram[:1]]\n if den == 0:\n return 1/len(self.unigramcounts)\n\n return num/den",
"def get_success_probability(self):\n\t\treturn min(self.get_raw_probability(), RunOrder.MAX_PERCENTS)",
"def _basic_probability(count: int, sequence_total_count: int) -> float:\n return float(count) / sequence_total_count",
"def spam(bot, msg):\n\n sendername = msg.sendername\n\n if msg.command != \"PRIVMSG\" or sendername in bot.services:\n return\n\n message = msg.args[1]\n\n if sendername not in spammers or message != spammers[sendername][0]:\n spammers[sendername] = [message, 0]\n else:\n spammers[sendername][1] += 1\n\n if spammers[sendername][1] == 1:\n bot.privmsg(msg.sendername, \\\n \"WARNING: Spam detected. Stop or you will be kicked.\")\n if spammers[sendername][1] >= 4:\n for channel in bot.channels:\n bot.kick(msg.sendername, channel, \"Spam detected\")",
"def _favg(sequence):\n return math.fsum(sequence) / len(sequence)",
"def probability(p):\n return p > random.uniform(0.0, 1.0)",
"def guess_email(email, ham_model, spam_model, class_prior=0.5):\n email = email.split()\n correct_answer = email.pop(0)\n\n ham_prob = 0\n for word in email:\n ham_prob += ham_model[word] + math.log(class_prior)\n spam_prob = 0\n for word in email:\n spam_prob += spam_model[word] + math.log((1-class_prior))\n \n if ham_prob > spam_prob: \n guess = \"ham\"\n else:\n guess = \"spam\"\n if guess == correct_answer:\n correctness = True\n else:\n correctness = False\n return correctness",
"def raw_unigram_probability(self, unigram):\n\n assert len(unigram) == 1\n count_unigram = self.unigramcounts[unigram]\n if self.total_word_count == 0 :\n return 1 / self.total_sentence_count\n else :\n return (count_unigram / self.total_word_count)",
"def probability(self, sequence):\n return 2 ** (self.log_probability(self._transform(sequence)))",
"def probability(prob):\n return random.random() <= prob",
"def spam_prob(comment, word_spamrisk_map):\n sc = tokenize(comment)\n l = sanitize(sc)\n cost = dict()\n for word in l: \n if not word in word_spamrisk_map:\n cost[word] = 0.4\n else:\n cost[word] = abs(0.5 - word_spamrisk_map[word])\n\n sort_cost_list = sorted(cost.items(), key=lambda x: -x[1])[:15]\n\n return reduce(operator.mul, [i[1] / (i[1] + reduce(operator.mul, [1 - i[1] for i in sort_cost_list])) for i in sort_cost_list])",
"def spam_indicator(text):\n\n # this function returns the spam indicator rounded to two decimals\n user_input_set = set(text.lower().split())\n total_unique_words = round(len(user_input_set),2)\n set_operation = user_input_set & SPAM_WORDS\n spam_words = round(len(set_operation),2)\n spam_indicate = round(spam_words/total_unique_words,2)\n return spam_indicate",
"def prob(self, sequence):\n prob = 1\n for event, context in self.extract_ngrams(sequence):\n prob *= self.cond_prob(event, context)\n return prob",
"def raw_trigram_probability(self,trigram):\n\n assert len(trigram) == 3\n bigram = trigram[:2]\n count_bigram = self.bigramcounts[bigram]\n count_trigram = self.trigramcounts[trigram]\n if count_bigram == 0 :\n return 1 / self.total_sentence_count\n else :\n return (count_trigram / count_bigram)",
"async def check_for_spam(self, message: discord.Message):\n user = message.author\n guild = message.guild\n\n similarity_threshold = await self.config.guild(guild).similarity_threshold()\n\n last_message = await self.config.member(user).last_message()\n current_message = message.content\n\n if last_message is None:\n await self.config.member(user).last_message.set(current_message)\n return False\n\n last_message_time = await self.config.member(user).last_message_time()\n\n if last_message_time is None:\n await self.config.member(user).last_message_time.set(\n message.created_at.timestamp()\n )\n return False\n\n current_message_time = message.created_at.timestamp()\n time_difference_in_seconds = current_message_time - last_message_time\n\n await self.config.member(user).last_message.set(current_message)\n await self.config.member(user).last_message_time.set(current_message_time)\n\n if time_difference_in_seconds < 1800:\n similarity = self.similarity(last_message, message.content)\n\n if similarity > similarity_threshold:\n spam_count = await self.config.member(user).spam_count()\n spam_count = spam_count + 1\n\n spam_threshold = await self.config.guild(guild).spam_threshold()\n\n if spam_count > spam_threshold:\n punish = self.bot.get_cog(\"Punish\")\n punish_hours = await self.config.guild(guild).spam_punish_hours()\n async with punish.config.member(user)() as current:\n now = time.time()\n duration = now + 3600 * punish_hours\n punish_role = await punish.get_role(guild, user, quiet=True)\n\n if punish_role is None:\n return\n\n current[\"start\"] = (\n current[\"start\"] or now\n ) # don't override start time if updating\n current[\"until\"] = duration\n current[\"by\"] = (\n current[\"by\"] or guild.me.id\n ) # don't override original moderator\n current[\"reason\"] = \"Spamming messages\"\n current[\"unmute\"] = False\n current[\"caseno\"] = None\n\n await user.add_roles(punish_role)\n\n await punish.schedule_unpunish(duration, user)\n await message.channel.send(\n \"%s has been muted for 12 hours for Spamming Messages\"\n % user.name\n )\n\n # Reset spam counter since we punished\n await self.config.member(user).spam_count.set(0)\n else:\n await self.config.member(user).spam_count.set(spam_count)\n\n # We delete the message in any case\n await asyncio.sleep(0.5)\n await message.delete()\n\n return True\n\n return False",
"def spam_indicator(text):\n # This function returns the spam indicator rounded to two decimals\n\n word_list = text.split() # Turning string into list\n unique_words = set(word_list) # Turning list into set\n shared_words = unique_words & SPAM_WORDS # Intersection of two sets\n shared_ratio = (len(shared_words)) / (len(unique_words)) # Finding ratio\n rounded_ratio = round(shared_ratio, 2) # Rounding ratio to two places\n return rounded_ratio # Return rounded ratio",
"def probability(self, tokens):\n\n return 2 ** self.log_probability(tokens)",
"def raw_bigram_probability(self, bigram):\n result = 0.0\n try:\n unigram = (bigram[0],)\n result = self.bigramcounts[bigram]/self.unigramcounts[unigram]\n except Exception as e:\n pass\n else:\n pass\n\n return result",
"def get_ngram_prob(self, label_seq):\n curr_ngram = self.all_grams\n for i in range(0, len(label_seq)):\n label = label_seq[i]\n if i == len(label_seq) - 1:\n denom = curr_ngram.get_count() + self.SMOOTHING_VALUE * 9\n curr_ngram = curr_ngram.get_next_Ngram(label)\n # For smoothing, just add self.SMOOTHING_VALUE\n numer = curr_ngram.get_count() + self.SMOOTHING_VALUE\n return float(numer) / denom",
"def individual_word_spam_chance(spam_words_dict, non_spam_words_dict, threshold):\n\n # Combine unique spam and non-spam words into one dictionary with their associated combined occurrences.\n combined_spam_nonspam_word_occurrences = {}\n\n for key, value in spam_words_dict.items():\n if key not in combined_spam_nonspam_word_occurrences:\n combined_spam_nonspam_word_occurrences[key] = value\n else:\n combined_spam_nonspam_word_occurrences[key] += value\n print(\"\\nlower-case only spam word occurrences as part of one combined dictionary: \"\n + str(combined_spam_nonspam_word_occurrences))\n\n for key, value in non_spam_words_dict.items():\n if key not in combined_spam_nonspam_word_occurrences:\n combined_spam_nonspam_word_occurrences[key] = value\n else:\n combined_spam_nonspam_word_occurrences[key] += value\n print(\"lower-case only spam and non-spam word occurrences as part of one combined dictionary: \"\n + str(combined_spam_nonspam_word_occurrences))\n\n ############################################\n\n # Iterate through all spam and non-spam words in combined dictionary and calculate spam probability for each word.\n words_spam_chance = {}\n\n for key, value in combined_spam_nonspam_word_occurrences.items():\n\n # If word is not found in non-spam dictionary set value to 0, otherwise set to value found * 2.\n if key not in non_spam_words_dict:\n good_occurrences = 0\n else:\n good_occurrences = 2 * value\n\n # If word is not found in spam dictionary set value to 0, otherwise set to that value found.\n if key not in spam_words_dict:\n bad_occurrences = 0\n else:\n bad_occurrences = value\n\n # Statistical algorithm to calculate the associated probability for each word.\n # Note to self: don't be an idiot and forget a parentheses messing up your order of operations.\n if good_occurrences + bad_occurrences > threshold:\n probability = max(0.01, min(0.99, min(1.0, bad_occurrences / number_bad_message) /\n (min(1.0, good_occurrences / number_good_messages) +\n min(1.0, bad_occurrences / number_bad_message))))\n else:\n probability = 0.0\n\n # Store to dictionary each word and their associated probability.\n words_spam_chance[key] = probability\n\n # Return our dictionary of stored words spam probabilities.\n return words_spam_chance",
"def log_probability(self, tokens):\n\n log_sum = 0\n for leader, token in generate_ngrams(tokens, self.size, include_terminator = self.include_terminator):\n if not leader in self.frequencies:\n return float(\"-inf\")\n\n word_frequency = self.frequencies[leader][token]\n leader_frequency = self.frequencies[leader].total\n\n probability = (word_frequency + self.smoothing) / (leader_frequency + len(self.vocab) * self.smoothing)\n\n if probability == 0:\n return float(\"-inf\")\n\n log_sum += math.log2(probability)\n \n return log_sum",
"def _calc_freeze_probability(self, num_iterations, final_fraction):\n return 1.0 - (final_fraction ** (1.0 / num_iterations))",
"def score(self, beam, logprobs):\n l_term = (((5 + len(beam.next_ys)) ** self.alpha) /\n ((5 + 1) ** self.alpha))\n return (logprobs / l_term)",
"def classify_message(message_words, ham_l, spam_l):\n data_ham_words, data_spam_words = train_function(ham_l, spam_l)\n message_unique_words = set(message_words)\n message_ham_words, message_spam_words = [], []\n for word in message_unique_words:\n if word in data_ham_words:\n message_ham_words.append(word)\n if word in data_spam_words:\n message_spam_words.append(word)\n probability_ham = ((len(ham_l)) / (len(ham_l) + len(spam_l))) + get_message_probability(message_ham_words, data_ham_words)\n probability_spam = ((len(spam_l)) / (len(ham_l) + len(spam_l))) + get_message_probability(message_spam_words, data_spam_words)\n print(probability_ham, probability_spam)\n if probability_ham > probability_spam:\n return \"This letter is ham.\"\n else:\n return \"This letter is spam.\"",
"def confirmProbability(self, totalDice, bidCount):\n result = self.choose(totalDice, bidCount) * P**bidCount * (1 - P)**(totalDice-bidCount)\n return result",
"def should_pay_attention(self):\n return random.randint(1,100) > self.wander"
]
| [
"0.6856276",
"0.66305524",
"0.63954985",
"0.6126914",
"0.59960806",
"0.59381163",
"0.592768",
"0.58817714",
"0.5818623",
"0.5792749",
"0.575886",
"0.5748488",
"0.5743481",
"0.5741628",
"0.5736055",
"0.572044",
"0.57093024",
"0.57084846",
"0.56884634",
"0.56721234",
"0.56641084",
"0.5661426",
"0.565414",
"0.5634728",
"0.56196624",
"0.5594685",
"0.5578704",
"0.55276597",
"0.5508947",
"0.54917425"
]
| 0.6654423 | 1 |
Create sorted savings list and matrix with distance and savings | def Savings(self,geolocations):
self.savings = [] #List to store savings
size = len(geolocations)
for from_node in xrange(1,size):
for to_node in xrange(from_node+1,size):
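# Saving gained by serving i and j on one route instead of two separate out-and-back trips:
# s(i, j) = d(0, i) + d(0, j) - d(i, j), with node 0 as the depot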
save = self.Distance(0,from_node) + self.Distance(0,to_node) - self.Distance(from_node,to_node)
member = (from_node,to_node,save) # (i, j, saving; the same value applies whether the route runs i to j or j to i)
self.savings.append(member)
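# Order candidate pairs by descending saving so the most beneficial merges are considered first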
self.savings.sort(key=lambda member: member[2],reverse = True)
return self.savings | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_savings_matrix(self):\n mat_dim = self._problem.number_of_patients()\n savings_matrix = np.zeros((mat_dim, mat_dim))\n cost_matrix = self._problem.costs_matrix\n for i in range(mat_dim):\n for j in range(mat_dim):\n if i != j:\n savings_matrix[i, j] = cost_matrix[i + 1, 0] + cost_matrix[0, j + 1] - cost_matrix[i + 1, j + 1]\n return savings_matrix",
"def sort(self): # sort all entries to make room for new ones, determine best and worst\n ns = self.num_stored.value\n ys = np.asarray(self.ys[:ns])\n yi = ys.argsort()\n sortRuns = []\n for i in range(len(yi)):\n y = ys[yi[i]]\n xs = self.get_x(yi[i])\n sortRuns.append((y, xs))\n numStored = min(len(sortRuns),int(0.9*self.capacity)) # keep 90% best \n for i in range(numStored):\n self.replace(i, sortRuns[i][0], sortRuns[i][1])\n self.num_sorted.value = numStored \n self.num_stored.value = numStored \n return numStored",
"def sort_values(self):\r\n for loopindex in range(0, self.population_size):\r\n index = self.cost_populations.index(min(self.cost_populations))\r\n \r\n if loopindex < int(self.population_size / 2):\r\n self.best_districts.append(self.district_population[index])\r\n self.best_costs.append(self.cost_populations[index])\r\n else:\r\n self.worst_districts.append(self.district_population[index])\r\n \r\n del self.cost_populations[index]\r\n del self.district_population[index]",
"def distances(self):",
"def buildOrd(abscissaTab, nbWalks):\n random = []\n nonReversing = []\n selfAvoiding = []\n for ab in abscissaTab:\n print(\"# Computing for ab = \", ab)\n print(\" -> Random...\")\n random.append(averageDistance(ab, nbWalks, randomWalk))\n print(\" -> Non reversing...\")\n nonReversing.append(averageDistance(ab, nbWalks, nonReversingWalk))\n print(\" -> Self-avoiding...\")\n selfAvoiding.append(averageDistance(ab, nbWalks, selfAvoidingWalk))\n return random, nonReversing, selfAvoiding",
"def test_stations_by_distance():\n station_list = build_station_list()\n #test for stations closest to cambridge city coordinates\n station_list_sort = stations_by_distance(station_list, (52.2053, 0.1218))\n output = [(station.name, distance) for (station, distance) in station_list_sort]\n for n in range(1, len(station_list)):\n #make sure that the distance of the previous station to the point is less than the next one in the list\n assert output[n-1][1] <= output[n][1]",
"def mi_from_dm_alt(distance_matrix, ns, nh, spike_train_list=None):\n \n #print \"start loading\"\n \n nr = len(distance_matrix)\n nt = nr/ns\n nearest_neighbours = np.array([r.argsort()[:nh] for r in distance_matrix])\n near_to = [[j for j in range(nr) if i in nearest_neighbours[j] ] for i in range(nr)]\n \n #print \"finished sorting\"\n #return\n #nr = len(distance_matrix)\n #nearest_neighbours = np.array([[i] + distance_matrix[i].argsort()[1:nh].tolist() for i in range(nr)])\n \n members_of_glob = trains_in_glob(spike_train_list)\n glob_comp = glob_composition(spike_train_list, ns, nt, nh)\n \n counts = []\n counted_glob = False #set a flag for later use\n if spike_train_list is not None:\n for i in range(len(near_to)):\n c_i = 0\n \n if i not in members_of_glob:\n #print near_to[i]\n for j in near_to[i]:\n \n if j not in members_of_glob and spike_train_list[i].start_time == spike_train_list[j].start_time:\n c_i += 1\n else:\n if not counted_glob: #this should only really happen if glob has a small number of members...\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n g_i = f_i - 1.0/float(sum(glob_comp.values()))\n c_i += (nh - c_i)*g_i\n \n counted_glob = True\n else:\n pass\n \n else: #If i is in the glob...\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n g_i = f_i - 1.0/float(sum(glob_comp.values()))\n c_i = 1 + (nh - 1)*g_i\n \n counts.append(c_i) \n counts = np.array(counts) \n I = (1.0/nr)*sum( np.log2((ns*counts)/float(nh)) ) \n \n else:\n near_to_same_stim = [[n for n in near_to[j] if abs(n-j)%ns==0 ] for j in range(nr)]\n number_of_neighbourhoods = np.array([len(l) for l in near_to])\n number_of_neighbourhoods_same_stim = np.array([len(l) for l in near_to_same_stim])\n I = (1.0/nr)*sum( np.log2((ns*number_of_neighbourhoods_same_stim)/float(nh)) )\n \n return I",
"def clarke_and_wright_init(self):\n if self._problem.costs_matrix is None:\n self._problem.calculate_cost_matrix()\n savings_matrix = self.calculate_savings_matrix()\n savings_flat = np.ndarray.flatten(savings_matrix)\n arg_sorted_savings = np.argsort(savings_flat)\n sorted_savings = [savings_flat[i] for i in arg_sorted_savings]\n self._sorted_savings = sorted_savings\n self._arg_sorted_savings = arg_sorted_savings",
"def _compute_soffsets(self):\n self.soffsets = [ [] for i in self.doffsets ]\n for idx,dofs in enumerate(self.doffsets):\n for o in dofs:\n self.soffsets[(idx + o) % self.p].append(-o)",
"def sim_stored_2020_04_08(self):\n L = [[0.0, 2163000], [0.001, 2119380], [0.002, 2075760], [0.003, 2032140], [0.004, 1988520], [0.005, 1944900], [0.006, 1901280], [0.007, 1857660], [0.008, 1814040], [0.009, 1770410], [0.01, 1726790], [0.011, 1683170], [0.012, 1639550], [0.013, 1595930], [0.014, 1552310], [0.015, 1508690], [0.016, 1465070], [0.017, 1421440], [0.018, 1377820], [0.019, 1334200], [0.02, 1290580], [0.021, 1246960], [0.022, 1203340], [0.023, 1159720], [0.024, 1116100], [0.025, 1072470], [0.026, 1028850], [0.027, 985230], [0.028, 941610], [0.029, 897990], [0.03, 854370], [0.031, 810750], [0.032, 767130], [0.033, 724050], [0.034, 686170], [0.035, 648290], [0.036, 610410], [0.037, 572520], [0.038, 534640], [0.039, 496760], [0.04, 458880], [0.041, 421000], [0.042, 383110], [0.043, 345230], [0.044, 307350], [0.045, 269470], [0.046, 231580], [0.047, 193700], [0.048, 155820], [0.049, 117940], [0.05, 95870], [0.051, 95730], [0.052, 95590], [0.053, 95440], [0.054, 95300], [0.055, 95160], [0.056, 95020], [0.057, 94870], [0.058, 94730], [0.059, 94590], [0.06, 94450], [0.061, 94300], [0.062, 94160], [0.063, 94020], [0.064, 93880], [0.065, 93730], [0.066, 93590], [0.067, 93450], [0.068, 93300], [0.069, 93160], [0.07, 93020], [0.071, 92880], [0.072, 92730], [0.073, 92590], [0.074, 92450], [0.075, 92310], [0.076, 92160], [0.077, 92020], [0.078, 91880], [0.079, 91740], [0.08, 91590], [0.081, 91450], [0.082, 91310], [0.083, 91170], [0.084, 91020], [0.085, 90880], [0.086, 90740], [0.087, 90590], [0.088, 90450], [0.089, 90310], [0.09, 90170], [0.091, 90020], [0.092, 89880], [0.093, 89740], [0.094, 89600], [0.095, 89450], [0.096, 89310], [0.097, 89170], [0.098, 89030], [0.099, 88880], [0.1, 88740], [0.101, 88600], [0.102, 88450], [0.103, 88310], [0.104, 88170], [0.105, 88030], [0.106, 87880], [0.107, 87740], [0.108, 87600], [0.109, 87460], [0.11, 87310], [0.111, 87170], [0.112, 87030], [0.113, 86890], [0.114, 86740], [0.115, 86600], [0.116, 86460], [0.117, 86310], [0.118, 86170], [0.119, 86030], [0.12, 85890], [0.121, 85740], [0.122, 85600], [0.123, 85460], [0.124, 85340], [0.125, 85330], [0.126, 85330], [0.127, 85320], [0.128, 85320], [0.129, 85320], [0.13, 85310], [0.131, 85310], [0.132, 85300], [0.133, 85300], [0.134, 85300], [0.135, 85290], [0.136, 85290], [0.137, 85280], [0.138, 85280], [0.139, 85280], [0.14, 85270], [0.141, 85270], [0.142, 85260], [0.143, 85260], [0.144, 85260], [0.145, 85250], [0.146, 85250], [0.147, 85240], [0.148, 85240], [0.149, 85230], [0.15, 85230], [0.151, 85230], [0.152, 85220], [0.153, 85220], [0.154, 85210], [0.155, 85210], [0.156, 85210], [0.157, 85200], [0.158, 85200], [0.159, 85190], [0.16, 85190], [0.161, 85190], [0.162, 85180], [0.163, 85180], [0.164, 85170], [0.165, 85170], [0.166, 85170], [0.167, 85160], [0.168, 85160], [0.169, 85150], [0.17, 85150], [0.171, 85150], [0.172, 85140], [0.173, 85140], [0.174, 85130], [0.175, 85130], [0.176, 85130], [0.177, 85120], [0.178, 85120], [0.179, 85110], [0.18, 85110], [0.181, 85110], [0.182, 85100], [0.183, 85100], [0.184, 85090], [0.185, 85090], [0.186, 85090], [0.187, 85080], [0.188, 85080], [0.189, 85070], [0.19, 85070], [0.191, 85060], [0.192, 85060], [0.193, 85060], [0.194, 85050], [0.195, 85050], [0.196, 85040], [0.197, 85040], [0.198, 85040], [0.199, 85030], [0.2, 85030], [0.201, 85020], [0.202, 85020], [0.203, 85020], [0.204, 85010], [0.205, 85010], [0.206, 85000], [0.207, 85000], [0.208, 85000], [0.209, 84990], [0.21, 84990], [0.211, 84980], [0.212, 84980], [0.213, 84980], [0.214, 84970], [0.215, 84970], 
[0.216, 84960], [0.217, 84960], [0.218, 84960], [0.219, 84950], [0.22, 84950], [0.221, 84940], [0.222, 84940], [0.223, 84940], [0.224, 84930], [0.225, 84930], [0.226, 84920], [0.227, 84920], [0.228, 84910], [0.229, 84910], [0.23, 84910], [0.231, 84900], [0.232, 84900], [0.233, 84890], [0.234, 84890], [0.235, 84890], [0.236, 84880], [0.237, 84880], [0.238, 84870], [0.239, 84870], [0.24, 84870], [0.241, 84860], [0.242, 84860], [0.243, 84850], [0.244, 84850], [0.245, 84850], [0.246, 84840], [0.247, 84840], [0.248, 84830], [0.249, 84830], [0.25, 84830], [0.251, 84820], [0.252, 84820], [0.253, 84810], [0.254, 84810], [0.255, 84810], [0.256, 84800], [0.257, 84800], [0.258, 84790], [0.259, 84790], [0.26, 84790], [0.261, 84780], [0.262, 84780], [0.263, 84770], [0.264, 84770], [0.265, 84770], [0.266, 84760], [0.267, 84760], [0.268, 84750], [0.269, 84750], [0.27, 84740], [0.271, 84740], [0.272, 84740], [0.273, 84730], [0.274, 84730], [0.275, 84720], [0.276, 84720], [0.277, 84720], [0.278, 84710], [0.279, 84710], [0.28, 84700], [0.281, 84700], [0.282, 84700], [0.283, 84690], [0.284, 84690], [0.285, 84680], [0.286, 84680], [0.287, 84680], [0.288, 84670], [0.289, 84670], [0.29, 84660], [0.291, 84660], [0.292, 84660], [0.293, 84650], [0.294, 84650], [0.295, 84640], [0.296, 84640], [0.297, 84640], [0.298, 84630], [0.299, 84630], [0.3, 84620], [0.301, 84620], [0.302, 84620], [0.303, 84610], [0.304, 84610], [0.305, 84600], [0.306, 84600], [0.307, 84590], [0.308, 84590], [0.309, 84590], [0.31, 84580], [0.311, 84580], [0.312, 84570], [0.313, 84570], [0.314, 84570], [0.315, 84560], [0.316, 84560], [0.317, 84550], [0.318, 84550], [0.319, 84550], [0.32, 84540], [0.321, 84540], [0.322, 84530], [0.323, 84530], [0.324, 84530], [0.325, 84520], [0.326, 84520], [0.327, 84510], [0.328, 84510], [0.329, 84510], [0.33, 84500], [0.331, 84500], [0.332, 84490], [0.333, 84490], [0.334, 84490], [0.335, 84480], [0.336, 84480], [0.337, 84470], [0.338, 84470], [0.339, 84470], [0.34, 84460], [0.341, 84460], [0.342, 84450], [0.343, 84450], [0.344, 84450], [0.345, 84440], [0.346, 84440], [0.347, 84430], [0.348, 84430], [0.349, 84420], [0.35, 84420], [0.351, 84420], [0.352, 84410], [0.353, 84410], [0.354, 84400], [0.355, 84400], [0.356, 84400], [0.357, 84390], [0.358, 84390], [0.359, 84380], [0.36, 84380], [0.361, 84380], [0.362, 84370], [0.363, 84370], [0.364, 84360], [0.365, 84360], [0.366, 84360], [0.367, 84350], [0.368, 84350], [0.369, 84340], [0.37, 84340], [0.371, 84340], [0.372, 84330], [0.373, 84330], [0.374, 84320], [0.375, 84320], [0.376, 84320], [0.377, 84310], [0.378, 84310], [0.379, 84300], [0.38, 84300], [0.381, 84300], [0.382, 84290], [0.383, 84290], [0.384, 84280], [0.385, 84280], [0.386, 84280], [0.387, 84270], [0.388, 84270], [0.389, 84260], [0.39, 84260], [0.391, 84250], [0.392, 84250], [0.393, 84250], [0.394, 84240], [0.395, 84240], [0.396, 84230], [0.397, 84230], [0.398, 84230], [0.399, 84220], [0.4, 84220], [0.401, 84210], [0.402, 84210], [0.403, 84210], [0.404, 84200], [0.405, 84200], [0.406, 84190], [0.407, 84190], [0.408, 84190], [0.409, 84180], [0.41, 84180], [0.411, 84170], [0.412, 84170], [0.413, 84170], [0.414, 84160], [0.415, 84160], [0.416, 84150], [0.417, 84150], [0.418, 84150], [0.419, 84140], [0.42, 84140], [0.421, 84130], [0.422, 84130], [0.423, 84130], [0.424, 84120], [0.425, 84120], [0.426, 84110], [0.427, 84110], [0.428, 84100], [0.429, 84100], [0.43, 84100], [0.431, 84090], [0.432, 84090], [0.433, 84080], [0.434, 84080], [0.435, 84080], [0.436, 84070], [0.437, 84070], [0.438, 84060], [0.439, 
84060], [0.44, 84060], [0.441, 84050], [0.442, 84050], [0.443, 84040], [0.444, 84040], [0.445, 84040], [0.446, 84030], [0.447, 84030], [0.448, 84020], [0.449, 84020], [0.45, 84020], [0.451, 84010], [0.452, 84010], [0.453, 84000], [0.454, 84000], [0.455, 84000], [0.456, 83990], [0.457, 83990], [0.458, 83980], [0.459, 83980], [0.46, 83980], [0.461, 83970], [0.462, 83970], [0.463, 83960], [0.464, 83960], [0.465, 83960], [0.466, 83950], [0.467, 83950], [0.468, 83940], [0.469, 83940], [0.47, 83930], [0.471, 83930], [0.472, 83930], [0.473, 83920], [0.474, 83920], [0.475, 83910], [0.476, 83910], [0.477, 83910], [0.478, 83900], [0.479, 83900], [0.48, 83890], [0.481, 83890], [0.482, 83890], [0.483, 83880], [0.484, 83880], [0.485, 83870], [0.486, 83870], [0.487, 83870], [0.488, 83860], [0.489, 83860], [0.49, 83850], [0.491, 83850], [0.492, 83850], [0.493, 83840], [0.494, 83840], [0.495, 83830], [0.496, 83830], [0.497, 83830], [0.498, 83820], [0.499, 83820], [0.5, 83810], [0.501, 83810], [0.502, 83810], [0.503, 83800], [0.504, 83800], [0.505, 83790], [0.506, 83790], [0.507, 83780], [0.508, 83780], [0.509, 83780], [0.51, 83770], [0.511, 83770], [0.512, 83760], [0.513, 83760], [0.514, 83760], [0.515, 83750], [0.516, 83750], [0.517, 83740], [0.518, 83740], [0.519, 83740], [0.52, 83730], [0.521, 83730], [0.522, 83720], [0.523, 83720], [0.524, 83720], [0.525, 83710], [0.526, 83710], [0.527, 83700], [0.528, 83700], [0.529, 83700], [0.53, 83690], [0.531, 83690], [0.532, 83680], [0.533, 83680], [0.534, 83680], [0.535, 83670], [0.536, 83670], [0.537, 83660], [0.538, 83660], [0.539, 83660], [0.54, 83650], [0.541, 83650], [0.542, 83640], [0.543, 83640], [0.544, 83640], [0.545, 83630], [0.546, 83630], [0.547, 83620], [0.548, 83620], [0.549, 83610], [0.55, 83610], [0.551, 83610], [0.552, 83600], [0.553, 83600], [0.554, 83590], [0.555, 83590], [0.556, 83590], [0.557, 83580], [0.558, 83580], [0.559, 83570], [0.56, 83570], [0.561, 83570], [0.562, 83560], [0.563, 83560], [0.564, 83550], [0.565, 83550], [0.566, 83550], [0.567, 83540], [0.568, 83540], [0.569, 83530], [0.57, 83530], [0.571, 83530], [0.572, 83520], [0.573, 83520], [0.574, 83510], [0.575, 83510], [0.576, 83510], [0.577, 83500], [0.578, 83500], [0.579, 83490], [0.58, 83490], [0.581, 83490], [0.582, 83480], [0.583, 83480], [0.584, 83470], [0.585, 83470], [0.586, 83470], [0.587, 83460], [0.588, 83460], [0.589, 83450], [0.59, 83450], [0.591, 83440], [0.592, 83440], [0.593, 83440], [0.594, 83430], [0.595, 83430], [0.596, 83420], [0.597, 83420], [0.598, 83420], [0.599, 83410], [0.6, 83410], [0.601, 83400], [0.602, 83400], [0.603, 83400], [0.604, 83390], [0.605, 83390], [0.606, 83380], [0.607, 83380], [0.608, 83380], [0.609, 83370], [0.61, 83370], [0.611, 83360], [0.612, 83360], [0.613, 83360], [0.614, 83350], [0.615, 83350], [0.616, 83340], [0.617, 83340], [0.618, 83340], [0.619, 83330], [0.62, 83330], [0.621, 83320], [0.622, 83320], [0.623, 83320], [0.624, 83310], [0.625, 83310], [0.626, 83300], [0.627, 83300], [0.628, 83290], [0.629, 83290], [0.63, 83290], [0.631, 83280], [0.632, 83280], [0.633, 83270], [0.634, 83270], [0.635, 83270], [0.636, 83260], [0.637, 83260], [0.638, 83250], [0.639, 83250], [0.64, 83250], [0.641, 83240], [0.642, 83240], [0.643, 83230], [0.644, 83230], [0.645, 83230], [0.646, 83220], [0.647, 83220], [0.648, 83210], [0.649, 83210], [0.65, 83210], [0.651, 83200], [0.652, 83200], [0.653, 83190], [0.654, 83190], [0.655, 83190], [0.656, 83180], [0.657, 83180], [0.658, 83170], [0.659, 83170], [0.66, 83170], [0.661, 83160], [0.662, 83160], 
[0.663, 83150], [0.664, 83150], [0.665, 83150], [0.666, 83140], [0.667, 83140], [0.668, 83130], [0.669, 83130], [0.67, 83120], [0.671, 83120], [0.672, 83120], [0.673, 83110], [0.674, 83110], [0.675, 83100], [0.676, 83100], [0.677, 83100], [0.678, 83090], [0.679, 83090], [0.68, 83080], [0.681, 83080], [0.682, 83080], [0.683, 83070], [0.684, 83070], [0.685, 83060], [0.686, 83060], [0.687, 83060], [0.688, 83050], [0.689, 83050], [0.69, 83040], [0.691, 83040], [0.692, 83040], [0.693, 83030], [0.694, 83030], [0.695, 83020], [0.696, 83020], [0.697, 83020], [0.698, 83010], [0.699, 83010], [0.7, 83000], [0.701, 83000], [0.702, 83000], [0.703, 82990], [0.704, 82990], [0.705, 82980], [0.706, 82980], [0.707, 82970], [0.708, 82970], [0.709, 82970], [0.71, 82960], [0.711, 82960], [0.712, 82950], [0.713, 82950], [0.714, 82950], [0.715, 82940], [0.716, 82940], [0.717, 82930], [0.718, 82930], [0.719, 82930], [0.72, 82920], [0.721, 82920], [0.722, 82910], [0.723, 82910], [0.724, 82910], [0.725, 82900], [0.726, 82900], [0.727, 82890], [0.728, 82890], [0.729, 82890], [0.73, 82880], [0.731, 82880], [0.732, 82870], [0.733, 82870], [0.734, 82870], [0.735, 82860], [0.736, 82860], [0.737, 82850], [0.738, 82850], [0.739, 82850], [0.74, 82840], [0.741, 82840], [0.742, 82830], [0.743, 82830], [0.744, 82830], [0.745, 82820], [0.746, 82820], [0.747, 82810], [0.748, 82810], [0.749, 82800], [0.75, 82800], [0.751, 82800], [0.752, 82790], [0.753, 82790], [0.754, 82780], [0.755, 82780], [0.756, 82780], [0.757, 82770], [0.758, 82770], [0.759, 82760], [0.76, 82760], [0.761, 82760], [0.762, 82750], [0.763, 82750], [0.764, 82740], [0.765, 82740], [0.766, 82740], [0.767, 82730], [0.768, 82730], [0.769, 82720], [0.77, 82720], [0.771, 82720], [0.772, 82710], [0.773, 82710], [0.774, 82700], [0.775, 82700], [0.776, 82700], [0.777, 82690], [0.778, 82690], [0.779, 82680], [0.78, 82680], [0.781, 82680], [0.782, 82670], [0.783, 82670], [0.784, 82660], [0.785, 82660], [0.786, 82660], [0.787, 82650], [0.788, 82650], [0.789, 82640], [0.79, 82640], [0.791, 82630], [0.792, 82630], [0.793, 82630], [0.794, 82620], [0.795, 82620], [0.796, 82610], [0.797, 82610], [0.798, 82610], [0.799, 82600], [0.8, 82600], [0.801, 82590], [0.802, 82590], [0.803, 82590], [0.804, 82580], [0.805, 82580], [0.806, 82570], [0.807, 82570], [0.808, 82570], [0.809, 82560], [0.81, 82560], [0.811, 82550], [0.812, 82550], [0.813, 82550], [0.814, 82540], [0.815, 82540], [0.816, 82530], [0.817, 82530], [0.818, 82530], [0.819, 82520], [0.82, 82520], [0.821, 82510], [0.822, 82510], [0.823, 82510], [0.824, 82500], [0.825, 82500], [0.826, 82490], [0.827, 82490], [0.828, 82480], [0.829, 82480], [0.83, 82480], [0.831, 82470], [0.832, 82470], [0.833, 82460], [0.834, 82460], [0.835, 82460], [0.836, 82450], [0.837, 82450], [0.838, 82440], [0.839, 82440], [0.84, 82440], [0.841, 82430], [0.842, 82430], [0.843, 82420], [0.844, 82420], [0.845, 82420], [0.846, 82410], [0.847, 82410], [0.848, 82400], [0.849, 82400], [0.85, 82400], [0.851, 82390], [0.852, 82390], [0.853, 82380], [0.854, 82380], [0.855, 82380], [0.856, 82370], [0.857, 82370], [0.858, 82360], [0.859, 82360], [0.86, 82360], [0.861, 82350], [0.862, 82350], [0.863, 82340], [0.864, 82340], [0.865, 82340], [0.866, 82330], [0.867, 82330], [0.868, 82320], [0.869, 82320], [0.87, 82310], [0.871, 82310], [0.872, 82310], [0.873, 82300], [0.874, 82300], [0.875, 82290], [0.876, 82290], [0.877, 82290], [0.878, 82280], [0.879, 82280], [0.88, 82270], [0.881, 82270], [0.882, 82270], [0.883, 82260], [0.884, 82260], [0.885, 82250], [0.886, 
82250], [0.887, 82250], [0.888, 82240], [0.889, 82240], [0.89, 82230], [0.891, 82230], [0.892, 82230], [0.893, 82220], [0.894, 82220], [0.895, 82210], [0.896, 82210], [0.897, 82210], [0.898, 82200], [0.899, 82200], [0.9, 82190], [0.901, 82190], [0.902, 82190], [0.903, 82180], [0.904, 82180], [0.905, 82170], [0.906, 82170], [0.907, 82160], [0.908, 82160], [0.909, 82160], [0.91, 82150], [0.911, 82150], [0.912, 82140], [0.913, 82140], [0.914, 82140], [0.915, 82130], [0.916, 82130], [0.917, 82120], [0.918, 82120], [0.919, 82120], [0.92, 82110], [0.921, 82110], [0.922, 82100], [0.923, 82100], [0.924, 82100], [0.925, 82090], [0.926, 82090], [0.927, 82080], [0.928, 82080], [0.929, 82080], [0.93, 82070], [0.931, 82070], [0.932, 82060], [0.933, 82060], [0.934, 82060], [0.935, 82050], [0.936, 82050], [0.937, 82040], [0.938, 82040], [0.939, 82040], [0.94, 82030], [0.941, 82030], [0.942, 82020], [0.943, 82020], [0.944, 82020], [0.945, 82010], [0.946, 82010], [0.947, 82000], [0.948, 82000], [0.949, 81990], [0.95, 81990], [0.951, 81990], [0.952, 81980], [0.953, 81980], [0.954, 81970], [0.955, 81970], [0.956, 81970], [0.957, 81960], [0.958, 81960], [0.959, 81950], [0.96, 81950], [0.961, 81950], [0.962, 81940], [0.963, 81940], [0.964, 81930], [0.965, 81930], [0.966, 81930], [0.967, 81920], [0.968, 81920], [0.969, 81910], [0.97, 81910], [0.971, 81910], [0.972, 81900], [0.973, 81900], [0.974, 81890], [0.975, 81890], [0.976, 81890], [0.977, 81880], [0.978, 81880], [0.979, 81870], [0.98, 81870], [0.981, 81870], [0.982, 81860], [0.983, 81860], [0.984, 81850], [0.985, 81850], [0.986, 81850], [0.987, 81840], [0.988, 81840], [0.989, 81830], [0.99, 81830], [0.991, 81820], [0.992, 81820], [0.993, 81820], [0.994, 81810], [0.995, 81810], [0.996, 81800], [0.997, 81800], [0.998, 81800], [0.999, 81790]]\n self.ans = L\n\n return True",
"def create_data():\n # Locations\n data = {}\n num_vehicles = 20\n depot = 0\n locations = loc1\n demands = popn\n\n num_locations = len(locations)\n dist_matrix = {}\n\n for from_node in range(0,num_locations):\n dist_matrix[from_node] = {}\n\n for to_node in range(0,num_locations):\n dist_matrix[from_node][to_node] = (\n haversine(\n locations[from_node],[to_node])\n #locations[to_node],[from_node])\n \"\"\"\n data[\"distances\"] =dist_matrix\n data[\"num_locations\"] = len(dist_matrix)\n data[\"num_vehicles\"] = 6\n data[\"depot\"] = 0\n data[\"demands\"] = demands\n #data[\"vehicle_capacities\"] = capacities\n data[\"time_per_demand_unit\"] = 0.05\n return data\n \"\"\"\n return [ num_vehicles, depot, locations, dist_matrix]",
"def buildSweepTree(self):\n minXPos, minXNodeID = self._findMinX()\n cellDistanceArray = self._computeDistance(minXPos, minXNodeID) # distance of all cells to the \"min X node\"\n # sort distance array based on distances to min X cell\n cellDistanceArray = cellDistanceArray[cellDistanceArray[:, 2].argsort()]\n # cellDistanceArray = np.sort(cellDistanceArray, axis=2)\n for w, cellD in enumerate(cellDistanceArray):\n self.sweepTree.append((int(cellD[0]), int(cellD[1])))",
"def initial_dvec_and_forw_insert():\n global DATA\n for every_neighbor in DATA[\"neighbor\"]:\n DATA[\"distanc_vec\"].append([every_neighbor[0], every_neighbor[1]])\n # parent to direct neighbors is always\n # the router itself that's why DATA[\"router_id\"]\n # as third argument\n DATA[\"forw_table\"].append(\n [every_neighbor[0], every_neighbor[1], DATA[\"router_id\"]])",
"def getDistancesWithNames(twoDList):\n matrix = []\n for i in range(0,len(twoDList)):\n for j in range(len(twoDList) - len(twoDList) + i):\n SD = determineIdenticalBases(data[i][1], data[j][1])\n temp = []\n if SD[1] != 0:\n p = calculateP(SD[0]+SD[1], SD[1])\n temp.append(data[i][0])\n temp.append(data[j][0]) \n temp.append(estimateMutationsPerSite(p))\n matrix.append(temp)\n return matrix",
"def write_sorting(sorting, save_path):\n assert HAVE_SBEX, SHYBRIDSortingExtractor.installation_mesg\n dump = np.empty((0, 2))\n\n for unit_id in sorting.get_unit_ids():\n spikes = sorting.get_unit_spike_train(unit_id)[:, np.newaxis]\n expanded_id = (np.ones(spikes.size) * unit_id)[:, np.newaxis]\n tmp_concat = np.concatenate((expanded_id, spikes), axis=1)\n\n dump = np.concatenate((dump, tmp_concat), axis=0)\n\n sorting_fn = os.path.join(save_path, 'initial_sorting.csv')\n np.savetxt(sorting_fn, dump, delimiter=',', fmt='%i')",
"def _current_rent(self):\n self.masts.data.sort(key=lambda x: float(x[-1]))\n print(self.masts.header)\n for i in range(5):\n print(self.masts.data[i])",
"def run():\n\n # Build list of tuples of station names and distance \n stations = build_station_list()\n p = (52.2053, 0.1218)\n by_distance = stations_by_distance(stations, p)\n for n in range(10):\n print(by_distance[n])\n for n in range(10):\n i = len(by_distance) - 10 + n\n print(by_distance[i])",
"def add_distances(self,timetables):\n output = {}\n import copy\n for timetable in timetables:\n to_concat = []\n distances = []\n stopsA = []\n stopsB = []\n total_distance = 0\n for i in range(len(timetable['pattern'])-1):\n stopA = timetable['pattern'][i]\n stopB = timetable['pattern'][i+1]\n total_distance += self.s_getter.get_stop_distance(str(stopA),str(stopB))\n distances.append(total_distance)\n stopsA.append(stopA)\n stopsB.append(stopB)\n for index,distance in enumerate(distances):\n df = copy.deepcopy(timetable['matrix'])\n df['distance'] = distance\n df['stopA'] = stopsA[index]\n df['stopB'] = stopsB[index]\n to_concat.append(df)\n del(df)\n matrix = pd.concat(to_concat,axis=0)\n del(to_concat)\n output[timetable['variation']] = {'matrix':matrix,'pattern':timetable['pattern']}\n return output",
"def _compute_sims(self):\n no_duplicates = defaultdict(list)\n for num, lineset1, idx1, lineset2, idx2 in self._iter_sims():\n duplicate = no_duplicates[num]\n for couples in duplicate:\n if (lineset1, idx1) in couples or (lineset2, idx2) in couples:\n couples.add((lineset1, idx1))\n couples.add((lineset2, idx2))\n break\n else:\n duplicate.append({(lineset1, idx1), (lineset2, idx2)})\n sims = []\n for num, ensembles in no_duplicates.items():\n for couples in ensembles:\n sims.append((num, couples))\n sims.sort()\n sims.reverse()\n return sims",
"def savings_algorithm(self):\n self.generate_trivial_tours() # generate trivial solution\n while True: # endless loop\n maxSavings = 0 # values for best savings decision\n bestr1 = None\n bestr2 = None\n for r1 in self.routes: # loop through all route combinations\n for r2 in self.routes:\n if r1 != r2:\n currentSavings = self.savings2routes(r1,r2)\n if currentSavings > maxSavings: # if the savings are greater than the so far best savings\n bestr1 = r1 # store the routes and the savings value\n bestr2 = r2\n maxSavings = currentSavings\n if (bestr1 == None): # if no savings or no feasible joins exist break out of the loop\n break\n newRoute = VRP_Route(bestr1.route+bestr2.route) # generate new route and delete old routes\n self.routes.remove(bestr1)\n self.routes.remove(bestr2)\n self.routes.append(newRoute)\n self.get_objective()\n return self.objective",
"def organize(select, strain, equals):\n scores = []\n data = list(strainer(select, strain, equals))\n while len(data) != 0:\n number = lowest_number(data)\n scores.append(number)\n data.remove(number)\n return scores",
"def mi_from_dm_alt_hq(distance_matrix, ns, nh, spike_train_list=None):\n \n print \"start loading\"\n \n nr = len(distance_matrix)\n nt = nr/ns\n #nearest_neighbours = np.array([r.argsort()[:nh] for r in distance_matrix])\n nearest_neighbours = np.array([np.array(hq.nsmallest(nh, r)) for r in distance_matrix])\n near_to = [[j for j in range(nr) if i in nearest_neighbours[j] ] for i in range(nr)]\n \n print \"finished sorting\"\n return\n #nr = len(distance_matrix)\n #nearest_neighbours = np.array([[i] + distance_matrix[i].argsort()[1:nh].tolist() for i in range(nr)])\n \n members_of_glob = trains_in_glob(spike_train_list)\n glob_comp = glob_composition(spike_train_list, ns, nt, nh)\n \n counts = []\n counted_glob = False #set a flag for later use\n if spike_train_list is not None:\n for i in range(len(near_to)):\n c_i = 0\n \n if i not in members_of_glob:\n #print near_to[i]\n for j in near_to[i]:\n if j not in members_of_glob and spike_train_list[i].start_time == spike_train_list[j].start_time:\n c_i += 1\n else:\n if not counted_glob: #this should only really happen if glob has a small number of members...\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n g_i = f_i - 1.0/float(sum(glob_comp.values()))\n c_i += (nh - c_i)*g_i\n \n counted_glob = True\n else:\n pass\n \n else: #If i is in the glob...\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n g_i = f_i - 1.0/float(sum(glob_comp.values()))\n c_i = 1 + (nh - 1)*g_i\n \n counts.append(c_i) \n counts = np.array(counts) \n I = (1.0/nr)*sum( np.log2((ns*counts)/float(nh)) ) \n \n else:\n near_to_same_stim = [[n for n in near_to[j] if abs(n-j)%ns==0 ] for j in range(nr)]\n number_of_neighbourhoods = np.array([len(l) for l in near_to])\n number_of_neighbourhoods_same_stim = np.array([len(l) for l in near_to_same_stim])\n I = (1.0/nr)*sum( np.log2((ns*number_of_neighbourhoods_same_stim)/float(nh)) )\n \n return I",
"def _calc_(self):\n self.data = []\n all_xyz_data = self.Var.data.get_xyz_data()\n all_cols = self.Var.data.get_xyz_cols()\n\n # Loop over all the xyz data and cols we have\n for xyz_data, cols in zip(all_xyz_data, all_cols):\n\n at_crds = np.array([i[cols[0] != 'Ne'] for i in xyz_data])\n self.natom = len(at_crds[0])\n self.nstep = len(at_crds)\n self.step_data = {}\n\n # Calculate the nearest neighbour lists for each step\n for step in range(self.nstep):\n self.step_data[step] = {}\n\n # Get coords\n crds = at_crds[step]\n\n # Get distances between neighbours\n self.get_distances(crds)\n\n # Get a sorted list of atom indices by distance\n self.get_nearest_atom_inds()\n\n # If we have some molecule metadata\n if 'atoms_per_molecule' in self.Var.metadata:\n self.at_per_mol = self.Var.metadata['atoms_per_molecule']\n self.nmol = mol_utils.get_nmol(self.natom, self.at_per_mol)\n self.reshape_at_dist()\n self.get_nearest_atom_inds_per_mol()\n self.step_data[step]['closest_atoms_mol_grouped'] = self.closest_at_per_mol\n self.step_data[step]['distances_mol_grouped'] = self.all_dist_per_mol\n\n # Save data in dict\n self.step_data[step]['distances'] = self.all_dist\n self.step_data[step]['closest_atom_indices'] = self.closest_ats\n\n self.data.append(self.step_data)\n\n return self.data",
"def parallel_build_rounds(self):\n rounds_list = []\n n = len(self._sorted_savings)\n for i in range(1, n + 1):\n patient_a, patient_b = self.get_patients_pair_from_arg(self._arg_sorted_savings[n - i])\n patient_a_somewhere = self.search_rounds_for_patient(patient_a, rounds_list, True, True, True)\n patient_b_somewhere = self.search_rounds_for_patient(patient_b, rounds_list, True, True, True)\n patient_a_right = self.search_rounds_for_patient(patient_a, rounds_list, False, False, True)\n patient_b_left = self.search_rounds_for_patient(patient_b, rounds_list, True, False, False)\n if patient_a != patient_b and patient_a_somewhere is None and patient_b_somewhere is None:\n new_round = Round([patient_a, patient_b], problem=self._problem)\n self.add_round_if_possible(new_round, rounds_list, self._problem)\n elif patient_a_right is not None and patient_b_somewhere is None:\n merged_round = Round([patient for patient in patient_a_right.patients_list] + [patient_b],\n self._problem)\n self.add_merged_round_if_possible(merged_round, patient_a_right, rounds_list, self._problem)\n elif patient_b_left is not None and patient_a_somewhere is None:\n merged_round = Round([patient_a] + [patient for patient in patient_b_left.patients_list], self._problem)\n self.add_merged_round_if_possible(merged_round, patient_b_left, rounds_list, self._problem)\n elif patient_a_right is not None and patient_b_left is not None:\n if patient_a_right is not patient_b_left and patient_a_right.can_merge_right(patient_b_left):\n self.merge_rounds_if_possible(patient_a_right, patient_b_left, rounds_list, self._problem)\n return rounds_list",
"def map_vects(self, input_vects):\n \n if not self._trained:\n raise ValueError(\"SOM not trained yet\")\n \n to_return = []\n \n distances = []\n \n \n contador_adyacentes = 0\n \n matriz = np.array(list(self._neuron_locations(self._m, self._n)))\n \n m = self._m\n \n n = self._n\n \n matrices = []\n \n matrices = np.stack((matriz,\n matriz + np.array([m,n]), \n matriz - np.array([m,n]), \n matriz + np.array([m,0]),\n matriz - np.array([m,0]),\n matriz + np.array([0,n]),\n matriz - np.array([0,n]),\n matriz + np.array([m,-n]),\n matriz + np.array([-m,n])\n ))\n \n distancias_matriz = []\n\n for i in range(n*m):\n distancias_matriz.append([])\n for j in range(m*n):\n distancias_matriz[i].append(np.min(np.sum(np.power(np.subtract(matriz[i], matrices[:,j]),2), axis = 1)))\n \n distancias_matriz = np.array(distancias_matriz)\n \n \n for vect in input_vects:\n\n # min_index is the index of the BMU\n \n lista_indices = [i for i in range(len(self._weightages))]\n \n min_index = min(lista_indices,\n key=lambda x: np.linalg.norm(vect - self._weightages[x]))\n\n # min_index_2 is the index of the 2nd BMU\n \n lista_indices.pop(min_index) # El indice es el mismo que el valor\n \n min_index_2 = min(lista_indices,\n key=lambda x: np.linalg.norm(vect - self._weightages[x])) \n \n r2 = np.sqrt(2)\n\n if np.sqrt(distancias_matriz[min_index][min_index_2]) > r2: \n# print('loc 1')\n# print(locaciones[min_index])\n# print('loc 2')\n# print(locaciones[min_index_2])\n contador_adyacentes += 1\n\n\n distance = np.linalg.norm(vect - self._weightages[min_index])\n \n distances.append(distance)\n \n to_return.append(self._locations[min_index]) \n \n # Quantization Error qe (the mean of all distances to the BMU)!\n self.distances = distances \n \n # Topographic error te\n self.proporcion = contador_adyacentes / len(input_vects)\n \n self.prom_dist = np.mean(self.distances)\n \n return to_return",
"def split(self,\n dataset,\n seed=None,\n frac_train=.8,\n frac_valid=.1,\n frac_test=.1,\n log_every_n=None):\n\n np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1.)\n self.mws = []\n if not seed is None:\n np.random.seed(seed)\n\n for smiles in dataset.ids:\n mol = Chem.MolFromSmiles(smiles)\n mw = Chem.rdMolDescriptors.CalcExactMolWt(mol)\n self.mws.append(mw)\n\n # Sort by increasing MW\n self.mws = np.array(self.mws)\n self.sortidx = np.argsort(self.mws)\n\n train_cutoff = int(frac_train * len(self.sortidx))\n valid_cutoff = int((frac_train + frac_valid) * len(self.sortidx))\n print(f\"train_cutoff: {train_cutoff}, valid_cutoff: {valid_cutoff}\")\n return (self.sortidx[:train_cutoff], self.sortidx[train_cutoff:valid_cutoff],\n self.sortidx[valid_cutoff:])",
"def _computeStikeDip(self):\n seg = self._group_index\n groups = np.unique(seg)\n ng = len(groups)\n norm_vec = Vector(0, 0, 0)\n north_vec = Vector(0, 0, 0)\n up_vec = Vector(0, 0, 0)\n for i in range(ng):\n group_segments = np.where(groups[i] == seg)[0]\n nseg = len(group_segments) - 1\n for j in range(nseg):\n ind = group_segments[j]\n P0 = Point(self._toplons[ind],\n self._toplats[ind],\n self._topdeps[ind])\n P1 = Point(self._toplons[ind + 1],\n self._toplats[ind + 1],\n self._topdeps[ind + 1])\n P2 = Point(self._botlons[ind + 1],\n self._botlats[ind + 1],\n self._botdeps[ind + 1])\n P3 = Point(self._botlons[ind],\n self._botlats[ind],\n self._botdeps[ind])\n P1up = Point(self._toplons[ind + 1],\n self._toplats[ind + 1],\n self._topdeps[ind + 1] - 1.0)\n P1N = Point(self._toplons[ind + 1],\n self._toplats[ind + 1] + 0.001,\n self._topdeps[ind + 1])\n P3up = Point(self._botlons[ind],\n self._botlats[ind],\n self._botdeps[ind] - 1.0)\n P3N = Point(self._botlons[ind],\n self._botlats[ind] + 0.001,\n self._botdeps[ind])\n p0 = Vector.fromPoint(P0)\n p1 = Vector.fromPoint(P1)\n p2 = Vector.fromPoint(P2)\n p3 = Vector.fromPoint(P3)\n p1up = Vector.fromPoint(P1up)\n p1N = Vector.fromPoint(P1N)\n p3up = Vector.fromPoint(P3up)\n p3N = Vector.fromPoint(P3N)\n\n # Sides\n s01 = p1 - p0\n s02 = p2 - p0\n s03 = p3 - p0\n s21 = p1 - p2\n s23 = p3 - p2\n\n # First triangle\n t1norm = (s02.cross(s01)).norm()\n a = s01.mag()\n b = s02.mag()\n c = s21.mag()\n s = (a + b + c) / 2\n A1 = np.sqrt(s * (s - a) * (s - b) * (s - c)) / 1000\n\n # Second triangle\n t2norm = (s03.cross(s02)).norm()\n a = s03.mag()\n b = s23.mag()\n c = s02.mag()\n s = (a + b + c) / 2\n A2 = np.sqrt(s * (s - a) * (s - b) * (s - c)) / 1000\n\n # Up and North\n p1up = (p1up - p1).norm()\n p3up = (p3up - p3).norm()\n p1N = (p1N - p1).norm()\n p3N = (p3N - p3).norm()\n\n # Combine\n norm_vec = norm_vec + A1 * t1norm + A2 * t2norm\n north_vec = north_vec + A1 * p1N + A2 * p3N\n up_vec = up_vec + A1 * p1up + A2 * p3up\n\n norm_vec = norm_vec.norm()\n north_vec = north_vec.norm()\n up_vec = up_vec.norm()\n\n # Do I need to flip the vector because it is pointing down (i.e.,\n # right-hand rule is violated)?\n flip = np.sign(up_vec.dot(norm_vec))\n norm_vec = flip * norm_vec\n\n # Angle between up_vec and norm_vec is dip\n self._dip = np.arcsin(up_vec.cross(norm_vec).mag()) * 180 / np.pi\n\n # Normal vector projected to horizontal plane\n nvph = (norm_vec - up_vec.dot(norm_vec) * up_vec).norm()\n\n # Dip direction is angle between nvph and north; strike is orthogonal.\n cp = nvph.cross(north_vec)\n sign = np.sign(cp.dot(up_vec))\n dp = nvph.dot(north_vec)\n strike = np.arctan2(sign * cp.mag(), dp) * 180 / np.pi - 90\n if strike < -180:\n strike = strike + 360\n self._strike = strike",
"def sort(self):\n for i in range(self.num):\n for j in range(i,0,-1):\n if self.genepool[0][j].fitness < self.genepool[0][j-1].fitness:\n self.genepool[0][j], self.genepool[0][j-1] = self.genepool[0][j-1], self.genepool[0][j]\n else:\n break\n for i in range(self.num):\n for j in range(i,0,-1):\n if self.genepool[1][j].fitness < self.genepool[1][j-1].fitness:\n self.genepool[1][j], self.genepool[1][j-1] = self.genepool[1][j-1], self.genepool[1][j]\n else:\n break",
"def makeSparse(self):\n center = self.center\n l = []\n for point in self.points:\n angle = Vector.createFromTwoPoints(point, center).angle\n l.append((angle, copy.deepcopy(point)))\n l = sorted(l, key=lambda x: x[0])\n for i in range(len(l)):\n self.points[i].set(l[i][1])",
"def build_retrieved_list(self, scores):\n\n res = self.index.rank(scores)\n tmp_res = []\n # keep scores too\n tmp_scores = []\n\n # build the list\n tmp_res = []\n #print rank, \"<--\"\n for i, k in res:\n tmp_res.append( self.indices[i] )\n tmp_scores.append( k )\n\n\n # compute the difference with the difference\n diff = list(set(self.indices.values())-set(tmp_res))\n\n # shuffle to fill the rest of the list\n np.random.shuffle(diff)\n\n scores_diff = np.zeros( (len(diff,)) )\n\n final = []\n final_scores = []\n\n final.extend(tmp_res)\n final.extend(diff)\n\n final_scores.extend(tmp_scores)\n final_scores.extend(scores_diff)\n\n # remove extension for evaluation\n f = lambda x: x.split('.')[0]\n final = map(f, final)\n\n return final, final_scores"
]
| [
"0.59517455",
"0.57231927",
"0.5445455",
"0.52918875",
"0.5289238",
"0.5222936",
"0.5219054",
"0.5216862",
"0.520472",
"0.51936656",
"0.51906747",
"0.51685256",
"0.5164179",
"0.5163071",
"0.5157834",
"0.5112156",
"0.51111937",
"0.51060206",
"0.51024145",
"0.50942177",
"0.5075433",
"0.5074046",
"0.50690025",
"0.5059826",
"0.5059283",
"0.5050749",
"0.50464237",
"0.5044156",
"0.5044052",
"0.50373614"
]
| 0.67217356 | 0 |
total_venues(filename) returns an integer that tells you how many unique venues appear in the csv timetable file. To use total_venues, pass the filename of the timetable as the argument. The function should return that count as an integer. | def total_venues(filename):
#reading the file
f = open(filename,"r")
#incsv is a short form of 'input csv file'
incsv = f.readlines()
#removing affixes
incsv[:] = [i.rstrip('\n') for i in incsv]
#lines into lists
#tempstr and templist are temporary variables to split the strings in incsv
tempstr = ""
templist = []
for j in range(len(incsv)):
#enters each line into temporary string variable
tempstr = incsv[j]
#enters the split string into a temporary list variable
templist.append(tempstr.split(","))
#modify original line in original list with split list
incsv[j] = templist
#reset temporary variables
tempstr = ""
templist = []
#final format of incsv: [[[moduleCode,ClassNo,LessonType,DayCode,DayText,StartTime,EndTime,Venue,AcadYear,Semester]],...]
    #each line ends up nested in an extra list because templist wraps the already-split row in its own list
#lists all venues
#venuelist stores all occurrences of the venues. venues can be repeated
venuelist = []
for k in range(len(incsv)):
#append venue to venuelist
venuelist.append(incsv[k][0][7])
#filter all unique venues by checking whether venue is already in filterlist
filterlist = []
#check is temporary variable to decide whether to add venue
check = True
#for all venues in venuelist
for l in range(len(venuelist)):
#if venue in venuelist already in filterlist
if venuelist[l] in filterlist:
#decision to add venue is false
check = False
#if decision is to add the venue in venuelist
if check == True:
#append new venue to filterlist
filterlist.append(venuelist[l])
#reset decision to true
check = True
return (len(filterlist)-1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def available_venues(filename,start,end,day):\n #open file\n f = open(filename,\"r\")\n incsv = f.readlines()\n #removing affixes\n incsv[:] = [i.rstrip('\\n') for i in incsv]\n #lines into lists\n tempstr = \"\"\n templist = []\n for j in range(len(incsv)):\n #enters each line into temporary string variable\n tempstr = incsv[j]\n #enters the split string into a temporary list variable\n templist.append(tempstr.split(\",\"))\n #modify original line in original list with split list\n incsv[j] = templist\n #reset temporary variables\n tempstr = \"\"\n templist = []\n #lists all venues\n venuelist = []\n for k in range(len(incsv)):\n venuelist.append(incsv[k][0][7])\n #filter all unique venues by checking whether venue is already in filterlist\n filterlist = []\n #check is temporary variable to decide whether to add venue\n check = True\n #for all venues in venuelist\n for l in range(len(venuelist)):\n #if venue in venuelist already in filterlist\n if venuelist[l] in filterlist:\n #decision to add venue is false\n check = False\n #if decision is to add the venue in venuelist\n if check == True:\n #append new venue to filterlist\n filterlist.append(venuelist[l])\n #reset decision to true\n check = True\n #finding all available venues\n #for all lines in the timetable\n for m in range(1,len(incsv)):\n #if the start time of the venue is in between the desired start time or the end time of the venue is in between the desired end time\n if ((int(incsv[m][0][5]) >= start and int(incsv[m][0][5]) < end) or (int(incsv[m][0][6]) > start and int(incsv[m][0][6]) <= end)) and int(incsv[m][0][3]) == 4:\n #if the venue is still in list of venues (filterlist)\n if incsv[m][0][7] in filterlist:\n #remove venue from filterlist\n filterlist.remove(incsv[m][0][7])\n #remove header \"venue\" from filterlist\n filterlist.remove(\"Venue\")\n return filterlist",
"def venue_occupancy(filename):\n #open file\n f = open(filename,\"r\")\n incsv = f.readlines()\n #removing affixes\n incsv[:] = [i.rstrip('\\n') for i in incsv]\n #lines into lists\n tempstr = \"\"\n templist = []\n for j in range(len(incsv)):\n #enters each line into temporary string variable\n tempstr = incsv[j]\n #enters the split string into a temporary list variable\n templist.append(tempstr.split(\",\"))\n #modify original line in original list with split list\n incsv[j] = templist\n #reset temporary variables\n tempstr = \"\"\n templist = []\n #all venues\n venuelist = []\n for k in range(1,len(incsv)):\n venuelist.append(incsv[k][0][7])\n #filter all unique venues by checking whether venue is already in filterlist\n filterlist = []\n #check is temporary variable to decide whether to add venue\n check = True\n #for all venues in venuelist\n for l in range(len(venuelist)):\n #if venue in venuelist already in filterlist\n if venuelist[l] in filterlist:\n #decision to add venue is false\n check = False\n #if decision is to add the venue in venuelist\n if check == True:\n #append new venue to filterlist\n filterlist.append(venuelist[l])\n #reset decision to true\n check = True\n #add hours to total count (time)\n time = 0\n #for all lines in file\n for m in range(1,len(incsv)):\n #if time of venue falls within office hours for weekdays\n if int(incsv[m][0][5]) >= 800 and int(incsv[m][0][5]) <= 1700 and int(incsv[m][0][6]) <= 1700 and int(incsv[m][0][6]) >= 800 and int(incsv[m][0][3]) >= 1 and int(incsv[m][0][3]) <= 5:\n #add hour to total count\n time += (int(incsv[m][0][6]) - int(incsv[m][0][5]))\n #if start time falls before office hours but end time is within office hours\n elif int(incsv[m][0][5]) < 800 and int(incsv[m][0][6]) <= 1700 and int(incsv[m][0][5]) > 800 and int(incsv[m][0][3]) >= 1 and int(incsv[m][0][3]) <= 5:\n #ignore hours before 800 and add remaining hours\n time += (int(incsv[m][0][6]) - 800)\n #if end time falls after office hours but start time is within office housr\n elif int(incsv[m][0][5]) >= 800 and int(incsv[m][0][5]) < 1700 and int(incsv[m][0][6]) > 1700 and int(incsv[m][0][3]) >= 1 and int(incsv[m][0][3]) <= 5:\n #ignore hours after 1700 and add remaining hours\n time += (1700 - int(incsv[m][0][5]))\n #if start time falls before 800 and end time falls after 1700\n elif int(incsv[m][0][5]) < 800 and int(incsv[m][0][6]) > 1700 and int(incsv[m][0][3]) >= 1 and int(incsv[m][0][3]) <= 5:\n #add the maximum of 9 hours\n time += 900\n #if time range falls outside of office hours\n elif ((int(incsv[m][0][5]) < 800 and int(incsv[m][0][6]) <= 800) or (int(incsv[m][0][5]) >= 1700 and int(incsv[m][0][6]) > 1700)) and int(incsv[m][0][3]) >= 1 and int(incsv[m][0][3]) <= 5:\n #total hours remain\n time = time\n #average (avr)\n avr = 0\n #average = total hours / (number of unique venues) * 45 hours\n avr = (time/(len(filterlist)*4500))\n return avr",
"def number_of_trips(filename): \r\n \r\n with open(filename, 'r') as f_in:\r\n # set up csv reader object\r\n trip_reader = csv.DictReader(f_in)\r\n \r\n # initialize count variables\r\n n_subscribers = 0\r\n n_customers = 0\r\n \r\n # tally up ride types\r\n for row in trip_reader:\r\n if row['user_type'] == 'Subscriber':\r\n n_subscribers += 1\r\n else:\r\n n_customers += 1\r\n \r\n # compute total number of rides\r\n n_total = n_subscribers + n_customers\r\n \r\n # return tallies as a tuple\r\n return(n_subscribers, n_customers, n_total)",
"def analyze(filename):\r\n start = datetime.datetime.now()\r\n\r\n ao_count = 0\r\n\r\n with open(filename) as csvfile:\r\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\r\n year_count = {\r\n \"2013\": 0,\r\n \"2014\": 0,\r\n \"2015\": 0,\r\n \"2016\": 0,\r\n \"2017\": 0,\r\n \"2018\": 0\r\n }\r\n for row in reader:\r\n l_row = list(row)\r\n print(f\"\\n{row}\")\r\n year = l_row[5][6:]\r\n if year in year_count.keys():\r\n year_count[year] += 1\r\n if \"ao\" in l_row[6]:\r\n ao_count += 1\r\n\r\n end = datetime.datetime.now()\r\n return start, end, year_count, ao_count",
"def get_n_owned_games(file_size):\n df = pd.read_csv('Resources/formateddataset{0}.csv.gz'.format(file_size), compression='gzip', usecols=['steamid', 'rating'])\n nGames = df[(df.rating == 1.0)].groupby(by=['steamid']).rating.count().reset_index()\n nGames.columns = ['steamid', 'nGames']\n return(nGames)",
"def num_rainy_days(filename):\n weather_data = pandas.read_csv(filename)\n\n q = \"\"\"\n SELECT COUNT(*) \n FROM weather_data \n WHERE rain = 1;\n \"\"\"\n\n # Execute your SQL command against the pandas frame\n rainy_days = pandasql.sqldf(q.lower(), locals())\n return rainy_days",
"def get_counts(filename, key):\r\n column_keys, get_data = get_csv(filename)\r\n assert(key in column_keys[1:])\r\n column = column_keys[1:].index(key)\r\n print 'getcounts() %s : %s column = %d' % (filename, key, column+1) \r\n counts_dict = {}\r\n for i,(k,v) in enumerate(get_data()):\r\n x = v[column]\r\n counts_dict[x] = counts_dict.get(x, 0) + 1\r\n return counts_dict",
"def load_venues():\n\n print('load_venues')\n\n Venue.query.delete()\n\n for row in open(\"seed_data/venues.csv\"):\n row = row.rstrip()\n subcategory, \\\n created_by, \\\n title, \\\n addr_1, \\\n addr_2, \\\n city, \\\n postal_code, \\\n state = row.split(',')\n\n cat_sub = Category_Subcategory.query.filter_by(name=subcategory).first()\n\n vnu = Venue(subcategory_id=cat_sub.id,\n created_by=created_by,\n name=title,\n addr_1=addr_1,\n addr_2=addr_2,\n city=city,\n postal_code=postal_code,\n state=state)\n\n db.session.add(vnu)\n\n db.session.commit()",
"def analyze(filename):\n\n date_dict = {\"2013\":0,\"2014\":0,\"2015\":0,\"2016\":0,\"2017\":0,\"2018\":0}\n\n start = datetime.datetime.now()\n\n with open(filename) as csvfile:\n for line in csvfile:\n lrow = line.split(',')\n\n if \"ao\" in lrow[6]:\n found += 1\n\n # pylint: disable=C0122\n # Less than should be the default comparison operation\n if \"2012\" < lrow[5][6:] < \"2019\":\n date_dict[lrow[5][6:]] += 1\n\n\n print(f\"'ao' was found {found} times\")\n print(\n f\"2013:{date_dict['2013']}\\t\"\n f\"2014:{date_dict['2014']}\\t\"\n f\"2015:{date_dict['2015']}\\t\"\n f\"2016:{date_dict['2016']}\\t\"\n f\"2017:{date_dict['2017']}\\t\"\n f\"2018:{date_dict['2018']}\\n\"\n )\n end = datetime.datetime.now()\n return (\n start,\n end,\n # {\n # \"2013\": _2013,\n # \"2014\": _2014,\n # \"2015\": _2015,\n # \"2016\": _2016,\n # \"2017\": _2017,\n # \"2018\": _2018,\n # },\n found,\n )",
"def countSamples(filename):\n with open(filename, \"r\") as f:\n line = f.readline().split(\"\\t\")\n return len(line) - 2",
"def _get_total_games(self) -> int:\n files = get_tfr_filenames(self.config)\n total_games = 0\n for file in files:\n total_games += int(str(file).split('-')[1].split('.')[0])\n return total_games",
"def analyze(filename, search_term):\n start = datetime.datetime.now()\n with open(filename) as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n new_ones = []\n year_count = {\n \"2013\": 0,\n \"2014\": 0,\n \"2015\": 0,\n \"2016\": 0,\n \"2017\": 0,\n \"2018\": 0\n }\n found = 0\n for row in reader:\n if row:\n lrow = list(row)\n if lrow[5] > '00/00/2012':\n if search_term in row[6]:\n found +=1\n try:\n year_count[lrow[5][-4:]] += 1 \n except:\n pass\n print(year_count)\n print(f\"'{search_term}' was found {found} times\")\n end = datetime.datetime.now()\n return (start, end, year_count, found)",
"def analyze(filename):\n start = datetime.datetime.now()\n first_open_start = time.time()\n with open(filename) as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n found = 0\n year_count = {\n \"2013\": 0,\n \"2014\": 0,\n \"2015\": 0,\n \"2016\": 0,\n \"2017\": 0,\n \"2018\": 0\n }\n for_start = time.time()\n for row in reader:\n lrow = list(row)\n if int(lrow[5][6:]) > 2012:\n year_count[lrow[5][6:]] += 1\n if \"ao\" in row[6]:\n found += 1\n print(f\"'ao' was found {found} times\")\n for_end = time.time()\n end = datetime.datetime.now()\n print(year_count)\n first_open_end = time.time()\n LOGGER.info('First open time: %s', first_open_end - first_open_start)\n LOGGER.info('For Loop with elifs: %s', for_end - for_start)\n return start, end, year_count, found",
"def countgenes():\n directory = openfile('db_directory.txt')\n no_genes_file = directory+'GENES_IN_HPO.txt'\n GENES_IN_HPO = openfile(no_genes_file)\n #GENES_IN_HPO = openfile(numbergenes_file)\n return int(GENES_IN_HPO)",
"def total_number():\r\n total_number = 0\r\n file_read = read_file()\r\n for key in file_read:\r\n total_number = total_number + len(file_read[key])\r\n return total_number",
"def countsubcatchments(inputfilename=FileSettings.settingsdict['inputfilename']):\r\n global count\r\n with open(inputfilename, 'r') as swmmput:\r\n contents = swmmput.readlines()\r\n count = len(contents)\r\n return(count)",
"def doCountTask(filename):\n f = open(filename)\n dataDict = json.load(f)\n weridCount = 0\n unweridCount = 0\n for key in dataDict:\n if dataDict[key][\"weird\"]:\n weridCount += 1\n else:\n unweridCount += 1\n return [unweridCount, weridCount]",
"def rank_uefa_teams(filename, epsilon=0.85):\n raise NotImplementedError(\"Task 3 Incomplete\")",
"def pre_process_file(filename):\n\n num_lines = 0\n vm_ids = set()\n with open(filename) as trace:\n for item in csv.reader(trace, delimiter=','):\n num_lines += 1\n disk_id = int(item[2])\n vm_ids.add(disk_id) # can make it more efficient\n no_of_vms = len(vm_ids)\n return (num_lines, no_of_vms, vm_ids)",
"def get_all_counts(filename):\r\n column_keys, get_data = get_csv(filename)\r\n all_counts_dict = {}\r\n for key in column_keys[1:]:\r\n all_counts_dict[key] = {}\r\n\r\n for i,(k,v) in enumerate(get_data()):\r\n for key in column_keys[1:]:\r\n column = column_keys[1:].index(key)\r\n x = v[column]\r\n all_counts_dict[key][x] = all_counts_dict[key].get(x, 0) + 1\r\n return all_counts_dict",
"def tr_count_for_run(self, run_idx, file_name, raw_rows, events):\n if self.is_gravy:\n return 180\n else:\n return 230",
"def pp_schedule(filename,venue):\n #reading the file\n f = open(filename,\"r\")\n incsv = f.readlines()\n #removing affixes\n incsv[:] = [i.rstrip('\\n') for i in incsv]\n #lines into lists\n tempstr = \"\"\n templist = []\n for j in range(len(incsv)):\n #enters each line into temporary string variable\n tempstr = incsv[j]\n #enters the split string into a temporary list variable\n templist.append(tempstr.split(\",\"))\n #modify original line in original list with split list\n incsv[j] = templist\n #reset temporary variables\n tempstr = \"\"\n templist = []\n #timelist stands for timetable list\n #format of timelist: [[day,[start,end,module],...],...]\n timelist = []\n for k in range(1,8):\n #for each day code add day code\n timelist.append([k])\n #assign and make list for ttint\n #for all lines in file\n for l in range(len(incsv)):\n #if venue in line matches desired venue\n if incsv[l][0][7] == venue:\n #after each day code, add a list with start time, end time and module. Repeat for each relevant line\n timelist[(int(incsv[l][0][3])-1)].append([int(incsv[l][0][5]),int(incsv[l][0][6]),incsv[l][0][0]])\n #turtle\n print(\"Your timetable is being printed on Python Turtle Graphics. This may take a while.\")\n ttint(timelist,venue)",
"def load_rsvp_responses_file(filename):\n with open(filename, 'r') as fp:\n reader = csv.DictReader(fp)\n times = [int(float(row['time'])) for row in reader]\n return times",
"def totalHours(path):\n total = 0\n start = 0\n active = False\n with open(path, 'r') as f:\n reader = csv.reader(f)\n for row in reader:\n if row[1] == 't':\n total += float(row[2])\n else:\n if active:\n if row[1] == 's':\n total += float(row[0]) - start\n active = False\n else:\n if row[1] == 'a':\n start = float(row[0])\n active = True\n final = row\n if active:\n total += time.time() - float(final[0])\n active = False\n return \"%.2f\" % (total / 3600)",
"def get_venues():\n papers = local.papers().values()\n venues = {paper[\"venue\"] for paper in papers if \"venue\" in paper}\n\n for venue_id in venues:\n file_name = local.file_name(\"venues\", venue_id)\n if os.path.isfile(file_name):\n continue\n venue = get_venue(venue_id)\n if not venue:\n continue\n with open(file_name, \"w\") as out:\n json.dump(venue, out, ensure_ascii=False, indent=4)",
"def analyze_handler():\n\n file_name = csv_file('exercise.csv')\n with open(file_name) as csvfile:\n file_reader = csv.reader(csvfile)\n file_line_cnt = sum(1 for row in file_reader)\n\n wrapper = wrap_analyser(analyze, file_name)\n timer = timeit.timeit(wrapper, number=1)\n print(file_name)\n print(f'Line count:\\t{file_line_cnt}\\tAnalyser ran in:\\t{timer}')",
"def strategy_guide(filename):\n score = 0\n # Iterate over the lines of the file\n with open(filename, \"rt\", encoding=\"utf-8\") as filetoread:\n for line in filetoread:\n # opponent, player = line.split()\n # print(line.rstrip())\n result = scoreit(line.rstrip())\n score = score + result\n return score",
"def get_data_by_time(filename):\n with open(filename, 'r') as f_in:\n # set up csv reader object\n reader = csv.DictReader(f_in)\n result = {}\n result['n_week'] = [0] * 7\n result['d_week'] = [0] * 7\n result['cus_hour'] = [0] * 24\n result['sub_hour'] = [0] * 24\n for data in reader:\n duration = float(data['duration'])\n if data['day_of_week'] == 'Sunday':\n result['n_week'][0] += 1\n result['d_week'][0] += duration\n elif data['day_of_week'] == 'Monday':\n result['n_week'][1] += 1\n result['d_week'][1] += duration\n elif data['day_of_week'] == 'Tuesday':\n result['n_week'][2] += 1\n result['d_week'][2] += duration\n elif data['day_of_week'] == 'Wednesday':\n result['n_week'][3] += 1\n result['d_week'][3] += duration\n elif data['day_of_week'] == 'Thursday':\n result['n_week'][4] += 1\n result['d_week'][4] += duration\n elif data['day_of_week'] == 'Friday':\n result['n_week'][5] += 1\n result['d_week'][5] += duration\n else:\n result['n_week'][6] += 1\n result['d_week'][6] += duration\n\n hour = int(data['hour'])\n if data['user_type'] == 'Customer':\n result['cus_hour'][hour] += 1\n else:\n result['sub_hour'][hour] += 1\n return result",
"def get_row_num(file_name):\n num = 0\n with open(file_name) as f:\n for _ in csv.DictReader(f):\n num = num + 1\n return num",
"def __get_files_row_count(self, region):\r\n \r\n count = 0\r\n for file in self.__files:\r\n file_to_parse = os.path.join(self.__folder, os.path.basename(file))\r\n with zipfile.ZipFile(file_to_parse, \"r\") as zf:\r\n with zf.open(self.__get_region_filename(region), 'r') as csv_file:\r\n reader = csv.reader(TextIOWrapper(csv_file, 'windows-1250'), delimiter=';', quotechar='\"')\r\n count += sum(1 for row in reader)\r\n return count"
]
| [
"0.61805046",
"0.6054929",
"0.57702976",
"0.5745972",
"0.5560655",
"0.5547302",
"0.5427406",
"0.5402603",
"0.5343917",
"0.5309098",
"0.5299042",
"0.5273285",
"0.5256531",
"0.5246932",
"0.5193131",
"0.51890045",
"0.51861143",
"0.5183398",
"0.5170527",
"0.51524687",
"0.510676",
"0.5094243",
"0.5092615",
"0.5091522",
"0.50689137",
"0.5039212",
"0.5036332",
"0.50280255",
"0.50125927",
"0.5009354"
]
| 0.68946314 | 0 |
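A minimal usage sketch for the total_venues record above. The file name "timetable.csv" is an assumed example; the CSV is expected to contain a header row followed by rows in the moduleCode,ClassNo,LessonType,DayCode,DayText,StartTime,EndTime,Venue,AcadYear,Semester layout described in the code comments.

# usage sketch (assumed file name and CSV layout)
count = total_venues("timetable.csv")
print("unique venues in the timetable:", count)  # the header row is already excluded by the -1 inside the function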
available_venues(filename,start,end,day) returns a list of venues that are not occupied in a certain time range on a certain day. To use available_venues, enter the filename as the first argument, followed by the start time, the end time and an integer day code. The function should return a list of all available venues. | def available_venues(filename,start,end,day):
#open file
f = open(filename,"r")
incsv = f.readlines()
#removing affixes
incsv[:] = [i.rstrip('\n') for i in incsv]
#lines into lists
tempstr = ""
templist = []
for j in range(len(incsv)):
#enters each line into temporary string variable
tempstr = incsv[j]
#enters the split string into a temporary list variable
templist.append(tempstr.split(","))
#modify original line in original list with split list
incsv[j] = templist
#reset temporary variables
tempstr = ""
templist = []
#lists all venues
venuelist = []
for k in range(len(incsv)):
venuelist.append(incsv[k][0][7])
#filter all unique venues by checking whether venue is already in filterlist
filterlist = []
#check is temporary variable to decide whether to add venue
check = True
#for all venues in venuelist
for l in range(len(venuelist)):
#if venue in venuelist already in filterlist
if venuelist[l] in filterlist:
#decision to add venue is false
check = False
#if decision is to add the venue in venuelist
if check == True:
#append new venue to filterlist
filterlist.append(venuelist[l])
#reset decision to true
check = True
#finding all available venues
#for all lines in the timetable
for m in range(1,len(incsv)):
        #if the booking on this line overlaps the desired time range (it starts before the range ends and ends after the range starts) on the desired day
        if int(incsv[m][0][5]) < end and int(incsv[m][0][6]) > start and int(incsv[m][0][3]) == day:
#if the venue is still in list of venues (filterlist)
if incsv[m][0][7] in filterlist:
#remove venue from filterlist
filterlist.remove(incsv[m][0][7])
#remove header "venue" from filterlist
filterlist.remove("Venue")
return filterlist | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def available_timing(filename,day,venue):\n #reading the file\n f = open(filename,\"r\")\n incsv = f.readlines()\n #removing affixes\n incsv[:] = [i.rstrip('\\n') for i in incsv]\n #lines into lists\n tempstr = \"\"\n templist = []\n for j in range(len(incsv)):\n #enters each line into temporary string variable\n tempstr = incsv[j]\n #enters the split string into a temporary list variable\n templist.append(tempstr.split(\",\"))\n #modify original line in original list with split list\n incsv[j] = templist\n #reset temporary variables\n tempstr = \"\"\n templist = []\n #finding occupied hours\n brlist = []\n #for all lines in file\n for k in range(len(incsv)):\n #if venue in line matches desired venue and day in line matches desired day\n if incsv[k][0][7] == venue and int(incsv[k][0][3]) == day:\n #add time range of line into brlist\n brlist.append([int(incsv[k][0][5]),int(incsv[k][0][6])])\n #pruning\n #tlist stands for timelist. stores remaining hours for synthesis\n tlist = []\n #list of hours\n tlist = [600,700,800,900,1000,1100,1200,1300,1400,1500,1600,1700,1800,1900,2000,2100,2200,2300,2400]\n #for line in brlist\n for l in range(len(brlist)):\n #for the range of hours of the line\n for m in range(int((brlist[l][1]-brlist[l][0])/100)):\n #if hours in range still in tlist\n if (brlist[l][0] + m*100) in tlist:\n #remove from tlist\n tlist.remove(brlist[l][0] + m*100)\n #plist for partition list. range of available timings appended here\n plist = []\n #check is for the start time of each available time ranges\n check = 0\n #formation of time ranges\n #for hours in tlist\n for n in range(len(tlist)):\n #if code is checking element 2. Could have used range(1,len(tlist)) but nevermind\n if n >= 1:\n #if 2 adjacent hours are not consecutive\n if tlist[n] != (tlist[n-1]+100):\n #add time range to plist\n plist.append((tlist[check],tlist[n-1]+100))\n #set check to next minimum available start time\n check = n\n #adding range with last hour\n #if last hour in tlist is 2400 and precedent hour in tlist is 2300\n if tlist[n] == 2400 and tlist[n-1] == 2300:\n #add time range\n plist.append((tlist[check],2400))\n return plist",
"def get_venues():\n papers = local.papers().values()\n venues = {paper[\"venue\"] for paper in papers if \"venue\" in paper}\n\n for venue_id in venues:\n file_name = local.file_name(\"venues\", venue_id)\n if os.path.isfile(file_name):\n continue\n venue = get_venue(venue_id)\n if not venue:\n continue\n with open(file_name, \"w\") as out:\n json.dump(venue, out, ensure_ascii=False, indent=4)",
"def venue_occupancy(filename):\n #open file\n f = open(filename,\"r\")\n incsv = f.readlines()\n #removing affixes\n incsv[:] = [i.rstrip('\\n') for i in incsv]\n #lines into lists\n tempstr = \"\"\n templist = []\n for j in range(len(incsv)):\n #enters each line into temporary string variable\n tempstr = incsv[j]\n #enters the split string into a temporary list variable\n templist.append(tempstr.split(\",\"))\n #modify original line in original list with split list\n incsv[j] = templist\n #reset temporary variables\n tempstr = \"\"\n templist = []\n #all venues\n venuelist = []\n for k in range(1,len(incsv)):\n venuelist.append(incsv[k][0][7])\n #filter all unique venues by checking whether venue is already in filterlist\n filterlist = []\n #check is temporary variable to decide whether to add venue\n check = True\n #for all venues in venuelist\n for l in range(len(venuelist)):\n #if venue in venuelist already in filterlist\n if venuelist[l] in filterlist:\n #decision to add venue is false\n check = False\n #if decision is to add the venue in venuelist\n if check == True:\n #append new venue to filterlist\n filterlist.append(venuelist[l])\n #reset decision to true\n check = True\n #add hours to total count (time)\n time = 0\n #for all lines in file\n for m in range(1,len(incsv)):\n #if time of venue falls within office hours for weekdays\n if int(incsv[m][0][5]) >= 800 and int(incsv[m][0][5]) <= 1700 and int(incsv[m][0][6]) <= 1700 and int(incsv[m][0][6]) >= 800 and int(incsv[m][0][3]) >= 1 and int(incsv[m][0][3]) <= 5:\n #add hour to total count\n time += (int(incsv[m][0][6]) - int(incsv[m][0][5]))\n #if start time falls before office hours but end time is within office hours\n elif int(incsv[m][0][5]) < 800 and int(incsv[m][0][6]) <= 1700 and int(incsv[m][0][5]) > 800 and int(incsv[m][0][3]) >= 1 and int(incsv[m][0][3]) <= 5:\n #ignore hours before 800 and add remaining hours\n time += (int(incsv[m][0][6]) - 800)\n #if end time falls after office hours but start time is within office housr\n elif int(incsv[m][0][5]) >= 800 and int(incsv[m][0][5]) < 1700 and int(incsv[m][0][6]) > 1700 and int(incsv[m][0][3]) >= 1 and int(incsv[m][0][3]) <= 5:\n #ignore hours after 1700 and add remaining hours\n time += (1700 - int(incsv[m][0][5]))\n #if start time falls before 800 and end time falls after 1700\n elif int(incsv[m][0][5]) < 800 and int(incsv[m][0][6]) > 1700 and int(incsv[m][0][3]) >= 1 and int(incsv[m][0][3]) <= 5:\n #add the maximum of 9 hours\n time += 900\n #if time range falls outside of office hours\n elif ((int(incsv[m][0][5]) < 800 and int(incsv[m][0][6]) <= 800) or (int(incsv[m][0][5]) >= 1700 and int(incsv[m][0][6]) > 1700)) and int(incsv[m][0][3]) >= 1 and int(incsv[m][0][3]) <= 5:\n #total hours remain\n time = time\n #average (avr)\n avr = 0\n #average = total hours / (number of unique venues) * 45 hours\n avr = (time/(len(filterlist)*4500))\n return avr",
"def test_files_missing():\n\tfiledir = \"./goes_files/%Y_events/%Y%m%devents.txt\"\n\tt0 = timerange.start.datetime\n\tdays = [t0]\n\twhile timerange.end.datetime > t0:\n\t\tt0 = t0 + relativedelta(days=1)\n\t\tdays.append(t0)\n\n\tmissing_files = []\n\tfor d in days:\n\t\tif not os.path.exists(d.strftime(filedir)):\n\t\t\tmissing_files.append(d.strftime(filedir))\n\tprint(missing_files)",
"def total_venues(filename):\n #reading the file\n f = open(filename,\"r\")\n #incsv is a short form of 'input csv file'\n incsv = f.readlines()\n #removing affixes\n incsv[:] = [i.rstrip('\\n') for i in incsv]\n #lines into lists\n #tempstr and templist are temporary variables to split the strings in incsv\n tempstr = \"\"\n templist = []\n for j in range(len(incsv)):\n #enters each line into temporary string variable\n tempstr = incsv[j]\n #enters the split string into a temporary list variable\n templist.append(tempstr.split(\",\"))\n #modify original line in original list with split list\n incsv[j] = templist\n #reset temporary variables\n tempstr = \"\"\n templist = []\n #final format of incsv: [[[moduleCode,ClassNo,LessonType,DayCode,DayText,StartTime,EndTime,Venue,AcadYear,Semester]],...]\n #yes each line is nested in two lists for some reason\n #lists all venues\n #venuelist stores all occurrences of the venues. venues can be repeated\n venuelist = []\n for k in range(len(incsv)):\n #append venue to venuelist\n venuelist.append(incsv[k][0][7])\n #filter all unique venues by checking whether venue is already in filterlist\n filterlist = []\n #check is temporary variable to decide whether to add venue\n check = True\n #for all venues in venuelist\n for l in range(len(venuelist)):\n #if venue in venuelist already in filterlist\n if venuelist[l] in filterlist:\n #decision to add venue is false\n check = False\n #if decision is to add the venue in venuelist\n if check == True:\n #append new venue to filterlist\n filterlist.append(venuelist[l])\n #reset decision to true\n check = True\n return (len(filterlist)-1)",
"def date_range(all_files,start_year,start_month,start_day,end_year,end_month,\r\n end_day):\r\n\r\n d1 = date(start_year,start_month,start_day)\r\n d_last = date(end_year,end_month,end_day)\r\n day_range = (d_last - d1).days\r\n #print('day range: %s' %day_range)\r\n files = []\r\n for t in range(day_range):\r\n d2 = d1 + timedelta(t)\r\n d2_str1 = str(d2)\r\n d2_str2 = d2.strftime('%Y_%m_%d')\r\n # print(d2)\r\n for f in all_files:\r\n if d2_str1 in str(f) or d2_str2 in str(f):\r\n files.append(f)\r\n return(files)",
"def venues(self):\n response = self._request(V2_ENDPOINTS['VENUES'])\n return response",
"def resident_birthday_list(only_village=False):\n\n if file_available(rf'{constants.OUTPUTS_DIR}\\Resident Birthdays\\ResidentBirthdays.xlsx'):\n ecase_driver = ecase_downloader.ecase_login()\n ecase_downloader.ecase_birthdays(ecase_driver)\n time.sleep(4)\n ecase_driver.quit()\n printing_documents.village_birthdays(only_village=only_village)",
"def venues(self):\n response = self._request(V2_ENDPOINTS['VENUES'])\n # Normalize `dateHours` to array\n for venue in response[\"result_data\"][\"document\"][\"venue\"]:\n if venue.get(\"id\") in VENUE_NAMES:\n venue[\"name\"] = VENUE_NAMES[venue.get(\"id\")]\n if isinstance(venue.get(\"dateHours\"), dict):\n venue[\"dateHours\"] = [venue[\"dateHours\"]]\n if \"dateHours\" in venue:\n for dh in venue[\"dateHours\"]:\n if isinstance(dh.get(\"meal\"), dict):\n dh[\"meal\"] = [dh[\"meal\"]]\n return response",
"def get_venues():\n venues = queries.random_venues(10)\n venues = [venue_schema.dump(v).data for v in venues]\n result = {\n 'success': True,\n 'data': {\n 'venues': venues\n }\n }\n return jsonify(result)",
"def _make_search(start, end, fname):\n end = end or now()\n start = start or 0\n\n validate_timestamp(start, end)\n start = iso_format_validation(start)\n end = iso_format_validation(end)\n\n search = []\n\n with open(fname, 'r', newline='') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n for row in csvreader:\n if start <= row[1] <= end:\n search.append(row)\n return search",
"def get_available_time_slot():\n try:\n time_slot_set_list = list()\n # Read all time slot from database\n with open(InterviewCalendarApi.DB_FILE, \"r\") as fd:\n for line in fd:\n time_slot_list = list()\n (_,_,_, time_slots) = line.strip().split(\"|\")\n for time_slot in time_slots.split(\",\"):\n (from_time_slot, to_time_slot) = list(map(int, time_slot.split(\"-\")))\n time_slot_list.extend(range(from_time_slot, (to_time_slot + 1)))\n # Get all available time slot for every user\n time_slot_set_list.append(set(time_slot_list))\n \n # Find common time slot between multiple parties\n available_slots = list(set.intersection(*time_slot_set_list))\n\n msg = json.dumps({\"Status\": \"Success\", \"available_slots\": available_slots})\n return make_response(msg, 200, InterviewCalendarApi.HEADERS)\n except:\n err_msg = sys.exc_info()\n error = json.dumps({'error': 'Unable to find time slot due to error: %s' %str(err_msg)})\n return make_response(error, 401, InterviewCalendarApi.HEADERS)",
"def get_vacancies(config):\r\n vacancies_file = deepcopy(config[\"vacancies_file\"])\r\n headers = deepcopy(config[\"headers\"])\r\n filters = deepcopy(config[\"url_params\"])\r\n filters[\"area\"] = filters[\"area\"][-1].split(\"|\")\r\n if filters[\"area\"] == [\"\"]:\r\n del filters[\"area\"]\r\n if \"date_from\" or \"date_to\" in filters:\r\n del filters[\"period\"]\r\n filters[\"page\"] = 0\r\n in_tests.test_dict_data_type(filters)\r\n print (\"\\n\\nGetting vacancies from hh...\")\r\n\r\n date_current = datetime.datetime.now().replace(microsecond=0).isoformat()\r\n while True:\r\n vacancies = load_vacancies(headers, filters)\r\n found_vacancies = vacancies[\"found\"]\r\n if found_vacancies:\r\n write_vacancies_to_database(\r\n config, create_vacancies_generator(vacancies[\"items\"]))\r\n write_to_file(vacancies_file, vacancies)\r\n filters[\"page\"] += 1\r\n if vacancies[\"pages\"] <= filters[\"page\"]:\r\n break\r\n config[\"url_params\"][\"date_from\"] = date_current\r\n import_database_columns(config)\r\n got_vacancies = min(found_vacancies, filters[\"per_page\"]*filters[\"page\"])\r\n if \"period\" in filters:\r\n print(f\"\\n\\nFound: {found_vacancies} vacancies \\\r\nfor period of {filters['period']} days.\")\r\n elif \"date_from\" in filters:\r\n print(f\"\\n\\nFound: {found_vacancies} vacancies \\\r\nfrom {format(filters['date_from'])}.\")\r\n else:\r\n print(\r\n \"\\n\\nNo `period` or `date_from` in `config.yaml > url_params`\\n\\n\")\r\n raise AttributeError\r\n if found_vacancies:\r\n print(f\"Got: {got_vacancies} vacancies \\\r\n({round(got_vacancies/found_vacancies*100, 2)}%)\")\r\n else:\r\n print(f\"Got: {got_vacancies} vacancies (0%)\")\r\n if found_vacancies > got_vacancies:\r\n print(f\"\\nYou can get more vacancies by:\\n\\\r\n 1. Scheduling parse more often.\\n\\\r\n 2. Adding more filter params to `config.yaml > url_params`.\\n\\\r\n 3. Changing region\\n\\\r\n 4. Changing `config.yaml > url_params > period` \\\r\n(legal values in [1, 31]).\\n\\\r\n Works only if no param `date_from`: \\\r\n\")\r\n return ()",
"def pp_schedule(filename,venue):\n #reading the file\n f = open(filename,\"r\")\n incsv = f.readlines()\n #removing affixes\n incsv[:] = [i.rstrip('\\n') for i in incsv]\n #lines into lists\n tempstr = \"\"\n templist = []\n for j in range(len(incsv)):\n #enters each line into temporary string variable\n tempstr = incsv[j]\n #enters the split string into a temporary list variable\n templist.append(tempstr.split(\",\"))\n #modify original line in original list with split list\n incsv[j] = templist\n #reset temporary variables\n tempstr = \"\"\n templist = []\n #timelist stands for timetable list\n #format of timelist: [[day,[start,end,module],...],...]\n timelist = []\n for k in range(1,8):\n #for each day code add day code\n timelist.append([k])\n #assign and make list for ttint\n #for all lines in file\n for l in range(len(incsv)):\n #if venue in line matches desired venue\n if incsv[l][0][7] == venue:\n #after each day code, add a list with start time, end time and module. Repeat for each relevant line\n timelist[(int(incsv[l][0][3])-1)].append([int(incsv[l][0][5]),int(incsv[l][0][6]),incsv[l][0][0]])\n #turtle\n print(\"Your timetable is being printed on Python Turtle Graphics. This may take a while.\")\n ttint(timelist,venue)",
"def findAvailableTimes(self, nowDay, nowHour, nowMinute, workStart, workEnd, events, timeEst):\n global format\n format = Format()\n global timeSlot\n timeSlot = TimeSlot(timeEst)\n global availableTimes\n availableTimes = []\n print(self.current)\n try:\n if len(events) > 1:\n for i in range(len(events) - 1):\n\n event1 = events[i]\n event2 = events[i + 1]\n e1, e2 = format.formatEvent(event1, event2)\n self.compareEvents(e1, e2, workStart, workEnd, nowDay, nowHour, nowMinute, timeEst)\n\n lastEvent = events[len(events) - 1]\n secondToLast = events[len(events) - 2]\n self.compareLastEvent(lastEvent, secondToLast, workStart, workEnd, nowDay, nowHour, nowMinute, timeEst)\n\n elif len(events) == 1:\n lastEvent = events[0]\n nowTime = [self.current[:11] + str(int(self.current[11:13]) - 1) + self.current[13:], self.current]\n nowTime = format.eventFormatDictionary(nowTime, 'now')\n\n self.compareLastEvent(lastEvent, nowTime, workStart, workEnd, nowDay, nowHour, nowMinute, timeEst)\n\n self.addEmptyDays(events, workStart, workEnd, timeEst)\n availableTimes.sort()\n return availableTimes\n except:\n global msg\n msg = \"There isn't enough time. Try again\"\n return redirect('/error')",
"def check_time_range(file, startdate, enddate):\n key_date = parse_dt_from_logfile_name(file)\n if startdate <= key_date <= enddate:\n return file",
"def test_get_url_for_time_range_level2_allwave(suvi_client, start, end, expected_num_files):\n goes_sat = a.goes.SatelliteNumber.sixteen\n qresponse = suvi_client.search(a.Time(start, end), goes_sat, a.Level(2))\n urls = [i['url'] for i in qresponse]\n assert isinstance(urls, list)\n assert len(urls) == expected_num_files",
"def mainf(): \n \n \n fname = \"C:\\\\Users\\\\pfduc\\\\Documents\\\\room-booking\\\\Output_by_mcgill_system.csv\"\n \n start_data = False\n \n output_data = []\n \n with open(fname, 'r') as csvfile:\n \n spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n \n for row in spamreader:\n \n if \"For Week\" in row[0]:\n weekdate_start = row[0].replace(\"For Week\",'').strip()\n \n weekdate_start = weekdate_start.split(' to ')[0]\n \n weekdate_start = timezone.datetime.strptime(weekdate_start, '%d-%b-%Y')\n \n #parse only the meaningful data (see at the end of the loop)\n if start_data:\n\n #information about the days of the week the time information\n #will refer to\n weekdays = row[3].strip().split(' ')\n \n #hours it starts to be free and hours it stops\n time_start, time_stop = row[4].strip().split(' - ')\n \n #will contain which time slots aren't available so we can\n #hardbook them\n timeslots = []\n \n #loop over the weekdays\n for weekday in WEEKDAYS_CODE:\n \n if weekday in weekdays:\n #the room is available on that day, so we keep track of the\n #time at which it isn't in order to hardbook it\n \n #get the date of that day from the one of the beginning of \n #the week\n cur_weekdate = weekdate_start + \\\n timezone.timedelta(days = WEEKDAYS_CODE.index(weekday),\n hours = HOUR_MIN)\n \n #before the period the room is available we\n #need to recreate a hard booking\n hb_stop = timezone.datetime.strptime(\n \"%s %s\"%(cur_weekdate.date(),time_start),\n '%Y-%m-%d %H:%M')\n \n #compare the hour with the min allowed hour\n if hb_stop.hour > HOUR_MIN:\n \n ts = TimeSlot(\"%s from %02d:00 to %s\"%(\n hb_stop.strftime(\"%Y-%m-%d\"),\n HOUR_MIN,\n hb_stop.strftime(\"%H:%M\")),\n datestr = True)\n \n timeslots.append(ts)\n\n \n #after the period where the room is available we\n #need to recreate a hard booking\n hb_restart = timezone.datetime.strptime(\n \"%s %s\"%(cur_weekdate.date(),time_stop),\n '%Y-%m-%d %H:%M')\n \n #compare the hour with the max allowed hour\n if hb_restart.hour < HOUR_MAX:\n \n ts = TimeSlot(\"%s to %02d:00\"%(\n hb_restart.strftime(\"%Y-%m-%d from %H:%M\"),\n HOUR_MAX),\n datestr = True)\n \n timeslots.append(ts)\n else:\n #the room isn't available so we'll hardbook on whole day\n cur_weekdate = weekdate_start + \\\n timezone.timedelta(days = WEEKDAYS_CODE.index(weekday),\n hours = HOUR_MIN)\n \n #create a timeslot for the whole day\n ts = TimeSlot(cur_weekdate,\n duration = HOUR_MAX - HOUR_MIN)\n \n timeslots.append(ts)\n\n #the information needed to do the hard booking :\n #room name and timeslots\n booking = {\n \"room\" : \"%s %s\"%(row[1], row[2]),\n \"timeslots\" : timeslots \n }\n \n output_data.append(booking)\n \n #from this row the data starts to be interesting to parse\n if \"RDEF CODE\" in row[0]:\n \n start_data = True\n\n return output_data",
"def get_flarelist(goes_class_filter, filename): \n t_start = \"2012-08-22 00:00\"\n t_end = \"2018-04-20 00:00\"\n get_goes_event_list(t_start, t_end, filename=Path.cwd().joinpath(filename), goes_class_filter=goes_class_filter)",
"def load_venues():\n\n print('load_venues')\n\n Venue.query.delete()\n\n for row in open(\"seed_data/venues.csv\"):\n row = row.rstrip()\n subcategory, \\\n created_by, \\\n title, \\\n addr_1, \\\n addr_2, \\\n city, \\\n postal_code, \\\n state = row.split(',')\n\n cat_sub = Category_Subcategory.query.filter_by(name=subcategory).first()\n\n vnu = Venue(subcategory_id=cat_sub.id,\n created_by=created_by,\n name=title,\n addr_1=addr_1,\n addr_2=addr_2,\n city=city,\n postal_code=postal_code,\n state=state)\n\n db.session.add(vnu)\n\n db.session.commit()",
"def get_availability(self, schedules, start, end, interval=60):\n url = self.build_url(self._endpoints.get('get_availability'))\n\n data = {\n 'startTime': self._build_date_time_time_zone(start),\n 'endTime': self._build_date_time_time_zone(end),\n 'availabilityViewInterval': interval,\n 'schedules': schedules\n }\n\n response = self.con.post(url, data=data)\n if not response:\n return []\n\n data = response.json().get('value', [])\n\n # transform dates and availabilityView\n availability_view_codes = {\n '0': 'free',\n '1': 'tentative',\n '2': 'busy',\n '3': 'out of office',\n '4': 'working elsewhere',\n }\n for schedule in data:\n a_view = schedule.get('availabilityView', '')\n schedule['availabilityView'] = [availability_view_codes.get(code, 'unkknown') for code in a_view]\n for item in schedule.get('scheduleItems', []):\n item['start'] = self._parse_date_time_time_zone(item.get('start'))\n item['end'] = self._parse_date_time_time_zone(item.get('end'))\n\n return data",
"def venues():\n # find all venues on the basis of distinct city and states\n venues_by_locations = get_venues_by_distinct_locations()\n data = []\n if venues_by_locations:\n # prepare data to be displayed in the template\n data = [v.venue_location_serializer for v in venues_by_locations]\n for venue_data in data:\n venue_data['venues'] = get_venues_by_location(venue_data['city'], venue_data['state'])\n venue_data['venue_count'] = len(venue_data['venues'])\n return render_template('pages/venues.html', areas=data)",
"def search_venues():\n search_term = request.form.get('search_term', '')\n # search venue by venue name partial match\n venues_by_text = search_venue(search_term)\n # prepare data to shown in the template\n response = {\n 'count': len(venues_by_text),\n 'data': [v.short_serializer for v in venues_by_text]\n }\n return render_template('pages/search_venues.html', results=response,\n search_term=request.form.get('search_term', ''))",
"def check_visited_places(userid: int, day_range: int):\n print(f\"Checking visited places by user {userid} in the last {day_range} days\")\n # get reservations in which user actually showed up from Reservation service\n range = datetime.now() - timedelta(days=day_range)\n range.replace(hour=0, minute=0, second=0, microsecond=0)\n\n reservations = Reservation.query.filter_by(user_id=userid).\\\n filter(Reservation.entrance_time != None).filter(Reservation.entrance_time >= range).all()\n # print(\"DB\", db)\n # also all results must be json serializable\n return [row.to_dict() for row in reservations]",
"def get_events(start_date, end_date, source=utils.get_native_source, **kwargs):\n if not isinstance(source, games.models.Source):\n source = source()\n logger.info(\"getting events from source %s...\", source)\n if not source:\n return []\n # with open('sportmonks/response_texts/fixtures_{}-{}.txt'.format(start_date.strftime('%Y-%m-%d'),\n # end_date.strftime('%Y-%m-%d')), 'w') as outfile:\n # season is necessary so that the season object is extracted and used\n include = kwargs.get('include', '')\n include = ','.join([include, 'season']) if include else 'season'\n kwargs['include'] = include\n data, meta, status_code = sportmonks.fixtures.by_date_range(start_date=start_date, end_date=end_date, **kwargs)\n # json.dump(data, outfile, indent=4)\n if not data:\n return []\n pre_events = []\n try:\n num_fetched_objects = len(data)\n except:\n num_fetched_objects = None\n num_processed_objects = 0\n try:\n for obj in data:\n num_processed_objects += 1\n try:\n sid = obj.get('id', None)\n time = obj.get('time', dict())\n starting_at = time.get('starting_at', dict())\n event_datetime = get_date(starting_at, 'date_time')\n # custom_timezone = pytz.timezone('Europe/Athens')\n # event_datetime = event_datetime.astimezone(custom_timezone)\n home_team_sid = obj.get('localteam_id', None)\n away_team_sid = obj.get('visitorteam_id', None)\n competition_season_sid = obj.get('season_id', None)\n season_string = obj.get('season', {}).get('data', {}).get('name')\n stage_sid = obj.get('stage_id', None)\n round_sid = obj.get('round_id', None)\n competition_sid = obj.get('league_id', None)\n except Exception as e:\n logger.data_error('%s', e)\n continue\n\n zak_season_name = games.models.Season.zakandify_season_string(season_string)\n season = zakanda.utils.season_from_season_name(zak_season_name)\n if not season:\n logger.data_error('Could not extract season object from season string: %s', season_string)\n continue\n\n # todo sportmonks fix\n # if the event involves a problematic team it is not created in order to avoid future problems\n if is_in_problematic_teams(home_team_sid):\n home_team_sid = None\n if is_in_problematic_teams(away_team_sid):\n away_team_sid = None\n\n competition_seasons = games.models.CompetitionSeason.by_sid(competition_season_sid, source, season)\n try:\n competition_season = competition_seasons.first() # only one entity exists in the queryset\n except Exception as e:\n logger.warning('%s', e)\n competition_season = None\n\n home_team = games.models.Team.by_sid(home_team_sid, source)\n away_team = games.models.Team.by_sid(away_team_sid, source)\n pre_event = pre_models.PreEvent(source, sid, event_datetime, home_team, away_team, competition_season)\n pre_events.append(pre_event)\n except Exception as e:\n logger.error('%s Unexpected problem with sportmonks.fixtures.by_date_range %s %s from source %s',\n e, start_date, end_date, source)\n logger.info(\"%s event objects were contained in the response\", num_fetched_objects)\n logger.info(\"%s event objects were processed\", num_processed_objects)\n logger.info(\"%s pre events were created\", len(pre_events))\n return pre_events",
"def load_events():\n\n print('load_events')\n\n Event.query.delete()\n\n for row in open(\"seed_data/events.csv\"):\n row = row.rstrip()\n private, \\\n host_id, \\\n venue, \\\n title, \\\n time_begin, \\\n time_end, \\\n max_cap, \\\n url = row.split(',')\n\n private = int(private)\n host_id = int(host_id)\n\n ven = Venue.query.filter_by(name=venue).first()\n\n begin_at = datetime.strptime(time_begin, \"%y-%m-%d %H:%M:%S\")\n\n end_at = datetime.strptime(time_end, \"%y-%m-%d %H:%M:%S\")\n\n evt = Event(private=private,\n host_id=host_id,\n venue_id=ven.id,\n title=title,\n begin_at=begin_at,\n end_at=end_at,\n max_cap=max_cap,\n url=url)\n\n db.session.add(evt)\n\n db.session.commit()",
"def get_dates(file,start,end):\r\n \r\n data = format_data(file)\r\n data = data.loc[start:end,:] \r\n dates = list(data.index)\r\n \r\n return dates",
"def add_not_available(self, start: datetime.datetime=None, end: datetime.datetime=None):\n if not start and not end:\n return\n elif start and not end:\n end = self.conference.ends_at\n elif end and not start:\n start = self.conference.starts_at\n self.unavailable.append((start, end))",
"def _handle_missing_hr_files(netcdf_start_date, netcdf_end_date, tmp_dir):\n hrs_range = arrow.Arrow.range(\n \"hour\", netcdf_start_date.shift(days=-1), netcdf_end_date.shift(hours=+23)\n )\n missing_hrs = []\n for netcdf_hr in hrs_range:\n nemo_date = f\"y{netcdf_hr.year}m{netcdf_hr.month:02d}d{netcdf_hr.day:02d}\"\n nemo_hr_ds_path = tmp_dir / f\"gemlam_{nemo_date}_{netcdf_hr.hour:03d}.nc\"\n if nemo_hr_ds_path.exists():\n if missing_hrs:\n if len(missing_hrs) <= 4:\n _interpolate_intra_day_missing_hrs(missing_hrs)\n missing_hrs = []\n else:\n _interpolate_inter_day_missing_hrs(missing_hrs)\n missing_hrs = []\n else:\n missing_hrs.append({\"hr\": netcdf_hr, \"ds_path\": nemo_hr_ds_path})\n if missing_hrs:\n raise FileNotFoundError(f\"missing hours at end of date range: {missing_hrs}\")",
"def get_events_for_specific_hours(start,end):\n\tresults = session.query(\"event_name\",\"date\",\"start_time\",\"end_time\").\\\n\tfrom_statement(\"select event_name,date,start_time,end_time from event where date=curdate() and \\\n\t\tstart_time >= :starttime and end_time <= :endtime\").\\\n\tparams(starttime = start, endtime = end).all()\n\tif(len(results) > 0):\n\t\tret_dict = {}\n\t\tevents = []\n\n\t\tfor event_tuple in results:\n\t\t\ttemp = {}\n\t\t\ttemp['event_name'] = event_tuple[0]\n\t\t\ttemp['start_date'] = str(event_tuple[1])\n\t\t\ttemp['start_time'] = str(event_tuple[2])\n\t\t\ttemp['end_time'] = str(event_tuple[3])\n\t\t\tevents.append(temp)\n\n\t\tret_dict['events'] = events\n\t\treturn jsonify(ret_dict)\n\telse:\n\t\treturn \"{'events':'no results returned'}\""
]
| [
"0.61132663",
"0.60554844",
"0.5829282",
"0.5526698",
"0.54212916",
"0.5218367",
"0.52058125",
"0.51949495",
"0.5194945",
"0.51915556",
"0.5039286",
"0.502326",
"0.50132734",
"0.49665505",
"0.49555328",
"0.49053603",
"0.486853",
"0.48457333",
"0.48428378",
"0.48234007",
"0.4812037",
"0.4791134",
"0.47669575",
"0.47547546",
"0.47525337",
"0.47167563",
"0.47079837",
"0.4686681",
"0.46560743",
"0.46496323"
]
| 0.7609462 | 0 |
venue_occupancy(filename) returns the average venue occupancy across all the venues. To use venue_occupancy, enter the filename in between the parentheses. The function returns a single number: the fraction of weekday office hours (Monday to Friday, 0800 to 1700) for which a venue in the timetable file is occupied, averaged over all venues. | def venue_occupancy(filename):
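    #illustrative usage (a sketch; "timetable.csv" is a hypothetical filename, not part of the original code):
    #   occ = venue_occupancy("timetable.csv")
    #   print(round(occ, 2))   #e.g. 0.35, i.e. venues are occupied about 35% of the 45 weekday office hours on average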
#open file
f = open(filename,"r")
incsv = f.readlines()
#removing affixes
incsv[:] = [i.rstrip('\n') for i in incsv]
#lines into lists
tempstr = ""
templist = []
for j in range(len(incsv)):
#enters each line into temporary string variable
tempstr = incsv[j]
#enters the split string into a temporary list variable
templist.append(tempstr.split(","))
#modify original line in original list with split list
incsv[j] = templist
#reset temporary variables
tempstr = ""
templist = []
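    #after this parsing loop each line is doubly nested; with the column order used elsewhere in this file
    #(moduleCode,ClassNo,LessonType,DayCode,DayText,StartTime,EndTime,Venue,AcadYear,Semester)
    #a hypothetical parsed row looks like
    #incsv[1] == [["CS1010","1","Lecture","1","Monday","0800","1000","LT27","2016/2017","1"]]
    #so incsv[1][0][7] is the venue and incsv[1][0][5]/[6] are the start/end times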
#all venues
venuelist = []
for k in range(1,len(incsv)):
venuelist.append(incsv[k][0][7])
#filter all unique venues by checking whether venue is already in filterlist
filterlist = []
#check is temporary variable to decide whether to add venue
check = True
#for all venues in venuelist
for l in range(len(venuelist)):
#if venue in venuelist already in filterlist
if venuelist[l] in filterlist:
#decision to add venue is false
check = False
#if decision is to add the venue in venuelist
if check == True:
#append new venue to filterlist
filterlist.append(venuelist[l])
#reset decision to true
check = True
#add hours to total count (time)
time = 0
#for all lines in file
for m in range(1,len(incsv)):
#if time of venue falls within office hours for weekdays
if int(incsv[m][0][5]) >= 800 and int(incsv[m][0][5]) <= 1700 and int(incsv[m][0][6]) <= 1700 and int(incsv[m][0][6]) >= 800 and int(incsv[m][0][3]) >= 1 and int(incsv[m][0][3]) <= 5:
#add hour to total count
time += (int(incsv[m][0][6]) - int(incsv[m][0][5]))
#if start time falls before office hours but end time is within office hours
        elif int(incsv[m][0][5]) < 800 and int(incsv[m][0][6]) <= 1700 and int(incsv[m][0][6]) > 800 and int(incsv[m][0][3]) >= 1 and int(incsv[m][0][3]) <= 5:
#ignore hours before 800 and add remaining hours
time += (int(incsv[m][0][6]) - 800)
        #if end time falls after office hours but start time is within office hours
elif int(incsv[m][0][5]) >= 800 and int(incsv[m][0][5]) < 1700 and int(incsv[m][0][6]) > 1700 and int(incsv[m][0][3]) >= 1 and int(incsv[m][0][3]) <= 5:
#ignore hours after 1700 and add remaining hours
time += (1700 - int(incsv[m][0][5]))
#if start time falls before 800 and end time falls after 1700
elif int(incsv[m][0][5]) < 800 and int(incsv[m][0][6]) > 1700 and int(incsv[m][0][3]) >= 1 and int(incsv[m][0][3]) <= 5:
#add the maximum of 9 hours
time += 900
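            #worked example (hypothetical booking): a 0730-1830 class on day 2 is clipped to 0800-1700
            #by this branch, so it adds the full 900 (i.e. 9 office hours)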
#if time range falls outside of office hours
elif ((int(incsv[m][0][5]) < 800 and int(incsv[m][0][6]) <= 800) or (int(incsv[m][0][5]) >= 1700 and int(incsv[m][0][6]) > 1700)) and int(incsv[m][0][3]) >= 1 and int(incsv[m][0][3]) <= 5:
            #nothing to add for bookings entirely outside office hours
            pass
#average (avr)
avr = 0
    #average = total occupied time / (number of unique venues * 4500), where 4500 is 45 office hours in hhmm units
avr = (time/(len(filterlist)*4500))
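    #worked example with hypothetical numbers: 10 unique venues and time == 15750 give
    #avr == 15750/45000 == 0.35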
return avr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n \n file = open(\"profit.txt\",\"r\")\n profits = file.readlines()\n file = open(\"revenue.txt\",\"r\")\n revenues = file.readlines()\n file.close()\n \n totalProfits = 0\n for i in range(len(profits)):\n totalProfits += float(profits[i])\n \n print \"Total Fortune 500 profits, \" + os.environ['YEAR'] + \": $\" + str(totalProfits) + \"M\"\n \n totalRevenues = 0\n for j in range(len(revenues)):\n totalRevenues += float(revenues[i])\n \n print \"Total Fortune 500 revenues, \" + os.environ['YEAR'] + \": $\" + str(totalRevenues) + \"M\"",
"def advent_9a(file_name):\n with open(file_name) as input_file:\n line = input_file.readline()\n group_scores = []\n get_group(line, 0, 1, group_scores, [])\n return sum([value for (_, value) in group_scores])",
"def total_venues(filename):\n #reading the file\n f = open(filename,\"r\")\n #incsv is a short form of 'input csv file'\n incsv = f.readlines()\n #removing affixes\n incsv[:] = [i.rstrip('\\n') for i in incsv]\n #lines into lists\n #tempstr and templist are temporary variables to split the strings in incsv\n tempstr = \"\"\n templist = []\n for j in range(len(incsv)):\n #enters each line into temporary string variable\n tempstr = incsv[j]\n #enters the split string into a temporary list variable\n templist.append(tempstr.split(\",\"))\n #modify original line in original list with split list\n incsv[j] = templist\n #reset temporary variables\n tempstr = \"\"\n templist = []\n #final format of incsv: [[[moduleCode,ClassNo,LessonType,DayCode,DayText,StartTime,EndTime,Venue,AcadYear,Semester]],...]\n #yes each line is nested in two lists for some reason\n #lists all venues\n #venuelist stores all occurrences of the venues. venues can be repeated\n venuelist = []\n for k in range(len(incsv)):\n #append venue to venuelist\n venuelist.append(incsv[k][0][7])\n #filter all unique venues by checking whether venue is already in filterlist\n filterlist = []\n #check is temporary variable to decide whether to add venue\n check = True\n #for all venues in venuelist\n for l in range(len(venuelist)):\n #if venue in venuelist already in filterlist\n if venuelist[l] in filterlist:\n #decision to add venue is false\n check = False\n #if decision is to add the venue in venuelist\n if check == True:\n #append new venue to filterlist\n filterlist.append(venuelist[l])\n #reset decision to true\n check = True\n return (len(filterlist)-1)",
"def hotaverage( names):\n rs = radioastronomy.Spectrum() # create input and average structures\n nhot = 0\n\n avenames = names # create a list of files to average\n\n # for all input files\n for filename in names:\n\n parts = filename.split('/')\n nparts = len(parts)\n if nparts == 1:\n aname = parts[0]\n else:\n aname = parts[nparts-1]\n\n parts = aname.split('.')\n nparts = len(parts)\n if nparts < 2:\n print 'File is not an astronomy file: ',filename\n continue\n else:\n extension = parts[nparts-1]\n\n extension = extension.upper()\n if extension != 'HOT': # speed up by only looking at hot load files\n continue\n \n rs.read_spec_ast(filename)\n\n if rs.telel > 0: # only working with hot load, skip elevation > 0.\n continue\n\n avenames[nhot] = filename\n nhot = nhot + 1\n # end of for all files loop\n\n nhot, hot = average( avenames[0:nhot]) # now use generic program for averages\n if nhot < 1:\n print 'No hot load files; can not calibrate!'\n exit()\n\n return nhot, hot",
"def test_basic_file_mean(self):\n index = pds.date_range(*self.bounds1)\n names = [''.join((date.strftime('%Y-%m-%d'), '.nofile'))\n for date in index]\n self.testInst.bounds = (names[0], names[-1])\n ans = avg.mean_by_file(self.testInst, 'dummy4')\n assert np.all(ans == 86399 / 2.0)\n\n return",
"def load_data(filename):\n # create an evidence and label list\n evidence = []\n label = []\n\n # create a dictionary to hold key months matching to their respective values\n month = {'Jan': 0, 'Feb': 1, 'Mar': 2, 'Apr': 3, 'May': 4, 'June': 5, 'Jul': 6, 'Aug': 7, 'Sep': 8, 'Oct': 9,\n 'Nov': 10, 'Dec': 11}\n\n # open and read the csv file\n with open(filename) as data:\n # use the dictionary csv reader to be able to call the cell values by the csv column header names\n reader = csv.DictReader(data)\n # read each row in the csv and append the evidence and labels to their respective lists\n for row in reader:\n evidence.append([\n int(row[\"Administrative\"]),\n float(row[\"Administrative_Duration\"]),\n int(row[\"Informational\"]),\n float(row[\"Informational_Duration\"]),\n int(row[\"ProductRelated\"]),\n float(row[\"ProductRelated_Duration\"]),\n float(row[\"BounceRates\"]),\n float(row[\"ExitRates\"]),\n float(row[\"PageValues\"]),\n float(row[\"SpecialDay\"]),\n month[row[\"Month\"]],\n int(row[\"OperatingSystems\"]),\n int(row[\"Browser\"]),\n int(row[\"Region\"]),\n int(row[\"TrafficType\"]),\n 1 if row[\"VisitorType\"] == \"Returning_Visitor\" else 0,\n 1 if row[\"Weekend\"] == \"TRUE\" else 0,\n ])\n label.append(\n 1 if row['Revenue'] == 'TRUE' else 0\n )\n\n return evidence, label",
"def analyze(filename):\r\n start = datetime.datetime.now()\r\n\r\n ao_count = 0\r\n\r\n with open(filename) as csvfile:\r\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\r\n year_count = {\r\n \"2013\": 0,\r\n \"2014\": 0,\r\n \"2015\": 0,\r\n \"2016\": 0,\r\n \"2017\": 0,\r\n \"2018\": 0\r\n }\r\n for row in reader:\r\n l_row = list(row)\r\n print(f\"\\n{row}\")\r\n year = l_row[5][6:]\r\n if year in year_count.keys():\r\n year_count[year] += 1\r\n if \"ao\" in l_row[6]:\r\n ao_count += 1\r\n\r\n end = datetime.datetime.now()\r\n return start, end, year_count, ao_count",
"def gacha_mean(sourcefile, targetfile):\n sys.stderr.write(err_msg('Calculating Gacha mean, please wait ...\\n'))\n c = num_char(sourcefile) / num_char(targetfile)\n sys.stderr.write(err_msg('Gacha mean = ' + str(c) + '\\n'))\n sys.stderr.write(err_msg('Filtering starts ...\\n'))\n return c",
"def upload_household_income(url, gcs_bucket, filename):\n year_range = {1989, 1993, *range(1995, 2019)}\n for year in year_range:\n url_params = get_census_params_by_county(\n get_household_income_columns().keys())\n url_params['time'] = year\n url_file_to_gcs(\n url, url_params, gcs_bucket, '{}_{}.json'.format(filename, year))",
"def file_cost(filename):\r\n return grid_cost(read_grid(filename))",
"def file_cost(filename):\r\n return grid_cost(read_grid(filename))",
"def main():\n data_file = open('lecture15f.csv', 'r')\n sum_of_values = 0.0\n count = 0\n\n data_file.readline()\n for line in data_file:\n print ('line of file =', line)\n # 'float,float,float,float'\n for data_point in line.split(','):\n # ['float', 'float', 'float',]\n print ('data point in line =', data_point)\n sum_of_values = sum_of_values + float(data_point)\n count += 1\n\n print(\"The average of {0} values is {1}\".format(\n count,\n sum_of_values / count))",
"def calculate_total_fuel(filename):\n return sum([calculate_fuel_from_mass(mass) for mass in read_mass_from_file(filename)])",
"def venue_size(venue):\n if venue == \"ws\":\n return 100\n elif venue in [\"acl\", \"aacl\", \"naacl\", \"emnlp\", \"coling\", \"lrec\"]:\n return 50\n else:\n return 1",
"def available_venues(filename,start,end,day):\n #open file\n f = open(filename,\"r\")\n incsv = f.readlines()\n #removing affixes\n incsv[:] = [i.rstrip('\\n') for i in incsv]\n #lines into lists\n tempstr = \"\"\n templist = []\n for j in range(len(incsv)):\n #enters each line into temporary string variable\n tempstr = incsv[j]\n #enters the split string into a temporary list variable\n templist.append(tempstr.split(\",\"))\n #modify original line in original list with split list\n incsv[j] = templist\n #reset temporary variables\n tempstr = \"\"\n templist = []\n #lists all venues\n venuelist = []\n for k in range(len(incsv)):\n venuelist.append(incsv[k][0][7])\n #filter all unique venues by checking whether venue is already in filterlist\n filterlist = []\n #check is temporary variable to decide whether to add venue\n check = True\n #for all venues in venuelist\n for l in range(len(venuelist)):\n #if venue in venuelist already in filterlist\n if venuelist[l] in filterlist:\n #decision to add venue is false\n check = False\n #if decision is to add the venue in venuelist\n if check == True:\n #append new venue to filterlist\n filterlist.append(venuelist[l])\n #reset decision to true\n check = True\n #finding all available venues\n #for all lines in the timetable\n for m in range(1,len(incsv)):\n #if the start time of the venue is in between the desired start time or the end time of the venue is in between the desired end time\n if ((int(incsv[m][0][5]) >= start and int(incsv[m][0][5]) < end) or (int(incsv[m][0][6]) > start and int(incsv[m][0][6]) <= end)) and int(incsv[m][0][3]) == 4:\n #if the venue is still in list of venues (filterlist)\n if incsv[m][0][7] in filterlist:\n #remove venue from filterlist\n filterlist.remove(incsv[m][0][7])\n #remove header \"venue\" from filterlist\n filterlist.remove(\"Venue\")\n return filterlist",
"def writing_get_date_avg(file_name):\n result = str(reports.get_date_avg(file_name))\n with open (\"report_for_judy_part2.txt\", \"+a\") as f:\n f.write(result)\n f.write(\"\\n\")",
"def avg(year):\r\n df = ouvrir_fichier()\r\n df = df.loc[df[\"year\"].isin([year])]\r\n df = df[(\r\n df[\r\n \"emissions\"\r\n ] == 'Emissions (thousand metric tons of carbon dioxide)'\r\n )]\r\n print(df)\r\n mean_value = df.mean()['value']\r\n resultat = {}\r\n resultat[\"year\"] = year\r\n resultat['total'] = float(mean_value)\r\n print(mean_value)\r\n return resultat",
"def area_report(\n file=None\n):\n for entry in file:\n elems = entry.strip().split(' ')\n elems = prune(elems)\n if len(elems) >= 3:\n if str(elems[0]) == \"Total\" and str(elems[1]) == \"cell\" and str(elems[2]) == \"area:\":\n area = float(elems[3])\n\n if str(elems[0]) == \"Total\" and str(elems[1]) == \"area:\":\n if str(elems[2]) != \"undefined\":\n if area < float(elems[2]):\n area = float(elems[2])\n \n area /= 1000000.0\n return area",
"def load_data(filename):\n\n # Load data\n df = pd.read_csv(filename)\n\n # Create mappings of months and visitor types\n months = {\"Jan\": 0,\n \"Feb\": 1,\n \"Mar\": 3,\n \"Apr\": 4,\n \"May\": 5,\n \"June\": 6,\n \"Jul\": 7,\n \"Aug\": 8,\n \"Sep\": 9,\n \"Oct\": 10,\n \"Nov\": 11,\n \"Dec\": 12}\n\n visitors = {\"New_Visitor\": 0,\n \"Returning_Visitor\": 1,\n \"Other\": 0}\n\n # Get labels for individual datapoints\n labels = df['Revenue'].astype(int).tolist()\n\n # Create data points as formatted lists of values, floats get rounded to two decimals\n df['Administrative'] = df['Administrative'].astype(int)\n df['Administrative_Duration'] = df['Administrative_Duration'].astype(float).round(2)\n df['Informational'] = df['Informational'].astype(int).round(2)\n df['Informational_Duration'] = df['Informational_Duration'].astype(float).round(2)\n df['ProductRelated'] = df['ProductRelated'].astype(int)\n df['ProductRelated_Duration'] = df['ProductRelated_Duration'].astype(float).round(2)\n df['BounceRates'] = df['BounceRates'].astype(float).round(2)\n df['ExitRates'] = df['ExitRates'].astype(float).round(2)\n df['PageValues'] = df['PageValues'].astype(float).round(2)\n df['SpecialDay'] = df['SpecialDay'].astype(float).round(2)\n df['Month'] = df['Month'].map(months)\n df['OperatingSystems'] = df['OperatingSystems'].astype(int)\n df['Browser'] = df['Browser'].astype(int)\n df['Region'] = df['Region'].astype(int)\n df['TrafficType'] = df['TrafficType'].astype(int)\n df['VisitorType'] = df['VisitorType'].map(visitors)\n df['Weekend'] = df['Weekend'].astype(int)\n del df['Revenue']\n\n # Init result\n evidence = df.values.tolist()\n result = [evidence, labels]\n\n # Return a tuple (evidence, labels).\n return result",
"def load_eeg(filename):\r\n data = np.load(filename)\r\n return data['eeg'], int(data['srate'])",
"def load_eeg(filename):\r\n data = np.load(filename)\r\n return data['eeg'], int(data['srate'])",
"def mean_by_airline_dow(flights):\n\n return ...",
"def load_avg():\n \n with open(Path.proc_loadavg()) as f:\n line = f.readline()\n \n load_avgs = [float(x) for x in line.split()[:3]]\n \n return load_avgs",
"def average_population_grade(population):\r\n total = 0\r\n for individual in population :\r\n total += get_individual_fitness(individual)\r\n return total/POPULATION_COUNT",
"def average_fitness(individuals):\n fitness_num = 0\n for individual in individuals:\n fitness = individual.get_fitness()\n fitness_num += fitness\n return fitness_num / len(individuals)",
"def average_city(g):\n average = 0\n ctr = 0\n \n for key in g.city_dict:\n average = average + g.city_dict[key].get_population()\n ctr = ctr + 1\n \n \n return (average / ctr)",
"def main():\n print(\"Running the progam template.\")\n\n filename = input(\"What is the File name?: \")\n f = open(filename, 'r')\n theSum = 0\n count = 0\n maximum = 0\n minimum = 9999999999999999999\n for line in f:\n count += 1\n lines = line.strip()\n number = int(line)\n theSum += number\n average = theSum / count\n if number > maximum:\n maximum = number\n if number < minimum:\n minimum = number\n\n print(\"The Number of Integers is:\",count) \n print(\"The Average is:\",average)\n print (\"The Maximum is:\",maximum)\n print(\"The Minimum is:\",minimum)\n f.close()\n print(\"Have a nice day!\")",
"def get_venues():\n papers = local.papers().values()\n venues = {paper[\"venue\"] for paper in papers if \"venue\" in paper}\n\n for venue_id in venues:\n file_name = local.file_name(\"venues\", venue_id)\n if os.path.isfile(file_name):\n continue\n venue = get_venue(venue_id)\n if not venue:\n continue\n with open(file_name, \"w\") as out:\n json.dump(venue, out, ensure_ascii=False, indent=4)",
"def import_agilent_gc_residual_solvents(file_name):\n df = pd.read_excel(file_name, sheet_name = None)\n samples = get_sample_name(df)\n df_compound = get_compound_df(df)\n samples['metrics'] = df_compound[['analyte', 'measurement']].to_dict('records')\n return samples",
"def solution():\n file = get_source()\n results = []\n for c in range(int(file.readline())):\n grades = sorted([int(v) for v in file.readline().split()][1:])\n average = float(sum(grades))/len(grades)\n first = next((i for i,g in enumerate(grades) if g > average), len(grades))\n people_above_average = len(grades) - first\n results.append(people_above_average * 100.0 / len(grades))\n for r in results:\n print '%.3f%%' % r"
]
| [
"0.527116",
"0.514293",
"0.512036",
"0.5116814",
"0.50583875",
"0.504117",
"0.5030007",
"0.5024472",
"0.5019992",
"0.5010778",
"0.5010778",
"0.4986395",
"0.49747267",
"0.49149644",
"0.4880314",
"0.4873189",
"0.48650548",
"0.48556772",
"0.4847156",
"0.48384276",
"0.48384276",
"0.4832203",
"0.48245466",
"0.47780815",
"0.47714862",
"0.4750713",
"0.47490323",
"0.474412",
"0.47377893",
"0.4724872"
]
| 0.6115728 | 0 |
available_timing(filename,day,venue) returns a list of available timings for a certain venue on a certain day. To use available_timing, enter the filename as the first argument, followed by an integer day code and finally the venue. The function returns a list of available time ranges, each given as a (start, end) pair in round brackets. | def available_timing(filename,day,venue):
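    #illustrative usage (a sketch; the filename and venue are hypothetical):
    #   available_timing("timetable.csv", 4, "LT27")
    #   might return e.g. [(600, 800), (1000, 1400), (1600, 2400)]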
#reading the file
f = open(filename,"r")
incsv = f.readlines()
#removing affixes
incsv[:] = [i.rstrip('\n') for i in incsv]
#lines into lists
tempstr = ""
templist = []
for j in range(len(incsv)):
#enters each line into temporary string variable
tempstr = incsv[j]
#enters the split string into a temporary list variable
templist.append(tempstr.split(","))
#modify original line in original list with split list
incsv[j] = templist
#reset temporary variables
tempstr = ""
templist = []
#finding occupied hours
brlist = []
#for all lines in file
for k in range(len(incsv)):
#if venue in line matches desired venue and day in line matches desired day
if incsv[k][0][7] == venue and int(incsv[k][0][3]) == day:
#add time range of line into brlist
brlist.append([int(incsv[k][0][5]),int(incsv[k][0][6])])
#pruning
#tlist stands for timelist. stores remaining hours for synthesis
tlist = []
#list of hours
tlist = [600,700,800,900,1000,1100,1200,1300,1400,1500,1600,1700,1800,1900,2000,2100,2200,2300,2400]
#for line in brlist
for l in range(len(brlist)):
#for the range of hours of the line
for m in range(int((brlist[l][1]-brlist[l][0])/100)):
#if hours in range still in tlist
if (brlist[l][0] + m*100) in tlist:
#remove from tlist
tlist.remove(brlist[l][0] + m*100)
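    #e.g. with hypothetical bookings [[800,1000],[1400,1600]] the pruning above leaves
    #tlist == [600,700,1000,1100,1200,1300,1600,1700,1800,1900,2000,2100,2200,2300,2400]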
#plist for partition list. range of available timings appended here
plist = []
    #check marks the start time of each available time range
check = 0
#formation of time ranges
#for hours in tlist
for n in range(len(tlist)):
        #only start comparing from the second element onwards (range(1,len(tlist)) would also have worked)
if n >= 1:
#if 2 adjacent hours are not consecutive
if tlist[n] != (tlist[n-1]+100):
#add time range to plist
plist.append((tlist[check],tlist[n-1]+100))
#set check to next minimum available start time
check = n
        #adding range with last hour
        #if last hour in tlist is 2400 and the preceding hour in tlist is 2300
if tlist[n] == 2400 and tlist[n-1] == 2300:
#add time range
plist.append((tlist[check],2400))
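    #continuing the hypothetical example above, plist ends up as [(600, 800), (1000, 1400), (1600, 2400)]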
return plist | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def available_venues(filename,start,end,day):\n #open file\n f = open(filename,\"r\")\n incsv = f.readlines()\n #removing affixes\n incsv[:] = [i.rstrip('\\n') for i in incsv]\n #lines into lists\n tempstr = \"\"\n templist = []\n for j in range(len(incsv)):\n #enters each line into temporary string variable\n tempstr = incsv[j]\n #enters the split string into a temporary list variable\n templist.append(tempstr.split(\",\"))\n #modify original line in original list with split list\n incsv[j] = templist\n #reset temporary variables\n tempstr = \"\"\n templist = []\n #lists all venues\n venuelist = []\n for k in range(len(incsv)):\n venuelist.append(incsv[k][0][7])\n #filter all unique venues by checking whether venue is already in filterlist\n filterlist = []\n #check is temporary variable to decide whether to add venue\n check = True\n #for all venues in venuelist\n for l in range(len(venuelist)):\n #if venue in venuelist already in filterlist\n if venuelist[l] in filterlist:\n #decision to add venue is false\n check = False\n #if decision is to add the venue in venuelist\n if check == True:\n #append new venue to filterlist\n filterlist.append(venuelist[l])\n #reset decision to true\n check = True\n #finding all available venues\n #for all lines in the timetable\n for m in range(1,len(incsv)):\n #if the start time of the venue is in between the desired start time or the end time of the venue is in between the desired end time\n if ((int(incsv[m][0][5]) >= start and int(incsv[m][0][5]) < end) or (int(incsv[m][0][6]) > start and int(incsv[m][0][6]) <= end)) and int(incsv[m][0][3]) == 4:\n #if the venue is still in list of venues (filterlist)\n if incsv[m][0][7] in filterlist:\n #remove venue from filterlist\n filterlist.remove(incsv[m][0][7])\n #remove header \"venue\" from filterlist\n filterlist.remove(\"Venue\")\n return filterlist",
"def pp_schedule(filename,venue):\n #reading the file\n f = open(filename,\"r\")\n incsv = f.readlines()\n #removing affixes\n incsv[:] = [i.rstrip('\\n') for i in incsv]\n #lines into lists\n tempstr = \"\"\n templist = []\n for j in range(len(incsv)):\n #enters each line into temporary string variable\n tempstr = incsv[j]\n #enters the split string into a temporary list variable\n templist.append(tempstr.split(\",\"))\n #modify original line in original list with split list\n incsv[j] = templist\n #reset temporary variables\n tempstr = \"\"\n templist = []\n #timelist stands for timetable list\n #format of timelist: [[day,[start,end,module],...],...]\n timelist = []\n for k in range(1,8):\n #for each day code add day code\n timelist.append([k])\n #assign and make list for ttint\n #for all lines in file\n for l in range(len(incsv)):\n #if venue in line matches desired venue\n if incsv[l][0][7] == venue:\n #after each day code, add a list with start time, end time and module. Repeat for each relevant line\n timelist[(int(incsv[l][0][3])-1)].append([int(incsv[l][0][5]),int(incsv[l][0][6]),incsv[l][0][0]])\n #turtle\n print(\"Your timetable is being printed on Python Turtle Graphics. This may take a while.\")\n ttint(timelist,venue)",
"def findAvailableTimes(self, nowDay, nowHour, nowMinute, workStart, workEnd, events, timeEst):\n global format\n format = Format()\n global timeSlot\n timeSlot = TimeSlot(timeEst)\n global availableTimes\n availableTimes = []\n print(self.current)\n try:\n if len(events) > 1:\n for i in range(len(events) - 1):\n\n event1 = events[i]\n event2 = events[i + 1]\n e1, e2 = format.formatEvent(event1, event2)\n self.compareEvents(e1, e2, workStart, workEnd, nowDay, nowHour, nowMinute, timeEst)\n\n lastEvent = events[len(events) - 1]\n secondToLast = events[len(events) - 2]\n self.compareLastEvent(lastEvent, secondToLast, workStart, workEnd, nowDay, nowHour, nowMinute, timeEst)\n\n elif len(events) == 1:\n lastEvent = events[0]\n nowTime = [self.current[:11] + str(int(self.current[11:13]) - 1) + self.current[13:], self.current]\n nowTime = format.eventFormatDictionary(nowTime, 'now')\n\n self.compareLastEvent(lastEvent, nowTime, workStart, workEnd, nowDay, nowHour, nowMinute, timeEst)\n\n self.addEmptyDays(events, workStart, workEnd, timeEst)\n availableTimes.sort()\n return availableTimes\n except:\n global msg\n msg = \"There isn't enough time. Try again\"\n return redirect('/error')",
"def get_available_time_slot():\n try:\n time_slot_set_list = list()\n # Read all time slot from database\n with open(InterviewCalendarApi.DB_FILE, \"r\") as fd:\n for line in fd:\n time_slot_list = list()\n (_,_,_, time_slots) = line.strip().split(\"|\")\n for time_slot in time_slots.split(\",\"):\n (from_time_slot, to_time_slot) = list(map(int, time_slot.split(\"-\")))\n time_slot_list.extend(range(from_time_slot, (to_time_slot + 1)))\n # Get all available time slot for every user\n time_slot_set_list.append(set(time_slot_list))\n \n # Find common time slot between multiple parties\n available_slots = list(set.intersection(*time_slot_set_list))\n\n msg = json.dumps({\"Status\": \"Success\", \"available_slots\": available_slots})\n return make_response(msg, 200, InterviewCalendarApi.HEADERS)\n except:\n err_msg = sys.exc_info()\n error = json.dumps({'error': 'Unable to find time slot due to error: %s' %str(err_msg)})\n return make_response(error, 401, InterviewCalendarApi.HEADERS)",
"def sleeptimeinfo(filename, timeinfofile, format=[\"%d-%b-%y %H:%M:%S\", \"%d-%b-%y %H:%M\"]):\n\t#print timeinfofile\n\tinput_subject, input_night = filename.split('_SP')\n\tinput_night = input_night[0] # the first character only (0 - 9)\n\tinfoall = file(timeinfofile).read().split('\\n');\n\tfor row in infoall:\t\t\n\t\t#print row.split(',')\n\t\tsubject, night, startdate, starttime, offdate, offtime, ondate, ontime = row.split(SLEEPTIME_SEP)[:8]\n\t\tontime = ontime.strip()\n\t\t#print subject, night, ondate, ontime, offdate, offtime \n\t\tif subject == input_subject and night == input_night:\n\t\t\t#print subject, night, startdate, starttime, offdate, offtime, ondate, ontime\t\n\t\t\ttimestart, timeon, timeoff = __gettime__(startdate + \" \" + starttime, format), __gettime__(ondate + \" \" + ontime, format), __gettime__(offdate + \" \" + offtime, format)\n\t\t\tif timestart == None or timeoff == None or timeon == None :\n \t\t\t\tprint \"waketimeinfo() error: format problem?\"\n\t\t\treturn timestart, timeoff, timeon\n\tprint \"waketimeinfo() error: \" + filename + \" not found in \" + timeinfofile",
"def mainf(): \n \n \n fname = \"C:\\\\Users\\\\pfduc\\\\Documents\\\\room-booking\\\\Output_by_mcgill_system.csv\"\n \n start_data = False\n \n output_data = []\n \n with open(fname, 'r') as csvfile:\n \n spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n \n for row in spamreader:\n \n if \"For Week\" in row[0]:\n weekdate_start = row[0].replace(\"For Week\",'').strip()\n \n weekdate_start = weekdate_start.split(' to ')[0]\n \n weekdate_start = timezone.datetime.strptime(weekdate_start, '%d-%b-%Y')\n \n #parse only the meaningful data (see at the end of the loop)\n if start_data:\n\n #information about the days of the week the time information\n #will refer to\n weekdays = row[3].strip().split(' ')\n \n #hours it starts to be free and hours it stops\n time_start, time_stop = row[4].strip().split(' - ')\n \n #will contain which time slots aren't available so we can\n #hardbook them\n timeslots = []\n \n #loop over the weekdays\n for weekday in WEEKDAYS_CODE:\n \n if weekday in weekdays:\n #the room is available on that day, so we keep track of the\n #time at which it isn't in order to hardbook it\n \n #get the date of that day from the one of the beginning of \n #the week\n cur_weekdate = weekdate_start + \\\n timezone.timedelta(days = WEEKDAYS_CODE.index(weekday),\n hours = HOUR_MIN)\n \n #before the period the room is available we\n #need to recreate a hard booking\n hb_stop = timezone.datetime.strptime(\n \"%s %s\"%(cur_weekdate.date(),time_start),\n '%Y-%m-%d %H:%M')\n \n #compare the hour with the min allowed hour\n if hb_stop.hour > HOUR_MIN:\n \n ts = TimeSlot(\"%s from %02d:00 to %s\"%(\n hb_stop.strftime(\"%Y-%m-%d\"),\n HOUR_MIN,\n hb_stop.strftime(\"%H:%M\")),\n datestr = True)\n \n timeslots.append(ts)\n\n \n #after the period where the room is available we\n #need to recreate a hard booking\n hb_restart = timezone.datetime.strptime(\n \"%s %s\"%(cur_weekdate.date(),time_stop),\n '%Y-%m-%d %H:%M')\n \n #compare the hour with the max allowed hour\n if hb_restart.hour < HOUR_MAX:\n \n ts = TimeSlot(\"%s to %02d:00\"%(\n hb_restart.strftime(\"%Y-%m-%d from %H:%M\"),\n HOUR_MAX),\n datestr = True)\n \n timeslots.append(ts)\n else:\n #the room isn't available so we'll hardbook on whole day\n cur_weekdate = weekdate_start + \\\n timezone.timedelta(days = WEEKDAYS_CODE.index(weekday),\n hours = HOUR_MIN)\n \n #create a timeslot for the whole day\n ts = TimeSlot(cur_weekdate,\n duration = HOUR_MAX - HOUR_MIN)\n \n timeslots.append(ts)\n\n #the information needed to do the hard booking :\n #room name and timeslots\n booking = {\n \"room\" : \"%s %s\"%(row[1], row[2]),\n \"timeslots\" : timeslots \n }\n \n output_data.append(booking)\n \n #from this row the data starts to be interesting to parse\n if \"RDEF CODE\" in row[0]:\n \n start_data = True\n\n return output_data",
"def available_hours_by_day(self, day, condition):\r\n if condition == \"close\":\r\n pass\r\n all_hours = [i for i in range(28)]\r\n if not self.availabilities:\r\n # Need to return this first or it will crash when it cant iterate through an empty list\r\n return [(i, str(i) + \":00\") for i in range(23)]\r\n busy_hours = [i[0] for i in self.working_hours_by_day(day)]\r\n available_hours = [i for i in all_hours if i not in busy_hours]\r\n options = []\r\n for i in available_hours:\r\n if condition == \"open\":\r\n hour, minute = self.verify_time_value(i, 0)\r\n else:\r\n hour, minute = self.verify_time_value(i + 1, 0)\r\n hour = time(hour, minute).hour\r\n options.append((hour, str(hour) + \":00\"))\r\n return options",
"def check_time_range(file, startdate, enddate):\n key_date = parse_dt_from_logfile_name(file)\n if startdate <= key_date <= enddate:\n return file",
"def __call__(self,time):\n \n fname = []\n tind =[]\n for t in time:\n flag=1\n for f in self.timelookup.keys():\n\n if t >= self.timelookup[f][0] and t<=self.timelookup[f][-1]:\n# print 'Found tstep %s'%datetime.strptime(t,'%Y-%m-%d %H:%M:%S')\n tind.append(othertime.findNearest(t,self.timelookup[f][:]))\n fname.append(f)\n flag=0\n\n# if flag:\n# print 'Warning - could not find matching file for time:%s'%datetime.strptime(t,'%Y-%m-%d %H:%M:%S')\n# tind.append(-1)\n# fname.append(-1)\n \n return tind, fname",
"def read_local_20Hz_files(**kwargs):\n pathlst = kwargs.get('pathlst')\n product = kwargs.get('product')\n varalias = kwargs.get('varalias')\n sdate = kwargs.get('sdate')\n edate = kwargs.get('edate')\n twin = kwargs.get('twin')\n\n # establish coords if defined in config file\n timestr = satellite_dict[product]['vardef']['time']\n lonstr = satellite_dict[product]['vardef']['lons']\n latstr = satellite_dict[product]['vardef']['lats']\n\n # adjust start and end\n sdate = sdate - timedelta(minutes=twin)\n edate = edate + timedelta(minutes=twin)\n # get meta data\n ncmeta = ncdumpMeta(pathlst[0])\n ncvar = get_filevarname(varalias, variable_info,\n satellite_dict[product], ncmeta)\n # retrieve sliced data\n ds = read_netcdfs(pathlst)\n ds_sort = ds.sortby(timestr)\n\n # get indices for included time period\n nptime = ds_sort[timestr].data\n print('here0')\n print(len(nptime))\n #dtime = [parse_date(str(nptime[i])) for i in range(len(nptime))]\n print('here1')\n #idx = find_included_times_pd(dtime, sdate=sdate, edate=edate)\n idx = find_included_times_pd(nptime, sdate=sdate, edate=edate)\n print(len(nptime[idx]))\n print('here2')\n dtime = [parse_date(str(nptime[idx][i])) for i in range(len(nptime[idx]))]\n print(dtime)\n print('here3')\n #dtime = list(np.array(dtime)[idx])\n lons = list(((ds_sort[lonstr].data[idx] - 180) % 360) - 180)\n lats = list(ds_sort[latstr].data[idx])\n\n unxt = (nptime[idx].astype(int) / 10**9)\n\n # make dict and start with stdvarname for varalias\n stdvarname = variable_info[varalias]['standard_name']\n vardict = {}\n vardict[stdvarname] = list(ds_sort[ncvar].data[idx])\n vardict['longitude'] = lons\n vardict['latitude'] = lats\n vardict['time'] = unxt\n vardict['datetime'] = dtime\n vardict['time_unit'] = variable_info['time']['units']\n print(vardict.keys())\n return vardict",
"def working_hours_by_day(self, day):\r\n availabilities = self.sorted_availabilities(day)\r\n options = []\r\n if not availabilities:\r\n return 0\r\n for availability in availabilities:\r\n count = 0\r\n while True:\r\n new = time(availability.start.hour + count, 0)\r\n options.append((new.hour, new.__str__()))\r\n count += 1\r\n if new >= availability.end:\r\n break\r\n return options",
"def set_available_time_slot():\n if request.content_type != 'application/json':\n error = json.dumps({'error': 'Invalid Content Type'})\n return make_response(error, 400, InterviewCalendarApi.HEADERS)\n\n data = request.json\n # For Temporary purpose, stored in flat file database\n with open(InterviewCalendarApi.DB_FILE, \"a+\") as fd:\n record = \"%s|%s|%s|%s\\n\" %(data[\"Category\"], data[\"Name\"],\n data[\"Email\"], \",\".join(data[\"AvailablityDateTime\"]))\n fd.write(record)\n msg = json.dumps({\"Status\": \"Success\"})\n return make_response(msg, 200, InterviewCalendarApi.HEADERS)",
"def _get_url_for_timerange(self, timerange, **kwargs):\n timerange = TimeRange(timerange.start.strftime('%Y-%m-%d'), timerange.end)\n if timerange.end < parse_time(\"1999/01/15\"):\n goes_file = \"%Y/go{satellitenumber:02d}%y%m%d.fits\"\n elif timerange.start < parse_time(\"1999/01/15\") and timerange.end >= parse_time(\"1999/01/15\"):\n return self._get_overlap_urls(timerange)\n else:\n goes_file = \"%Y/go{satellitenumber}%Y%m%d.fits\"\n\n goes_pattern = f\"https://umbra.nascom.nasa.gov/goes/fits/{goes_file}\"\n satellitenumber = kwargs.get(\"satellitenumber\", self._get_goes_sat_num(timerange.start))\n goes_files = Scraper(goes_pattern, satellitenumber=satellitenumber)\n\n return goes_files.filelist(timerange)",
"def find_granules(dire, tile, product, start_time, end_time):\n times = []\n fnames = []\n path = Path(dire)\n start_year = start_time.year\n end_year = end_time.year\n granules_start = path.rglob(f\"**/MCD43{product:s}.A{start_year:4d}*.{tile:s}.*.hdf\")\n granules = [f for f in granules_start] \n if end_year != start_year:\n granules_end = path.rglob(f\"**/MCD43{product:s}.A{end_year:4d}*.{tile:s}.*.hdf\")\n granules = granules + [f for f in granules_end] \n granules = list(set(granules))\n if len(granules) == 0:\n raise IOError(\"Couldn't find any MCD43%s files in %s\" % (product, dire))\n for granule in granules:\n fich = os.path.basename (granule)\n timex = datetime.datetime.strptime(fich.split(\".\")[1][1:], \"%Y%j\")\n if timex >= start_time and \\\n (end_time is None or timex <= end_time ):\n times.append (timex)\n fnames.append(granule.as_posix())\n return dict(list(zip(times, fnames)))",
"def read_file_to_list(filename):\n fields = {}\n tickets = []\n file = open(filename, \"r\")\n for line in file:\n if line.strip() == \"\": continue\n if line.strip() == \"your ticket:\": continue\n if line.strip() == \"nearby tickets:\": continue\n \n #seat: 13-40 or 45-50\n m = re.search(r'^([a-z\\s]+):\\s+([0-9]+)[-]([0-9]+)\\s+or\\s+([0-9]+)[-]([0-9]+)$', line)\n if m: \n fields[m.group(1)] = [\n {\"from\" : int(m.group(2)), \"to\" : int(m.group(3))},\n {\"from\" : int(m.group(4)), \"to\" : int(m.group(5))}\n ]\n continue\n #7,1,14\n m = re.search(r'^([0-9,]+)', line)\n if m: \n tickets.append([int(x) for x in m.group(1).split(',')])\n continue\n raise Exception(line)\n return fields , tickets",
"def offerings_scheduled(lang, fiscal_year, course_code):\n\tfield_name_1 = 'offering_city_{0}'.format(lang)\n\tfield_name_2 = 'offering_province_{0}'.format(lang)\n\tquery = \"\"\"\n\t\tSELECT offering_id, start_date, end_date, {0} as offering_city, {1} as offering_province,\n\t\t\toffering_language, instructor_names, confirmed_count, cancelled_count, waitlisted_count,\n\t\t\tno_show_count, client, offering_status\n\t\tFROM offerings\n\t\tWHERE course_code = %s AND fiscal_year >= %s\n\t\tORDER BY 2 DESC;\n\t\"\"\".format(field_name_1, field_name_2)\n\tresults = query_mysql(query, (course_code, fiscal_year), dict_=True)\n\t# Assign background colours\n\tresults_processed = []\n\tfor dict_ in results:\n\t\ttemp_dict = dict_\n\t\tstart_date = temp_dict['start_date']\n\t\tend_date = temp_dict['end_date']\n\t\tconfirmed_count = temp_dict['confirmed_count']\n\t\toffering_status = temp_dict['offering_status']\n\t\ttemp_dict['color'] = _assign_background_color(start_date, end_date, confirmed_count, offering_status)\n\t\tresults_processed.append(temp_dict)\n\treturn results_processed",
"def venue_occupancy(filename):\n #open file\n f = open(filename,\"r\")\n incsv = f.readlines()\n #removing affixes\n incsv[:] = [i.rstrip('\\n') for i in incsv]\n #lines into lists\n tempstr = \"\"\n templist = []\n for j in range(len(incsv)):\n #enters each line into temporary string variable\n tempstr = incsv[j]\n #enters the split string into a temporary list variable\n templist.append(tempstr.split(\",\"))\n #modify original line in original list with split list\n incsv[j] = templist\n #reset temporary variables\n tempstr = \"\"\n templist = []\n #all venues\n venuelist = []\n for k in range(1,len(incsv)):\n venuelist.append(incsv[k][0][7])\n #filter all unique venues by checking whether venue is already in filterlist\n filterlist = []\n #check is temporary variable to decide whether to add venue\n check = True\n #for all venues in venuelist\n for l in range(len(venuelist)):\n #if venue in venuelist already in filterlist\n if venuelist[l] in filterlist:\n #decision to add venue is false\n check = False\n #if decision is to add the venue in venuelist\n if check == True:\n #append new venue to filterlist\n filterlist.append(venuelist[l])\n #reset decision to true\n check = True\n #add hours to total count (time)\n time = 0\n #for all lines in file\n for m in range(1,len(incsv)):\n #if time of venue falls within office hours for weekdays\n if int(incsv[m][0][5]) >= 800 and int(incsv[m][0][5]) <= 1700 and int(incsv[m][0][6]) <= 1700 and int(incsv[m][0][6]) >= 800 and int(incsv[m][0][3]) >= 1 and int(incsv[m][0][3]) <= 5:\n #add hour to total count\n time += (int(incsv[m][0][6]) - int(incsv[m][0][5]))\n #if start time falls before office hours but end time is within office hours\n elif int(incsv[m][0][5]) < 800 and int(incsv[m][0][6]) <= 1700 and int(incsv[m][0][5]) > 800 and int(incsv[m][0][3]) >= 1 and int(incsv[m][0][3]) <= 5:\n #ignore hours before 800 and add remaining hours\n time += (int(incsv[m][0][6]) - 800)\n #if end time falls after office hours but start time is within office housr\n elif int(incsv[m][0][5]) >= 800 and int(incsv[m][0][5]) < 1700 and int(incsv[m][0][6]) > 1700 and int(incsv[m][0][3]) >= 1 and int(incsv[m][0][3]) <= 5:\n #ignore hours after 1700 and add remaining hours\n time += (1700 - int(incsv[m][0][5]))\n #if start time falls before 800 and end time falls after 1700\n elif int(incsv[m][0][5]) < 800 and int(incsv[m][0][6]) > 1700 and int(incsv[m][0][3]) >= 1 and int(incsv[m][0][3]) <= 5:\n #add the maximum of 9 hours\n time += 900\n #if time range falls outside of office hours\n elif ((int(incsv[m][0][5]) < 800 and int(incsv[m][0][6]) <= 800) or (int(incsv[m][0][5]) >= 1700 and int(incsv[m][0][6]) > 1700)) and int(incsv[m][0][3]) >= 1 and int(incsv[m][0][3]) <= 5:\n #total hours remain\n time = time\n #average (avr)\n avr = 0\n #average = total hours / (number of unique venues) * 45 hours\n avr = (time/(len(filterlist)*4500))\n return avr",
"def get_list_block_time_periods(filename_time_periods):\n\n if not hasattr(get_list_block_time_periods, \"cached\"):\n get_list_block_time_periods.cached = {}\n\n if filename_time_periods not in get_list_block_time_periods.cached:\n with open(filename_time_periods, 'r') as file:\n contents = csv.reader(file)\n next(contents)\n\n time_periods = []\n for row in contents:\n time_beginning = datetime.datetime.strptime(row[0], \"%Y-%m-%d %H:%M:%S\")\n time_ending = datetime.datetime.strptime(row[1], \"%Y-%m-%d %H:%M:%S\")\n\n time_periods.append((time_beginning, time_ending, row[2]))\n #print(\"List of time periods: \", time_periods)\n\n get_list_block_time_periods.cached[filename_time_periods] = time_periods\n\n return get_list_block_time_periods.cached[filename_time_periods]",
"def get_avail_time_slots(self, cid, date):\n booked = self.get_time_slots(cid, date)\n avail_time_slots = []\n for time in self.initial_time_slots:\n if time not in booked:\n avail_time_slots.append(time)\n return avail_time_slots",
"def produce_data2(self, filename):\n filepath = f'data/{filename}-sorted.csv'\n\n time_by_guards = defaultdict(int)\n guard = None\n asleep = None\n\n with open(filepath) as in_file:\n line = in_file.readline()\n while line:\n line = line.replace('\\n', '')\n if line == '':\n break\n _, time, occurrence = line.split(',')\n time = int(time)\n if '#' in occurrence:\n guard = int(occurrence[1:])\n asleep = None\n elif occurrence == 'FA':\n asleep = time\n elif occurrence == 'WU':\n if asleep is None:\n # exceptional case\n breakpoint()\n line = in_file.readline()\n # need to readline() because is exiting this iteration\n continue\n for t in range(asleep, time):\n time_by_guards[(guard, t)] += 1\n\n line = in_file.readline()\n\n return time_by_guards",
"def get_time_slots(self, cid, date):\n query = \"SELECT time from consultation where cid = %s and date = %s\"\n inputs = (cid, date)\n array_book = self.database_manager.execute_query(query, inputs)\n array_book = [e[0] for e in array_book]\n booked = array_book if array_book else []\n return booked",
"def tr_check_availability(agent_directory, agent_full_name, slot_range):\r\n tr_create_booking_register(agent_directory, agent_full_name) # CHANGE THIS WHEN POSSIBLE. IT IS ERRASING ALL BOOKINGS. NOW THE SYSTEM IS NOT CONSTRAINT IN TR RESOURCES.\r\n tr_booking_df = pd.read_csv(f'{agent_directory}''/'f'{agent_full_name}_booking.csv', header=0, delimiter=\",\", engine='python')\r\n tr_booking_df['booking_type'] = tr_booking_df['booking_type'].fillna(\"\")\r\n # Creates 2 lists: booked_slots_list & free_slots_list and checks availability.\r\n free_slots_list = []\r\n booked_slots_list = []\r\n prebooked_slots_list = []\r\n for x in slot_range:\r\n if tr_booking_df.loc[x - 1, 'booking_type'] == \"pre-book\":\r\n prebooked_slots_list.append(x)\r\n elif tr_booking_df.loc[x - 1, 'booking_type'] == \"booked\":\r\n booked_slots_list.append(x)\r\n else:\r\n free_slots_list.append(x)\r\n # Checks availability\r\n if len(booked_slots_list) >= 1:\r\n tr_msg_ca_body = \"negative\"\r\n else:\r\n tr_msg_ca_body = \"positive\"\r\n return tr_msg_ca_body",
"def test_files_missing():\n\tfiledir = \"./goes_files/%Y_events/%Y%m%devents.txt\"\n\tt0 = timerange.start.datetime\n\tdays = [t0]\n\twhile timerange.end.datetime > t0:\n\t\tt0 = t0 + relativedelta(days=1)\n\t\tdays.append(t0)\n\n\tmissing_files = []\n\tfor d in days:\n\t\tif not os.path.exists(d.strftime(filedir)):\n\t\t\tmissing_files.append(d.strftime(filedir))\n\tprint(missing_files)",
"def dailynames(directory='',\n trange=None,\n res=24*3600.,\n hour_res=False,\n file_format='%Y%m%d',\n prefix='',\n suffix='',\n return_times=False):\n if trange is None:\n logging.error('No trange specified')\n return\n\n if hour_res:\n res = 3600.\n file_format = '%Y%m%d%H'\n\n # allows the user to pass in trange as list of datetime objects\n if type(trange[0]) == datetime and type(trange[1]) == datetime:\n trange = [time_string(trange[0].replace(tzinfo=timezone.utc).timestamp()),\n time_string(trange[1].replace(tzinfo=timezone.utc).timestamp())]\n\n tr = [trange[0], trange[1]]\n \n if isinstance(trange[0], str):\n tr[0] = time_double(trange[0])\n if isinstance(trange[1], str):\n tr[1] = time_double(trange[1])\n\n # Davin's magic heisted from file_dailynames in IDL\n mmtr = [np.floor(tr[0]/res), np.ceil(tr[1]/res)]\n\n if mmtr[1]-mmtr[0] < 1:\n n = 1\n else:\n n = int(mmtr[1]-mmtr[0])\n\n times = [(float(num)+mmtr[0])*res for num in range(n)]\n\n if return_times:\n return times\n\n dates = []\n files = []\n \n for time in times:\n if time_string(time, fmt=file_format) not in dates:\n dates.append(time_string(time, fmt=file_format))\n \n for date in dates:\n files.append(directory + prefix + date + suffix)\n\n return files",
"def entries_from_goes_ts_file2(file, default_waveunit=None):\n\n headers = fits.get_header(file)\n if isinstance(file, (str, six.text_type)):\n filename = file\n else:\n filename = getattr(file, 'name', None)\n\n statinfo = os.stat(file)\n #print('a header')\n entry = DatabaseEntry(path=filename)\n size = statinfo.st_size\n\n # Add/tweak start/end entries for GOES\n if headers[0].get('TELESCOP','') != '':\n #header['INSTRUME'] = header['TELESCOP']# So E.G. 'GOES 6' instead 'X-ray Detector'\n entry.instrument = headers[0]['TELESCOP']\n if (headers[0].get('DATE-OBS','') != ''):\n if is_time_in_given_format(headers[0]['DATE-OBS'], '%d/%m/%Y'):\n start_time = datetime.strptime(headers[0]['DATE-OBS'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[0]['DATE-OBS'], '%d/%m/%y'):\n start_time = datetime.strptime(headers[0]['DATE-OBS'], '%d/%m/%y')\n else:\n start_time = parse_time(headers[0]['DATE-OBS'])\n elif (headers[1].get('DATE-OBS','') != ''):\n if is_time_in_given_format(headers[1]['DATE-OBS'], '%d/%m/%Y'):\n start_time = datetime.strptime(headers[1]['DATE-OBS'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[1]['DATE-OBS'], '%d/%m/%y'):\n start_time = datetime.strptime(headers[1]['DATE-OBS'], '%d/%m/%y')\n else:\n start_time = parse_time(headers[1]['DATE-OBS'])\n\n if (headers[0].get('DATE-END','') != ''):\n if is_time_in_given_format(headers[0]['DATE-END'], '%d/%m/%Y'):\n end_time = datetime.strptime(headers[0]['DATE-END'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[0]['DATE-END'], '%d/%m/%y'):\n end_time = datetime.strptime(headers[0]['DATE-END'], '%d/%m/%y')\n else:\n end_time = parse_time(headers[0]['DATE-END'])\n elif (headers[1].get('DATE-END','') != ''):\n if is_time_in_given_format(headers[1]['DATE-END'], '%d/%m/%Y'):\n end_time = datetime.strptime(headers[1]['DATE-END'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[1]['DATE-END'], '%d/%m/%y'):\n end_time = datetime.strptime(headers[1]['DATE-END'], '%d/%m/%y')\n else:\n end_time = parse_time(headers[1]['DATE-END'])\n else:\n end_time = start_time + timedelta(days=1,seconds=-1)\n\n # Add these to the entry\n observation_time_start = start_time\n observation_time_end = end_time\n\n wavemax = 0.8 # XRSB '1.0--8.0 $\\AA$'\n wavemin = 0.05 # XRSA '0.5--4.0 $\\AA$'\n\n metadata = MetaDict(headers[1])\n #entry.tags = sunpy.database.attrs.Tag('raw')\n\n entry = DatabaseEntry(observation_time_start=start_time,\n observation_time_end = end_time,\n instrument='EIT',\n wavemin=wavemin,\n wavemax=wavemax,\n metadata=metadata,\n size=size)\n\n return entry",
"def test_get_url_for_time_range_level2_allwave(suvi_client, start, end, expected_num_files):\n goes_sat = a.goes.SatelliteNumber.sixteen\n qresponse = suvi_client.search(a.Time(start, end), goes_sat, a.Level(2))\n urls = [i['url'] for i in qresponse]\n assert isinstance(urls, list)\n assert len(urls) == expected_num_files",
"def get_flarelist(goes_class_filter, filename): \n t_start = \"2012-08-22 00:00\"\n t_end = \"2018-04-20 00:00\"\n get_goes_event_list(t_start, t_end, filename=Path.cwd().joinpath(filename), goes_class_filter=goes_class_filter)",
"def getFiles(mintime=(17, 20, 17), maxtime=(17, 33, 17), folder='data/30Jul/'):\n start = datetime.time(*mintime)\n stop = datetime.time(*maxtime)\n all = g.glob(folder + '*.fits')\n ret = []\n for f in all:\n path, file = os.path.split(f)\n numbs = [int(x) for x in file.replace('sEuclid.fits', '').split('_')]\n data = datetime.time(*numbs)\n if start <= data <= stop:\n ret.append(file)\n return [folder + f for f in ret]",
"def _read_target_schedule(self):\n target_schedule = self.target_schedule\n target_select = self.target_select\n\n #-- list of scheduled times to be returned\n self.date_lst_xtgt = []\n\n if target_schedule!=None:\n if target_select!=None:\n msg = \"target_schedule and target_select were both specified, \" \\\n \"'target_select' will be ignored here!\"\n FileLogger.warn(msg)\n msg = \"target schedule will be read from file ***{}***\".format(target_schedule)\n FileLogger.info(msg)\n with open(target_schedule,'r') as fp:\n for line in fp:\n if line[0]=='#':\n continue\n else:\n # print \"---{}---\".format(line.rstrip())\n date_utc = dt.datetime.strptime(line.rstrip(),'%Y%m%dT%H:%M:%S')\n if self.time_start!=None and date_utc<self.time_start:\n continue\n elif self.time_end!=None and date_utc>self.time_end:\n continue\n else:\n self.date_lst_xtgt.append(date_utc)\n #-- ensure time-increase ordering\n self.date_lst_xtgt = sorted(self.date_lst_xtgt)\n nxtgt = len(self.date_lst_xtgt)\n msg = \"...reading target schedule DONE (nxtgt={})\".format(nxtgt)\n FileLogger.info(msg)\n elif target_select!=None:\n msg = \"target schedule will be determined from specification ---{}---\".format(\n target_select)\n FileLogger.info(msg)\n ttgt_min = target_select[0]\n ttgt_max = target_select[1]\n ttgt_delta = target_select[2]\n ttgt_min = dt.datetime.strptime(ttgt_min,'%Y%m%dT%H:%M')\n ttgt_max = dt.datetime.strptime(ttgt_max,'%Y%m%dT%H:%M')\n if ttgt_delta[-1].lower()=='h':\n ttgt_delta = dt.timedelta(hours=float(ttgt_delta[0:-1]))\n elif ttgt_delta[-1].lower()=='d':\n ttgt_delta = dt.timedelta(days=float(ttgt_delta[0:-1]))\n date_utc = ttgt_min\n while date_utc<=ttgt_max:\n if self.time_start!=None and date_utc<self.time_start:\n pass\n elif self.time_end!=None and date_utc>self.time_end:\n pass\n else:\n self.date_lst_xtgt.append(date_utc)\n #-- increment date\n date_utc += ttgt_delta\n msg = \"read {} state components at extra target times.\".format(len(self.date_lst_xtgt))\n FileLogger.info(msg)",
"def _read_lifeguard_schedule_file(file_name: str) -> list:\n\tlifeguard_shifts = []\n\twith open(file_name) as f:\n\t\tlifeguard_count = int(f.readline())\n\t\tfor lifeguard in range(0, lifeguard_count):\n\t\t\tshift_start, shift_end = f.readline().split(\" \")\n\t\t\tlifeguard_shifts.append((int(shift_start), int(shift_end)))\n\n\treturn sorted(lifeguard_shifts, key=lambda shift: shift[0])"
]
| [
"0.59195966",
"0.57383525",
"0.5540875",
"0.5463237",
"0.53547984",
"0.51768637",
"0.50629467",
"0.49165255",
"0.4908699",
"0.48233223",
"0.47792912",
"0.4748623",
"0.47469702",
"0.47431985",
"0.46946442",
"0.46777627",
"0.46385124",
"0.46170804",
"0.45761064",
"0.45463902",
"0.4506312",
"0.45005324",
"0.45003748",
"0.44692057",
"0.44617763",
"0.44526356",
"0.4442976",
"0.44419336",
"0.4435216",
"0.44339773"
]
| 0.7180395 | 0 |
ttint(timelist,venue) takes a timelist list produced by pp_schedule and draws it as a readable timetable in Python turtle graphics. venue is the string printed at the bottom of the screen to identify the venue concerned. | def ttint(timelist,venue):
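    #illustrative call (a sketch; the module code and venue below are hypothetical):
    #   ttint([[1,[800,1000,"CS1010"]],[2],[3],[4],[5],[6],[7]], "LT27")
    #   i.e. one 0800-1000 booking on Monday and no bookings on the other days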
    #setup (uses Python's built-in turtle module; assumes "from turtle import *" at the top of the file)
showturtle()
#make python turtle graphics window 1260 pixels wide and 800 pixels tall
setup(width = 1260, height = 800, startx = None, starty = None)
reset()
#text at top
pen(pencolor="black")
pu()
setpos(0,380)
write("Welcome to your schedule. Use the arrow keys to toggle the day of the week",move=False,align="center",font=("Courier New",10,"normal"))
setpos(0,360)
write("In Idle, type 'quit()' to exit turtle.",move=False,align="center",font=("Courier New",10,"normal"))
dayl = ["Mon","Tue","Wed","Thu","Fri","Sat","Sun"]
setpos(0,-350)
#writes venue at bottom of GUI
write(venue,move=False,align="center",font=("Courier New",20,"normal"))
#drawing the lines and timing
#baseY = 300 because y = 300 is the height of the line for monday
baseY = 300
for ch in range(7):
pu()
#goes to relevant y position for respective day code
setpos(-570,(baseY-(100*ch)))
#writes day name at side
write(dayl[ch],move=False,align="center",font=("Courier New",20,"normal"))
pen(pencolor="black",pensize="3")
#draws lines
#for each hour
for dh in range(19):
#move right 60 steps
setx(xcor()+60)
pd()
#move up 20 steps
sety(ycor()+20)
pu()
#stop drawing. move up 10 steps and write hour
sety(ycor()+10)
write(str((600+(dh*100))),move=False,align="center",font=("Courier New",10,"normal"))
#go back down 30 steps to main line
sety(ycor()-30)
#continue drawing
pd()
pu()
#goes to each relevant timing to write module code
#for every time range in timelist. dp stands for day parse
for dp in range(len(timelist)):
#if week day in timelist is not empty
        if len(timelist[dp]) > 1:
#for each timing in the week day. hp stands for hour parse
for hp in range(1,len(timelist[dp])):
#for each hour in the time range. pr is an arbitrary variable which helps to direct the turtle to the timings in between the start and end time to write the module code at the relevant location
for pr in range(int((timelist[dp][hp][1]-timelist[dp][hp][0])/100)):
#go to the relevant time and write the module code in between
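                    #e.g. a hypothetical 800-1000 booking on day 1 with pr == 0 gives
                    #x == -840 + (8+0)*60 == -360 and y == 410 - 1*100 == 310,
                    #which sits between the 800 and 900 tick marks just above the Monday line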
setpos((-840+(int(timelist[dp][hp][0]/100)+pr)*60),(410-timelist[dp][0]*100))
write(timelist[dp][hp][2],move=False,align="center",font=("Courier New",8,"normal")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pp_schedule(filename,venue):\n #reading the file\n f = open(filename,\"r\")\n incsv = f.readlines()\n #removing affixes\n incsv[:] = [i.rstrip('\\n') for i in incsv]\n #lines into lists\n tempstr = \"\"\n templist = []\n for j in range(len(incsv)):\n #enters each line into temporary string variable\n tempstr = incsv[j]\n #enters the split string into a temporary list variable\n templist.append(tempstr.split(\",\"))\n #modify original line in original list with split list\n incsv[j] = templist\n #reset temporary variables\n tempstr = \"\"\n templist = []\n #timelist stands for timetable list\n #format of timelist: [[day,[start,end,module],...],...]\n timelist = []\n for k in range(1,8):\n #for each day code add day code\n timelist.append([k])\n #assign and make list for ttint\n #for all lines in file\n for l in range(len(incsv)):\n #if venue in line matches desired venue\n if incsv[l][0][7] == venue:\n #after each day code, add a list with start time, end time and module. Repeat for each relevant line\n timelist[(int(incsv[l][0][3])-1)].append([int(incsv[l][0][5]),int(incsv[l][0][6]),incsv[l][0][0]])\n #turtle\n print(\"Your timetable is being printed on Python Turtle Graphics. This may take a while.\")\n ttint(timelist,venue)",
"def print_tlines(ty,slist,scaledtime, farright):\r\n xinc = 0.005\r\n yinc = 0.002\r\n if(scaledtime != []):\r\n if max(scaledtime)/1e6 < 1.0:\r\n yearscaler = 1e3\r\n yearscalestring = \" KYR\"\r\n else:\r\n yearscaler = 1e6\r\n yearscalestring = \" MYR\"\r\n if gv[\"eventimes\"] == False:\r\n for i in range(numpops-1):\r\n if (ty[i][1] > ty[i][0]):\r\n yline(ty[i][1],farright,1,2,gv[\"graylevel\"])\r\n yline(ty[i][0],farright,0.5,0,0)\r\n if (ty[i][2] < ty[i][0]):\r\n yline(ty[i][2],farright,1,2,gv[\"graylevel\"])\r\n if(scaledtime != []):\r\n scaledtime[i] /= yearscaler\r\n mtime = round(scaledtime[i],-int(math.log10(scaledtime[i])-2))\r\n nstr = str(mtime) + yearscalestring\r\n ## str(int(round(scaledtime[i],-int(math.log10(scaledtime[i])-2)))) + \" yrs\"\r\n dotext([xinc*(i+2),ty[i][0]+yinc],nstr,0, False)\r\n else:\r\n nstr = fround(slist[5][4][i][1]) + \"tu\"\r\n dotext([xinc*(i+2),ty[i][0]+yinc],nstr,0, False)\r\n if (ty[i][1] > ty[i][0]):\r\n arrow([xinc*(i+1),ty[i][1]],[xinc*(i+1),ty[i][0]],1, gv[\"black\"])\r\n if (ty[i][2] < ty[i][0]):\r\n arrow([xinc*(i+1),ty[i][2]],[xinc*(i+1),ty[i][0]],3, gv[\"black\"])\r\n else:\r\n for i in range(numpops-1):\r\n yline(ty[i][0],farright,0.5,0,0)\r\n if(scaledtime != []):\r\n scaledtime[i] /= yearscaler\r\n mtime = round(scaledtime[i],-int(math.log10(scaledtime[i])-2))\r\n nstr = str(mtime) + yearscalestring\r\n ## str(int(round(scaledtime[i],-int(math.log10(scaledtime[i])-2)))) + \" yrs\"\r\n dotext([xinc*(i+2),ty[i][0]+yinc],nstr,0, False)\r\n else:\r\n nstr = fround(slist[5][4][i][1]) + \"tu\"\r\n dotext([xinc*(i+2),ty[i][0]+yinc],nstr,0, False)\r\n return ty",
"def schedule_text():",
"def available_timing(filename,day,venue):\n #reading the file\n f = open(filename,\"r\")\n incsv = f.readlines()\n #removing affixes\n incsv[:] = [i.rstrip('\\n') for i in incsv]\n #lines into lists\n tempstr = \"\"\n templist = []\n for j in range(len(incsv)):\n #enters each line into temporary string variable\n tempstr = incsv[j]\n #enters the split string into a temporary list variable\n templist.append(tempstr.split(\",\"))\n #modify original line in original list with split list\n incsv[j] = templist\n #reset temporary variables\n tempstr = \"\"\n templist = []\n #finding occupied hours\n brlist = []\n #for all lines in file\n for k in range(len(incsv)):\n #if venue in line matches desired venue and day in line matches desired day\n if incsv[k][0][7] == venue and int(incsv[k][0][3]) == day:\n #add time range of line into brlist\n brlist.append([int(incsv[k][0][5]),int(incsv[k][0][6])])\n #pruning\n #tlist stands for timelist. stores remaining hours for synthesis\n tlist = []\n #list of hours\n tlist = [600,700,800,900,1000,1100,1200,1300,1400,1500,1600,1700,1800,1900,2000,2100,2200,2300,2400]\n #for line in brlist\n for l in range(len(brlist)):\n #for the range of hours of the line\n for m in range(int((brlist[l][1]-brlist[l][0])/100)):\n #if hours in range still in tlist\n if (brlist[l][0] + m*100) in tlist:\n #remove from tlist\n tlist.remove(brlist[l][0] + m*100)\n #plist for partition list. range of available timings appended here\n plist = []\n #check is for the start time of each available time ranges\n check = 0\n #formation of time ranges\n #for hours in tlist\n for n in range(len(tlist)):\n #if code is checking element 2. Could have used range(1,len(tlist)) but nevermind\n if n >= 1:\n #if 2 adjacent hours are not consecutive\n if tlist[n] != (tlist[n-1]+100):\n #add time range to plist\n plist.append((tlist[check],tlist[n-1]+100))\n #set check to next minimum available start time\n check = n\n #adding range with last hour\n #if last hour in tlist is 2400 and precedent hour in tlist is 2300\n if tlist[n] == 2400 and tlist[n-1] == 2300:\n #add time range\n plist.append((tlist[check],2400))\n return plist",
"def gettpoints(self,left,top,tnum):\n if tnum == 0:\n x1 = left + CELLSIZE/2\n y1 = top\n x2 = x1 + TSIZE\n y2 = y1 + TSIZE\n x3 = x2 - 2*TSIZE\n y3 = y2\n if tnum == 1:\n x1 = left + CELLSIZE\n y1 = top + CELLSIZE/2\n x2 = x1 - TSIZE\n y2 = y1 + TSIZE\n x3 = x2\n y3 = y2 - 2*TSIZE\n if tnum == 2:\n x1 = left + CELLSIZE/2\n y1 = top + CELLSIZE\n x2 = x1 - TSIZE\n y2 = y1 - TSIZE\n x3 = x2 + TSIZE*2\n y3 = y2\n if tnum == 3:\n x1 = left\n y1 = top + CELLSIZE/2\n x2 = x1 + TSIZE\n y2 = y1 + TSIZE\n x3 = x2\n y3 = y2 - TSIZE*2\n\n return ((x1,y1),(x2,y2),(x3,y3))",
"def print_list(schedule):\n START_TIME = 0\n END_TIME = 1\n MEETING_TITLE = 2\n print(\"\\nYour schedule for the day:\")\n if len(schedule) == 0:\n print(\"(empty)\\n\")\n else:\n for row in schedule:\n print(\n f\"{row[START_TIME]} - {row[END_TIME]} {row[MEETING_TITLE]}\")\n print(\"\\n\")",
"def set_tlines(ty,slist):\r\n t = []\r\n for i in range(numpops-1):\r\n t.append([slist[5][4][i][1],slist[5][4][i][2],slist[5][4][i][3]]) ## [time, upper ci, lower ci]\r\n ty = []\r\n if gv[\"localyscale\"] == -1:\r\n yint = gv[\"line0y\"] - gv[\"lastt_lower_y\"]\r\n for i in range(numpops-1):\r\n ty.append([])\r\n if gv[\"eventimes\"] == False:\r\n tmax = slist[5][4][numpops-2][3] ## bottom of confidence interval of largest(oldest) t\r\n for j in range(3):\r\n ty[i].append(gv[\"line0y\"] - (t[i][j]*yint)/tmax)\r\n else:\r\n## ty[i].append(gv[\"line0y\"] - ((i+1)/float(numpops+1)*yint)/tmax)\r\n ty[i].append(gv[\"line0y\"] - yint * (i+1)/float(numpops) )\r\n else:\r\n timeumean = slist[7][4][1]\r\n scaleumean = slist[7][4][2]\r\n for i in range(numpops-1):\r\n ty.append([])\r\n for j in range(3):\r\n ty[i].append(gv[\"line0y\"] - (t[i][j] * (scaleumean/timeumean/1e6)* gv[\"localyscale\"]))\r\n if ty[i][j] < gv[\"lineINFy\"]:\r\n print ( \" time line too low in graph, reduce local y scale (-y value) \")\r\n gv[\"lastt_lower_y\"] = ty[numpops-2][2]\r\n## print \"ty : \",ty\r\n return ty",
"def to_string(self, t = None, title = None):\n if title:\n print title\n column_width = self.max_cell_print_len()\n cell_hline = [ '-' for i in range(column_width + 2) ] + ['|']\n cell_hline = ''.join(cell_hline)\n hline = ['|'] + [ cell_hline for i in range(self.width + 1) ] + ['\\n']\n hline = ''.join(hline)\n slist = []\n if len(self.agents) > 0:\n slist += ['Scores:']\n for agent in self.agents:\n slist.append(' {0}={1}'.format(agent, agent.performance_measure))\n\n if len(self.agents) > 0:\n slist.append('\\n')\n for c in range(0, self.width + 1):\n spacer = ''.join([ ' ' for i in range(column_width - 1) ])\n slist.append(' {0}{1} '.format(c, spacer))\n\n slist.append(' time_step={0}'.format(t if t else self.time_step))\n slist.append('\\n')\n slist.append(hline)\n for r in range(self.height, -1, -1):\n for c in range(0, self.width + 1):\n things_at = self.list_things_at((c, r))\n cell_width = 0\n for thing_at in things_at:\n cell_width += len(thing_at.to_string())\n\n spacer = ''.join([ ' ' for i in range(column_width - cell_width) ])\n slist.append('| ')\n for thing in things_at:\n slist.append(thing.to_string())\n\n slist.append(spacer + ' ')\n\n slist.append('| {0}\\n'.format(r))\n slist.append(hline)\n\n return ''.join(slist)",
"def report_round_tournament(self, subtitle=\"Affichage des rounds du tournoi\", *, data: list):\n\n self.clean()\n print(f\"Bienvenue dans le gestionnaire de tournois d'รฉchec.\\n{subtitle}\")\n print(\"\\n\" * 1)\n for round in data:\n print(f\"\\t{round.name} - Dรฉbutรฉ : {round.start} - Fini : {round.end}\")",
"def print_team_schedule(\n sch: Schedule,\n team: str,\n team_list: list[str],\n capt_list: list[str],\n outfile: typing.Union[str, TextIOWrapper] = \"print\",\n):\n if outfile == \"print\":\n\n def pline(txt):\n print(txt)\n\n else:\n\n def pline(txt):\n outfile.write(txt + \"\\n\")\n\n line = \"\"\n\n pline(\"\\nTeam: \" + team + \"\\n\")\n for rnd in range(sch.nrounds):\n _rnd = sch.rounds[rnd]\n line = f\"{_rnd.play_date}\"\n game_not_found = True\n match = 0\n while game_not_found and match < _rnd.nmatches:\n _match = _rnd.matches[match]\n if _match.home == team:\n _teamidx = team_list.index(_match.away)\n _capt = capt_list[_teamidx]\n if \"Bye\" not in _match.away:\n line = line + f\" vs. {_match.away} ({_capt})\"\n else:\n line = line + \" --- BYE ---\"\n game_not_found = False\n elif _match.away == team:\n _teamidx = team_list.index(_match.home)\n _capt = capt_list[_teamidx]\n if \"Bye\" not in _match.home:\n line = line + f\" @ {_match.home} ({_capt})\"\n else:\n line = line + \" --- BYE ---\"\n game_not_found = False\n else:\n match = match + 1\n if game_not_found:\n logging.warning(\"Bye week is not expected.\")\n line = line + \"Bye Week\"\n pline(line)",
"def dept_to_tsv(subcourse_list, name):\n\n tsv_file = name.split(':')[0] + '.tsv'\n f = open(tsv_file, 'w+')\n f.write('day\\thour\\tvalue\\n')\n\n d = {}\n\n for day in range(1, 7 + 1):\n for hour in range(1, 24 + 1):\n d[(day, hour)] = 0\n\n for subcourse in subcourse_list:\n for day in subcourse[0]:\n for hour in subcourse[1]:\n d[(day, hour)] = d[(day, hour)] + 1\n\n for key, value in d.items():\n day, hour = key\n f.write(str(day) + '\\t' + str(hour) + '\\t' + str(value) + '\\n')\n\n f.close()",
"def tltn(sessions):\n # this will need some figuring out because time will need to be \"bucketed\"\n # somehow\n raise NotImplementedError('TLTD not implemented yet!')",
"def schedule_paragraph():",
"def tt(obs_time, *whatevers):\n n = whatevers[0].size\n return tuple(\n [obs_time[:n], ] +\n list(whatevers)\n )",
"def _view_schedule(self):\n def plus_top_attach(f):\n\n def plus(*args, **kwargs):\n top_attach, left_attach = f(*args, **kwargs)\n return top_attach + 1, left_attach + 1\n\n return plus\n\n @plus_top_attach\n def create_label(text, left_attach, right_attach,\n top_attach, bottom_attach, align=None):\n label = gtk.Label('<span font=\"%s\">%s</span>' %\n (Params().get_default_font(), text))\n label.set_use_markup(True)\n if align == 'left':\n label.set_alignment(xalign=0.0, yalign=0.5)\n elif align == 'right':\n label.set_alignment(xalign=1.0, yalign=0.5)\n self.table.attach(label, left_attach, right_attach,\n top_attach, bottom_attach, xoptions=gtk.FILL, yoptions=False)\n label.show()\n return top_attach, left_attach\n\n @plus_top_attach\n def create_separator(left_attach, right_attach,\n top_attach, bottom_attach):\n separator = gtk.HSeparator()\n self.table.attach(separator, left_attach, right_attach,\n top_attach, bottom_attach, xoptions=gtk.FILL, yoptions=False)\n separator.show()\n return top_attach, left_attach\n\n tattach, tlen, view_sch = 0, 0, Params().get_view_sch()\n for i in view_sch:\n if i:\n tlen += 1\n for day in ['Monday', 'Tuesday', 'Wednesday',\n 'Thursday', 'Friday', 'Saturday']:\n tattach = create_label('<b><span color=\"%s\">%s</span></b>' %\n (Params().get_day_color(), day), 0, tlen,\n tattach, tattach + 1, 'left')[0]\n tattach = create_separator(0, tlen, tattach, tattach + 1)[0]\n\n schedule = Schedule().get_schedule(day,\n Schedule().get_current_week() - 1)\n for i in range(8):\n if not schedule[i][1] == '' and \\\n (schedule[i][0] == Schedule().get_subgroup() or\n schedule[i][0] == 2):\n if not schedule[i][2]:\n label_color = '%s' % str(Params().get_lecture_color())\n elif schedule[i][2] == 1:\n label_color = '%s' % \\\n str(Params().get_laboratory_color())\n elif schedule[i][2] == 2:\n label_color = '%s' % str(Params().get_practice_color())\n else:\n label_color = '%s' % str(Params().get_non_color())\n\n label_template = '<span color=\"%s\">%s</span>'\n lattach = 0\n if view_sch[0]:\n lattach = create_label('<span color=\"%s\">%d.</span>' %\n (label_color, i),\n lattach, lattach + 1, tattach, tattach + 1)[1]\n if view_sch[1]:\n lattach = create_label(label_template % (label_color,\n '-'.join(Schedule().get_lessons_time()[i])),\n lattach, lattach + 1, tattach, tattach + 1)[1]\n if view_sch[2]:\n lattach = create_label(label_template %\n (label_color, schedule[i][1]),\n lattach, lattach + 1,\n tattach, tattach + 1, 'left')[1]\n if view_sch[3]:\n lattach = create_label(label_template %\n (label_color, schedule[i][3]),\n lattach, lattach + 1, tattach, tattach + 1)[1]\n if view_sch[4]:\n create_label(label_template %\n (label_color, schedule[i][4]),\n lattach, lattach + 1,\n tattach, tattach + 1, 'right')\n tattach += 1",
"def screen_lines_for_time(self, day, time):\n if day in self.tutors and time in self.tutors[day]:\n current = self.tutors[day][time]\n else:\n current = []\n\n if len(current) > 0:\n return [Line(\"Current Tutors:\")] + [Line(person) for person in current]\n else:\n return [Line(\"No Tutors on Duty\", center=True)]",
"def make_time_in_status_rows(self, obj_list):\n \n # Make an html table from a list of IssueClass objects\n html_table = \"\"\n\n # Put the data in html table rows\n for item in obj_list:\n html_table += '<tr><td nowrap>%s in \\\"%s\\\" status for %s days</td>' % (item.issue_type, item.status, item.time_in_status)\n html_table += '<td nowrap>| %s |</td>' % item.assignee # | is vertical bar in html\n if '&' in item.summary:\n item.summary = item.summary.replace('&', '&') # Ampersands screw up html, replace with html escaped version\n html_table += '<td nowrap><a href=\\\"http://jira.sandforce.com/browse/%s\\\">%s</a></td>' % (item.issue_id, item.issue_id)\n html_table += '<td nowrap>%s</td></tr>' % item.summary\n\n html_table += '<tr><td nowrap> </td></tr>' # blank line at end of table\n\n return html_table",
"def plot_ttt(data_frame):\n figtt, axtt = plot_var(\n data_frame=data_frame,\n x_var=\"flow\",\n y_var=\"totalTT\",\n label_var=\"mpr\",\n pivot=\"distance\",\n x_label=\"Flow [veh/m]\",\n y_label=\"Total Travel Time [s]\",\n t_label=\"Distance [m]: \",\n legends=[r\"0 \\%\", r\"10 \\%\", r\"20 \\%\", r\"30 \\%\", r\"40 \\%\"],\n fnt_size={\"fontsize\": 16},\n )\n return figtt, axtt",
"def plot_tcv(self):\n self.plot_profiles(0, title='Shot #{:d} @ t={:.2f} s'.format(self.shot, self.t))",
"def tuer(self, victime):\n self.script[\"tue\"].executer(personnage=victime, pnj=self)",
"def movie_tbl(band,tranges,verbose=0,framesz=0,retries=20):\n\tif verbose:\n\t\tprint_inline('Populating exposure time table.')\n\ttstarts,tstops,exptimes=[],[],[]\n\tfor trange in tranges:\n\t\tstepsz = framesz if framesz else trange[1]-trange[0]\n\t\tsteps = np.ceil((trange[1]-trange[0])/stepsz)\n\t\tfor i,t0 in enumerate(np.arange(trange[0],trange[1],stepsz)):\n\t\t\tt1 = trange[1] if i==steps else t0+stepsz\n\t\t\ttstarts.append(t0)\n\t\t\ttstops.append(t1)\n\t\t\texptimes.append(dbt.compute_exptime(band,[t0,t1],\n\t\t\t\t\t\t\tverbose=verbose,retries=retries))\n\tcol1 = pyfits.Column(name='tstart',format='E',array=np.array(tstarts))\n\tcol2 = pyfits.Column(name='tstop',format='E',array=np.array(tstops))\n\tcol3 = pyfits.Column(name='exptime',format='E',array=np.array(exptimes))\n\tcols = pyfits.ColDefs([col1,col2,col3])\n\ttbl = pyfits.new_table(cols)\n\n\treturn tbl",
"def plot_tttd(data_frame):\n figtt, axtt = plot_var(\n data_frame=data_frame,\n x_var=\"distance\",\n y_var=\"totalTT\",\n label_var=\"mpr\",\n pivot=\"flow\",\n x_label=\"Distance [m]\",\n y_label=\"Total Travel Time [s]\",\n t_label=\"Flow [veh/h]: \",\n legends=[r\"0 \\%\", r\"10 \\%\", r\"20 \\%\", r\"30 \\%\", r\"40 \\%\"],\n fnt_size={\"fontsize\": 16},\n )\n return figtt, axtt",
"def tentative_schedule(request):\n\n\tshows_dict = {\n\t\t0: [],\n\t\t1: [],\n\t\t2: [],\n\t\t3: [],\n\t\t4: [],\n\t\t5: [],\n\t\t6: []\n\t}\n\n\tfor i in range(7):\n\t\tfor show in Show.objects.filter(day=i).order_by('time'):\n\t\t\t\tshow_time = show.time\n\t\t\t\tdj = str(show.dj)\n\t\t\t\tif show.co_dj and str(show.co_dj) != \"Unknown Dj\":\n\t\t\t\t\tdj += \" & \" + str(show.co_dj)\n\t\t\t\tshows_dict[i].append([dj, show_time.strftime('%I:%M %p')])\n\n\treturn render(request, 'tentative_schedule.html', {\n\t\t\t'shows_dict': shows_dict\n\t})",
"def add_tally(grid):\n # Count number of each patient type in the grid\n tally['healthy'].append(len(grid[grid == 0]))\n tally['sickos'].append(len(grid[grid == 1]))\n tally['immune'].append(len(grid[grid == -1]))\n tally['dead'].append(len(grid[grid == 2]))\n tally['time'].append(tally['time'][-1]+1)",
"def TT(f):\n return dmp_ground_TT(f.rep, f.lev, f.dom)",
"def fillSchedule(self, schedule):\n\n self.rooster = schedule\n\n # select courses from zaalrooster\n courses2 = []\n for key, value in self.rooster.items():\n if key == self.room:\n value = value\n for courses in value:\n for course in courses:\n course = str(course)\n courses2.append(course)\n\n # fill schedule with courses from zaalrooster\n for i in range(5):\n for j in range(5):\n self.w.create_text(100 + i, 150 + j, text = courses2[i], width = 80)\n self.w.create_text(100 + i, 250 + j, text = courses2[i+1], width = 80)\n self.w.create_text(100 + i, 350 + j, text = courses2[i+2], width = 80)\n self.w.create_text(100 + i, 450 + j, text = courses2[i+3], width = 80)\n self.w.create_text(300 + i, 150 + j, text = courses2[i+4], width = 80)\n self.w.create_text(300 + i, 250 + j, text = courses2[i+5], width = 80)\n self.w.create_text(300 + i, 350 + j, text = courses2[i+6], width = 80)\n self.w.create_text(300 + i, 450 + j, text = courses2[i+7], width = 80)\n self.w.create_text(500 + i, 150 + j, text = courses2[i+8], width = 80)\n self.w.create_text(500 + i, 250 + j, text = courses2[i+9], width = 80)\n self.w.create_text(500 + i, 350 + j, text = courses2[i+10], width = 80)\n self.w.create_text(500 + i, 450 + j, text = courses2[i+11], width = 80)\n self.w.create_text(700 + i, 150 + j, text = courses2[i+12], width = 80)\n self.w.create_text(700 + i, 250 + j, text = courses2[i+13], width = 80)\n self.w.create_text(700 + i, 350 + j, text = courses2[i+14], width = 80)\n self.w.create_text(700 + i, 450 + j, text = courses2[i+15], width = 80)\n self.w.create_text(900 + i, 150 + j, text = courses2[i+16], width = 80)\n self.w.create_text(900 + i, 250 + j, text = courses2[i+17], width = 80)\n self.w.create_text(900 + i, 350 + j, text = courses2[i+18], width = 80)\n self.w.create_text(900 + i, 450 + j, text = courses2[i+19], width = 80)\n\n\n mainloop()",
"def printSchedule(dictBusSchedule):\n\n strPrint = \"\"\n for strStop in dictBusSchedule:\n strPrint = \"\"\n lstTimes = dictBusSchedule[strStop]\n for strTime in lstTimes:\n if strPrint == \"\":\n strPrint = strStop + \": \" + strTime\n else:\n strPrint = strPrint + \":\" + strTime\n\n print(strPrint)",
"def plot_ttc(data_frame):\n figtt, axtt = plot_var(\n data_frame=data_frame,\n x_var=\"flow\",\n y_var=\"timetC\",\n label_var=\"mpr\",\n pivot=\"distance\",\n x_label=\"Flow [veh/m]\",\n y_label=\"Time To Collision [s]\",\n t_label=\"Distance [m]: \",\n legends=[r\"0 \\%\", r\"10 \\%\", r\"20 \\%\", r\"30 \\%\", r\"40 \\%\"],\n fnt_size={\"fontsize\": 16},\n )\n return figtt, axtt",
"def drawTable(listOfWord, listOfFrequency):\r\n\r\n\tprint(\"Distribusi frekuensi kata: \")\t\t\t\t # judul di atas tabel\r\n\tprint('-' * 40)\r\n\tprint('{:3s} {:25s} {:10s}'.format('No.', 'Kata', 'Frekuensi'))\r\n\tprint('-' * 40)\r\n\r\n\tnumber = 0\t\t\t\t\t\t\t\t# penomoran poin di dalam tabel\r\n\tindexCounter = 0\t\t\t\t\t\t\t\t\t\r\n\tfor word in listOfWord:\t\t\t\t\t\t\t# mencetak isi tabel\r\n\t\tnumber += 1\r\n\t\tprint('{:3d} {:26s} {:<9d}'.format(number, word, listOfFrequency[indexCounter]))\r\n\t\tindexCounter += 1\r\n\r\n\tprint('-' * 40)",
"def to_srt(self, subtitles):\n \n srt_data = ''\n subtitle_num = self.start_index\n for subtitle in subtitles:\n subtitle_num += 1\n \n offset = self.start_time\n \n start_time = self._ms_to_time(subtitle['start_time'] + offset)\n end_time = self._ms_to_time(subtitle['end_time'] + offset)\n \n content = subtitle['content'].replace('<br>', ' ')\n \n srt_data += str(subtitle_num) + '\\r\\n'\n srt_data += '%s --> %s' % (start_time, end_time) + '\\r\\n'\n srt_data += content + '\\r\\n'\n srt_data += '\\r\\n'\n \n self.end_index = subtitle_num\n \n return srt_data"
]
| [
"0.7735832",
"0.62591535",
"0.53490853",
"0.52944165",
"0.5200095",
"0.5179349",
"0.5171907",
"0.5160587",
"0.5143638",
"0.51262563",
"0.51205915",
"0.5059147",
"0.4967323",
"0.49631694",
"0.49489382",
"0.49449074",
"0.49370623",
"0.4925877",
"0.4924541",
"0.48869017",
"0.48516008",
"0.48230264",
"0.48172453",
"0.48157087",
"0.48062763",
"0.48025286",
"0.4801028",
"0.47949448",
"0.47897196",
"0.47814596"
]
| 0.76725847 | 1 |
commanding quit() quits from python turtle graphics. | def quit():
#quits from python turtle graphics screen
bye() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def quit():\n return 'quit'",
"def quit():\n\tsys.exit()",
"def cmd_quit(args):",
"def quit(self):\n pass",
"def do_quit(self, args):\n quit()",
"def quit(self):\n\t\tpass",
"def quit(self, *args, **kwargs):\n pass",
"def do_quit(self,line):\n self.quit()",
"def do_quit(self, arg):\n exit()",
"def command_quit(self, arg):\n self.write('221 Bye', self.finish)",
"def quit():\n sio.emit('quit', 'quit')\n return 'OK'",
"def do_quit(self, arg):\n self.do_exit(arg)",
"def quit():\r\n autoquit()",
"def quit_game(self):\n pg.quit()\n sys.exit()",
"def command_quit(arguments):\n global quitting\n quitting = True\n return 'Now quitting'",
"def quit(self):\n self.disconnect()\n mySerialConnection = None\n logging.info(EXIT_STRING)\n self.frame.destroy()\n self.endCommand()\n #sys.exit()",
"def quit(self):\n self.quit = True",
"def done(self):\n turtle.done()",
"def Quit(self, event):\n pass",
"def clickQuit(self, event):\n self.quitFlag = True",
"def quit(self):\n raise NotImplementedError",
"def quitting(self):\n pass",
"def do_quit(self, args):\n print('Good Bye!')\n exit()",
"def qpressed(): #QUITTNG FUNCTION\n #print(\"q pressed\")\n sys.exit()",
"def quit_program():\n sys.exit()",
"def do_quit(self, arg):\n\n print('Good Bye!')\n exit()",
"def exit_program():\n quit()",
"def quit() -> None:\n pg.quit()\n sys.exit()",
"def _quit():\r\n\twin.quit()\r\n\twin.destroy()\r\n\tquit()",
"def main():\r\n intialize()\r\n draw_hexagon()\r\n draw_square()\r\n draw_triangle()\r\n shapes()\r\n shapes2()\r\n print (\"Close the window\")\r\n turtle.done()"
]
| [
"0.70075387",
"0.69214535",
"0.68864304",
"0.6808114",
"0.6739777",
"0.669683",
"0.66821736",
"0.66361433",
"0.6610632",
"0.6570589",
"0.6564331",
"0.65494555",
"0.65493435",
"0.6515216",
"0.64835155",
"0.64770013",
"0.64566535",
"0.6432395",
"0.64222944",
"0.63981736",
"0.6380984",
"0.6357744",
"0.6353264",
"0.6347332",
"0.6346468",
"0.63211405",
"0.62971723",
"0.62921757",
"0.6281865",
"0.627948"
]
| 0.89436555 | 0 |
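A minimal stand-alone sketch of the behaviour described by the quit() row above, using only the standard-library turtle module (the window is created implicitly by the first drawing call, and turtle.bye() closes it exactly as the quit() helper in the document field does):

import turtle

turtle.forward(50)   # any drawing call opens the turtle graphics window
turtle.bye()         # shut the turtle graphics window; this is the call the quit() helper wraps

Defining a module-level quit() as in the document shadows Python's built-in quit(); calling turtle.bye() directly avoids that name clash.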
pp_schedule(filename,venue) opens up a python turtle graphics window which shows what times of each of the seven days of the week is occupied by which module in which venue. To use pp_schedule, enter a filename as the first element and a venue as the second element. pp_schedule commands ttint(timelist,venue) to create a table in python turtles graphics using a timelist created by pp_schedule. When pp_schedule is run, a list with day codes, start times, end times and modules is created. It is neither returned nor printed, but if the code is modified to show the table, the following outputs are expected to be seen. | def pp_schedule(filename,venue):
#reading the file
f = open(filename,"r")
incsv = f.readlines()
#removing affixes
incsv[:] = [i.rstrip('\n') for i in incsv]
#lines into lists
tempstr = ""
templist = []
for j in range(len(incsv)):
#enters each line into temporary string variable
tempstr = incsv[j]
#enters the split string into a temporary list variable
templist.append(tempstr.split(","))
#modify original line in original list with split list
incsv[j] = templist
#reset temporary variables
tempstr = ""
templist = []
#timelist stands for timetable list
#format of timelist: [[day,[start,end,module],...],...]
timelist = []
for k in range(1,8):
#for each day code add day code
timelist.append([k])
#assign and make list for ttint
#for all lines in file
for l in range(len(incsv)):
#if venue in line matches desired venue
if incsv[l][0][7] == venue:
#after each day code, add a list with start time, end time and module. Repeat for each relevant line
timelist[(int(incsv[l][0][3])-1)].append([int(incsv[l][0][5]),int(incsv[l][0][6]),incsv[l][0][0]])
#turtle
print("Your timetable is being printed on Python Turtle Graphics. This may take a while.")
ttint(timelist,venue) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ttint(timelist,venue):\n #setup\n showturtle()\n #make python turtle graphics window 1260 pixels wide and 800 pixels tall\n setup(width = 1260, height = 800, startx = None, starty = None)\n reset()\n #text at top\n pen(pencolor=\"black\")\n pu()\n setpos(0,380)\n write(\"Welcome to your schedule. Use the arrow keys to toggle the day of the week\",move=False,align=\"center\",font=(\"Courier New\",10,\"normal\"))\n setpos(0,360)\n write(\"In Idle, type 'quit()' to exit turtle.\",move=False,align=\"center\",font=(\"Courier New\",10,\"normal\"))\n dayl = [\"Mon\",\"Tue\",\"Wed\",\"Thu\",\"Fri\",\"Sat\",\"Sun\"]\n setpos(0,-350)\n #writes venue at bottom of GUI\n write(venue,move=False,align=\"center\",font=(\"Courier New\",20,\"normal\"))\n #drawing the lines and timing\n #baseY = 300 because y = 300 is the height of the line for monday\n baseY = 300\n for ch in range(7):\n pu()\n #goes to relevant y position for respective day code\n setpos(-570,(baseY-(100*ch)))\n #writes day name at side\n write(dayl[ch],move=False,align=\"center\",font=(\"Courier New\",20,\"normal\"))\n pen(pencolor=\"black\",pensize=\"3\")\n #draws lines\n #for each hour\n for dh in range(19):\n #move right 60 steps\n setx(xcor()+60)\n pd()\n #move up 20 steps\n sety(ycor()+20)\n pu()\n #stop drawing. move up 10 steps and write hour\n sety(ycor()+10)\n write(str((600+(dh*100))),move=False,align=\"center\",font=(\"Courier New\",10,\"normal\"))\n #go back down 30 steps to main line\n sety(ycor()-30)\n #continue drawing\n pd()\n pu()\n #goes to each relevant timing to write module code\n #for every time range in timelist. dp stands for day parse\n for dp in range(len(timelist)):\n #if week day in timelist is not empty\n if len(timelist[dp]) >= 1:\n #for each timing in the week day. hp stands for hour parse\n for hp in range(1,len(timelist[dp])):\n #for each hour in the time range. pr is an arbitrary variable which helps to direct the turtle to the timings in between the start and end time to write the module code at the relevant location\n for pr in range(int((timelist[dp][hp][1]-timelist[dp][hp][0])/100)):\n #go to the relevant time and write the module code in between\n setpos((-840+(int(timelist[dp][hp][0]/100)+pr)*60),(410-timelist[dp][0]*100))\n write(timelist[dp][hp][2],move=False,align=\"center\",font=(\"Courier New\",8,\"normal\"))",
"def printSchedule():\r\n print(\"{0:^45}\".format(\"Your Schedule:\\n\"))\r\n print(\" Day Class Time\")\r\n if(len(classes) == 0):\r\n print(\"\\nThere are no classes\\n\")\r\n return\r\n for class_ in classes:\r\n print(class_.scheduleString())\r\n print()",
"def print_list(schedule):\n START_TIME = 0\n END_TIME = 1\n MEETING_TITLE = 2\n print(\"\\nYour schedule for the day:\")\n if len(schedule) == 0:\n print(\"(empty)\\n\")\n else:\n for row in schedule:\n print(\n f\"{row[START_TIME]} - {row[END_TIME]} {row[MEETING_TITLE]}\")\n print(\"\\n\")",
"def schedule_text():",
"def schedule_paragraph():",
"def print_team_schedule(\n sch: Schedule,\n team: str,\n team_list: list[str],\n capt_list: list[str],\n outfile: typing.Union[str, TextIOWrapper] = \"print\",\n):\n if outfile == \"print\":\n\n def pline(txt):\n print(txt)\n\n else:\n\n def pline(txt):\n outfile.write(txt + \"\\n\")\n\n line = \"\"\n\n pline(\"\\nTeam: \" + team + \"\\n\")\n for rnd in range(sch.nrounds):\n _rnd = sch.rounds[rnd]\n line = f\"{_rnd.play_date}\"\n game_not_found = True\n match = 0\n while game_not_found and match < _rnd.nmatches:\n _match = _rnd.matches[match]\n if _match.home == team:\n _teamidx = team_list.index(_match.away)\n _capt = capt_list[_teamidx]\n if \"Bye\" not in _match.away:\n line = line + f\" vs. {_match.away} ({_capt})\"\n else:\n line = line + \" --- BYE ---\"\n game_not_found = False\n elif _match.away == team:\n _teamidx = team_list.index(_match.home)\n _capt = capt_list[_teamidx]\n if \"Bye\" not in _match.home:\n line = line + f\" @ {_match.home} ({_capt})\"\n else:\n line = line + \" --- BYE ---\"\n game_not_found = False\n else:\n match = match + 1\n if game_not_found:\n logging.warning(\"Bye week is not expected.\")\n line = line + \"Bye Week\"\n pline(line)",
"def nflschedule(self, irc, msg, args, optlist, optteam):\n \n fullSchedule = False\n for (option, arg) in optlist:\n if option == 'full':\n fullSchedule = True\n \n optteam = optteam.upper()\n \n if optteam not in self._validteams():\n irc.reply(\"Team not found. Must be one of: %s\" % self._validteams())\n return\n \n lookupteam = self._translateTeam('yahoo', 'team', optteam) # don't need a check for 0 here because we validate prior.\n \n if fullSchedule: # diff url/method.\n url = self._b64decode('aHR0cDovL3Nwb3J0cy55YWhvby5jb20vbmZsL3RlYW1z') + '/%s/schedule' % lookupteam\n\n try:\n request = urllib2.Request(url)\n html = (urllib2.urlopen(request)).read()\n except:\n irc.reply(\"Failed to open: %s\" % url)\n return\n \n soup = BeautifulSoup(html)\n table = soup.find('table', attrs={'summary':'Regular Season Games'})\n \n if not table:\n irc.reply(\"ERROR: Failed to find schedule for: %s\") % optteam\n return\n \n tbody = table.find('tbody')\n rows = tbody.findAll('tr')\n\n append_list = []\n\n for row in rows:\n tds = row.findAll('td')\n week = tds[0]\n \n if row.find('td', attrs={'class':'title bye'}):\n date = \"BYE\"\n opp = \"\"\n score = \"\"\n appendString = \"W{0}-{1}\".format(ircutils.bold(week.getText()), ircutils.underline(\"BYE\"))\n else:\n date = tds[1].getText()\n dateSplit = date.split(',', 1) # take the date, dump the rest.\n date = dateSplit[1]\n opp = tds[2] # with how the Tag/string comes in, we need to extract one part and format the other.\n oppName = opp.find('span')\n if oppName:\n oppName.extract()\n oppTeam = opp.find('a').getText() \n #opp = tds[2].find('span').getText()\n #opp = self._translateTeam('team','full', opp) # use the db to make a full team small.\n score = tds[3].getText().replace('EDT','').replace('EST','').replace('pm','').replace('am','') # strip the garbage\n #score = score.replace('W', ircutils.mircColor('W', 'green')).replace('L', ircutils.mircColor('L', 'red'))\n appendString = \"W{0}-{1} {2} {3}\".format(ircutils.bold(week.getText()), date.strip(), oppTeam.strip(), score.strip())\n \n append_list.append(appendString)\n\n descstring = string.join([item for item in append_list], \" | \")\n output = \"{0} SCHED :: {1}\".format(ircutils.mircColor(optteam, 'red'), descstring)\n irc.reply(output)\n else:\n url = self._b64decode('aHR0cDovL3Nwb3J0cy55YWhvby5jb20vbmZsL3RlYW1z') + '/%s/calendar/rss.xml' % lookupteam\n \n try:\n req = urllib2.Request(url)\n response = urllib2.urlopen(req)\n html = response.read()\n except:\n irc.reply(\"Cannot open: %s\" % url)\n return\n\n # clean this stuff up\n html = html.replace('<![CDATA[','').replace(']]>','').replace('EDT','').replace('\\xc2\\xa0',' ')\n\n soup = BeautifulSoup(html)\n items = soup.find('channel').findAll('item')\n \n append_list = []\n\n for item in items:\n title = item.find('title').renderContents().strip() # title is good.\n day, date = title.split(',')\n desc = item.find('description') # everything in desc but its messy.\n desctext = desc.findAll(text=True) # get all text, first, but its in a list.\n descappend = (''.join(desctext).strip()) # list transform into a string.\n if not descappend.startswith('@'): # if something is @, it's before, but vs. otherwise.\n descappend = 'vs. ' + descappend\n descappend += \" [\" + date.strip() + \"]\"\n append_list.append(descappend) # put all into a list.\n\n \n descstring = string.join([item for item in append_list], \" | \")\n output = \"{0} {1}\".format(ircutils.bold(optteam), descstring)\n irc.reply(output)",
"def printSchedule(self):\n\t\tself.printWaiting()\n\t\tprint ' '.join(map(format,range(20),['2' for _ in range(20)]))\n\t\tprint \"\"",
"def get_timetable(self):\n\n # Download schedule\n log.debug('downloading timetable for \"{}\"'.format(self.school_year))\n download_file(self.schedule_url, self.schedule_filename)\n\n # Read XML data\n xml_data = read_file(self.schedule_filename)\n\n # Give it to Beautiful Soup for pretty parsing\n soup = BeautifulSoup(xml_data, 'html.parser')\n\n update_time_regex = re.compile(r'\\d{2}\\/\\d{2}\\/\\d{4}\\s?\\d{2}:\\d{2}:\\d{2}')\n update_time_str = update_time_regex.findall(soup.find('footer').get_text())[0]\n update_time_dt = datetime.datetime.strptime(update_time_str, '%d/%m/%Y %H:%M:%S')\n self.update_time = str(datetime.datetime.strftime(update_time_dt, '%d/%m/%Y %H:%M:%S')) \n\n self.save_update_time()\n\n # Compute a correspondance tables between 'rawweeks' and the first weekday\n self.week_dates_mapping = {\n span.alleventweeks.get_text(): span['date'] \n for span in soup.find_all('span')\n }\n\n log.debug('find all events for \"{}\".'.format(self.school_year))\n self.unformatted_events = soup.find_all('event')",
"def print_schedule(self):\n for entry in self.entries:\n print(entry.get_entry_string())",
"def output_schedule(self) -> None:\n with open(\"Output.txt\", \"w\") as out_file:\n for sem in self.plan:\n out_file.write(sem.title.center(15 + 20 + 50 + 5) + \"\\n\\n\")\n for course in sem.required_courses:\n if course.special:\n out_file.write(\"*\" * 10 + \" \" * 5 + f\"{course.special_type}\\n\")\n elif course.grade != \"\":\n out_file.write(\n course.sem_taken.ljust(15)\n + f\"{course.dept} {course.number}-{course.section}\".ljust(\n 20\n )\n + course.title.ljust(50)\n + course.grade.ljust(5)\n + \"\\n\"\n )\n else:\n out_file.write(\n \"AP/UNK\".ljust(15)\n + f\"{course.dept} {course.number}-{course.section}\".ljust(\n 20\n )\n + course.title.ljust(50)\n + \"AP/UNK\".ljust(5)\n + \"\\n\"\n )\n out_file.write(\"\\n\\n\")",
"def display_schedule(schedule):\n\n def display_patches(patches_sequence, margin=8):\n \"\"\"\n Displays a sequence of MatPlotLib patches in a MatPlotLib window\n :param patches_sequence: the patches to display\n :param margin:\n :return:\n \"\"\"\n plt.rcdefaults()\n fig, ax = plt.subplots()\n for p in patches_sequence:\n ax.add_patch(p)\n max_machines = max(rect.get_y() for rect in patches_sequence) + 1\n max_jobs = max(rect.get_x() + margin for rect in patches_sequence)\n plt.axis([0, max_jobs, 0, max_machines])\n plt.show()\n\n patches = list()\n colors = [\"black\", \"darksalmon\", \"DarkKhaki\", \"DarkViolet\", \"red\", \"blue\", \"green\", \"cyan\", \"magenta\", \"yellow\",\n \"black\", \"IndianRed\", \"Pink\", \"Lavender\", \"DarkOrange\", \"GreenYellow\", \"Teal\", \"SteelBlue\",\n \"MidnightBlue\", \"Maroon\", \"DimGray\"]\n\n for i, prof in enumerate(schedule):\n prof = prof[\"Exams\"]\n for eleve, heure in prof.items():\n rekt = mpatches.Rectangle((heure, i), durations[i], 1, color=colors[eleve], ec=\"black\")\n patches.append(rekt)\n\n display_patches(patches)",
"def print_schedule():\n clear_screen()\n print(\"====Current Schedule====\")\n days = ['sun', 'mon', 'tues', 'wed', 'thurs', 'fri', 'sat']\n with open('current_courses.json', 'r') as current_file:\n schedule = json.load(current_file)\n for day in days:\n for val, val2 in schedule.items():\n if day in val2[0]:\n print(day, val, str(val2[1])+'-'+str(val2[2])+\" Presumed Grade: \"+ val2[3])\n return 0",
"def genScheduleCSV():\r\n try: \r\n printSchedule()\r\n save_class_list()\r\n print(\"\\nSchedule generated, check working directory\")\r\n except Exception as e:\r\n print(\"Exception found\" + str(e))",
"def timeline(self, **kwargs):\n\n def rtm(n, multiple=10):\n \"\"\"Round to multiple.\"\"\"\n return int(multiple * round(float(n) / multiple))\n\n beginning_minutes = 7 * 60 + 20 # starting time is 7:20\n end_minutes = 21 * 60 # ending time is 21:00\n\n interval = 100 # 100 minutes for each period (90 + 10)\n\n total_minutes = ((end_minutes - beginning_minutes) // interval + 1) * interval\n number_of_intervals = total_minutes // interval\n\n segments = total_minutes // 10\n days = {i: [[' '] * segments + ['โ']] for i in range(5)}\n\n for course in self.get_sorted_courses(include_unscheduled=False):\n i = (rtm(course.time.start) - beginning_minutes) // 10\n width = (rtm(course.time.end) - rtm(course.time.start)) // 10\n\n day = 0\n for j in range(i, i + width):\n if days[course.weekday()][day][j] != ' ':\n day += 1\n if len(days[course.weekday()]) == day:\n days[course.weekday()].append([' '] * segments + ['โ'])\n\n days[course.weekday()][day][i] = '{'\n days[course.weekday()][day][i + width - 1] = '}'\n\n space = width - 2 # width minus { and }\n\n name = Ansi.color(\n course.abbreviation\n if len(course.abbreviation) <= space\n else course.abbreviation[: space - 1] + \".\",\n course_types[course.type].color,\n )\n\n # TODO: this doesn't center correctly, for some reason\n name = Ansi.center(name, space)\n\n days[course.weekday()][day][i + 1] = name\n for j in range(i + 2, i + width - 1):\n days[course.weekday()][day][j] = ''\n\n # print the header\n print(\n (\" โญ\" + \"โ\" * (total_minutes // 10) + \"โฎ\\n โ\")\n + \"\".join(\n Ansi.bold(\n minutes_to_HHMM(beginning_minutes + interval * i)\n .strip()\n .ljust(10, \" \")\n )\n for i in range(number_of_intervals)\n )\n + \"โ\\nโญโโโโโผโ\"\n + \"\".join(\n \"โ\" * number_of_intervals\n + (\"โ\" if i != number_of_intervals - 1 else \"โค\")\n for i in range(number_of_intervals)\n )\n )\n\n for i in range(5):\n x = f\"โ {WD_EN[i][:2].capitalize()} โ\"\n\n for j, day in enumerate(days[i]):\n if j == 0:\n print(x, end=\"\")\n else:\n print(\"โ โ\", end=\"\")\n\n print(\"\".join(day))\n\n # print the very last line\n print(\n \"โฐโโโโโดโ\"\n + \"\".join(\n \"โ\" * number_of_intervals\n + (\"โ\" if i != number_of_intervals - 1 else \"โฏ\")\n for i in range(number_of_intervals)\n )\n )",
"def scheduleusingsjf():\n df = []\n currentDate = date.today()\n\n currentTime = time() # Time initialized to zero\n completionTime = time() # Completion time initialized to zero.\n while True:\n\n # List of processes in the ready queue whose arrival time is less\n # than or equal to the current time\n arrivedProcesses = [x for x in readyList if x.arrivalTime <= currentTime]\n\n if arrivedProcesses: # At least one process has arrived\n if currentTime >= completionTime: # No process is currently undergoing execution\n # Find the shortest job from the list of arrived processes:\n shortestJob = arrivedProcesses[0]\n min = shortestJob.burstTime\n\n for x in arrivedProcesses:\n if x.burstTime < min:\n shortestJob = x\n min = x.burstTime\n\n readyList.remove(shortestJob) # Remove the selected job from ready list:\n\n # Update the completion time for the job taken up for execution\n shortestJob.completionTime = addTimes(currentTime, shortestJob.burstTime)\n sjfDoneList.append(shortestJob)\n completionTime = shortestJob.completionTime\n\n # Add the start and end times of the currently executing process to Gantt chart:\n df.append(dict(Task=shortestJob.processName, Start=str(currentDate) + \" \" + str(currentTime),\n Finish=str(currentDate) + \" \"\n + str(shortestJob.completionTime)))\n\n currentTime = addTimes(currentTime, time(second=1)) # Increment current time by 1 second\n\n if not readyList: # Ready list is empty. Break out from infinite loop\n break\n\n fig = ff.create_gantt(df, title=\"Shortest Job First\")\n fig.write_image(\"./sjf.png\")\n\n calculateTurnaroundAndWaitingTime(sjfDoneList)\n drawTable(sjfDoneList, \"sjfTable.png\")",
"def do_rt(self, arg):\n self.do_timesheet('report today')",
"def _view_schedule(self):\n def plus_top_attach(f):\n\n def plus(*args, **kwargs):\n top_attach, left_attach = f(*args, **kwargs)\n return top_attach + 1, left_attach + 1\n\n return plus\n\n @plus_top_attach\n def create_label(text, left_attach, right_attach,\n top_attach, bottom_attach, align=None):\n label = gtk.Label('<span font=\"%s\">%s</span>' %\n (Params().get_default_font(), text))\n label.set_use_markup(True)\n if align == 'left':\n label.set_alignment(xalign=0.0, yalign=0.5)\n elif align == 'right':\n label.set_alignment(xalign=1.0, yalign=0.5)\n self.table.attach(label, left_attach, right_attach,\n top_attach, bottom_attach, xoptions=gtk.FILL, yoptions=False)\n label.show()\n return top_attach, left_attach\n\n @plus_top_attach\n def create_separator(left_attach, right_attach,\n top_attach, bottom_attach):\n separator = gtk.HSeparator()\n self.table.attach(separator, left_attach, right_attach,\n top_attach, bottom_attach, xoptions=gtk.FILL, yoptions=False)\n separator.show()\n return top_attach, left_attach\n\n tattach, tlen, view_sch = 0, 0, Params().get_view_sch()\n for i in view_sch:\n if i:\n tlen += 1\n for day in ['Monday', 'Tuesday', 'Wednesday',\n 'Thursday', 'Friday', 'Saturday']:\n tattach = create_label('<b><span color=\"%s\">%s</span></b>' %\n (Params().get_day_color(), day), 0, tlen,\n tattach, tattach + 1, 'left')[0]\n tattach = create_separator(0, tlen, tattach, tattach + 1)[0]\n\n schedule = Schedule().get_schedule(day,\n Schedule().get_current_week() - 1)\n for i in range(8):\n if not schedule[i][1] == '' and \\\n (schedule[i][0] == Schedule().get_subgroup() or\n schedule[i][0] == 2):\n if not schedule[i][2]:\n label_color = '%s' % str(Params().get_lecture_color())\n elif schedule[i][2] == 1:\n label_color = '%s' % \\\n str(Params().get_laboratory_color())\n elif schedule[i][2] == 2:\n label_color = '%s' % str(Params().get_practice_color())\n else:\n label_color = '%s' % str(Params().get_non_color())\n\n label_template = '<span color=\"%s\">%s</span>'\n lattach = 0\n if view_sch[0]:\n lattach = create_label('<span color=\"%s\">%d.</span>' %\n (label_color, i),\n lattach, lattach + 1, tattach, tattach + 1)[1]\n if view_sch[1]:\n lattach = create_label(label_template % (label_color,\n '-'.join(Schedule().get_lessons_time()[i])),\n lattach, lattach + 1, tattach, tattach + 1)[1]\n if view_sch[2]:\n lattach = create_label(label_template %\n (label_color, schedule[i][1]),\n lattach, lattach + 1,\n tattach, tattach + 1, 'left')[1]\n if view_sch[3]:\n lattach = create_label(label_template %\n (label_color, schedule[i][3]),\n lattach, lattach + 1, tattach, tattach + 1)[1]\n if view_sch[4]:\n create_label(label_template %\n (label_color, schedule[i][4]),\n lattach, lattach + 1,\n tattach, tattach + 1, 'right')\n tattach += 1",
"def mainSchedule():\n\timport time\n\tc1 = Content(1,5,20)\n\tc2 = Content(2,6,30)\n\tc3 = Content(3,5,25)\n\tc1_ = Content(1,1,20)\n\tc5 = Content(5,3,29)\n\tc6 = Content(6,11,50)\n\tc7 = Content(7,7,34)\n\tc1__ = Content(1,3,20)\n\tc8 = Content(8,6,10)\n\ta1 = Area('a1',1.0)\n\ta2 = Area('a2',0.5)\n\ta3 = Area('a3',0.8)\n\tcontents = [c1,c2,c3,c1_,c5,c6,c7,c1__,c8]\n\tareas = [a1,a2,a3]\n\tsol_schedule = Schedule_solution()\n\tprint \"random sampling schedule:\\n\"\n\ttime_r = time.time()\n\tschedule_sols = sol_schedule.schedule_randomSampling(contents,areas)\n\tprint \"running time,\",time.time()-time_r\n\tprint \"local search schedule:\"\n\ttime_l = time.time()\n\tschedule_sols_local = sol_schedule.schedule_localSearch(contents,areas)\n\tprint \"running time,\",time.time()-time_l\n\tsol_selection = Selection_solution()\n\tsol_selection.select_bruteforce(4,*schedule_sols) #argument unpacking",
"def main(to_be_scheduled):\n\n tasks = order_by_ftime(to_be_scheduled)\n print select_activity(tasks)",
"def available_timing(filename,day,venue):\n #reading the file\n f = open(filename,\"r\")\n incsv = f.readlines()\n #removing affixes\n incsv[:] = [i.rstrip('\\n') for i in incsv]\n #lines into lists\n tempstr = \"\"\n templist = []\n for j in range(len(incsv)):\n #enters each line into temporary string variable\n tempstr = incsv[j]\n #enters the split string into a temporary list variable\n templist.append(tempstr.split(\",\"))\n #modify original line in original list with split list\n incsv[j] = templist\n #reset temporary variables\n tempstr = \"\"\n templist = []\n #finding occupied hours\n brlist = []\n #for all lines in file\n for k in range(len(incsv)):\n #if venue in line matches desired venue and day in line matches desired day\n if incsv[k][0][7] == venue and int(incsv[k][0][3]) == day:\n #add time range of line into brlist\n brlist.append([int(incsv[k][0][5]),int(incsv[k][0][6])])\n #pruning\n #tlist stands for timelist. stores remaining hours for synthesis\n tlist = []\n #list of hours\n tlist = [600,700,800,900,1000,1100,1200,1300,1400,1500,1600,1700,1800,1900,2000,2100,2200,2300,2400]\n #for line in brlist\n for l in range(len(brlist)):\n #for the range of hours of the line\n for m in range(int((brlist[l][1]-brlist[l][0])/100)):\n #if hours in range still in tlist\n if (brlist[l][0] + m*100) in tlist:\n #remove from tlist\n tlist.remove(brlist[l][0] + m*100)\n #plist for partition list. range of available timings appended here\n plist = []\n #check is for the start time of each available time ranges\n check = 0\n #formation of time ranges\n #for hours in tlist\n for n in range(len(tlist)):\n #if code is checking element 2. Could have used range(1,len(tlist)) but nevermind\n if n >= 1:\n #if 2 adjacent hours are not consecutive\n if tlist[n] != (tlist[n-1]+100):\n #add time range to plist\n plist.append((tlist[check],tlist[n-1]+100))\n #set check to next minimum available start time\n check = n\n #adding range with last hour\n #if last hour in tlist is 2400 and precedent hour in tlist is 2300\n if tlist[n] == 2400 and tlist[n-1] == 2300:\n #add time range\n plist.append((tlist[check],2400))\n return plist",
"def _create_schedules(self):\n\n ''''''",
"def week_schedule(year, stype, week):\n url = schedule_url(year, stype, week)\n try:\n dom = xml.parse(urllib.request.urlopen(url))\n except urllib.error.HTTPError:\n print >> sys.stderr, 'Could not load %s' % url\n return []\n\n games = []\n for g in dom.getElementsByTagName(\"g\"):\n gsis_id = g.getAttribute('eid')\n games.append({\n 'eid': gsis_id,\n 'wday': g.getAttribute('d'),\n 'year': year,\n 'month': int(gsis_id[4:6]),\n 'day': int(gsis_id[6:8]),\n 'time': g.getAttribute('t'),\n 'meridiem': None,\n 'season_type': stype,\n 'week': week,\n 'home': g.getAttribute('h'),\n 'away': g.getAttribute('v'),\n 'gamekey': g.getAttribute('gsis'),\n })\n\n for game in games:\n h = int(game['time'].split(':')[0])\n m = int(game['time'].split(':')[1])\n if 0 < h <= 5: # All games before \"6:00\" are PM until proven otherwise\n game['meridiem'] = 'PM'\n\n if game['meridiem'] is None:\n\n days_games = [g for g in games if g['wday'] == game['wday']]\n preceeding = [g for g in days_games if g['eid'] < game['eid']]\n proceeding = [g for g in days_games if g['eid'] > game['eid']]\n\n # If any games *after* this one are AM then so is this\n if any(g['meridiem'] == 'AM' for g in proceeding):\n game['meridiem'] = 'AM'\n # If any games *before* this one are PM then so is this one\n elif any(g['meridiem'] == 'PM' for g in preceeding):\n game['meridiem'] = 'PM'\n # If any games *after* this one have an \"earlier\" start it's AM\n elif any(h > t for t in [int(g['time'].split(':')[0]) for g in proceeding]):\n game['meridiem'] = 'AM'\n # If any games *before* this one have a \"later\" start time it's PM\n elif any(h < t for t in [int(g['time'].split(':')[0]) for g in preceeding]):\n game['meridiem'] = 'PM'\n\n if game['meridiem'] is None:\n if game['wday'] not in ['Sat', 'Sun']:\n game['meridiem'] = 'PM'\n if game['season_type'] == 'POST':\n game['meridiem'] = 'PM'\n\n return games",
"def tentative_schedule(request):\n\n\tshows_dict = {\n\t\t0: [],\n\t\t1: [],\n\t\t2: [],\n\t\t3: [],\n\t\t4: [],\n\t\t5: [],\n\t\t6: []\n\t}\n\n\tfor i in range(7):\n\t\tfor show in Show.objects.filter(day=i).order_by('time'):\n\t\t\t\tshow_time = show.time\n\t\t\t\tdj = str(show.dj)\n\t\t\t\tif show.co_dj and str(show.co_dj) != \"Unknown Dj\":\n\t\t\t\t\tdj += \" & \" + str(show.co_dj)\n\t\t\t\tshows_dict[i].append([dj, show_time.strftime('%I:%M %p')])\n\n\treturn render(request, 'tentative_schedule.html', {\n\t\t\t'shows_dict': shows_dict\n\t})",
"def fillSchedule(self, schedule):\n\n self.rooster = schedule\n\n # select courses from zaalrooster\n courses2 = []\n for key, value in self.rooster.items():\n if key == self.room:\n value = value\n for courses in value:\n for course in courses:\n course = str(course)\n courses2.append(course)\n\n # fill schedule with courses from zaalrooster\n for i in range(5):\n for j in range(5):\n self.w.create_text(100 + i, 150 + j, text = courses2[i], width = 80)\n self.w.create_text(100 + i, 250 + j, text = courses2[i+1], width = 80)\n self.w.create_text(100 + i, 350 + j, text = courses2[i+2], width = 80)\n self.w.create_text(100 + i, 450 + j, text = courses2[i+3], width = 80)\n self.w.create_text(300 + i, 150 + j, text = courses2[i+4], width = 80)\n self.w.create_text(300 + i, 250 + j, text = courses2[i+5], width = 80)\n self.w.create_text(300 + i, 350 + j, text = courses2[i+6], width = 80)\n self.w.create_text(300 + i, 450 + j, text = courses2[i+7], width = 80)\n self.w.create_text(500 + i, 150 + j, text = courses2[i+8], width = 80)\n self.w.create_text(500 + i, 250 + j, text = courses2[i+9], width = 80)\n self.w.create_text(500 + i, 350 + j, text = courses2[i+10], width = 80)\n self.w.create_text(500 + i, 450 + j, text = courses2[i+11], width = 80)\n self.w.create_text(700 + i, 150 + j, text = courses2[i+12], width = 80)\n self.w.create_text(700 + i, 250 + j, text = courses2[i+13], width = 80)\n self.w.create_text(700 + i, 350 + j, text = courses2[i+14], width = 80)\n self.w.create_text(700 + i, 450 + j, text = courses2[i+15], width = 80)\n self.w.create_text(900 + i, 150 + j, text = courses2[i+16], width = 80)\n self.w.create_text(900 + i, 250 + j, text = courses2[i+17], width = 80)\n self.w.create_text(900 + i, 350 + j, text = courses2[i+18], width = 80)\n self.w.create_text(900 + i, 450 + j, text = courses2[i+19], width = 80)\n\n\n mainloop()",
"def test_list_schedules(self):\n pass",
"def print_tlines(ty,slist,scaledtime, farright):\r\n xinc = 0.005\r\n yinc = 0.002\r\n if(scaledtime != []):\r\n if max(scaledtime)/1e6 < 1.0:\r\n yearscaler = 1e3\r\n yearscalestring = \" KYR\"\r\n else:\r\n yearscaler = 1e6\r\n yearscalestring = \" MYR\"\r\n if gv[\"eventimes\"] == False:\r\n for i in range(numpops-1):\r\n if (ty[i][1] > ty[i][0]):\r\n yline(ty[i][1],farright,1,2,gv[\"graylevel\"])\r\n yline(ty[i][0],farright,0.5,0,0)\r\n if (ty[i][2] < ty[i][0]):\r\n yline(ty[i][2],farright,1,2,gv[\"graylevel\"])\r\n if(scaledtime != []):\r\n scaledtime[i] /= yearscaler\r\n mtime = round(scaledtime[i],-int(math.log10(scaledtime[i])-2))\r\n nstr = str(mtime) + yearscalestring\r\n ## str(int(round(scaledtime[i],-int(math.log10(scaledtime[i])-2)))) + \" yrs\"\r\n dotext([xinc*(i+2),ty[i][0]+yinc],nstr,0, False)\r\n else:\r\n nstr = fround(slist[5][4][i][1]) + \"tu\"\r\n dotext([xinc*(i+2),ty[i][0]+yinc],nstr,0, False)\r\n if (ty[i][1] > ty[i][0]):\r\n arrow([xinc*(i+1),ty[i][1]],[xinc*(i+1),ty[i][0]],1, gv[\"black\"])\r\n if (ty[i][2] < ty[i][0]):\r\n arrow([xinc*(i+1),ty[i][2]],[xinc*(i+1),ty[i][0]],3, gv[\"black\"])\r\n else:\r\n for i in range(numpops-1):\r\n yline(ty[i][0],farright,0.5,0,0)\r\n if(scaledtime != []):\r\n scaledtime[i] /= yearscaler\r\n mtime = round(scaledtime[i],-int(math.log10(scaledtime[i])-2))\r\n nstr = str(mtime) + yearscalestring\r\n ## str(int(round(scaledtime[i],-int(math.log10(scaledtime[i])-2)))) + \" yrs\"\r\n dotext([xinc*(i+2),ty[i][0]+yinc],nstr,0, False)\r\n else:\r\n nstr = fround(slist[5][4][i][1]) + \"tu\"\r\n dotext([xinc*(i+2),ty[i][0]+yinc],nstr,0, False)\r\n return ty",
"def print_fun_facts(num_hours, num_minutes):\n\n # If the number of hours are less than 1, there are no real analytics that\n # can be given to the user, so the program exits\n if num_hours < 1:\n os._exit(1)\n\n print(\"\\nIn the time you spent on league, here's some things you\", \n \"could have done:\")\n\n # Get the total number of minutes that the user spent playing league in the\n # last week\n total_mins = num_hours * 60 + num_minutes\n\n # Number of hours it takes to fly coast to coast\n hours_to_fly_from_la_to_nyc = 5\n\n # Find how far or how many times the user could have flown coast to coast\n flying_data = time_to_perform_task(total_mins, hours_to_fly_from_la_to_nyc)\n\n # Check if the data returned is not a whole number, but a percentage\n # This will occur if hte user hasn't played enough league to complete more\n # than 1 flight from coast to coast\n if flying_data[0]:\n print(\"- Flown \", flying_data[1],\"% of the way from LA to NYC\", sep='')\n else:\n print(\"- Flown from LA to NYC\", flying_data[1], \"times\")\n\n # Repeating the same process, but with the Great Gatsby\n hours_to_read_great_gatsby = 2.62\n gatsby_data = time_to_perform_task(total_mins, hours_to_read_great_gatsby)\n if gatsby_data[0]:\n print(\"- Read \", gatsby_data[1],\"% of The Great Gatsby\", sep='')\n else:\n print(\"- Read The Great Gatsby \", gatsby_data[1], \" times\", sep='')\n \n # Again repeating the same process to print analytics about Avengers: Endgame\n hours_to_watch_endgame = 3.2\n endgame_data = time_to_perform_task(total_mins, hours_to_watch_endgame)\n if endgame_data[0]:\n print(\"- Watched \", endgame_data[1],\"% of Avengers: Endgame\", sep='')\n else:\n print(\"- Watched Avengers: Endgame \", endgame_data[1], \" times\", sep='')",
"def loadText(self,fileName,pickScheduleFile=None,imported=None):\n #--Localizing\n defs = self.defs\n log = self.log\n #--Re's\n reCell = re.compile(\"\\s*(\\\".*?\\\")\")\n reCodeCycle = re.compile(\"\\s*([1-4][ ,1-4]*)\")\n reComment = re.compile(r'\\s*\\#.*')\n reDef = re.compile(r'\\.([a-zA-Z]\\w+)')\n rePos = re.compile(\"-?\\d+\\s+-?\\d+\\s+-?\\d+\\s+-?\\d+\")\n reRepeat = re.compile('= (\\d)')\n reSleep = re.compile(r'([=+\\-\\*\\^~x])\\s+(.+)$')\n reWander = re.compile('wander +(\\d+)')\n reIsMember = re.compile('isMember +(\".+\")')\n #--Functions/Translators\n replDef = lambda a: defs[a.group(1)]\n #--0: awake, 1: sleep+trespass, 2: sleep 3: dim trespass\n sleepStates = {'=':None,'-':0,'+':1,'*':2,'^':3,'~':4,'x':5} \n #--Log\n header = os.path.split(fileName)[-1]\n if len(header) < 70: header += '='*(70-len(header))\n log.setHeader(header)\n #--Imported\n isTopFile = (imported == None)\n if isTopFile: imported = []\n #--Input variables\n section = None\n town = None\n townNpcs = set()\n townSchedule = None\n npcSchedule = None\n codeCycles = [0]\n #--Parse input file\n ins = file(fileName)\n for line in ins:\n #log(line.strip())\n #print line,\n #--Strip spaces and comments\n line = reComment.sub('',line)\n line = line.rstrip()\n #--Skip empty/comment lines\n if not line: continue\n #--Section header?\n if line[0] == '@':\n # (town|defs|night|code|npcName)[: npcCondition]\n parsed = line[1:].split(':',1)\n id = parsed[0].strip()\n #--Non-npc?\n if id in set(['town','defs','night','evening','code','import','project']):\n section = id\n if section in ('evening','night'):\n townSleep = self.sleep[town]\n elif section == 'code':\n cycles = [0]\n townCode = self.code[town] = [[],[],[],[],[]]\n else:\n section = 'npc'\n npc = id\n #--Any town,npc combination will overwrite any town,npc \n # combination from an imported file.\n if (town,npc) not in townNpcs:\n townNpcs.add((town,npc))\n townSchedule[npc] = []\n npcSchedule = [0,0,0,0]\n condition = (len(parsed) == 2 and parsed[1].strip())\n townSchedule[npc].append((condition,npcSchedule))\n if section not in set(('town','import','project')): \n log(' '+line[1:])\n #--Data \n else:\n #--Import\n if section == 'import':\n newPath = line.strip()\n log(_('IMPORT: ')+newPath)\n if not os.path.exists(newPath) and pickScheduleFile:\n caption = \"Find sub-import file %s:\" % (newPath,)\n newPath = pickScheduleFile(caption,newPath)\n if not (newPath and os.path.exists(newPath)):\n raise StateError(\"Unable to import schedule file: \"+line.strip())\n if newPath.lower() in [dir.lower() for dir in imported]:\n log(_(' [%s already imported.]') % (newPath,))\n else:\n log.indent += '> '\n imported.append(newPath)\n self.loadText(newPath,pickScheduleFile,imported)\n log.indent = log.indent[:-2]\n #--Project\n elif section == 'project' and isTopFile:\n self.project = line.strip()\n log(_('PROJECT: ')+self.project)\n #--Defs \n elif section == 'defs':\n (key,value) = line.strip().split(':',1)\n defs[key] = value.strip()\n #--Town\n elif section == 'town':\n town = line.strip()\n log.setHeader(town)\n if isTopFile:\n self.newTowns.add(town)\n if town not in self.schedule:\n self.schedule[town] = {}\n self.sleep[town] = {3:{},4:{}} \n townSchedule = self.schedule[town]\n npcSchedule = None\n codeCycles = []\n #--Code\n elif section == 'code':\n line = reDef.sub(replDef,line)\n maCodeCycle = reCodeCycle.match(line)\n if maCodeCycle:\n codeCycles = [int(x) for x in maCodeCycle.group(1).split(',')]\n continue\n for cycle in codeCycles:\n 
townCode[cycle].append(line)\n #--Evening/Night\n elif section in ('evening','night'):\n cycle = {'evening':3,'night':4}[section]\n line = reDef.sub(replDef,line)\n chunks = [chunk.strip() for chunk in line.split(';')]\n maSleep = reSleep.match(chunks[0])\n if not maSleep: continue\n (cell,defaultState) = (maSleep.group(2), sleepStates[maSleep.group(1)])\n cellStates = (defaultState,)\n for chunk in chunks[1:]:\n chunk = chunk.strip()\n maSleep = reSleep.match(chunk)\n if not maSleep or maSleep.group(1) == '=': \n raise MoshError(_('Bad sleep condition state for %s in %s: %s') \n % (section,town,line))\n condition,state = maSleep.group(2), sleepStates[maSleep.group(1)]\n condition = reIsMember.sub(r'getPCRank \\1 >= 0',condition)\n cellStates += ((condition,state),)\n townSleep[cycle][cell] = cellStates\n #--NPC\n elif section == 'npc':\n #--Get Cycle\n cycle = int(line[0])\n rem = line[2:]\n #--Repeater?\n maRepeat = reRepeat.match(rem)\n if maRepeat:\n oldCycle = int(maRepeat.group(1))\n npcSchedule[cycle-1] = npcSchedule[oldCycle-1]\n continue\n #--Replace defs\n rem = reDef.sub(replDef,rem)\n #--Cell\n maCell = reCell.match(rem)\n if not maCell:\n raise MoshError(_('Pos cell not defined for %s %s %d') % (town,npc,cycle))\n cell = maCell.group(1)\n rem = rem[len(cell):].strip()\n #--Pos\n maPos = rePos.match(rem)\n coords = maPos.group(0).strip().split()\n coords[-1] = `int(coords[-1])*57` #--Workaround interior rotation bug\n pos = 'positionCell %s %s' % (' '.join(coords),cell)\n rem = rem[len(maPos.group(0)):].strip()\n #--Wander/Travel\n ai = reWander.sub(r'wander \\1 5 10 ',rem)\n #--Save\n npcSchedule[cycle-1] = (pos,ai)\n ins.close()",
"def output_schedule_brief(cout, courses_to_schedule_d, courses_to_mt_d):\n cout.writerow([\"CourseCode\",\"DayWeek\",\"Start\",\"End\",\"Campus\"])\n\n # first write out the courses we just scheduled\n for cn in sorted(courses_to_schedule_d.keys()):\n meeting_time = courses_to_mt_d[cn]\n assert is_cross_list_canonical(cn)\n (subj, catalog) = sct.parse_canonical_course_name(cn)\n\n if print_area and subj != print_area:\n continue\n\n campus = \"Allston\" if will_be_allston_course_subj_catalog(subj, catalog) else \"Cambridge\"\n ct = ss.meeting_time_to_course_time(meeting_time)\n days = ct.days_of_week(separator='/')\n cout.writerow([cn, days, ct.time_start, ct.time_end, campus])\n\n # Now write out all the other courses\n for cn in sorted(sched_d.keys()):\n assert is_cross_list_canonical(cn)\n (subj, catalog) = sct.parse_canonical_course_name(cn)\n if print_area and subj != print_area:\n continue\n\n campus = \"Allston\" if will_be_allston_course_subj_catalog(subj, catalog) else \"Cambridge\"\n cts = sched_d[cn]\n for ct in cts:\n days = ct.days_of_week(separator='/')\n cout.writerow([cn, days, ct.time_start, ct.time_end, campus])"
]
| [
"0.79792595",
"0.6396415",
"0.6337348",
"0.63255966",
"0.6231173",
"0.60653955",
"0.6034211",
"0.5956495",
"0.5881271",
"0.5827585",
"0.58147764",
"0.5813226",
"0.57874453",
"0.5776859",
"0.57415116",
"0.5731401",
"0.56468225",
"0.56463355",
"0.56160754",
"0.5590078",
"0.55862665",
"0.55776405",
"0.5526123",
"0.5500247",
"0.5475438",
"0.5470287",
"0.5447873",
"0.54354346",
"0.5417355",
"0.5413515"
]
| 0.8151101 | 0 |
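A hedged usage sketch for the pp_schedule row above. It assumes the pp_schedule/ttint code from the document field has been saved as a module named schedule_tools (hypothetical name), and that every CSV line carries at least eight comma-separated fields in the layout the parser indexes: field 0 = module code, field 3 = day code (1-7, Monday-Sunday), fields 5 and 6 = start and end times in hhmm form, field 7 = venue. The file name, venue and module codes below are made up for illustration:

# Build a tiny timetable CSV in the column layout pp_schedule expects.
rows = [
    "CS1010,sem1,lecture,1,grp01,900,1100,LT19",    # Monday 0900-1100 in LT19
    "MA1301,sem1,tutorial,3,grp02,1400,1600,LT19",  # Wednesday 1400-1600 in LT19
]
with open("timetable.csv", "w") as f:
    f.write("\n".join(rows) + "\n")

# Draw the weekly occupancy table for venue LT19 in a turtle graphics window.
from schedule_tools import pp_schedule  # hypothetical module holding the code shown above
pp_schedule("timetable.csv", "LT19")

Because pp_schedule hands the parsed timetable to ttint, which drives the turtle cursor, the call only returns once the drawing is finished; the quit() helper (or turtle.bye()) can then be used to close the window.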