query (stringlengths 9-9.05k) | document (stringlengths 10-222k) | metadata (dict) | negatives (sequencelengths 30-30) | negative_scores (sequencelengths 30-30) | document_score (stringlengths 4-10) | document_rank (stringclasses 2 values) |
---|---|---|---|---|---|---|
Displays the status, the owner, the project name and the number of cart items. | def __str__(self):
return _(
"cart (status: %(status)s, owner: %(owner)s, project name: "
"%(project_name)s, number of cart items: %(nb_cart_items)d, "
"total amount: %(total_amount)d)"
) % {
'status': self.CART_STATUSES[self.status][1],
'owner': self.owner,
'project_name': self.project_name,
'nb_cart_items': self.nb_cart_items,
'total_amount': self.total_amount,
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def display(auth_context):\n\n cart = carts.get_cart(auth_context.get('uid'))\n for item in cart:\n product = product_catalog.get_product(item.item_id)\n item.info = product\n\n return render_template('cart.html',\n cart=cart,\n auth_context=auth_context,\n bucket=product_catalog.BUCKET)",
"def cart_contents(request):\n\n cart = request.session.get('cart', {})\n cart_items = []\n upvote_list = []\n price = 10\n total = 0\n ticket_count = 0\n\n for id, quantity in cart.items():\n ticket = get_object_or_404(Issues, pk=id)\n upvote_list.append(id)\n ticket_count += quantity\n total += quantity * price\n cart_items.append({'id': id, 'quantity': quantity,\n 'ticket': ticket, 'price': price})\n\n return {'ticket_count': ticket_count,\n 'cart_items': cart_items,\n 'total': total,\n 'upvote_list': upvote_list}",
"def project_detail(request, project_id):\n\n game_project = get_object_or_404(GameProject, pk=project_id)\n donation_options = Donation.objects.all()\n profile = get_object_or_404(Profile, user=request.user)\n\n game_project.total_amount = 0\n for order in Order.objects.filter(\n game_project=game_project).filter(status='PA'):\n game_project.total_amount += order.donation_item.amount\n\n template = 'gameproject/project_detail.html'\n context = {\n 'game_project': game_project,\n 'donation_options': donation_options,\n 'profile': profile,\n }\n return render(request, template, context)",
"def cart_detail(request):\n cart = Cart(request)\n return render(request, 'cart/cart.html', {'cart': cart})",
"def cart_detail(request):\n cart = Cart(request)\n # Allow user to change the quantity from the details page.\n for item in cart:\n # Remember that a cart is stored as a dictionary in the user's session.\n # Here, we're adding a new key/value pair to the cart.\n # Create an instance of CartAddProductForm for each item in the cart to\n # allow changing product quantities. Initialize the form with the current\n # item quantity and set the update field to True so that when we submit the\n # form to the cart_add view, the current quantity is replaced with the new\n # one.\n # I DON'T QUITE UNDERSTAND WHAT THIS CODE IS DOING.\n item['update_quantity_form'] = CartAddProductForm(\n initial={'quantity': item['quantity'],\n 'update': True})\n coupon_apply_form = CouponApplyForm()\n return render(request, 'cart/detail.html', {'cart': cart, 'coupon_apply_form': coupon_apply_form})",
"def cart_contents(request):\n\n price = 10\n total = 0\n tickets_count = 0\n\n cart = request.session.get('cart', {})\n\n cart_items = []\n upvote_list = []\n\n for id, quantity in cart.items():\n ticket = get_object_or_404(Ticket, pk=id)\n\n upvote_list.append(id)\n tickets_count += quantity # Items in cart\n total += quantity * price # Total to be paid\n\n cart_items.append({'id': id, 'quantity': quantity,\n 'ticket': ticket, 'price': price})\n\n return {'tickets_count': tickets_count,\n 'cart_items': cart_items,\n 'total': total,\n 'price': price,\n 'upvote_list': upvote_list}",
"def displaySummary(self):\r\n print('Project Name:' + self.project['name'])\r\n print('Project chip:' + self.project['chip'])\r\n print('Project includes: ' + ' '.join(self.project['incs']))\r\n print('Project defines: ' + ' '.join(self.project['defs']))\r\n print('Project srcs: ' + ' '.join(self.project['srcs']))",
"def view_cart(request):\n \n return render(request, \"cart.html\" )",
"def api_display_cart():\r\n\tconn = sqlite3.connect('Shopify_products.db')\r\n\tconn.row_factory = dict_factory\r\n\tcur = conn.cursor()\r\n\tcart = cur.execute('SELECT * FROM cart;').fetchall()\r\n\tcart.append(cur.execute('SELECT SUM(price) from cart;').fetchone())\r\n\treturn jsonify(cart)",
"def view_cart(request):\n\n try:\n cart = Order.objects.get(user=request.user,status='N')\n if cart is not None:\n cart_list = OrderLine.objects.filter(order=cart)\n\n # calculate total\n total=0\n for cart_item in cart_list:\n total+=cart_item.menu_item.price*cart_item.quantity\n\n return render(request, \"HotelMgmt/cart.html\", {'cart_list': cart_list})\n except Exception as e:\n print(str(e))\n return render(request, \"HotelMgmt/cart.html\")",
"def cart_contents(request):\n cart = request.session.get('cart', {})\n\n cart_items = []\n total = 0\n child_count = 0\n \n for id, donation in cart.items():\n child = get_object_or_404(Child, pk=id)\n total += donation\n child_count += 1\n cart_items.append({'id': id, 'donation': donation, 'child': child})\n \n return {'cart_items': cart_items, 'total': total, 'child_count': child_count}",
"def info(id):\n sql = \"select distinct name, description, stars, url, last_push_date, repo_id, created_date, avatar from python_repos where repo_id=\"+id\n db = get_db()\n cursor = db.execute(sql)\n repo_info = cursor.fetchall()\n return render_template('repo.html',info=repo_info)",
"def detail(request):\n # del request.session['cart_id']\n # del request.session['total_in_cart']\n data = {}\n if (cart_id := request.session.get('cart_id', None)):\n cart = Cart.objects.get(pk=cart_id)\n data['products_in_cart'] = cart.cartitems.all()\n data['total_price'] = cart.cart_price\n\n return render(request, 'cart/details.html', data)",
"def status(self,project_dir):\n \n if \"towercrane\" not in os.listdir(project_dir):\n print('(!) No project has been initialized yet.\\n => you can use \"towercrane init\" to start a new project.\\n => Or it might be because you have lost the \"towercrane config file\" ')\n \n elif \"towercrane\" in os.listdir(project_dir):\n TowercraneConfig = read_config(project_dir)\n project, files = self.db.get_project(TowercraneConfig[\"projectkey\"])\n files_table = tabulate([[file[1],file[0],file[2],file[-1]] for file in files], headers=['File Name', 'File Key','Size','status'], tablefmt='orgtbl')\n print(f'project:\"{TowercraneConfig[\"project_name\"]}\" with projectkey: \"{TowercraneConfig[\"projectkey\"]}\"\\nFiles added to the project: \\n\\n{files_table}')",
"def show_cart(update, context):\n bot = context.bot\n query = update.callback_query\n\n chat_id = update.effective_chat.id\n user = update.effective_user\n\n # all items ordered by user in message and his price to pay for them\n message_and_price = str_user_cart(chat_id, user.id)\n # InlineKeyboard back to start menu\n keyboard = [[InlineKeyboardButton(\"back to menu\", callback_data=str(ONE))]]\n # change last message send by bot\n bot.edit_message_text(chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text=message_and_price['message'],\n reply_markup=InlineKeyboardMarkup(keyboard))\n # notify ConversationHandler of SEVENTH stage\n return SEVENTH",
"def view_cart(request):\n categories = all_categories()\n productTypes = all_productTypes()\n return render(request, \"cart.html\", {\"categories\": categories,\n \"productTypes\": productTypes})",
"def __str__(self):\n return (\n f'{self.quantity}x {self.item.name} '\n f'({self.shopping_cart.user.email})'\n )",
"def cart_contents(request):\n cart = request.session.get('cart', {})\n \n cart_items = []\n total = 0\n feature_count = 0\n \n for id, quantity in cart.items():\n feature = get_object_or_404(FeatureTicket, pk=id)\n print(feature)\n total += quantity * feature.contribution\n feature_count += quantity\n cart_items.append({'id': id, 'quantity': quantity, 'feature': feature})\n \n return { 'cart_items': cart_items, 'total': total, 'feature_count': feature_count }",
"def show():\n info(str(Project))",
"def index(self):\n s = \"\"\n\n sb = []\n for sim in self.simulations.values():\n url = \"{0.uid}/{0.password}/status\".format(sim)\n sb.append(\"<a href='{0}'>{1.uid}</a></br>\".format(\n url, sim))\n s += \"<b>Simulations running:</b></br>\"\n s += \"\\n\".join(sb)\n\n s += \"<b>List of items in shop:</b>\\n</br>\"\n s += \"\\n</br>\".join(self.shop.itemAndCostDict.keys())\n \n s += \"</br><b>List of all items:</b>\\n</br>\"\n s += \"\\n</br>\".join(item.items.keys())\n\n return s",
"def view_cart(request):\n\n return render(request, 'cart/cart.html')",
"def view_cart(request):\n\n return render(request, 'cart/cart.html')",
"async def status(self, ctx, project_name: str) -> discord.Message:\n if not ctx.projects.find_project(project_name):\n await ctx.send(\"This project doesn't exist.\")\n return\n progress_bar = ctx.projects.project_progress_bar(project_name)\n if not progress_bar:\n progress_bar = self.empty_progress_bar\n await ctx.send(progress_bar)",
"def cart_contents(request):\n cart = request.session.get('cart', {})\n\n cart_items = []\n total_cart = 0\n item_count = 0\n partial_value = []\n\n for item in cart:\n if item == 'car':\n id = cart['car']['item_id']\n quantity = cart['car']['quantity']\n instance = Car\n item_type = 'car'\n elif item == 'track_day':\n id = cart['track_day']['item_id']\n quantity = cart['track_day']['quantity']\n instance = TrackDayAddon\n item_type = 'track_day'\n elif item == 'insurance':\n id = cart['insurance']['item_id']\n quantity = cart['insurance']['quantity']\n instance = InsuranceAddon\n item_type = 'insurance'\n elif item == 'private_driver':\n id = cart['private_driver']['item_id']\n quantity = cart['private_driver']['quantity']\n instance = PrivateDriverAddon\n item_type = 'private_driver'\n\n item = get_object_or_404(instance, pk=id)\n total_cart += quantity * item.price\n item_total = quantity * item.price\n item_count += 1\n\n partial_value.append({\n 'item': item,\n 'item_type': item_type,\n 'id': id,\n 'item_total': item_total\n })\n cart_items.append({\n 'item': item,\n 'item_type': item_type,\n 'id': id,\n 'quantity': quantity,\n })\n\n return {'cart_items': cart_items, 'partial_value': partial_value,\n 'total_cart': total_cart, 'item_count': item_count}",
"def get_project_info():\n\n title = request.args.get('project')\n\n project_info_list = hackbright.get_project_by_title(title)\n\n html = render_template(\"project_info.html\",\n project_info_list=project_info_list)\n return html",
"def cart_contents(request):\n cart = request.session.get('cart', {})\n cart_items = []\n\n total = 0\n feature_count = 0\n\n for id, quantity in cart.items():\n feature = get_object_or_404(Feature, pk=id)\n total += quantity * feature.vote_price\n feature_count += quantity\n cart_items.append({'id': id, 'quantity': quantity, 'feature': feature})\n return {\n 'cart_items': cart_items,\n 'total': total,\n 'feature_count': feature_count\n }",
"def displayInventory(bag):\n print(\"Inventory:\")\n item_total = 0\n for k, v in bag.items():\n print(str(v) + ' ' + str(k))\n item_total += v\n print(\"Total number of items: \" + str(item_total))\n print('\\n')",
"def cart_contents(request):\n cart_items = []\n total = 0\n savings = 0\n product_count = 0\n points_available = 0\n points_earned = 0\n discount_applied = request.session.get('discount_applied')\n cart = request.session.get('cart', {})\n\n # Create a new dict so that items can be removed if needed\n new_dict = {k: v for k, v in cart.items()}\n\n for item, quantity in new_dict.items():\n # Use string created in cart view to isolate model ids\n product_id = item.split(\"_\")[0]\n size_id = item.split(\"_\")[1]\n nic_id = item.split(\"_\")[2]\n\n # Retrieve relevant objects for templating and remove if\n # no longer in database\n try:\n product = Product.objects.get(pk=product_id)\n except Product.DoesNotExist:\n del cart[item]\n messages.error(request, 'An item was removed from your cart as it is \\\n no longer available. Try to find a worthy replacement!')\n continue\n # Repeat for Size\n try:\n size = Size.objects.get(pk=size_id)\n except Size.DoesNotExist:\n del cart[item]\n messages.error(request, 'An item could not be added as its \\\n size is no longer available. \\\n Try to find a worthy replacement!')\n continue\n # Repeat for Nicotine\n try:\n nic = Nicotine.objects.get(pk=nic_id)\n except Nicotine.DoesNotExist:\n del cart[item]\n messages.error(request, 'An item could not be added as its \\\n nicotine options have changed. \\\n Try to find a worthy replacement!')\n continue\n\n # Check sale status and retrieve relevant price from Size model\n if product.on_sale:\n price = size.sale_price\n savings += (size.price - size.sale_price) * quantity\n else:\n price = size.price\n total += quantity * price\n product_count += quantity\n cart_items.append({\n 'item_id': item,\n 'product': product,\n 'size': size,\n 'nic': nic,\n 'price': price,\n 'quantity': quantity,\n })\n\n original_total = total\n request.session['cart'] = cart\n\n # Get user profile\n if request.user.is_authenticated:\n profile = get_object_or_404(UserProfile, user_id=request.user)\n\n else:\n profile = None\n\n # Check for available points\n if profile:\n points_available = profile.points\n\n # Check if user has chosen to redeem points and that the discount\n # will never take the total below zero\n if discount_applied:\n if total - Decimal(points_available / 100) <= 0:\n total = 0\n\n else:\n total -= Decimal(points_available / 100)\n\n if total < settings.FREE_DELIVERY_THRESHOLD:\n delivery = Decimal(settings.STANDARD_DELIVERY)\n free_delivery_delta = settings.FREE_DELIVERY_THRESHOLD - total\n\n else:\n delivery = 0\n free_delivery_delta = 0\n\n grand_total = delivery + total\n points_earned = int(math.floor(total))\n\n context = {\n 'cart_items': cart_items,\n 'total': total,\n 'original_total': original_total,\n 'savings': savings,\n 'product_count': product_count,\n 'delivery': delivery,\n 'free_delivery_delta': free_delivery_delta,\n 'free_delivery_threshold': settings.FREE_DELIVERY_THRESHOLD,\n 'grand_total': grand_total,\n 'points_available': points_available,\n 'discount_applied': discount_applied,\n 'points_earned': points_earned,\n }\n\n return context",
"def cart_detail(request):\n assert isinstance(request, HttpRequest)\n\n if request.method == \"POST\":\n cart_service.remove_from_cart(request)\n\n return render(\n request,\n 'cartapp/cart_detail.html',\n {\n 'title':'Cart Page',\n 'year':datetime.now().year,\n }\n )\n else:\n return render(\n request,\n 'cartapp/cart_detail.html',\n {\n 'title':'Cart Page',\n 'year':datetime.now().year,\n }\n )",
"def view_cart(request):\n return render(request, \"cart.html\")"
] | [
"0.65103585",
"0.60823727",
"0.59575856",
"0.5933486",
"0.5839279",
"0.5830037",
"0.5798492",
"0.57583416",
"0.57334095",
"0.5703823",
"0.5699384",
"0.5692979",
"0.56647104",
"0.56580704",
"0.56565565",
"0.56434786",
"0.563236",
"0.5614772",
"0.5598291",
"0.5586921",
"0.5568391",
"0.5568391",
"0.55413437",
"0.55256647",
"0.551651",
"0.5506415",
"0.5504714",
"0.5498697",
"0.5467686",
"0.5456587"
] | 0.7057877 | 0 |
Retrieves all cart items for a given cart. | def get_cart_items(self):
return CartItem.objects.filter(cart=self) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_cart_items(request):\n return CartItem.objects.filter(cart_id=_cart_id(request))",
"def get_cart_items(request):\n return CartItem.objects.filter(cart_id=_cart_id(request))",
"def get_cart_items(request):\n return CartItem.objects.filter(cart_id = get_cart_id_session(request))",
"def get_items_by_cart_page(request):\n items = Item.get_items_by_cart(request.GET['cart_id'])\n items = models_to_json(items)\n return JsonResponse({'items': items})",
"def all(cls):\n cls.logger.info(\"Processing all Shopcart Items\")\n return cls.query.order_by(cls.id).all()",
"def get_cart_ingredients(cls, cartid):\n\n cart_ings = Cart_Ingredient.query.filter_by(cart_id=cartid).all()\n\n return cart_ings",
"def test_get_cart_items(self):\n user_id = '111'\n cart_id = self.cart_item_manager.create_cart(user_id, 'test cart', False)\n item_ids = [\n self.cart_item_manager.add_cart_item(self.catalog, user_id, cart_id, '1', 'entity_type', 'entity_version'),\n self.cart_item_manager.add_cart_item(self.catalog, user_id, cart_id, '2', 'entity_type', 'entity_version'),\n self.cart_item_manager.add_cart_item(self.catalog, user_id, cart_id, '3', 'entity_type', 'entity_version'),\n self.cart_item_manager.add_cart_item(self.catalog, user_id, cart_id, '4', 'entity_type', 'entity_version')\n ]\n retrieved_item_ids = [item['CartItemId'] for item in\n self.cart_item_manager.get_cart_items(user_id, cart_id)]\n self.assertEqual(sorted(item_ids), sorted(retrieved_item_ids))",
"def all(cls):\n cls.logger.info(\"Processing all Shopcarts\")\n return cls.query.order_by(cls.id).all()",
"def get_cart(id):\n url = carts_service_host + '/cart/' + id\n cart = requests.get(url).json()\n total = self._get_cart_total(cart['items'])\n return (jsonify(dict(total=total, cart=cart)),\n 200)",
"def get_cart_contents(db):",
"def cart_contents(request):\n cart = request.session.get('cart', {})\n\n cart_items = []\n total = 0\n child_count = 0\n \n for id, donation in cart.items():\n child = get_object_or_404(Child, pk=id)\n total += donation\n child_count += 1\n cart_items.append({'id': id, 'donation': donation, 'child': child})\n \n return {'cart_items': cart_items, 'total': total, 'child_count': child_count}",
"def calculate_checkout(cart):\n out = []\n for item in cart:\n product = session().query(Product).get(item.get('id_product'))\n if product:\n out += product.get_calculated_values(item.get('quantity'))\n\n return out",
"def place_order(self, cart_id):\n # Add the items from the cart to a list and return it\n order = []\n for prod, _ in self.carts[cart_id]:\n order.append(prod)\n return order",
"def api_display_cart():\r\n\tconn = sqlite3.connect('Shopify_products.db')\r\n\tconn.row_factory = dict_factory\r\n\tcur = conn.cursor()\r\n\tcart = cur.execute('SELECT * FROM cart;').fetchall()\r\n\tcart.append(cur.execute('SELECT SUM(price) from cart;').fetchone())\r\n\treturn jsonify(cart)",
"def get_cart_by_id(cls, cart_id):\n\n cart = Cart.query.filter_by(cart_id=cart_id).one()\n\n return cart",
"def all_to_cart(self):\n c = len(self.__products_add_cart)\n for x in range(c):\n self.add_cart(x)",
"def get_all(conn, user_id: int) -> list:\n with conn.cursor() as cursor:\n cursor.execute(f\"\"\"select id_product from cart\n where id_user = {user_id}\"\"\")\n result = cursor.fetchall()\n return [i[0] for i in result]",
"def cart(request):\n return {'cart': get_cart_from_request(request)}",
"def get_items_for_catalog(catalog_id):\n pass",
"def get_items(self) -> list:\n if self._cached_items is None:\n self._cached_items = list(self.items.all())\n return self._cached_items",
"def GetCart(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def get_catalog_items(id):\n\n username = login_session.get('username', None)\n catalogs = session.query(Catalog).all()\n selected_catalog = session.query(Catalog).filter_by(id=id).one()\n items = selected_catalog.items\n catalogs_display = [\n {\n 'id': catalog.id,\n 'name': catalog.name\n } for catalog in catalogs]\n items_display = [{'id': item.id, 'title': item.title} for item in items]\n items_summary = '{0} Items ({1} items)'.format(\n selected_catalog.name,\n len(items_display))\n return render_template(\n 'home.html',\n catalogs_display=catalogs_display,\n items_display=items_display,\n items_summary=items_summary,\n username=username)",
"def get_restaurant_carts_page(request):\n restaurant_id = request.GET.get('restaurant_id')\n carts = Cart.restaurants_carts(Cart, restaurant_id)\n carts_dict = {'carts': models_to_json(carts)}\n return JsonResponse(carts_dict)",
"def cart_contents(request):\n\n price = 10\n total = 0\n tickets_count = 0\n\n cart = request.session.get('cart', {})\n\n cart_items = []\n upvote_list = []\n\n for id, quantity in cart.items():\n ticket = get_object_or_404(Ticket, pk=id)\n\n upvote_list.append(id)\n tickets_count += quantity # Items in cart\n total += quantity * price # Total to be paid\n\n cart_items.append({'id': id, 'quantity': quantity,\n 'ticket': ticket, 'price': price})\n\n return {'tickets_count': tickets_count,\n 'cart_items': cart_items,\n 'total': total,\n 'price': price,\n 'upvote_list': upvote_list}",
"def get_all(self, **kwargs):\n context = pecan.request.context\n policy.enforce(context, \"container:get_all\",\n action=\"container:get_all\")\n return self._get_containers_collection(**kwargs)",
"def get(self,request):\r\n try:\r\n if request.user.is_authenticated():\r\n cart = self.cart_obj.get_cart_by_user(request.user)\r\n else:\r\n cart = self.cart_obj.get_cart_by_id(request.session.get('cart_id',None))\r\n \r\n if not cart:\r\n self.context['no_items'] = True\r\n return render(request, 'cart.html', self.context)\r\n request.session['cart_id'] = cart.first().id\r\n cart_details_list =[]\r\n if cart:\r\n cart_details = self.cart_det_obj.get_cart_items(cart.first().id) \r\n \"\"\" \r\n :Note If face any issue with cart order by cartid and get the latest cartid.\r\n \"\"\"\r\n for cart in cart_details:\r\n product = Product.objects.filter(id=cart.product_id)\r\n cart_temp_dict = {}\r\n cart_temp_dict['product'] = product.first()\r\n cart_temp_dict['quantity'] = cart.quantity\r\n cart_temp_dict['price'] = product.first().price\r\n cart_temp_dict[cart.id] = cart.id\r\n cart_details_list.append(cart_temp_dict)\r\n \r\n self.context['cart_details'] = cart_details_list\r\n self.context['cart_count'] = cart_details.count()\r\n response = render(request, 'cart.html', self.context)\r\n return response\r\n except:\r\n print(\"500\")\r\n raise Exception",
"def view_cart(request):\n categories = all_categories()\n productTypes = all_productTypes()\n return render(request, \"cart.html\", {\"categories\": categories,\n \"productTypes\": productTypes})",
"def cart_contents(request):\n cart_items = []\n total = 0\n savings = 0\n product_count = 0\n points_available = 0\n points_earned = 0\n discount_applied = request.session.get('discount_applied')\n cart = request.session.get('cart', {})\n\n # Create a new dict so that items can be removed if needed\n new_dict = {k: v for k, v in cart.items()}\n\n for item, quantity in new_dict.items():\n # Use string created in cart view to isolate model ids\n product_id = item.split(\"_\")[0]\n size_id = item.split(\"_\")[1]\n nic_id = item.split(\"_\")[2]\n\n # Retrieve relevant objects for templating and remove if\n # no longer in database\n try:\n product = Product.objects.get(pk=product_id)\n except Product.DoesNotExist:\n del cart[item]\n messages.error(request, 'An item was removed from your cart as it is \\\n no longer available. Try to find a worthy replacement!')\n continue\n # Repeat for Size\n try:\n size = Size.objects.get(pk=size_id)\n except Size.DoesNotExist:\n del cart[item]\n messages.error(request, 'An item could not be added as its \\\n size is no longer available. \\\n Try to find a worthy replacement!')\n continue\n # Repeat for Nicotine\n try:\n nic = Nicotine.objects.get(pk=nic_id)\n except Nicotine.DoesNotExist:\n del cart[item]\n messages.error(request, 'An item could not be added as its \\\n nicotine options have changed. \\\n Try to find a worthy replacement!')\n continue\n\n # Check sale status and retrieve relevant price from Size model\n if product.on_sale:\n price = size.sale_price\n savings += (size.price - size.sale_price) * quantity\n else:\n price = size.price\n total += quantity * price\n product_count += quantity\n cart_items.append({\n 'item_id': item,\n 'product': product,\n 'size': size,\n 'nic': nic,\n 'price': price,\n 'quantity': quantity,\n })\n\n original_total = total\n request.session['cart'] = cart\n\n # Get user profile\n if request.user.is_authenticated:\n profile = get_object_or_404(UserProfile, user_id=request.user)\n\n else:\n profile = None\n\n # Check for available points\n if profile:\n points_available = profile.points\n\n # Check if user has chosen to redeem points and that the discount\n # will never take the total below zero\n if discount_applied:\n if total - Decimal(points_available / 100) <= 0:\n total = 0\n\n else:\n total -= Decimal(points_available / 100)\n\n if total < settings.FREE_DELIVERY_THRESHOLD:\n delivery = Decimal(settings.STANDARD_DELIVERY)\n free_delivery_delta = settings.FREE_DELIVERY_THRESHOLD - total\n\n else:\n delivery = 0\n free_delivery_delta = 0\n\n grand_total = delivery + total\n points_earned = int(math.floor(total))\n\n context = {\n 'cart_items': cart_items,\n 'total': total,\n 'original_total': original_total,\n 'savings': savings,\n 'product_count': product_count,\n 'delivery': delivery,\n 'free_delivery_delta': free_delivery_delta,\n 'free_delivery_threshold': settings.FREE_DELIVERY_THRESHOLD,\n 'grand_total': grand_total,\n 'points_available': points_available,\n 'discount_applied': discount_applied,\n 'points_earned': points_earned,\n }\n\n return context",
"def get_all_items(self):\n return self.api.state['items']",
"def test_get_cart(self):\n user_id = '123'\n cart_id = self.cart_item_manager.create_cart(user_id, 'Cart1', True)\n self.assertEqual(self.cart_item_manager.get_cart(user_id, cart_id),\n self.dynamo_accessor.get_item(config.dynamo_cart_table_name,\n keys={'UserId': user_id, 'CartId': cart_id}))"
] | [
"0.7381225",
"0.7381225",
"0.72456187",
"0.69202214",
"0.6845669",
"0.66992563",
"0.65180063",
"0.6440269",
"0.6359296",
"0.6044959",
"0.6000852",
"0.59828556",
"0.5950014",
"0.5811841",
"0.5804673",
"0.573022",
"0.5728143",
"0.568424",
"0.56840277",
"0.5662042",
"0.5657203",
"0.565515",
"0.5647667",
"0.5644854",
"0.56266874",
"0.5617266",
"0.55993396",
"0.55894905",
"0.55567276",
"0.55467665"
] | 0.7630341 | 0 |
Retrieves the number of distinct cart items for a given cart. | def nb_cart_items(self):
return CartItem.objects.filter(cart=self).count() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cart_distinct_item_count(request):\n return get_cart_items(request).count()",
"def get_cart_counter(request):\n return len(get_cart_items(request))",
"def num_carts(self):\n return self._num_carts",
"def __len__(self):\n return sum(item['qty'] for item in self.cart.values())",
"def __len__(self):\n \n return sum(item['quantity'] for item in self.cart.values())",
"def __len__(self):\n return sum(item['quantity'] for item in self.cart.values())",
"def get_total_of_cart(session_id):\n cart_items = CartItem.objects.filter(cart_id=session_id)\n cart_total_list = [cart_item.total() for cart_item in cart_items]\n return sum(cart_total_list)",
"def count_items(self):\n count = 0\n for o in self.order_lst:\n count += o.count()\n \n return count",
"def __len__(self):\n return sum(item['qty'] for item in self.basket.values()) # counts all the values of the key qty",
"def get_cart_items(request):\n return CartItem.objects.filter(cart_id=_cart_id(request))",
"def get_cart_items(request):\n return CartItem.objects.filter(cart_id=_cart_id(request))",
"def count(self, **query):\n # This may be optimised into one query in the future.\n result = 0\n for product_type, count in self._do_count_by_product(query):\n result += count\n\n return result",
"def count_deck(deck):\n return reduce(lambda x, y: x + y['quantity'], deck['cards'], 0)",
"def test_get_user_carts(self):\n self.cart_item_manager.create_cart('123', 'Cart1', True)\n self.cart_item_manager.create_cart('123', 'Cart2', False)\n self.cart_item_manager.create_cart('123', 'Cart3', False)\n self.cart_item_manager.create_cart('124', 'Cart2', True)\n self.assertEqual(3, len(self.cart_item_manager.get_user_carts('123')))",
"def get_total_item_size(dataset):\n total_items = 0\n for element in dataset:\n total_items += 1\n return total_items",
"def get_cart_items(self):\n return CartItem.objects.filter(cart=self)",
"def num_items(self):\n num_items = 0\n for line in self.lines.all():\n num_items += line.quantity\n return num_items",
"def get_catalog_size() -> int:\n return len(gift_catalog)",
"def __len__(self):\n return sum(item[\"quantity\"] for item in self.carro.values())",
"def get_cart_items(request):\n return CartItem.objects.filter(cart_id = get_cart_id_session(request))",
"def count_by_product(self, **query):\n return self._do_count_by_product(query)",
"def count(item):\n return len(item)",
"def get_item_count(self):\n resp = self.app.get('/items')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)",
"def numero_cartao(self):\n return self._numero_cartao",
"def get_count(cls, project_id, session=None):\n return cls.db_repo.get_count(project_id, session=session)",
"def calculate_checkout(cart):\n out = []\n for item in cart:\n product = session().query(Product).get(item.get('id_product'))\n if product:\n out += product.get_calculated_values(item.get('quantity'))\n\n return out",
"def quantity(self) -> int:\n if self._cached_items is not None:\n return sum([item.quantity for item in self._cached_items])\n aggr = self.items.aggregate(quantity=models.Sum('quantity'))\n return aggr['quantity'] or 0",
"def topkCount(self, key, *items):\n params = [key]\n params += items\n\n return self.execute_command(self.TOPK_COUNT, *params)",
"def get_product_count(self):\n return self.products.count()",
"def test_get_cart_items(self):\n user_id = '111'\n cart_id = self.cart_item_manager.create_cart(user_id, 'test cart', False)\n item_ids = [\n self.cart_item_manager.add_cart_item(self.catalog, user_id, cart_id, '1', 'entity_type', 'entity_version'),\n self.cart_item_manager.add_cart_item(self.catalog, user_id, cart_id, '2', 'entity_type', 'entity_version'),\n self.cart_item_manager.add_cart_item(self.catalog, user_id, cart_id, '3', 'entity_type', 'entity_version'),\n self.cart_item_manager.add_cart_item(self.catalog, user_id, cart_id, '4', 'entity_type', 'entity_version')\n ]\n retrieved_item_ids = [item['CartItemId'] for item in\n self.cart_item_manager.get_cart_items(user_id, cart_id)]\n self.assertEqual(sorted(item_ids), sorted(retrieved_item_ids))"
] | [
"0.83708996",
"0.7122882",
"0.66277915",
"0.64961153",
"0.6374639",
"0.63468164",
"0.5831994",
"0.5810013",
"0.578669",
"0.5636934",
"0.5636934",
"0.56178385",
"0.56098634",
"0.5601126",
"0.55407643",
"0.54967433",
"0.5454864",
"0.54540205",
"0.54383975",
"0.5436613",
"0.53979284",
"0.53708696",
"0.5366422",
"0.5364782",
"0.5362142",
"0.53454566",
"0.53282034",
"0.53078485",
"0.53062123",
"0.5293061"
] | 0.73278224 | 1 |
Retrieves the total amount of cart items for a given cart. | def total_amount(self):
total_amount = 0
for cart_item in self.get_cart_items():
total_amount += cart_item.total_price
return total_amount | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_total_of_cart(session_id):\n cart_items = CartItem.objects.filter(cart_id=session_id)\n cart_total_list = [cart_item.total() for cart_item in cart_items]\n return sum(cart_total_list)",
"def cart_subtotal(request):\n cart_total = decimal.Decimal('0.00')\n cart_products = get_cart_items(request)\n for item in cart_products:\n cart_total += item.product.price * item.quantity\n return cart_total",
"def get_order_total(self):\n order_total = 0\n for item in self.cart_items:\n order_total += item['price']\n return order_total",
"def get_total_price(self):\n return sum(Decimal(item[\"price\"]) * item[\"qty\"] for item in self.cart.values())",
"def total_quantity(self) -> int:\n total = 0\n for i in self.order_items:\n total += i.quantity\n return total",
"def total_purchase(self):\n\n total_amount = 0\n #grab all the item\n items = self.item_set.all()\n for item in items:\n total_amount += item.price\n return total_amount",
"def total_qty(self):\n return sum(self.quantities)",
"def __len__(self):\n return sum(item['qty'] for item in self.cart.values())",
"def get_amount(data):\r\n data = json.loads(data)\r\n products = data.get(\"CartProduct\", {\"all\": []})\r\n\r\n # Make sure we get all products in the cart.\r\n if \"all\" in products: products = products[\"all\"]\r\n else : products = [products]\r\n\r\n amount = 0.0\r\n\r\n for p in products:\r\n try: amount += float(p[\"productPrice\"]) * float(p[\"productQuantity\"])\r\n except: pass\r\n \r\n return amount",
"def __len__(self):\n \n return sum(item['quantity'] for item in self.cart.values())",
"def __len__(self):\n return sum(item['quantity'] for item in self.cart.values())",
"def nb_cart_items(self):\n return CartItem.objects.filter(cart=self).count()",
"def calculate_checkout(cart):\n out = []\n for item in cart:\n product = session().query(Product).get(item.get('id_product'))\n if product:\n out += product.get_calculated_values(item.get('quantity'))\n\n return out",
"def get_total_price(items):\n total = 0\n # Loops all items and add the price to total\n for i in items:\n total += float(i['price'])\n return total",
"def get_cart_counter(request):\n return len(get_cart_items(request))",
"def get_cart(id):\n url = carts_service_host + '/cart/' + id\n cart = requests.get(url).json()\n total = self._get_cart_total(cart['items'])\n return (jsonify(dict(total=total, cart=cart)),\n 200)",
"def get_total_amount(self):\n total_price = 0.00\n\n for k, v in self.order.product_orders.items():\n total_price += v.quantity * v.product.price\n\n return total_price",
"def get_total_price(self):\n subtotal = sum(Decimal(item['price']) * item['qty'] for item in self.basket.values())\n\n if subtotal == 0:\n shipping = Decimal(0.00)\n else:\n shipping = Decimal(11.50)\n\n total = subtotal + Decimal(shipping)\n return total",
"def get_cart_quantity_and_total_price(\n *,\n request: 'HttpRequest',\n) -> 'CartPriceInfo':\n cart = get_cart_from_request(\n request=request,\n cart_queryset=Cart.objects.open().only(\n 'quantity',\n 'total_price',\n ),\n auto_create=False\n )\n if cart:\n quantity = cart.quantity\n total_price = cart.total_price\n else:\n quantity = total_price = 0\n\n return CartPriceInfo(\n quantity=quantity,\n total_price=total_price\n )",
"def test_shopping_cart_displays_total_cost(self):\n expected_cart_cost = 0\n for item in self.fill_session_cart():\n expected_cart_cost += item['price'] * item['amount']\n\n self.client.get(self.SHOP_CART_URL)\n self.assertEqual(self.client.session['cart_cost'], expected_cart_cost)",
"def quantity(self) -> int:\n if self._cached_items is not None:\n return sum([item.quantity for item in self._cached_items])\n aggr = self.items.aggregate(quantity=models.Sum('quantity'))\n return aggr['quantity'] or 0",
"def _grand_total(self):\n count = 0\n for product in self.products:\n count += product.price\n return count",
"def cart_contents(request):\n cart = request.session.get('cart', {})\n\n cart_items = []\n total = 0\n child_count = 0\n \n for id, donation in cart.items():\n child = get_object_or_404(Child, pk=id)\n total += donation\n child_count += 1\n cart_items.append({'id': id, 'donation': donation, 'child': child})\n \n return {'cart_items': cart_items, 'total': total, 'child_count': child_count}",
"def get_cart_items(self):\n return CartItem.objects.filter(cart=self)",
"def num_carts(self):\n return self._num_carts",
"def cart_distinct_item_count(request):\n return get_cart_items(request).count()",
"def total_price(self) -> Decimal:\n total_price: Decimal = ZERO_AMOUNT\n\n # Calculate the total price\n order_item: OrderItem\n for order_item in self.orderitem_set.all():\n total_price += order_item.total_price\n\n return total_price",
"def get_total_price(self):\n i = self.get_copy_with_resolved_dependencies()\n total_price = Decimal(0)\n for product in i['products']:\n billed_price = Decimal(str(product.get('price', 0))) * Decimal(str(product.get('quantity')))\n total_price += billed_price\n return total_price",
"def get_total_quote_price(self):\n return self.quoteitem_set.all().annotate(\n total_quote_price=F('price') * F('quantity')).aggregate(\n Sum('total_quote_price'))['total_quote_price__sum']",
"def get_cart_items(request):\n return CartItem.objects.filter(cart_id=_cart_id(request))"
] | [
"0.78944373",
"0.7428659",
"0.74166524",
"0.7226727",
"0.6956555",
"0.66739833",
"0.6626357",
"0.6577171",
"0.65215605",
"0.6469818",
"0.64654446",
"0.644342",
"0.6432942",
"0.6390239",
"0.6378628",
"0.62492317",
"0.6200687",
"0.6190806",
"0.61329013",
"0.6089602",
"0.6088345",
"0.6077698",
"0.60203373",
"0.592589",
"0.58993036",
"0.58972067",
"0.58944726",
"0.5881013",
"0.5843555",
"0.58433485"
] | 0.7810109 | 1 |
Test if this cart is empty. | def is_empty(self):
return self.id is None or self.nb_cart_items == 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_shopping_cart_is_empty(self):\n response = self.client.get(self.SHOP_CART_URL)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"Your shopping cart is empty.\")\n self.assertQuerysetEqual(response.context['contents'], [])",
"def test_shopping_cart_not_empty(self):\n expected_contents = self.fill_session_cart()\n response = self.client.get(self.SHOP_CART_URL)\n self.assertEqual(response.context['contents'], expected_contents)",
"def verify_if_basket_is_empty(self):\n self._basket.verify_if_basket_is_empty()",
"def is_empty(self):\n return self.items == []",
"def is_empty(self):\n\n return self.items == []",
"def is_empty(self):\n\n return self.items == []",
"def is_empty(self):\n return self.amount == 0",
"def is_empty(self):\n return self._items == []",
"def is_empty(self) -> bool:\n if self.num_items == 0:\n return True\n else:\n return False",
"def test_shopping_cart_is_empty(self):\n self.login_browser_user()\n url = self.live_server_url + reverse('orders:shopping_cart')\n self.browser.get(url)\n\n self.assertEqual(\n self.browser.find_element_by_tag_name('p').text,\n \"Your shopping cart is empty.\"\n )",
"def is_empty(self) -> bool:\n return self._items == []",
"def is_empty(self):\n return len(self.items) == 0",
"def is_empty(self):\n length = len(self.items)\n if length != 0:\n return False\n else:\n return True",
"def is_empty(self):\n if len(self.items) == 0:\n return True\n else:\n return False",
"def is_empty(self):\n return self.count == 0",
"def is_empty(self):\n return self.count == 0",
"def is_empty(self):\n\n return self.count == 0",
"def is_empty(self):\n return len(self._items) == 0",
"def is_empty(self):\n return len(self._items) == 0",
"def is_Empty(self):\n return self.size == 0",
"def its_empty(self) -> bool:\n return self.items == []",
"def is_empty(self) -> bool:\n return len(self._items) == 0",
"def is_empty(self):\n return self.size() == 0",
"def is_empty(self):\n return self.size() == 0",
"def is_empty( self ):\n \n return len(self.__deck) == 0",
"def is_empty(self):\n return self.size == 0",
"def is_empty(self):\n return self.size == 0",
"def is_empty(self):\n return self.size == 0",
"def is_empty(self):\n return self.size == 0",
"def is_empty(self):\n return self.size == 0"
] | [
"0.8263173",
"0.7682502",
"0.76717985",
"0.7634377",
"0.7626435",
"0.7626435",
"0.76094395",
"0.7590669",
"0.7586988",
"0.75739545",
"0.75577635",
"0.75392646",
"0.7537761",
"0.7518471",
"0.74757564",
"0.74757564",
"0.74580145",
"0.7455165",
"0.7455165",
"0.7435698",
"0.74259835",
"0.741631",
"0.74020284",
"0.74020284",
"0.7401751",
"0.7387082",
"0.7387082",
"0.7387082",
"0.7387082",
"0.7387082"
] | 0.8744867 | 0 |
Runs experiment using DP, QL or both. Creates new directory automatically Save result summary to summary file | def run_Experiment(DP = None, QL = None):
# Path information
output_path, exp_num = create_new_dir() #dirs Exp/1, Exp/2, ...
DP_path = join(output_path,'DP') #dirs Exp/1/DP
QL_path = join(output_path,'QL') #dirs Exp/1/QL
print("************ Exp ", exp_num, "************ \n")
# Exp_summary_data
method = get_method_str(DP, QL)
exp_summary = [str(exp_num), method]
# Run DP
if DP != None:
print("In Runner: Executing DP !!")
prob_file = DP[0]
createFolder(DP_path)
# output_params = [V_so, mean, variance, bad_count]
output_params = run_DP(setup_grid_params, prob_file, output_file, DP_path, threshold = threshold)
"""CHANGE ARGUMENT if return order of setup_grid() is changed"""
input_params = setup_grid_params[9].copy()
input_params.append(prob_file)
exp_summary = append_params_to_summary(exp_summary, input_params, output_params)
append_summary_to_summaryFile('Experiments/Exp_summary.csv', exp_summary)
print("In Runner: Executing DP Finished!!")
# Run QL
if QL != None:
print("In Runner: Executing QL !!")
QL_params = QL
createFolder(QL_path)
output_parameters_all_cases = run_QL(setup_grid_params, QL_params, QL_path, exp_num)
# run_QL(setup_grid_params, QL_params, QL_path)
print("In Runner: Executing QL Finished !!") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_test(self):\n\n # populate *_ps sets\n self.enter_project_file()\n\n # populate *_dir sets\n self.enter_directories()\n\n # The files in the directories makes up the largest possible set of files\n self.result_files = self.result_files_dir\n self.design_files = self.design_files_dir\n self.design_space_files = self.design_space_files_dir\n self.test_bench_files = self.test_bench_files_dir\n\n # populate *_ms sets\n self.enter_meta_results_file()\n\n # populate *_OK sets\n self.check_analysis_status()\n\n df = {'design_files_dir' : list(self.design_files_dir),'design_files_pr' : list(self.design_files_pr),\n 'design_files_ms' : list(self.design_files_ms), 'design_files_OK' : list(self.design_files_OK)}\n\n ds = {'design_space_files_dir' : list(self.design_space_files_dir),\n 'design_space_files_pr' : list(self.design_space_files_pr)}\n\n rs = {'result_files_dir' : list(self.result_files_dir), 'result_files_ms' : list(self.result_files_ms),\n 'result_files_OK' : list(self.result_files_OK)}\n\n tb = {'test_bench_files_dir' : list(self.test_bench_files_dir),\n 'test_bench_files_ms' : list(self.test_bench_files_ms)}\n\n srl = SummaryReportsLinks(self.result_files_dir)\n\n lf = {'files_linked_from_sum_reps' : srl.get_files(),\n 'folders_linked_from_sum_reps' : srl.get_folders()}\n\n # 'test_bench_files_pr' : list(self.test_bench_files_pr),\n \n json_test = {'design_files' : df, 'design_space_files' : ds, 'result_files' : rs,\n 'test_bench_files' : tb, 'stat_files' : self.stat_files,\n 'files_linked_from_sum_reps' : lf}\n\n with open('test_run.json','wb') as f_out:\n json.dump(json_test, f_out, indent=4)",
"def main():\n\n # set up output directory and file\n output_file_folder = \"output/{}\".format(args.experiment_name)\n Path(output_file_folder).mkdir(parents=True, exist_ok=True)\n args.output_file_name = \"{}/{}.csv\".format(output_file_folder, args.model_name)\n args.checkpoint_name = \"{}/{}.pt\".format(output_file_folder, args.model_name + \"_best_model\")\n\n # read lcquad merged data\n if args.dataset_name == \"lcquad\":\n df_train = pd.read_csv(\"./data/lcquad/gold_db/train_gold.csv\")\n df_valid = pd.read_csv(\"./data/lcquad/gold_db/valid_gold.csv\")\n df_test = pd.read_csv(\"./data/lcquad/gold_db/lcquad_test_sorted.csv\")\n args.gold_file_name = \"lcquad/lcquad_gt_5000.csv\"\n # elif args.dataset_name == \"qald9\":\n # df_train = pd.read_csv(\"./data/qald-9/train_gold.csv\")\n # df_valid = pd.read_csv(\"./data/qald-9/valid_gold.csv\")\n # df_test = pd.read_csv(\"./data/qald-9/test_gold.csv\")\n # args.gold_file_name = \"qald/qald_data_gt.csv\"\n # elif args.dataset_name == \"webqsp\":\n # df_train = pd.read_csv(\"./data/webqsp/train_gold.csv\")\n # df_valid = pd.read_csv(\"./data/webqsp/valid_gold.csv\")\n # df_test = pd.read_csv(\"./data/webqsp/test_gold.csv\")\n # args.gold_file_name = \"webqsp/webqsp_data_gt.csv\"\n\n train_data = read_data_file(df_train, device, \"train\")\n valid_data = read_data_file(df_valid, device, \"valid\")\n test_data = read_data_file(df_test, device, \"test\")\n\n # train model and evaluate\n if args.model_name == \"pure\":\n model = PureNameLNN(args.alpha, -1, False)\n elif args.model_name == \"ctx\":\n model = None\n elif args.model_name == 'type':\n model = None\n elif args.model_name == \"pure_ctx\":\n model = None\n elif args.model_name == \"pure_type\":\n model = None\n elif args.model_name == \"ctx_type\":\n model = None\n\n model = model.to(device)\n print(\"model: \", args.model_name, args.alpha)\n\n # training\n train(model, train_data, valid_data, test_data, args.checkpoint_name, args.num_epoch, args.margin, args.learning_rate)",
"def run_experiments(size, approach):\n\n # Create folder (if needed) where to store query evaluation logs and raw results\n if not os.path.isdir(f\"{path_reports}/eval-query-{current_run}\"):\n shell(f\"mkdir -p {path_reports}/eval-query-{current_run}\")\n\n # Create folder (if needed) where to store store evaluation logs and raw results\n if not os.path.isdir(f\"{path_reports}/eval-store-{current_run}\"):\n shell(f\"mkdir -p {path_reports}/eval-store-{current_run}\")\n\n # Helper function computing the path of a file/folder for the given query evaluation iteration \n def query_path(iteration, filename=None):\n folder = size[\"id\"] + \"_\" + str(iteration)\n return path_reports + \"/eval-query-\" + current_run + \"/\" + folder + (\"/\" + filename if filename != None else \"\") \n\n # Helper function computing the path of a file/folder for the given store evaluation test / cpu setting\n def store_path(test, cpu, filename=None):\n folder = size[\"id\"] + \"_\" + approach[\"id\"] + \"_\" + test + (\"_\" + cpu[\"id\"] if cpu != None else \"\")\n return path_reports + \"/eval-store-\" + current_run + \"/\" + folder + (\"/\" + filename if filename != None else \"\")\n \n # Determine whether partial traces and named graphs are supported\n partial = approach[\"supports_partial\"]\n graphs = approach[\"supports_graphs\"]\n \n # Skip setting if all data is available (check for presence of log files - delete them to repeat test)\n may_skip = (not sp_enable or not partial or not graphs or os.path.isfile(store_path(\"sp\", None, \"eval.log\")))\n may_skip = may_skip and (not sf_enable or os.path.isfile(store_path(\"sf\", None, \"eval.log\")))\n if query_enable and approach[\"id\"] == query_approach_id:\n for i in range(0, query_num_iterations):\n may_skip = may_skip and os.path.isfile(query_path(i, \"eval.log\"))\n for cpu in cpus:\n may_skip = may_skip and (not pp_enable or not partial or not graphs or os.path.isfile(store_path(\"pp\", cpu, \"eval.log\")))\n may_skip = may_skip and (not pf_enable or os.path.isfile(store_path(\"pf\", cpu, \"eval.log\")))\n if may_skip:\n return\n\n # Delete (if needed) and extract again the repository from its .tar.xz file, so to work on a clean repository (at the end of this test, the repository is no more clean)\n path_dump = path_data + \"/\" + size[\"id\"] + \"_\" + approach[\"id\"] + \".tar.lz\"\n path_repo = path_repos + \"/\" + size[\"id\"] + \"_\" + approach[\"id\"]\n if not os.path.isfile(path_dump):\n log(f\"Missing required file {path_dump}\")\n sys.exit()\n if os.path.isdir(path_repo):\n shell(f\"rm -Rf {path_repo}\")\n shell(f\"{cmd_plzip} -kdc {path_dump} | tar xf - -C {path_repos}\")\n\n # Locate the repository URL\n repo_url = f\"http://localhost:{server_port}/repositories/promo\"\n \n # Query test (if enabled)\n if query_enable and approach[\"id\"] == query_approach_id:\n for i in range(0, query_num_iterations):\n if not os.path.isfile(query_path(i, \"eval.log\")):\n shell(f\"mkdir -p {query_path(i)}\")\n shell(f\"taskset -a {query_taskset} {cmd_graphdb} start {path_repo}\")\n shell(f\"taskset -a {query_taskset} {cmd_mokip} eval-query -w -u {repo_url} -q {path_queries} -r {query_path(i)} \" +\n f\"| tee {query_path(i, 'eval.log')}\")\n shell(f\"taskset -a {query_taskset} {cmd_graphdb} stop {path_repo}\")\n\n # Sequential Partial test (to assess store times per trace and their components)\n if sp_enable and partial and graphs and not os.path.isfile(store_path(\"sp\", None, \"eval.log\")):\n shell(f\"mkdir -p {store_path('sp', None)}\")\n 
shell(f\"taskset -a {sp_taskset} {cmd_graphdb} start {path_repo}\")\n shell(f\"taskset -a {sp_taskset} {cmd_mokip} eval-store -d {path_data}/traces_pp.jsonl.gz \" + \n f\"-u {repo_url} -i {approach['inference']} -U REPLACE_GRAPH_PROTOCOL \" + \n f\"-o {path_ontology} --trace-namespace '{namespace_trace}' --graph-namespace '{namespace_graph}' \" +\n f\"-T {timeout} -r {store_path('sp', None)} -t 1 -w 50 -p -D \" + \n f\"| tee {store_path('sp', None, 'eval.log')}\")\n shell(f\"taskset -a {sp_taskset} {cmd_graphdb} stop {path_repo}\")\n\n # Parallel Partial (to assess throughput, varying # of CPU cores)\n for cpu in cpus:\n if pp_enable and partial and graphs and not os.path.isfile(store_path(\"pp\", cpu, \"eval.log\")):\n shell(f\"mkdir -p {store_path('pp', cpu)}\")\n shell(f\"taskset -a {cpu['taskset']} {cmd_graphdb} start {path_repo}\")\n shell(f\"taskset -a {cpu['taskset']} {cmd_mokip} eval-store -d {path_data}/traces_pp.jsonl.gz \" +\n f\"-u {repo_url} -i {approach['inference']} -U REPLACE_GRAPH_PROTOCOL \" + \n f\"-o {path_ontology} --trace-namespace '{namespace_trace}' --graph-namespace '{namespace_graph}' \" + \n f\"-T {timeout} -r {store_path('pp', cpu)} -t {max(2, cpu['num_threads'])} -w 50 -p -D \" + \n f\"| tee {store_path('pp', cpu, 'eval.log')}\")\n shell(f\"taskset -a {cpu['taskset']} {cmd_graphdb} stop {path_repo}\")\n\n # Sequential Full test (to assess store times per trace and their components)\n if sf_enable and not os.path.isfile(store_path(\"sf\", None, \"eval.log\")):\n shell(f\"mkdir -p {store_path('sf', None)}\")\n shell(f\"taskset -a {sf_taskset} {cmd_graphdb} start {path_repo}\")\n shell(f\"taskset -a {sf_taskset} {cmd_mokip} eval-store -d {path_data}/traces_sf.jsonl.gz \" + \n f\"-u {repo_url} -i {approach['inference']} -U {'APPEND' if graphs else 'APPEND_DEFAULT_GRAPH'} \" +\n f\"-o {path_ontology} --trace-namespace '{namespace_trace}' --graph-namespace '{namespace_graph}' \" +\n f\"-T {timeout} -r {store_path('sf', None)} -t 1 -w 50 \" + \n f\"| tee {store_path('sf', None, 'eval.log')}\")\n shell(f\"taskset -a {sf_taskset} {cmd_graphdb} stop {path_repo}\")\n\n # Parallel Full (to assess throughput where data is also deleted, varying # of CPU cores)\n for cpu in cpus:\n if pf_enable and not os.path.isfile(store_path(\"pf\", cpu, \"eval.log\")):\n update = \"APPEND\" if graphs else \"APPEND_DEFAULT_GRAPH\"\n shell(f\"mkdir -p {store_path('pf', cpu)}\")\n shell(f\"taskset -a {cpu['taskset']} {cmd_graphdb} start {path_repo}\")\n shell(f\"taskset -a {cpu['taskset']} {cmd_mokip} eval-store -d {path_data}/traces_pf_{cpu['id']}.jsonl.gz \" +\n f\"-u {repo_url} -i {approach['inference']} -U {'APPEND' if graphs else 'APPEND_DEFAULT_GRAPH'} \" + \n f\"-o {path_ontology} --trace-namespace '{namespace_trace}' --graph-namespace '{namespace_graph}' \" +\n f\"-T {timeout} -r {store_path('pf', cpu)} -t {max(2, cpu['num_threads'])} -w 50 \" + \n f\"| tee {store_path('pf', cpu, 'eval.log')}\")\n shell(f\"taskset -a {cpu['taskset']} {cmd_graphdb} stop {path_repo}\")\n\n # Drop the repository (both to save space and since it is not clean anymore)\n shell(f\"rm -Rf {path_repo}\")",
"def save(self, experiment_dir):\n date_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime())\n\n if self.eval_results is not None:\n # print(self.eval_results)\n assert isinstance(self.eval_results, dict)\n # present the dict in str form\n # res_str = ''.join(''.join(str(x) for x in tup) for tup in self.eval_results.items())\n\n self._path = os.path.join(\n experiment_dir, self.CHECKPOINT_DIR_NAME, date_time,\n )\n path = self._path\n\n if os.path.exists(path):\n shutil.rmtree(path)\n os.makedirs(path)\n\n torch.save(\n {'epoch': self.epoch, 'optimizer': self.optimizer},\n os.path.join(path, self.TRAINER_STATE_NAME)\n )\n torch.save(self.model, os.path.join(path, self.MODEL_NAME))\n\n # save parameters to txt\n txt_file = open(os.path.join(path, self.PARAMETERS), \"w\")\n\n txt_file.write(f\"ckpt name: '{date_time}'\\n\")\n txt_file.write(f\"epoch: {self.epoch}\\n\")\n\n if self.eval_results is not None: \n for key, value in self.eval_results.items():\n txt_file.write(str(key)+': '+str(value)+'\\n')\n # if 'acc' in self.eval_results:\n # txt_file.write(f\"acc: {self.eval_results['acc']}\\n\")\n # if 'p' in self.eval_results:\n # txt_file.write(f\"p: {self.eval_results['p']}\\n\")\n # if 'r' in self.eval_results:\n # txt_file.write(f\"r: {self.eval_results['r']}\\n\")\n # if 'f1' in self.eval_results:\n # txt_file.write(f\"f1: {self.eval_results['f1']}\\n\")\n \n txt_file.close()\n\n return path",
"def Finish(experiment, ln):\n # Move to the \"results\" folder within the experiment's home directory\n os.chdir(experiment[\"Folder\"] + \"results/\")\n # Make a folder of the best structures in each library\n list = os.listdir(\"./\")\n # If a \"best\" folder is not already in the \"results\" folder, make it\n if \"best\" not in list:\n os.mkdir(\"best\")\n # Move to the \"best\" folder\n os.chdir(\"best\")\n # Make a folder for the library\n os.mkdir(\"library\" + str(ln))\n os.chdir(\"library\" + str(ln))\n # Find the best iteration in the library's results folder\n folder = experiment[\"Folder\"] + \"library\" + str(ln) + \"/results/\"\n list = os.listdir(folder)\n best = 0\n # Go through the information in the \"results\" folder\n for name in list:\n if name.startswith(\"iteration\"):\n # Get the iteration number\n iteration = int(name[9:])\n # If it is higher than \"best\", then store its value\n if iteration > best:\n best = iteration\n # Copy the information from the \"best\" in that folder into the experiment's\n # home results folder\n folder += \"iteration\" + str(best) + \"/\"\n # List the files within this folder\n files = os.listdir(folder)\n # Copy each file to the experiment's results \"best\" folder\n for file in files:\n os.system(\"cp \" + folder + file + \" ./\") \n # List the sequence information and energy information in the summary file\n text = \"LIBRARY \" + str(ln) + \" RESULTS\\n\"\n # Gather the total number of groups to have their information output\n groups = len(experiment)\n # Create a list of all Target Molecules in the experiment\n target_molecules = []\n # Go through all of the Molecules in the experiment\n for molecule in experiment[0]:\n # If it is a Target Molecule\n if not molecule.design:\n # Then store it\n target_molecules.append(molecule.name)\n # Now gather all of the Design Molecules\n molecules = []\n # Go through the files\n for file in files:\n # If it is a Molecule File, get the name of the Molecule\n name = file.split(\".\")[0][-1]\n # If it is in the 1st Binding Assembly (to avoid redundancy), store it\n # if it is not in the list of Target Molecules, meaning it is a Design\n # Molecule\n if file.startswith(\"Group1_Molecule\") and name not in target_molecules:\n molecules.append(name)\n molecules.sort()\n # Create a Summary of the amino acids used within each CDR, as well as the\n # canonical structures used to make the CDRs\n # List the canonical structure information\n # Get the optimal set of canonical structures\n solution = experiment[\"Scores\"][ln-1][1]\n # Output the score\n canonical = \"The score for the set of canonical structures used is \"\n canonical += str(solution[\"Score\"]) + \"\\n\"\n # Store the position information for each of the CDRs\n ranges = {1: range(27, 39), 2: range(56, 66), 3: range(105, 118)}\n # Go thorugh each of the CDRs and output the canonical structure used\n associations = molecule_name_association(experiment, molecules)\n cdrs = list(associations.keys())\n cdrs.sort()\n # Store the sequence information in this string\n sequence = \"\"\n for num, cdr in enumerate(cdrs):\n # Add the canonical structure information\n canonical += \"The \" + cdr + \" CDR used canonical structure #\"\n canonical += str(solution[num+1]) + \"\\n\" \n # Get the appropriate Molecule for the CDR\n name = \"Group1_Molecule\" + associations[cdr] + \".pdb\"\n mol = MOLECULES.MoleculeFile(name)[0]\n # Go through all of the residues\n for res in mol:\n # Get its name so that its position may be extracted\n rName = res.name\n # If 
the name is composed of only digits\n if rName.isdigit():\n pass\n # If the last character is a letter\n elif rName[:-1].isdigit() and rName[-1].isalpha():\n rName = rName[:-1] \n # Convert the name to an integer\n rName = int(rName)\n # If this position lies within the CDR position, add its sequence\n # information\n if rName in ranges[int(cdr[-1])]:\n sequence += cdr + \" Residue \" + str(rName) + \" in Molecule \"\n sequence += mol.name + \": \" + res.kind + \"\\n\"\n # Store the Energy information\n energy = \"\"\n # Go through the Binding Assemblies\n for gn in range(1, groups + 1):\n # Open the Energy file\n name = \"Group\" + str(gn) + \"_Energies.txt\"\n f = open(name, \"r\")\n # Go through the file\n for line in f:\n # Split the line on white space\n items = line.split()\n # Add the text to the energy string\n energy += \"The \" + items[0] + \" \" + items[1][:-1] + \" of Design \"\n energy += \"Group \" + str(gn) + \" is \" + items[2] + \" kcal / mol\\n\" \n # Close the file\n f.close()\n # Change back to the Experiment's home directory\n os.chdir(experiment[\"Folder\"])\n # Add all of this information to the Summary file\n experiment[\"Summary\"] += canonical + sequence + energy + \"\\n\\n\"\n name = SHARING.summary_name(SHARING.get_current())\n f = open(name, \"a\")\n f.write(experiment[\"Summary\"])\n f.close() \n # Move the library to the results folder\n command = \"mv library\" + str(ln) + \" results/\" \n os.system(command)\n # If this is the final library, delete the SCORES.txt file\n if ln == experiment['Optcdr Libraries']:\n os.system(\"rm SCORES.txt\")",
"def main():\n # Parameters:\n parser = define_parser()\n args = parser.parse_args()\n # General:\n save_policy = args.save_policy\n verbose = args.verbose\n wb = args.wandb\n benchmark = args.benchmark\n # Training:\n total_timesteps = args.total_timesteps\n # DQN:\n batch_size = args.batch_size\n epsilon_0 = args.epsilon_0\n train_freq = args.train_freq\n discount_factor = args.gamma\n learning_rate = args.learning_rate\n epsilon_min = args.eps_min\n exploration_fraction = args.exploration_fraction\n buffer_size = args.buffer_size\n tau = args.tau\n update_interval = args.update_interval\n gradient_steps = args.gradient_steps\n min_exp = args.min_exp\n\n timestamp = datetime.now().strftime(\"%Y%m%d.%H%M%S\")\n random_tag = \"\".join(random.choices(string.ascii_lowercase + string.digits, k=8))\n run_id = f\"{timestamp}-{random_tag}\"\n\n # Define path for logs:\n log_dir = Path(args.log_dir).resolve().joinpath(run_id)\n # Create directory if not already existing:\n log_dir.mkdir(parents=True, exist_ok=True)\n\n config = {\n \"total_timesteps\": total_timesteps,\n \"batch_size\": batch_size,\n \"buffer_size\": buffer_size,\n \"min_exp\": min_exp,\n \"target_update_interval\": update_interval,\n \"exploration_fraction\": exploration_fraction,\n \"epsilon_0\": epsilon_0,\n \"epsilon_min\": epsilon_min,\n \"train_freq\": (train_freq, \"episode\"),\n \"discount_factor\": discount_factor,\n \"learning_rate\": learning_rate,\n \"tau\": tau,\n \"gradient_steps\": gradient_steps,\n }\n\n # Weights & Biases (https://wandb.ai):\n if wb:\n import wandb\n from wandb.integration.sb3 import WandbCallback\n\n os.environ[\"WANDB_DISABLE_GIT\"] = \"True\"\n run = wandb.init(\n project=\"simulink_gym\",\n group=\"simulink_cartpole_env\" if not benchmark else \"gym_cartpole_env\",\n job_type=\"examples\",\n tags=[\"DQN\"],\n sync_tensorboard=True,\n config=config,\n dir=log_dir,\n save_code=False,\n id=run_id,\n )\n callback = WandbCallback()\n else:\n callback = None\n\n # Create training environment:\n if not benchmark:\n env = CartPoleSimulink()\n else:\n import gym\n\n env = gym.make(\"CartPole-v1\")\n\n # Create learning agent:\n agent = DQN(\n \"MlpPolicy\",\n env,\n buffer_size=config[\"buffer_size\"],\n batch_size=config[\"batch_size\"],\n gamma=config[\"discount_factor\"],\n learning_rate=config[\"learning_rate\"],\n learning_starts=config[\"min_exp\"],\n target_update_interval=config[\"target_update_interval\"],\n exploration_fraction=config[\"exploration_fraction\"],\n exploration_initial_eps=config[\"epsilon_0\"],\n exploration_final_eps=config[\"epsilon_min\"],\n train_freq=config[\"train_freq\"],\n tau=config[\"tau\"],\n gradient_steps=config[\"gradient_steps\"],\n verbose=verbose,\n tensorboard_log=str(log_dir),\n )\n\n # Train agent:\n agent.learn(\n total_timesteps=config[\"total_timesteps\"],\n log_interval=4,\n callback=callback,\n progress_bar=True,\n )\n\n # Save policy:\n if save_policy:\n policy = agent.policy\n policy.save(f\"{log_dir}/learned_policy\")\n\n env.close()\n\n if wb:\n run.finish()",
"def save_run_data(path_to_dir, hp):\n print('Saving run data to: {}'.format(path_to_dir))\n if os.path.isdir(path_to_dir):\n print('Data already exists in this directory (presumably from a previous run)')\n inp = input('Enter \"y\" if you are sure you want to remove all the old contents: ')\n if inp == 'y':\n print('Removing old contents')\n shutil.rmtree(path_to_dir)\n else:\n print('Exiting')\n raise SystemExit\n print('Creating directory and saving data')\n os.mkdir(path_to_dir)\n\n # Redirect stdout (print statements) to file\n # if not hp.debug:\n # sys.stdout = FlushFile(open(os.path.join(path_to_dir, 'stdout.txt'), 'w'))\n\n # Save snapshot of code\n snapshot_dir = os.path.join(path_to_dir, 'code_snapshot')\n if os.path.exists(snapshot_dir): # shutil doesn't work if dest already exists\n shutil.rmtree(snapshot_dir)\n copy_tree_ignore_except('.', snapshot_dir)\n\n # Save hyperparms\n save_file(vars(hp), os.path.join(path_to_dir, 'hp.json'), verbose=True)\n\n # Save some command used to run, start time\n with open(os.path.join(path_to_dir, 'run_details.txt'), 'w') as f:\n f.write('Command:\\n')\n cmd = ' '.join(sys.argv)\n start_time = datetime.now().strftime('%B%d_%H-%M-%S')\n f.write(cmd + '\\n')\n f.write('Start time: {}'.format(start_time))\n print('Command used to start program:\\n', cmd)\n print('Start time: {}'.format(start_time))",
"def run(self):\n\n self.preprocess()\n self.restore_ratings()\n self.prepare_UI()\n self.loop_through_units()\n self.cleanup()\n\n print('\\nAll Done - results are available in:\\n\\t{}'.format(self.out_dir))",
"def data_abex_results_dir(experiment_name: str) -> Path: # pragma: no cover\n return experiment_dir(experiment_name) / \"Results\"",
"def setup_experiment(name: str, cfg: DictConfig, remove_if_exists: bool = False):\n # Create result directory\n results_dir = make_results_dir(\n results_dir=cfg.results_dir,\n experiment_name=name,\n tag=cfg.tag,\n dataset_name=cfg.dataset,\n remove_if_exists=remove_if_exists,\n )\n # Save args to file\n # save_args(results_dir, cfg)\n\n # Save args to file\n print(f\"Results directory: {results_dir}\")\n seed_all_rng(cfg.seed)\n cudnn.benchmark = True\n return results_dir, cfg",
"def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()",
"def main(path_gt, path_pred, eval_dir):\n\n if not os.path.exists(eval_dir):\n os.makedirs(eval_dir)\n\n if os.path.isdir(path_gt) and os.path.isdir(path_pred):\n\n metrics_out, phase, measure_names, file_names = compute_metrics_on_directories_raw(path_gt, path_pred)\n df = mat_to_df(metrics_out, phase, measure_names, file_names)\n print_stats(df)\n print_table1(df, eval_dir)\n print_table2(df, eval_dir)\n\n [dice1, dice2, dice3, vold1, vold2, vold3] = compute_metrics_on_directories(path_gt, path_pred)\n\n logging.info('------------Average Dice Figures----------')\n logging.info('Dice 1: %f' % dice1)\n logging.info('Dice 2: %f' % dice2)\n logging.info('Dice 3: %f' % dice3)\n logging.info('Mean dice: %f' % np.mean([dice1, dice2, dice3]))\n logging.info('------------------------------------------')\n\n else:\n raise ValueError(\n \"The paths given needs to be two directories or two files.\")",
"def main(config):\n save_main_dir = config[\"save_main_dir\"]\n\n if not os.path.isdir(save_main_dir):\n print(\"{} does not exist, creating it now...\", save_main_dir)\n pathlib.Path(save_main_dir).mkdir(parents=True, exist_ok=True)\n\n app = inviwopy.app\n network = app.network\n \n # Resize the canvas to improve rendering speed, only affects visual output\n if config[\"should_resize\"]:\n ivw_helpers.set_canvas_sizes(128, 128)\n\n hdf5_path = os.path.join(save_main_dir, config[\"hdf5_name\"])\n with h5py.File(hdf5_path, mode='w', libver='latest') as hdf5_file:\n hdf5_file.swmr_mode = True\n create_hdf_storage(hdf5_file, config)\n count = {\"train\": 0, \"val\": 0}\n for set_type in 'train', 'val':\n capture_lf_samples(hdf5_file, set_type, config, network, count) \n print(\"Finished writing to HDF5 in {}\".format(hdf5_path))",
"def driver(rootdir, destination, dataset_name):\n global metric_result \n global result\n metric_result = {\"query image\": [], \n \"k\": [], \n \"precision for k = 3\": [], \n \"reciprocal rank for k = 3\": [],\n \"precision for k = 5\": [], \n \"reciprocal rank for k = 5\": [], \n \"precision for k = 7\": [],\n \"reciprocal rank for k = 7\": [], \n \"time in seconds\": []}\n \n siamese_model = get_siamese(input_shape=(1, 48, 48))\n siamese_model.summary()\n APlist_3 = []\n RRlist_3 = []\n APlist_5 = []\n RRlist_5 = []\n APlist_7 = []\n RRlist_7 = []\n # destination = \"..\\\\result\\\\seamese_net_avg_images_seed_np_2_tf_2\\\\\" # + subdir1.split(\"\\\\\")[-1]\n \n \n for subdir1, dirs1, files1 in os.walk(rootdir):\n start = time.time()\n query1_name = subdir1.split(\"\\\\\")[-1]\n \n os.makedirs(destination, exist_ok=True)\n \n query1_average_image_time_start = time.time()\n query1 = averageImage(subdir1)\n query1_average_image_time_end = time.time()\n \n result = {\"query1\": [], \"query2\":[], \"size\": [], \"siamese_distance\": [], \"average_image_time_query1\": [], \"average_image_time_query2\": [], \"patch_retrieval_time\": [], \"image_comparison_time\": [],\"total_time\": []}\n \n \n if not subdir1.endswith(\"\\\\\"+ dataset_name +\"\\\\\"):\n for subdir2, dirs2, files2 in os.walk(rootdir):\n if not subdir2.endswith(\"\\\\\"+ dataset_name +\"\\\\\"):\n if (subdir1 != subdir2):\n \n start_per_image = time.time()\n \n query2_name = subdir2.split(\"\\\\\")[-1]\n # print(subdir1, subdir2)\n \n query2_average_image_time_start = time.time()\n query2 = averageImage(subdir2)\n query2_average_image_time_end = time.time()\n\n siamese_distance = compare(siamese_model, query1, query2)\n # print(\"siamese_distance between {} and {} value : {}\".format(query1_name, query2_name, siamese_distance))\n end_per_image = time.time()\n \n result[\"query1\"].append(query1_name)\n result[\"query2\"].append(query2_name)\n result[\"size\"].append((496, 512))\n result[\"siamese_distance\"].append(siamese_distance)\n result[\"average_image_time_query1\"].append(query1_average_image_time_end - query1_average_image_time_start)\n result[\"average_image_time_query2\"].append(query2_average_image_time_end - query2_average_image_time_start)\n result[\"total_time\"].append(end_per_image - start_per_image)\n \n #save result tp csv file sorted w.r.t siamese_distance\n df = pd.DataFrame(data=result)\n df = df.sort_values(by=[\"siamese_distance\"])\n df.to_csv(destination + \"\\\\\" + query1_name +\".csv\")\n \n APlist_3.append(calculateAvgPrecision(df, 3))\n RRlist_3.append(calculateReciprocalRank(df, 3))\n \n APlist_5.append(calculateAvgPrecision(df, 5))\n RRlist_5.append(calculateReciprocalRank(df, 5))\n \n APlist_7.append(calculateAvgPrecision(df, 7))\n RRlist_7.append(calculateReciprocalRank(df, 7))\n \n # print(APlist, RRlist)\n end = time.time()\n metric_result[\"query image\"].append(query1_name)\n metric_result[\"k\"].append(\"3, 5, 7\")\n metric_result[\"precision for k = 3\"].append(calculateAvgPrecision(df, 3))\n metric_result[\"reciprocal rank for k = 3\"].append(calculateReciprocalRank(df, 3))\n \n metric_result[\"precision for k = 5\"].append(calculateAvgPrecision(df, 5))\n metric_result[\"reciprocal rank for k = 5\"].append(calculateReciprocalRank(df, 5))\n \n metric_result[\"precision for k = 7\"].append(calculateAvgPrecision(df, 7))\n metric_result[\"reciprocal rank for k = 7\"].append(calculateReciprocalRank(df, 7))\n metric_result[\"time in seconds\"].append((end - start))\n \n print(\"Average 
Precision (AP) considering K = 3 : {}\".format(sum(APlist_3)/len(APlist_3)))\n print(\"Reciprocal Rank (RR) considering K = 3 : {}\".format(sum(RRlist_3)/len(RRlist_3)))\n \n print(\"Average Precision (AP) considering K = 5 : {}\".format(sum(APlist_5)/len(APlist_5)))\n print(\"Reciprocal Rank (RR) considering K = 5 : {}\".format(sum(RRlist_5)/len(RRlist_5)))\n \n print(\"Average Precision (AP) considering K = 7 : {}\".format(sum(APlist_7)/len(APlist_7)))\n print(\"Reciprocal Rank (RR) considering K = 7 : {}\".format(sum(RRlist_7)/len(RRlist_7)))\n \n metric_result[\"query image\"].append(\"Average AP and Average RR\")\n metric_result[\"k\"].append(\"3, 5, 7\")\n metric_result[\"precision for k = 3\"].append(sum(APlist_3)/len(APlist_3))\n metric_result[\"reciprocal rank for k = 3\"].append(sum(RRlist_3)/len(RRlist_3))\n \n metric_result[\"precision for k = 5\"].append(sum(APlist_5)/len(APlist_5))\n metric_result[\"reciprocal rank for k = 5\"].append(sum(RRlist_5)/len(RRlist_5))\n \n metric_result[\"precision for k = 7\"].append(sum(APlist_7)/len(APlist_7))\n metric_result[\"reciprocal rank for k = 7\"].append(sum(RRlist_7)/len(RRlist_7))\n \n metric_result[\"time in seconds\"].append(sum(metric_result[\"time in seconds\"]))\n\n\n MAP = (sum(APlist_3)/len(APlist_3) + sum(APlist_5)/len(APlist_5) + sum(APlist_7)/len(APlist_7))/3\n MRR = (sum(RRlist_3)/len(RRlist_3) + sum(RRlist_5)/len(RRlist_5) + sum(RRlist_7)/len(RRlist_7))/3\n \n metric_result[\"query image\"].append(\"MAP and MRR\")\n metric_result[\"k\"].append(\"3, 5, 7\")\n metric_result[\"precision for k = 3\"].append(MAP)\n metric_result[\"reciprocal rank for k = 3\"].append(MRR)\n \n metric_result[\"precision for k = 5\"].append(0)\n metric_result[\"reciprocal rank for k = 5\"].append(0)\n \n metric_result[\"precision for k = 7\"].append(0)\n metric_result[\"reciprocal rank for k = 7\"].append(0)\n \n \n metric_result[\"time in seconds\"].append(0)\n \n \n metric_df = pd.DataFrame(data=metric_result)\n metric_df.to_csv(destination + \"\\\\\" + \"CBIR metric.csv\")\n \n del siamese_model\n return MAP, MRR",
"def build_result_folder(timestamp=str(int(time.time()))):\n out_path = os.path.abspath(os.path.join(os.path.curdir, \"runs\", timestamp))\n print(\"Writing to {}\\n\".format(out_path))\n\n data_path = os.path.abspath(os.path.join(out_path, \"data\"))\n evaluation_path = os.path.abspath(os.path.join(out_path, \"evaluation\"))\n\n if not os.path.exists(out_path):\n os.makedirs(data_path)\n os.makedirs(evaluation_path)\n return out_path",
"def synthesize_results(parent_dir):\r\n # Aggregate metrics from args.parent_dir directory\r\n metrics = dict()\r\n aggregate_metrics(parent_dir, metrics) # metrics在函数里面被修改\r\n #\r\n table = metrics_to_table(metrics)\r\n # Display the table to terminal\r\n print(table)\r\n\r\n # Save results in parent_dir/results.md\r\n save_file = os.path.join(parent_dir, \"results.md\")\r\n with open(save_file, 'w') as f:\r\n f.write(table)",
"def main():\n parser = get_parser()\n options = get_options(parser)\n\n # Set up the logger.\n logger = logging.getLogger(consts.MAIN)\n logger.setLevel(logging.DEBUG if options[consts.DEBUG] else logging.INFO)\n file_handler = logging.FileHandler(os.path.join(options[consts.EXPORT_DIR], 'log.txt'), mode='w')\n logger.addHandler(file_handler)\n console_handler = logging.StreamHandler()\n logger.addHandler(console_handler)\n\n # Log the options given through the command-line arguments.\n logger.info('options: {}'.format(str(options)))\n\n experiment_id = 0\n status_path = os.path.join(options[consts.EXPORT_DIR], \"status.pickle\")\n # Check if the execution is a new one or a resumption of a previous experiment.\n if not options[consts.CONTINUE]:\n # Set up a new execution.\n options_path = os.path.join(options[consts.EXPORT_DIR], 'options.pickle')\n with open(options_path, 'wb') as file:\n pickle.dump(options, file)\n best_experiment_test_score = -float('inf')\n best_experiment_id = -1\n best_epoch_num = -1\n best_config = None\n status = 'working'\n with open(status_path, 'wb') as file:\n pickle.dump([best_experiment_test_score, best_experiment_id, best_config, status], file)\n with open(os.path.join(options[consts.EXPORT_DIR], 'id'), 'w') as file:\n file.write(experiments.experiment.execution_identifier)\n else:\n # Load the old execution from the export directory.\n epoch_stamp_path = os.path.join(options[consts.EXPORT_DIR], \"epoch_stamp.pickle\")\n with open(epoch_stamp_path, 'rb') as file:\n dictionary = pickle.load(file)\n with open(status_path, 'rb') as file:\n best_experiment_test_score, best_experiment_id, best_epoch_num, best_config, status = pickle.load(file)\n with open(os.path.join(options[consts.EXPORT_DIR], 'id'), 'r') as file:\n experiments.experiment.execution_identifier = file.read()\n\n # Check if the execution is still in progress. 
This check should fail when an ended execution is resumed.\n if status == 'working':\n # Iterate through the different configurations of hyperparameters ad create an experiment for each.\n for config in iterate_configs(parser, options):\n # If this a resumed execution, check if this experiment has already had finished.\n if options[consts.CONTINUE] and experiment_id < dictionary[consts.EXPERIMENT_ID]:\n experiment_id += 1\n continue\n # If this a resumed execution and this is the experiment that was running when the last checkpoint was\n # created.\n elif options[consts.CONTINUE] and experiment_id == dictionary[consts.EXPERIMENT_ID]:\n # Log the configurations of the present experiment.\n logger.info('continuing on config: {}'.format(str(config)))\n checkpoint_dir = os.path.join(config[consts.EXPORT_DIR],\n \"checkpoints\",\n \"experiment_%09d\" % experiment_id,\n \"epoch_%09d\" % dictionary[consts.EPOCH_NUMBER])\n # Create an experiment for the configuration at hand.\n experiment = Experiment(config=config, experiment_id=experiment_id,\n load_from_directory=checkpoint_dir)\n # If this is a new experiment.\n else:\n logger.info('starting on config: {}'.format(str(config)))\n # Create an experiment for the configuration at hand.\n experiment = Experiment(config=config, experiment_id=experiment_id)\n\n # Run the present experiment.\n experiment_test_score = experiment.run()\n\n # Record the results of the experiment and compare them to the results so far.\n logger.info('Experiment {} test score: {}'.format(experiment_id, experiment_test_score))\n if experiment_test_score > best_experiment_test_score:\n best_experiment_test_score = experiment_test_score\n best_experiment_id = experiment_id\n best_epoch_num = experiment.best_epoch_number\n best_config = config\n\n # Store the best results so far in a file.\n with open(status_path, 'wb') as file:\n pickle.dump([best_experiment_test_score, best_experiment_id, best_epoch_num, best_config, status],\n file)\n experiment_id += 1\n\n # Mark the execution as over.\n status = 'ended'\n\n # Store the best results in a file.\n with open(status_path, 'wb') as file:\n pickle.dump([best_experiment_test_score, best_experiment_id, best_epoch_num, best_config, status], file)\n \n # Report the best results.\n logger.info('Execution is over. Best experiment test score: {}'\n '\\nBest experiment config: {}'.format(best_experiment_test_score, str(best_config)))",
"def test(self):\n pdb_path = join_path(self.test_suite.current_test_cache_dir, \"pdb\")\n self.run_test(\n \"mkdssp\",\n options=[\"1ALK.pdb\", \"1alk.dssp\"],\n purpose=\"test: calculating structure for example\",\n installed=True,\n work_dir=pdb_path,\n )",
"def main() -> int:\n parser = argparse.ArgumentParser()\n parser.add_argument('--dir-metadata',\n type=pathlib.Path, required=True)\n\n args = parser.parse_args()\n\n with LockedMetadata(args.dir_metadata, __file__) as md:\n summary_dict = {}\n passing_tests = []\n failing_tests = []\n for f in md.tests_pickle_files:\n try:\n trr = TestRunResult.construct_from_pickle(f)\n summary_dict[f\"{trr.testname}.{trr.seed}\"] = \\\n ('PASS' if trr.passed else\n 'FAILED' + (\" {T}\" if (trr.failure_mode == Failure_Modes.TIMEOUT) else \"\"))\n if trr.passed:\n passing_tests.append(trr)\n else:\n failing_tests.append(trr)\n except RuntimeError as e:\n failing_tests.append(\n TestRunResult(\n name='broken_test',\n failure_message=str(e)\n ))\n\n md.regr_log = md.dir_run/'regr.log'\n md.regr_log_junit = md.dir_run/'regr_junit.xml'\n md.regr_log_junit_merged = md.dir_run/'regr_junit_merged.xml'\n\n # Write results as junit_xml\n with open(md.regr_log_junit,\n 'w',\n encoding='UTF-8') as junit_xml,\\\n open(md.regr_log_junit_merged,\n 'w',\n encoding='UTF-8') as junit_merged_xml:\n output_run_results_junit_xml(passing_tests, failing_tests,\n junit_xml,\n junit_merged_xml)\n\n with open(md.regr_log, 'w', encoding='UTF-8') as outfile:\n # Write results as regr.log (custom logfile format)\n output_results_text(passing_tests, failing_tests, summary_dict,\n outfile)\n\n test_summary_dict = create_test_summary_dict(passing_tests +\n failing_tests)\n\n cov_summary_dict = {}\n if md.simulator == \"xlm\":\n cov_summary_dict = create_cov_summary_dict(md)\n else:\n print(\"Warning: Not generating coverage summary, unsupported \" \\\n f\"simulator {md.simulator}\")\n\n html_report_filename = md.dir_run/'report.html'\n with open(html_report_filename, 'w') as outfile:\n output_results_html(md, passing_tests + failing_tests,\n test_summary_dict, cov_summary_dict, outfile)\n\n json_report_filename = md.dir_run/'report.json'\n with open(json_report_filename, 'w') as json_report_file:\n output_results_dvsim_json(md, test_summary_dict, cov_summary_dict,\n json_report_file)\n\n svg_summary_filename = md.dir_run/'summary.svg'\n with open(svg_summary_filename, 'w') as svg_summary_file:\n output_results_svg(test_summary_dict, cov_summary_dict,\n svg_summary_file)\n\n # Print a summary line to the terminal\n print(gen_summary_line(passing_tests, failing_tests))\n\n # Succeed if no tests failed\n return 1 if failing_tests else 0",
"def do_dir(arguments):\n #print(\"Outputting in directory: \" + dsum)\n \n if not os.path.exists(arguments.file_pathout): \n os.mkdir(arguments.file_pathout)\n\n num = 0\n detected = 0\n fileCount = 0\n zero_image = 0\n bad_image = 0\n bad_image_paths = []\n\n # debug/verbose\n if arguments.v:\n print('DEBUG: shape=%g area=%g contour=%g' % (arguments.shape,arguments.area,arguments.contour))\n \n ffs = glob.glob(arguments.file_pathin+'/*.FIT') + glob.glob(arguments.file_pathin+'/*.fit') + \\\n glob.glob(arguments.file_pathin+'/*.FTS') + glob.glob(arguments.file_pathin+'/*.fts') + \\\n glob.glob(arguments.file_pathin+'/*.FITS') + glob.glob(arguments.file_pathin+'/*.fits')\n ffs = list(set(ffs)) # needed for dos\n ffs.sort() # on linux wasn't sorted, on dos it was \n f = open(arguments.file_pathout+'/summary.txt','w') # Creates summary text file\n f.write('Streaks found in files: \\n') #Creates first line for summary file\n\n sf = arguments.start_frame\n ef = arguments.end_frame\n \n if sf <= 0:\n sf = 1\n \n if ef <= 0 or ef > len(ffs):\n ef = len(ffs)\n \n if ef < sf:\n temp = sf\n sf = ef\n ef = temp\n\n print('Processing %d files from %d to %d' % ((ef-sf+1), sf, ef))\n for ff in ffs[sf-1:ef]:\n # creates directory one directory back from the folder which contains fits files\n \n num = do_one(ff,arguments.file_pathout+'/'+ff[ff.rfind(os.sep)+1:ff.rfind('.')],arguments.shape,arguments.area,arguments.contour)\n \n \n if num == 0:\n zero_image += 1\n elif num < 0:\n bad_image += 1\n bad_image_paths.append(ff)\n else:\n detected += int(num) #Counter of how many streaks detected\n f.write(ff + '\\n') \n fileCount += 1 #Counter for how many files analyzed \n print(\"\\n\")\n # Produce and write summary file \n f.write('\\n' 'Files analyzed: ' + str(fileCount)+ '\\n' )\n f.write('Streaks detected: ' + str(detected) + '\\n' )\n f.write('Files with no detections: ' + str(zero_image) + '\\n')\n f.write('Bad files: ' + str(bad_image)+ '\\n')\n \n temp_string = \"\\n\"\n temp_string = temp_string.join(bad_image_paths)\n f.write(temp_string)\n \n f.write('\\n\\n')\n\n if arguments.diff:\n f.write('Streaks found in Files: \\n')\n num = 0\n detected = 0\n fileCount = 0\n zero_image = 0\n bad_image = 0\n bad_image_paths = []\n dfs = []\n# print('Computing %d differences' % (ef-sf+1))\n for i in range(len(ffs)-1):\n dfs.append(arguments.file_pathout+'/'+ffs[i+1][len(arguments.file_pathin):]+'DIFF')\n# mk_diff(ffs[i],ffs[i+1],dfs[i],v)\n \n if sf <= 0:\n sf = 1\n\n if ef <= 0 or ef > len(dfs):\n ef = len(dfs)\n \n if ef <= sf:\n temp = sf\n sf = ef\n ef = temp\n\n print('Processing %d files from %d to %d' % ((ef-sf+1), sf, ef))\n i = sf-1\n for df in dfs[sf-1:ef]:\n try:\n mk_diff(ffs[i],ffs[i+1],dfs[i],arguments.v)\n # num = do_one(df,dsum+'/'+df[df.rfind(os.sep)+1:df.rfind('.')],shape,area,contour)\n #diff_file = dsum+'/'+df[df.rfind(os.sep)+1:df.find('.')]+'DIFF'\n \n #directory one directory back\n new_dir = arguments.file_pathout+'/'+df[df.rfind(os.sep)+1:df.rfind('.')]+'DIFF'\n num = do_one(df,new_dir,arguments.shape,arguments.area,arguments.contour)\n os.remove(df)\n \n except:\n num=-1\n sys.stdout.write('X')\n \n\n\n if num == 0:\n zero_image += 1\n elif num < 0:\n bad_image += 1\n bad_image_paths.append(df)\n else:\n detected += int(num) #Counter of how many streaks detected\n f.write(df + '\\n') \n fileCount += 1 #Counter for how many files analyzed \n i += 1\n print(\"\\n\")\n # Produce and write summary file \n f.write('\\n' 'Files analyzed: ' + str(fileCount)+ '\\n' )\n f.write('Streaks 
detected: ' + str(detected) + '\\n' )\n f.write('Files with no detections: ' + str(zero_image) + '\\n')\n f.write('Bad files: ' + str(bad_image)+ '\\n')\n\n temp_string = \"\\n\"\n temp_string = temp_string.join(bad_image_paths)\n f.write(temp_string)\n\n f.close()\n else:\n f.close()",
"def qa_test():\r\n # Reads Code and Runs Code Metrics\r\n with open(\"BrainDataVisualiser.py\",\"r\") as file:\r\n code = file.read()\r\n with open(\"QA_LOGS.txt\",\"a\") as file:\r\n # Timestamp and append metric results to log\r\n file.write(datetime.date.today().strftime(\"%b-%d-%Y\")+\"\\n\\t\")\r\n file.write(\"General Analysis\\n\\t\\t\")\r\n file.write(str(analyze(code))+\"\\n\\t\")\r\n file.write(\"Cyclomatic Complexity\\n\")\r\n for i in cc_visit(code):\r\n file.write(\"\\t\\t\"+cc_rank(i.complexity)+\" \"+str(i)+\"\\n\")",
"def _create_result_directory(self):\n\t\tFileSystem.create_dir(self._result_directory_name)\n\t\tFileSystem.create_dir(self._result_directory_name + \"/\" + \"Log\")\n\t\tFileSystem.create_dir(self._result_directory_name + \"/\" + \"Dump\")",
"def __create_folder(self, stamp_unique=True):\n if 'path_out' not in self.params:\n raise ValueError('missing \"path_out\" among %r' % self.params.keys())\n # create results folder for experiments\n path_exp = create_experiment_folder(\n self.params.get('path_out'), self.__class__.__name__, self.params.get('name'), stamp_unique\n )\n self.params['path_exp'] = path_exp\n save_config_yaml(os.path.join(path_exp, self.NAME_CONFIG_YAML), self.params)",
"def emPerformanceTest(filesAndDirectories='None', resultsFileName='None', options='None'):\n\n pass",
"def main():\n\t# \"\"\"\n\t# \tMain function of test python module\n\t# \"\"\"\n\t# random.seed(os.urandom(345634)) # initialize random generator\n\t# t = np.linspace(0.0, 24.0, 96.0) # define the time axis of a day, here we use 96 values every quarter of an hour\n\t# # standard load profile -- input\n\t# q = extra.read_slp(t,\n\t# 'Profielen-Elektriciteit-2015-versie-1.00 Folder/profielen Elektriciteit 2015 versie 1.00.csv') # read the sample standard load profile, can be any length, can be resized given a low/high resolution time axis\n\t# q = q / np.sum(q) # normalization of standard load profile\n\t# # process duration\n\t# duration_axis = np.linspace(0.0, 24.0, 96.0)\n\t# (p_d, E_p) = extra.app_time(duration_axis, 10, 2, 0.0,\n\t# 24.0) # function that define the pdf of duration of a process\n\t# # process consumption\n\t# consumption_axis = np.linspace(0.0, 3.5, 96.0)\n\t# (p_k, E_k) = extra.app_consumption(consumption_axis, 10, 2, 0.0,\n\t# 3.5) # function that define the pdf of duration of a process\n\t# # pdf of starting time\n\t# p_t_0 = lpd.infer_t_0(q, p_d, E_k) # computes the pdf of starting time of processes\n\t# p_t_0 = p_t_0 / np.sum(p_t_0) # normalization of the pdf to sum up to zero\n #\n\t# \"\"\"\n\t# 1st Approach, starting time of processes is a discrete propapibility density function\n\t# \"\"\"\n\t# # synthetic profile of D processes\n\t# D = 2000\n\t# synthetic_profile = lpd.synthetic_profile(D, t, p_d, consumption_axis, p_k, p_t_0)\n\t# synthetic_profile_1 = lpd.synthetic_profile(D, t, p_d, consumption_axis, p_k, p_t_0)\n\t# # expected value of D processes\n\t# q_e_e = lpd.infer_q_e(t, p_t_0, p_d, E_k, D)\n\t# # plot\n\t# plt.step(t, synthetic_profile, \"g-\")\n\t# plt.step(t, q_e_e, \"b--\")\n #\n\t# \"\"\"\n\t# 2nd Approach, starting time of processes is a continuous propapibility density function\n\t# \"\"\"\n\t# # synthetic profile of D processes\n\t# ts, cs = lpd.continous_synthetic_profile(D, t, p_d, consumption_axis, p_k, p_t_0)\n\t# plt.step(ts / len(t) * t[-1], cs, where='post', c='r')\n\t# plt.xlim(0, 24.0)\n\t# plt.legend([\"synthetic\", \"expected\", \"continuous\"], loc=0)\n\t# plt.show()\n #\n\t# \"\"\"\n\t# Time discretization\n\t# \"\"\"\n\t# n_intervals = 24 * 1 # discretized in minutes\n\t# discrete_timeaxis = np.linspace(0.0, 24.0, n_intervals + 1)\n\t# discrete_consumption = lpd.signal_discretization(discrete_timeaxis, t, ts, cs)\n\t# plt.step(ts / len(t) * t[-1], cs, where='post', c='r')\n\t# plt.step(discrete_timeaxis, discrete_consumption, where='post', c='k', ls='--', lw=2)\n\t# plt.legend([\"continuous\", \"discretized\"], loc=0)\n\t# plt.show()\n #\n #\n\t# \"\"\"\n\t# Repeated day synthetic profile creation\n\t# \"\"\"\n\t# # synthetic profile of D processes\n\t# D = 2000\n\t# n = 10\n\t# slp = lpd.synthetic_profile_repeated(D, t, p_d, consumption_axis, p_k, p_t_0, n)\n\t# plt.step(range(len(slp)), slp, \"g-\")\n\t# plt.show()\n\tt = np.linspace(0.0, 24.0, 96.0)\n\tload_profile = extra.read_slp(t, 'Profielen-Elektriciteit-2015-versie-1.00 Folder/profielen Elektriciteit 2015 versie 1.00.csv')\n\tslp = synthetic.create_synthetic_load(load_profile, 5.0, 5)\n\tplt.step(range(len(slp)), slp)\n\tplt.show()",
"def main():\n args = get_arguments()\n\n mode = args.mode\n sdf_path = os.path.expandvars(args.sdf_path)\n summary_file = os.path.expanduser(args.summary_file)\n assert os.path.exists(sdf_path), \"sdf-path not exists: {}\".format(sdf_path)\n\n if mode == \"SUM\":\n summary(sdf_path, summary_file)\n elif mode == \"VAL\":\n validate(sdf_path, summary_file)",
"def test_execute_review_7(self):\n review.execute_review(self.alchemist, self.test_dir,\n self.review_test_dir.name,\n s_report=True)\n\n self.assertTrue(self.review_test_dir.is_dir())\n\n summary_report_file = self.review_test_dir.joinpath(\"SummaryReport.txt\")\n self.assertTrue(summary_report_file.is_file())",
"def organise_qa_output(metadata, base_dir, write_tag):\n filenames = metadata['FITSImageFilename']\n for i, fits_file in enumerate(filenames):\n kat_target = katpoint.Target(metadata['KatpointTargets'][i])\n\n # Move QA report and create metadata\n pb_filebase = os.path.splitext(fits_file)[0] + '_PB'\n qa_report = pb_filebase + '_continuum_validation_snr5.0_int'\n pb_dir = _productdir(metadata, base_dir, i, '_PB', write_tag)\n\n qa_dir = _productdir(metadata, base_dir, i, '_QA', write_tag)\n os.mkdir(qa_dir)\n os.rename(os.path.join(pb_dir, qa_report), qa_dir)\n make_report_metadata(metadata, qa_dir)\n\n # Move RMS image and create metadata\n rms_dir = _productdir(metadata, base_dir, i, '_RMS', write_tag)\n os.mkdir(rms_dir)\n rms_image = pb_filebase + '_aegean_rms'\n mean_pb_rms = _calc_rms(os.path.join(pb_dir, rms_image + FITS_EXT))\n\n make_image_metadata(metadata, '_PB', pb_dir, i,\n 'Continuum Image PB corrected',\n 'Continuum image PB corrected',\n mean_pb_rms)\n\n os.rename(os.path.join(pb_dir, rms_image + FITS_EXT),\n os.path.join(rms_dir, rms_image + FITS_EXT))\n _add_missing_axes(os.path.join(rms_dir, rms_image + FITS_EXT))\n _caption_pngs(rms_dir, rms_image, kat_target, 'RMS PB Corrected')\n make_image_metadata(metadata, '_PB_aegean_rms', rms_dir, i,\n 'Continuum PB Corrected RMS Image',\n 'Continuum PB Corrected RMS image',\n mean_pb_rms)\n\n # Move MEAN image and create metadata\n bkg_dir = _productdir(metadata, base_dir, i, '_BKG', write_tag)\n os.mkdir(bkg_dir)\n bkg_image = pb_filebase + '_aegean_bkg'\n os.rename(os.path.join(pb_dir, bkg_image + FITS_EXT),\n os.path.join(bkg_dir, bkg_image + FITS_EXT))\n _add_missing_axes(os.path.join(bkg_dir, bkg_image + FITS_EXT))\n _caption_pngs(bkg_dir, bkg_image, kat_target, 'MEAN PB Corrected')\n make_image_metadata(metadata, '_PB_aegean_bkg', bkg_dir, i,\n 'Continuum PB Corrected Mean Image',\n 'Continuum PB Corrected Mean image',\n mean_pb_rms)\n\n # Remove .writing tag\n dir_list = [pb_dir, qa_dir, rms_dir, bkg_dir]\n for product_dir in dir_list:\n os.rename(product_dir, os.path.splitext(product_dir)[0])",
"def main():\n parser = argparse.ArgumentParser(description=\"Process the results of an experiment.\")\n parser.add_argument(\"experiment\")\n arguments = parser.parse_args()\n path = f\"experiments/{arguments.experiment}\"\n if not os.path.exists(path):\n raise SystemExit(f\"Path {path} does not exists.\")\n\n # For efficiency, one should generate the results from the parts without merging them.\n files = [file for file in os.listdir(path) if os.path.isfile(os.path.join(path, file))]\n frames = []\n for file in files:\n device, experiment, _ = file.split(\".\")\n frame = pandas.read_csv(\n os.path.join(path, file),\n index_col=\"variable\",\n usecols=[\"variable\", \"group_index\", \"value_i\"], dtype={\"value_i\": \"Int64\"}\n )\n frame[\"board\"] = device\n frame[\"experiment\"] = experiment\n frames.append(frame)\n dataframe = pandas.concat(frames)\n frames = None\n\n current_grouping = dataframe.groupby([\"group_index\", \"variable\"])\n \n data = current_grouping.agg([\n numpy.median,\n _percentile_factory(95),\n numpy.mean,\n numpy.std,\n \"count\"\n ])\n\n print(data)\n \n data = data.droplevel([0], axis=1)\n data = data.unstack()\n data.columns = data.columns.map('_'.join)\n data.to_csv(f\"{arguments.experiment}.csv\")",
"def run(self):\n for i in range(self.exploration_steps):\n self.single_step(i)\n if self.save_checkpoints:\n self.save_results()\n self.save_results()\n if self.verbose:\n print(\"\\nExploration completed\")\n return"
] | [
"0.6303137",
"0.5954984",
"0.5947202",
"0.5942798",
"0.5933814",
"0.5922406",
"0.58769554",
"0.58608824",
"0.58519894",
"0.5840858",
"0.5767799",
"0.5748718",
"0.5712309",
"0.5707479",
"0.5706759",
"0.56962764",
"0.56911665",
"0.56899905",
"0.56842196",
"0.567968",
"0.5671062",
"0.56666917",
"0.5644949",
"0.56378955",
"0.56186926",
"0.56173825",
"0.5616855",
"0.5612523",
"0.5599188",
"0.5590854"
] | 0.76796246 | 0 |
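Several of the negative snippets in the record above (for example build_result_folder, _create_result_directory, save_run_data, and the summary files written by Finish and do_dir) share one pattern: create a timestamped results directory, then append a plain-text summary into it. The sketch below only illustrates that shared pattern; the helper names make_results_dir and write_summary are assumptions for illustration and do not appear anywhere in this dataset.

import os
import time


def make_results_dir(base_dir="runs", timestamp=None):
    # Create a timestamped results directory with a "data" subfolder,
    # mirroring the build_result_folder / _create_result_directory snippets.
    timestamp = timestamp or str(int(time.time()))
    out_path = os.path.abspath(os.path.join(base_dir, timestamp))
    os.makedirs(os.path.join(out_path, "data"), exist_ok=True)
    return out_path


def write_summary(out_path, lines):
    # Append human-readable summary lines, the way the Finish and do_dir
    # snippets write their summary text files.
    with open(os.path.join(out_path, "summary.txt"), "a") as f:
        for line in lines:
            f.write(line + "\n")


if __name__ == "__main__":
    run_dir = make_results_dir()
    write_summary(run_dir, ["Files analyzed: 3", "Streaks detected: 1"])
    print("Results written to", run_dir)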
Start point of scraping: use the URLs, pass each soup tag to Unvs, and return a list of 100 Unvs (university) objects | def scrape():
url_base='https://www.usnews.com/best-colleges/rankings/national-universities'
unvss=[]
for page in range(N_PAGE):
url=url_base+'?_page={}'.format(page+1)
soup=get_soup(url)
unvs_tags=soup.find_all('li',id=re.compile(r'^view-.*'),class_='block-normal block-loose-for-large-up')
for unvs_tag in unvs_tags:
u=Unvs(unvs_tag)
print("Collect info of {}".format(u.name))
unvss.append(u)
return unvss | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_national_university_data(univ_url):\n f_name = 'national_university_html.json'\n base_url = 'https://www.usnews.com'\n html_cache = load_cache(f_name)\n\n if univ_url not in html_cache:\n resp = requests.get(base_url + univ_url, headers=agent)\n html_cache[univ_url] = resp.text\n save_cache(html_cache, f_name)\n\n soup = BeautifulSoup(html_cache[univ_url], 'html.parser')\n\n map_chunk = soup.find('section', attrs={'class': 'hero-stats-widget-map'})\n address = map_chunk.find('p').find('strong').text.strip()\n info_list = soup.find_all('div', attrs={'class': 'block-looser'})[1].find_all('ul')\n stats_list = soup.find('section', attrs={'class': 'hero-stats-widget-stats'}).find('ul').find_all('strong')\n salary_chunk = soup.find_all('div', attrs={'class': 'block-looser'})[4].find('span', attrs={'class': 'text-strong'})\n\n if univ_url + '/student-life' not in html_cache:\n life_resp = requests.get(base_url + univ_url + '/student-life', headers=agent)\n html_cache[univ_url + '/student-life'] = life_resp.text\n save_cache(html_cache, f_name)\n\n life_soup = BeautifulSoup(html_cache[univ_url + '/student-life'], 'html.parser')\n life_chunk = life_soup.find('div', attrs={'id': 'StudentBody'})\n gender_chunk = life_chunk.find('span', attrs={'data-test-id': 'v_percent'})\n\n if univ_url + '/academics' not in html_cache:\n academic_resp = requests.get(base_url + univ_url + '/academics', headers=agent)\n html_cache[univ_url + '/academics'] = academic_resp.text\n save_cache(html_cache, f_name)\n\n academic_soup = BeautifulSoup(html_cache[univ_url + '/academics'], 'html.parser')\n faculty_chunk = academic_soup.find('div', attrs={'data-field-id': 'vStudentFacultyRatio'})\n\n found_year = info_list[1].find('span', attrs={'class': 'heading-small'}).text\n if found_year == 'N/A':\n found_year = None\n else:\n found_year = int(found_year)\n\n endowment = info_list[5].find('span', attrs={'class': 'heading-small'}).text\n endowment = endowment.replace('$', '').replace(' +', '').strip()\n if endowment == 'N/A':\n endowment = None\n else:\n endowment_list = endowment.split()\n if len(endowment_list) == 1:\n endowment = float(endowment.replace(',', '')) / 1000\n elif endowment_list[1] == 'billion':\n endowment = float(endowment_list[0]) * 1000\n else:\n endowment = float(endowment_list[0])\n\n median_salary = salary_chunk.text.replace('*', '').strip() if salary_chunk is not None else None\n if median_salary is not None:\n median_salary = int(median_salary.replace('$', '').replace(',', ''))\n\n student_faculty = faculty_chunk.find('p').find('span', attrs={'class': 'text-strong'}).text.strip()\n if student_faculty == 'N/A':\n student_faculty = None\n else:\n student_faculty = int(student_faculty.split(':')[0])\n\n tuition_in_state = stats_list[0].text.split()[0]\n if tuition_in_state == 'N/A':\n tuition_in_state = None\n else:\n tuition_in_state = int(tuition_in_state.replace('$', '').replace(',', ''))\n\n female = gender_chunk.text if gender_chunk is not None else None\n if female is not None:\n female = float(female.replace('%', '')) / 100\n\n univ_dict = dict(name=soup.find('h1', attrs={'class': 'hero-heading'}).text.strip().replace('1', ''),\n ranking=soup.find('strong').text.strip().split()[0].replace(\"#\", \"\").replace('-', ' - '),\n state=address.rsplit(', ', 1)[1],\n city=address.rsplit(', ', 1)[0],\n type=info_list[0].find('span', attrs={'class': 'heading-small'}).text.split(', ')[0],\n found_year=found_year,\n endowment=endowment,\n median_salary=median_salary,\n 
student_faculty=student_faculty,\n female=female,\n tuition_in_state=tuition_in_state)\n\n if univ_dict['type'] == 'Public':\n tuition_out_state = stats_list[1].text.split()[0]\n enrollment = stats_list[3].text\n else:\n tuition_out_state = stats_list[0].text.split()[0]\n enrollment = stats_list[2].text\n\n if tuition_out_state == 'N/A':\n tuition_out_state = None\n else:\n tuition_out_state = int(tuition_out_state.replace('$', '').replace(',', ''))\n\n if enrollment == 'N/A':\n enrollment = None\n else:\n enrollment = int(enrollment.replace(',', ''))\n\n univ_dict.update(dict(tuition_out_state=tuition_out_state,\n enrollment=enrollment))\n\n return univ_dict",
"def get_national_university_page(page):\n base_url = 'https://www.usnews.com'\n page_url = '/best-colleges/rankings/national-universities?_mode=table&_page=' + str(page)\n\n resp = requests.get(base_url + page_url, headers=agent)\n soup = BeautifulSoup(resp.text, 'html.parser')\n\n table_chunk = soup.find('tbody', attrs={'data-js-id': 'items'})\n univ_list = table_chunk.find_all('tr', attrs={'data-view': 'colleges-search-results-table-row'})\n\n output_list = []\n for univ_chunk in univ_list:\n univ_url = univ_chunk.find('a')['href']\n output_list.append(get_national_university_data(univ_url))\n\n return output_list",
"def scrape_overview(self,unvs_tag):\n base='https://www.usnews.com'\n name_tag=unvs_tag.find('h3',class_='heading-large block-tighter').a\n assert(name_tag!=None)\n self.name=name_tag.string.strip()\n self.page_url=base+name_tag.get('href')\n assert(self.page_url!=None)\n self.address=unvs_tag.find('div',class_='block-normal text-small').string.strip()\n rank_msg=unvs_tag.find('div',style='margin-left: 2.5rem;').find('div').stripped_strings.__next__()\n match=re.search(r'\\d+',rank_msg)\n assert(match)\n self.rank=int(match.group())\n self.n_ug=int(unvs_tag.find('span',string=re.compile(r'\\s*Undergraduate Enrollment\\s*'))\\\n .parent.strong.string.strip().replace(',',''))\n tn_tag=unvs_tag.find('a',class_='display-block right')\n if tn_tag:\n self.thumbnail=base+unvs_tag.find('a',class_='display-block right').get('href')",
"def get_study_data(self, soup, url):\n pass",
"def __uol(soup):\n news = []\n container = soup.select('.mais-lidas-container')[0]\n most_read = container.find_all('li')\n\n for item in most_read:\n title = item.find('span', class_='cor-transition').get_text()\n news.append(dict(title=title, link=item.a['href']))\n return news",
"def parse_soup(self, soup):\n # find all class_='gs_r gs_or gs_scl' => each result\n return soup.find_all('li', class_='ais-InfiniteHits-item')",
"def get_all_national_university():\n f_name = 'national_university_info.json'\n\n data_list = load_cache(f_name, data_type='list')\n if len(data_list) == 0:\n print('Request National University Info through Website...')\n for page in range(1, 17):\n data_list += get_national_university_page(page)\n save_cache(data_list, f_name)\n else:\n print('Get National University Info from Cache File...')\n\n nu_obj_list = [NationalUniversity(data_dict=data_dict) for data_dict in data_list]\n return nu_obj_list",
"def data_collector(self, n, url, ret):\n try:\n html = urllib2.urlopen(url).read()\n soup = BeautifulSoup(html)\n ret[n] = [soup.title.string, url, html[0:100]]\n except:\n ret[n] = [\"Error\", url, \"Error\"]",
"def _scrape(self):",
"def __local_rj(soup):\n news = []\n container = soup.find('div', id='lidas')\n links = container.find_all('a')\n\n for a in links:\n news.append(dict(title=a.string, link=a['href']))\n return news",
"def __local_rj(soup):\n news = []\n container = soup.find('div', id='lidas')\n links = container.find_all('a')\n\n for a in links:\n news.append(dict(title=a.string, link=a['href']))\n return news",
"def create_soup(u):\n req = requests.get(u)\n html = req.text\n s = BeautifulSoup(html, \"html.parser\")\n return s",
"def load_website(self):\n# r = urllib.request.urlopen(self.url).read()\n r = requests.get(self.url).content \n self.soup = bs(r, \"lxml\")",
"def _grab_tags(self, url):\n a = self._api_request(url)\n return bs4.BeautifulSoup(a,features=\"html.parser\")",
"def __uol(soup):\n news = []\n container = soup.find('ol', class_='mostRead')\n most_read = container.find_all('li')\n\n for item in most_read:\n title = item.a['title']\n link = item.a['href']\n if \"folha.uol\" in link:\n link = replace_original_link_with_outline_call(link)\n news.append(dict(title=title, link=link))\n return news",
"def _get_soup(self, url):\n\n # generate a random header \n headers = {'User-Agent': self._random_user_agent()}\n # send a request and get the soup\n response = requests.get(url, headers=headers)\n results = response.content\n if not response.status_code == 404:\n soup = BeautifulSoup(results, 'lxml')\n return soup",
"def issueListing(self, v, i):\n #list of URLS within the issue\n# links = []\n issURL = self.link(vol = v, iss = i )\n html=urlopen(issURL)\n soup=BeautifulSoup(html,'html.parser')\n URLs = [] #Empty list\n \n# titles = soup.find_all('h5', class_=\"title\")\n# authors = soup.find_all('h6', class_=\"authors\")\n# pubs = soup.find_all('h6', class_=\"pub-info\")\n# for t, a, p in zip(titles, authors, pubs):\n blocks = soup.find_all('div', class_=\"article panel article-result\")\n for b in blocks:\n# print(b)\n titletag = b.find('h5', class_=\"title\")\n title = titletag.get_text()\n #Extract abstract url from title head\n aURL = titletag.find('a', href = True)['href']\n alink = 'https://journals.aps.org' + aURL\n #Print out the scraped information\n print(title)\n print(alink)\n #Extract research area and topic keywords\n kwlist = b.find('ul', class_=\"inline-list subjects\")\n #If the list tag exists\n if kwlist:\n lis = kwlist.find_all('li')\n kws = [li.get_text() for li in lis] \n print(kws)\n #Add utf-8 encode\n# print(kws.encode('utf-8')) \n print('----------------------------------------------------------------') \n #Collect URLs in the issue\n URLs.append('https://journals.aps.org' + aURL)\n return URLs",
"def internallinks(url, number_of_pages):\n hotelslist = set()\n request = get(url)\n parser = BeautifulSoup(request.text, 'html.parser')\n page_load = 5\n for link in parser.findAll(\"a\", href=re.compile(\"^(/|.*)(?=REVIEWS)\")):\n if link.attrs['href'] is not None:\n hotelurl = link.attrs['href']\n url = 'https://www.tripadvisor.es' + str(hotelurl)\n hotelslist.add(url)\n else:\n pass\n next_page = parser.find(class_=\"prw_rup prw_common_standard_pagination_resp\").find(\"a\", href=re.compile(\"^(/|.*)\"))\n next_page_url = next_page.attrs['href']\n while number_of_pages > 1:\n url = 'https://www.tripadvisor.es' + str(next_page_url)\n request = get(url)\n parser = BeautifulSoup(request.text, 'html.parser')\n for link in parser.findAll(\"a\", href=re.compile(\"^(/|.*)(?=REVIEWS)\")):\n if link.attrs['href'] is not None:\n hotelurl = link.attrs['href']\n url = 'https://www.tripadvisor.es' + str(hotelurl)\n hotelslist.add(url)\n else:\n pass\n try:\n next_page = parser.find(class_=\"prw_rup prw_common_standard_pagination_resp\").find(\"a\", href=re.compile(\n \"^(/|.*)\"))\n next_page_url = next_page.attrs['href']\n print(next_page_url)\n number_of_pages = number_of_pages - 1\n if page_load < 5:\n page_load = page_load + (5 - page_load)\n else:\n pass\n except:\n print(\n \"IndexError(Encontramos un error al extraer la {0} página volvemos a ejecutar el contenido de esa \"\n \"pagina)\".format(str(number_of_pages)))\n sleep(1)\n if page_load > 0:\n page_load = page_load - 1\n pass\n else:\n raise IndexError(\"Encontramos un error al extraer la {0} multiples fallos \"\n \"salimos \").format(str(number_of_pages))\n return hotelslist",
"def scrape(self):\n try:\n self.result = urlfetch.fetch(self.url)\n except DownloadError:\n self.result = urlfetch.fetch(self.url) \n if ((self.result.status_code == 200) and\n (self.result.content_was_truncated == 0)):\n self.soup = BeautifulSoup(self.result.content)\n else:\n logging.critical(\"Bad Status Code: \", self.result.status_code, self.url)\n sys.exit(1)",
"def get_university_news():\n\tresponse = requests.get('https://cumoodle.coventry.ac.uk')\n\tmoodleContent = BeautifulSoup(response.content, 'html.parser')\n\tpostLinks =[]\n\theadings = []\n\tdates = []\n\tdata = \"\"\n\tfor title in moodleContent.findAll('div',{'class':'subject'}):\n\t\theadings.append(title.text+\"</a></p>\")\n\tfor link in moodleContent.findAll('div',{'class':'link'}):\n\t\tpostLinks.append(\"<p style = 'font-size:120%;'> <a href = '\"+link.a['href']+\"'>\") \n\tfor date in moodleContent.findAll('div',{'class':'author'}):\n\t\tdates.append(\"<p style='font-size:90%;'>\"+date.text[18:]+\"</p>\")\n\tresults = zip(postLinks, headings, dates)\n\tfor result in results:\n\t\tdata+=(''.join(result))\n\treturn data",
"def get_course_all_slugs(self):\n\n unit_lessons_counter = 0\n # Unit Page -> Subunit Header + Subunit Block -> Lesson Block -> Lesson Title\n for course_unit_url, course_unit_slug in zip(\n self.course_unit_urls, self.course_unit_slugs\n ):\n\n unit_lessons_counter = 0\n # -> Unit Page\n try:\n course_unit_page = BeautifulSoup(\n requests.get(ROOT_URL + course_unit_url).text, \"lxml\"\n )\n except requests.ConnectionError as e:\n print(\"Error Connecting!\\n\", e)\n sys.exit(1)\n except requests.exceptions.HTTPError as errh:\n print(\"Http Error:\", errh)\n sys.exit(1)\n except requests.exceptions.ConnectionError as errc:\n print(\"Error Connecting:\", errc)\n sys.exit(1)\n except requests.exceptions.Timeout as errt:\n print(\"Timeout Error:\", errt)\n sys.exit(1)\n except requests.exceptions.RequestException as err:\n print(\"OOps: Something Else\", err)\n sys.exit(1)\n\n subunit_couter = 0\n\n # -> Subunit Header -> Subunit Block\n for course_subunit_title, course_subunit_body in zip(\n course_unit_page.find_all(attrs=COURSE_SUBUNIT_TITLE_ATTRS),\n course_unit_page.find_all(\n COURSE_SUBUNIT_BODY[\"tag\"], class_=COURSE_SUBUNIT_BODY[\"class\"]\n ),\n ):\n\n logging.debug(\"course_subunit_title:{}\".format(course_subunit_title))\n lesson_counter = 0\n # -> Lesson Block\n for course_lesson_body in course_subunit_body.find_all(\n COURSE_LESSON_BODY[\"tag\"],\n {\n \"class\": [\n COURSE_LESSON_BODY[\"class_i\"],\n COURSE_LESSON_BODY[\"class_ii\"],\n ]\n },\n ):\n course_lesson_span = course_lesson_body.find_all(\n COURSE_LESSON_SPAN[\"tag\"], class_=COURSE_LESSON_SPAN[\"class\"]\n )\n course_lesson_aria_label = course_lesson_span[0][\n COURSE_LESSON_LABEL\n ]\n logging.debug(\n \"course_lesson_aria_label:{}\".format(course_lesson_aria_label)\n )\n # -> Lesson Title\n # Check whether lesson block is a video\n if course_lesson_aria_label == \"Video\":\n lesson_title = course_lesson_body.find(\n COURSE_LESSON_TITLE[\"tag\"],\n class_=COURSE_LESSON_TITLE[\"class\"],\n )\n\n logging.debug(\n \"course_lesson_title:{}\".format(lesson_title.text)\n )\n self.lesson_titles.append(lesson_title.text)\n self.course_all_slugs.append(\n self.output_rel_path\n + course_unit_slug\n + \"/\"\n + str(subunit_couter)\n + \"_\"\n + course_subunit_title.text.replace(\" \", \"_\")\n + \"/\"\n + str(lesson_counter)\n + \"_\"\n + lesson_title.text.replace(\" \", \"_\")\n )\n\n lesson_counter += 1\n unit_lessons_counter += lesson_counter\n subunit_couter += 1\n self.unit_slugs_counter[course_unit_url] = unit_lessons_counter\n logging.info(\"Course - All slugs generated\")",
"def getUniverses(limit=None):\n url = f\"https://develop.roblox.com/v1/user/universes?limit={limit}&sortOrder=Desc\"\n if limit in (10, 25, 50):\n r = requests.get(url, cookies=cookie)\n j = json.loads(r.text)\n return j\n else:\n limit = 50\n r = requests.get(url, cookies=cookie)\n j = json.loads(r.text)\n return j",
"def scrape(self, years=[2016]):\n data = []\n for person in self._get_persons():\n for year in years:\n person_id = person[1]\n url = \"http://www.kongehuset.no/programarkiv.html?tid=30387&sek=30041&person=%s&ar=%s\" % (person_id, year)\n print(\"Scrape %s\" % url)\n r = requests.get(url)\n person_data = self._parse_events(r.text)\n for row in person_data:\n row[\"person\"] = person[0]\n row[\"url\"] = url\n data += person_data\n\n return data",
"def _get_apt_urls_ensemble(self, \n verbose=False, \n test=False):\n\n pg_num = 1 # initial page number\n stop = False # a flag to indicate whether or not to stop \n apt_urls = [] # a list that contains a complete set of URLs\n \n # keep going until reaching the last page \n while not stop:\n \n if test and pg_num == 2:\n break\n \n if pg_num%50 == 0:\n # sleep 15 seconds for every batch \n if verbose:\n print('50 pages scraped, sleep 15 seconds')\n time.sleep(15)\n \n if pg_num == 845:\n break\n \n webpage = self._get_webpage(pg_num)\n soup_pg = self._soup_attempts(webpage)\n apt_urls_pg = self._get_apt_urls_per_page(soup_pg)\n more_listings = soup_pg.find('div', class_='_grid33 _alpha')\n\n # try to make sure we reach the last page \n # condition 1 - if there're no more contents in regular page\n # condition 2 - subscriped contents also non-existent \n if (not apt_urls_pg) and (not more_listings):\n attempts = 0\n while attempts < 5:\n time.sleep(3)\n # another 5 attempts to request a soup \n soup_pg = self._soup_attempts(webpage)\n apt_urls_pg = self._get_apt_urls_per_page(soup_pg)\n more_listings = soup_pg.find('div', class_='_grid33 _alpha')\n \n # if we finally get results\n if apt_urls_pg or more_listings:\n apt_urls += apt_urls_pg\n if verbose:\n print(f'apartment URLs in page {pg_num} all scraped')\n pg_num += 1\n break # break the loop \n attempts += 1\n \n if pg_num < 470:\n # last check - we know the total number of pages is\n # greater than 470 \n stop = False\n else: \n # the last page has been reached \n stop = True\n else:\n # have not reached the end page yet, keep going \n apt_urls += apt_urls_pg\n if verbose:\n print(f'apartment URLs in page {pg_num} all scraped')\n pg_num += 1 # next page \n \n return apt_urls",
"def scrape_BI(url):\n response = requests.get(url)\n soup = BeautifulSoup(response.text)\n companies = soup.find_all('h3', class_='slide-title')\n #names = []\n driver = init_driver()\n for company in companies[:]:\n name = company.getText().strip()\n # if \" \" in name:\n # name.replace(' ','+')\n html_code = load_google(driver, name)\n #name, address = scrape_google(html_code)\n url = scrape_google(html_code)\n print(name,url)\n #names.append(name)\n driver.quit()\n #print(names)",
"def scrape(self):\n pass",
"def parsing_all_page(url):\n html_doc = get_html(url)\n# html_doc = get_html_local()\n page_count = get_html_count(html_doc)\n print 'All have find pages %d' % page_count\n\n projects = []\n\n for page in range(1, page_count + 1):\n print 'Parsing %d%%' % (page*100/page_count)\n\n url = BASE_URL + '?page=%d' % page\n projects.extend(process_page(url))\n\n return projects",
"def __local_al(soup):\n news = []\n ns = get_ns('localAL')\n\n divs = soup.find_all('div', class_='card-news-small')\n # Incrementer, we only need 4 hits\n i = 0\n for div in divs:\n title = div.find('span', class_='card-news__title')\n news.append(dict(title=title.string,\n link=ns.url + title.parent['href']))\n i += 1\n if i == 4:\n break\n return news",
"def general_scraper(section_url):\n\n prefix = \"http://mesva.univaq.it\"\n\n request = []\n news = []\n\n for i, url in enumerate(section_url):\n request.append(requests.get(url))\n news_division = BeautifulSoup(request[i].text, \"html.parser\").find(class_=\"view-content\")\n\n discab_news = news_division.find_all(\"div\", recursive=False)[0:5]\n\n for single_news in discab_news:\n news.append({\n 'description': '',\n 'title': single_news.a.string,\n 'link': prefix + single_news.a['href']\n })\n\n return news",
"def soup(url):\n handle = ''\n max_tries = 10\n for i in range(max_tries):\n try:\n handle = urlopen(url)\n handle = handle.read()\n break\n except:\n logging.exception('urlopen failed (attempt %d)', i + 1)\n if i == max_tries - 1:\n logging.error('the maximum urlopen attempts have been reached')\n raise\n time.sleep(1)\n\n s = BeautifulSoup(handle)\n return s"
] | [
"0.6813396",
"0.6677755",
"0.6292874",
"0.6277937",
"0.62407184",
"0.6227392",
"0.6170943",
"0.60358685",
"0.6008686",
"0.5971656",
"0.5971656",
"0.5909",
"0.5846324",
"0.5824053",
"0.5823654",
"0.5777535",
"0.57656276",
"0.5758727",
"0.5701715",
"0.57001144",
"0.5692121",
"0.5683626",
"0.5683169",
"0.5665919",
"0.566149",
"0.5653018",
"0.5649775",
"0.5644075",
"0.56411433",
"0.5639204"
] | 0.8051392 | 0 |
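The scrape() document in the record above depends on a get_soup(url) helper and an N_PAGE constant that are not part of the record itself. The sketch below shows what such a helper typically looks like, assuming the requests and BeautifulSoup libraries (the same pairing used by the create_soup and _get_soup negatives above); the User-Agent header and the N_PAGE value are illustrative assumptions, not something defined in this dataset.

import requests
from bs4 import BeautifulSoup

N_PAGE = 5  # assumed number of ranking pages to walk through


def get_soup(url):
    # Fetch the page and parse it into a BeautifulSoup tree so callers can
    # run find()/find_all() queries against it, as scrape() does above.
    headers = {'User-Agent': 'Mozilla/5.0'}
    resp = requests.get(url, headers=headers)
    resp.raise_for_status()
    return BeautifulSoup(resp.text, 'html.parser')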
Use the URL to scrape detailed info about the university | def scrape_detail(self,url):
soup=get_soup(url)
self.zip=soup.find('p',class_='block-normal hide-for-small-only text-small hero-ranking-data-contact').stripped_strings.__next__()[-5:]
if self.zip in zips:
#print('DUPLICATE!')
zips.append(self.zip)
info_tags=soup.find_all('span',class_='heading-small text-black text-tight block-flush display-block-for-large-up')
self.type=info_tags[0].string.strip()
self.year_founded=int(info_tags[1].string.strip())
self.setting=info_tags[4].string.strip()
self.endowment=info_tags[5].string.strip() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_info_of_url(url):\n pass",
"def get_details(self):\n # For every URL in our list of links that we got from the parser's\n # 'lookup()' method we get the data from that URL, set it in our\n # parser's buffer, and then let the parser do the rest of the work.\n #\n for i,link in enumerate(self.links):\n # NOTE: Buffers are 1-based, not 0-based.\n #\n link_data = link.get()\n self.scraper.parser.set_buffer(i+1, link_data)\n\n # And in the final buffer we set the id. The scraper we have\n # loaded knows how many bits of url data it expects and in which\n # buffer the id will be in.\n #\n i += 1\n self.scraper.parser.set_buffer(i+1, self.id)\n self.xml_details = self.scraper.parser.parse(FN_GET_DETAILS,\n self.scraper.settings)",
"def scrape_details(self, listings_dict, url):\n try:\n next_page_url = None\n if \"paginationNext\" in listings_dict['props']['_page']['linkTags']:\n next_page_url = listings_dict['props']['_page']['linkTags']['paginationNext']['href']\n listings = listings_dict['props']['searchData']['homes']\n for listing in listings:\n try:\n full_address = listing['location']['partialLocation']\n address, unitNum = find_unit_num(full_address)\n if address == \"Address Not Disclosed\":\n skip_listing(self.college, 'data', 'Trulia')\n continue\n full_price = listing['price']['formattedPrice']\n price_low, price_high = find_prices(full_price)\n beds = listing['bedrooms']['formattedValue']\n beds = ''.join([x for x in beds if x.isdigit()])\n if beds:\n if '-' in beds:\n beds = int(beds[:beds.find('-')])\n else:\n beds = int(beds)\n else:\n beds = None\n baths = listing['bathrooms']['formattedValue']\n baths = ''.join([x for x in baths if not x.isalpha()])\n if baths:\n if '-' in baths:\n baths = float(baths[:baths.find('-')])\n else:\n baths = float(baths)\n else:\n baths = None\n sqft = None\n if 'floorSpace' in listing and listing['floorSpace']:\n sqft = listing['floorSpace']['formattedDimension']\n sqft = int(''.join([x for x in sqft if x.isdigit()])) if sqft else None\n tags = listing['tags']\n pets = None\n for tag in tags:\n if \"PET FRIENDLY\" in tag.values():\n pets = True\n photos = listing['media']['photos']\n images = list()\n for photo in photos:\n images.append(photo['url']['small'])\n detail_link = 'https://www.trulia.com' + listing['url']\n latitude = listing['location']['coordinates']['latitude']\n longitude = listing['location']['coordinates']['longitude']\n # Build document for DB\n unit = {\n 'address': address,\n 'unitNum': unitNum,\n 'price_high': price_high,\n 'price_low': price_low,\n 'beds': beds,\n 'baths': baths,\n 'pets': pets,\n 'sqft': sqft,\n 'provider': 'Trulia',\n 'images': images,\n 'URL': detail_link,\n 'original_site': None,\n 'available': 'Now',\n 'latitude': latitude,\n 'longitude': longitude\n }\n write_to_raw_json(unit, self.college)\n\n # Print Scraping errors and write to log file\n except Exception as e:\n write_to_error_log(self.college, 'Trulia', e, link=url)\n skip_listing(self.college, 'error', 'Trulia')\n continue\n\n except Exception as e:\n write_to_error_log(self.college, 'Trulia', e, link=url)\n skip_listing(self.college, 'error', 'Trulia')\n\n return next_page_url",
"def _scrape(self):",
"def scrape_url(url):\n r = requests.get(url)\n url_list = get_urls(r.text)\n email_list = get_email_addresses(r.text)\n phone_list = get_phone_numbers(r.text)\n\n print_list('Urls', url_list)\n print_list('Emails', email_list)\n print_list('Phone Numbers', phone_list)",
"def scrape(self):\n pass",
"def find_details_json(self, url):\n response = self.get_response(url)\n if response:\n html_soup = BeautifulSoup(response.text, 'html.parser')\n listings_json = html_soup.find('script', id='__NEXT_DATA__')\n if listings_json:\n listings_json = str(listings_json)\n listings_json = listings_json.replace(\"<script id=\\\"__NEXT_DATA__\\\" type=\\\"application/json\\\">\", \"\").replace(\"</script>\", \"\")\n listings = json.loads(listings_json)\n return listings\n else:\n skip_scraper(self.college, 'Trulia')",
"def parse(self, url):\n pass",
"def get_info(self, url, logger, posts=True):\n self.logger = logger\n proxy = random.choice(self.proxies_pool())\n user_agent = get_user_agent(logger)\n headers = {'User-Agent': user_agent}\n proxies = {'proxies': proxy}\n attrs = {'class': 'thing'}\n html = requests.get(url, headers=headers, proxies=proxies)\n soup = BeautifulSoup(html.text, 'html.parser') # calling the url with beautiful soup\n if posts:\n posts = soup.find_all('div', attrs=attrs)\n return soup, posts\n else:\n return soup",
"def getVotacion(self, url):",
"def get_study_data(self, soup, url):\n pass",
"def get_data_from_page(url):\n # Initialise a dictionary to store our information\n row = {'url': url}\n\n # get the html content of a website using the requests library and the get function\n # the '.content' returns the contents of the request, without it would return the HTTP status code\n page_contents = requests.get(url).content\n\n # parse the contents with beautiful soup\n soup = BeautifulSoup(page_contents, 'lxml')\n\n # Get the text relating to the campaign title which belong to that specific class.\n for element in soup.find_all(class_=\"a-campaign-title\"):\n row['title'] = element.text\n\n # Interrogating the source we found that the tags are URLs that always contain the 'discover' path\n # So we just need to find all the URLs that contain that string and return them\n tags = []\n for link in soup.find_all('a', href=True):\n if 'discover' in link['href']:\n tags.append(link.text)\n\n row['tags'] = tags\n\n # Progress meter is a single string in this class.\n for link in soup.findAll('h2', {'class': 'm-progress-meter-heading'}):\n goal = link.text\n\n # The format is strictly defined so we can do some string parsing to get the information we want\n row['current amount'] = goal.split()[0]\n row['total_amount'] = goal.split()[-2]\n\n for link in soup.findAll('div', {'class': 'p-campaign-description'}):\n row['description'] = link.text\n\n for link in soup.findAll('span', {'class': 'm-campaign-byline-created a-created-date'}):\n row['created'] = link.text\n\n # Some information exists only in the footer of the \"donations\" page, which is summarised in the side bar\n # We can't access it directly but the page URL is well formated so we can use some string manipulation to create\n # the URL and parse it.\n\n # the gofundme URLs are well formated along the lines of www.gofundme.com/f/name of fundraiser?qidSOMEHEXCODE\n # We can use the '?qid' as an anchor and replace it with '/donations?qid' to get the URL of the donations page\n donations_url = url.replace('?qid', '/donations?qid')\n\n soup = BeautifulSoup(requests.get(donations_url).content, 'lxml')\n text_soup = str(soup)\n\n # The information in this section is outside of the usual html format, but it is well structured\n # We can extract it using regular expressions that look for the information\n for dates in re.findall('launch_date\\\":\\\"[0-9-]+', text_soup):\n row['launch_date'] = dates.rsplit('\"', 1)[-1]\n\n for country in re.findall('country\\\":\\\"[A-Z]+', text_soup):\n row['country'] = country.rsplit('\"', 1)[-1]\n\n for donation_count in re.findall('donation_count\\\":[0-9]+', text_soup):\n row['donation_count'] = donation_count.rsplit(':', 1)[-1]\n\n for charity in re.findall('charity\\\":[a-z]+', text_soup):\n row['is_charity'] = charity.rsplit(':', 1)[-1]\n\n return row",
"def data_collector(self, n, url, ret):\n try:\n html = urllib2.urlopen(url).read()\n soup = BeautifulSoup(html)\n ret[n] = [soup.title.string, url, html[0:100]]\n except:\n ret[n] = [\"Error\", url, \"Error\"]",
"def getFullInformation(self):\n request = requests.get(self.url, headers=REQUEST_HEADERS)\n if request.status_code == 200:\n # Got a valid response\n souped = BeautifulSoup(request.text, \"html5lib\")\n description = souped.find(\"div\", id=\"vip-description-text\").string\n if description:\n self._description = description.strip()\n else:\n self._description = \"\"\n contact = souped.find(class_=\"phone\")\n if not contact:\n self._contact_name, self._contact_number = [\"\",\"\"]\n else:\n if \" on \" in contact.string:\n self._contact_name, self._contact_number = contact.string.split(\" on \")\n else:\n self._contact_name, self._contact_number = [\"\", contact.string]\n\n gmaps_link = souped.find(\"a\", class_=\"open_map\")\n if gmaps_link:\n self._latitude, self._longitude = re.search(\"center=(-?\\w.*),(-?\\d.*)&sensor\", gmaps_link.get(\"data-target\")).groups()\n else:\n self._latitude, self._longitude = [\"\", \"\"]\n\n return\n else:\n # TODO: Add error handling\n print (\"Server returned code: \" + request.status_code + \" for \" + url)\n return []",
"def get_details(self):",
"def get_info(url):\r\n soup = make_request(url)\r\n\r\n #get press release title\r\n title_text = soup.find(\"h2\", \"con-title\").text.strip()\r\n title = title_text.partition('\\n')[0]\r\n\r\n #get press release content and date\r\n div = soup.find_all(\"div\") #find div tags\r\n for ele in div:\r\n for div2 in ele(\"div\",\"text-right\"):\r\n if \"發佈日期\" in div2.text:\r\n text = ele.text\r\n date = re.findall(\"\\d\\d\\d\\d-\\d\\d-\\d\\d\", div2.text)[0]\r\n break #prevents reiterating upwards to all div parents\r\n return date, title, text",
"def parse_detail_page(self, response):\n self.logger.info('Parse Detail Page function called on %s', response.url)\n item = response.meta.get('item', {})\n item['url'] = response.url\n item['title'] = response.css(TITLE_SELECTOR).extract_first(\"\").strip()\n item['price'] = self.get_price(response)\n return item",
"def get_info(self):\r\n\r\n self.driver.get(WEBSITE)\r\n time.sleep(3)\r\n self.driver.find_element_by_xpath(\"\"\"//*[@id=\"modalContent\"]/div/button/i\"\"\").click()\r\n time.sleep(3)\r\n #gets prices and appends to list\r\n all_prices = self.driver.find_elements_by_class_name(\"firstPrice\")\r\n for price in all_prices:\r\n text = price.text\r\n new_p = text.replace(\".\", \"\")\r\n price_int = int(new_p.split(\" \")[1])\r\n self.price_list.append(price_int)\r\n #gets addresses\r\n all_addresses = self.driver.find_elements_by_class_name(\"postingCardLocationTitle\")\r\n for address in all_addresses:\r\n self.address_list.append(address.text)\r\n print(self.address_list)\r\n # gets info\r\n ad_info = self.driver.find_elements_by_css_selector(\"a.go-to-posting\")\r\n for info in ad_info:\r\n links = info.get_attribute('href') #gets href link inside the css\r\n self.all_links.append(links)\r\n self.all_info.append(info.text)\r\n\r\n # Just for tests\r\n print(self.price_list)\r\n print(self.all_info)\r\n print(self.all_links)",
"def scrap_site(link):\n pass # Scrapy or BeautifulSoup",
"def parse_detail(self, response):\n\n self.logger.log(self.log_lvl, 'scraping data @ {}'.format(response.url))\n\n item_list = list()\n image_urls = list()\n # extract image\n try:\n pattern = re.compile(r\"(.*imagearray:)(.*)(,.*displaymode.*)\", re.MULTILINE | re.DOTALL)\n javascript_containing_images = response.xpath('//script[contains(., \"var mygallery=\")]/text()').extract()[0]\n images = re.match(pattern, javascript_containing_images).group(2)\n image_array = json.loads(images)\n image_urls = [urlparse.urljoin(response.url, itm[1]) for itm in image_array]\n except Exception as e:\n print(\"{} - {}\".format(type(e), str(e)))\n\n tipe_mobil = response.css('#content font.vehicleinfo ~ font.warning::text').extract_first()\n model_mobil = response.css('#content font.vehicleinfo::text').extract_first()\n if tipe_mobil.lower() == model_mobil.lower():\n tipe_mobil = response.meta.get('type', None)\n main_group = response.meta.get('main_group', None)\n assembly_set = response.css('#content font.title b::text').extract_first()\n\n # sparepart items\n for row in response.css('div#content div.content table tr'):\n item = IsuzuSparepartItem()\n\n # source_url\n item['source_url'] = response.url\n\n # car model\n item['merk'] = self.name\n item['tipe_mobil'] = tipe_mobil\n item['model_mobil'] = model_mobil\n\n # images\n item['image_urls'] = image_urls\n\n # grouping/assembly\n item['main_group'] = main_group\n item['assembly_set'] = assembly_set\n\n item['key'] = row.css('td.intable:nth-child(1) .detailcontent::text').extract_first()\n item['part_number'] = row.css('td.intable:nth-child(2) .detailcontent::text').extract_first()\n item['itc'] = row.css('td.intable:nth-child(3) .detailcontent::text').extract_first()\n item['description'] = row.css('td.intable:nth-child(4) .detailcontent::text').extract_first()\n item['qty'] = row.css('td.intable:nth-child(5) .detailcontent::text').extract_first()\n item['app_date'] = row.css('td.intable:nth-child(6) .detailcontent::text').extract_first()\n item['lr'] = row.css('td.intable:nth-child(7) .detailcontent::text').extract_first()\n item['model'] = row.css('td.intable:nth-child(8) .detailcontent::text').extract_first()\n item['remarks'] = row.css('td.intable:nth-child(9) .detailcontent::text').extract_first()\n\n item_list.append(item)\n\n return item_list",
"def report(self, url):\n\n print(self.get(url))",
"def extractInfo(Link):\r\n response = urlopen(Link)\r\n html = response.read()\r\n #LinkInfo = ds.Links()\r\n #html = refinehtmltags(html)\r\n pagetitle = html[html.find('<title>') + 7 : html.find('</title>')]\r\n startindex = html.find('<meta name=\"description\" content=\"')\r\n desc = html[startindex + 34 : html.find('\"',startindex + 38)]\r\n print pagetitle\r\n print desc\r\n #### Use the links to\r\n #### Extract the information as\r\n #### pagetitle\r\n #### description\r\n #return LinkInfo\r",
"def query(url):",
"def _get_one(self,url):\n pass",
"def get_info_url(self):\n return self.get_info(\"URL\")",
"def get_details(self):\n url_data = self.url.get()\n\n self.scraper.parser.set_buffer(1, url_data)\n self.scraper.parser.set_buffer(2, self.id)\n ep_details = self.scraper.parser.parse(FN_GET_EPISODE_DETAILS,\n self.scraper.settings)\n \n self.extended_details = ep_details\n self.actors = []\n self.credits = []\n\n self.scraper.logger.debug(\"set_details: %s\" % repr(ep_details))\n dom = parseString(ep_details)\n episode = dom.firstChild\n\n self.title = get_child_data(episode, \"title\", self.title)\n self.plot = get_child_data(episode, \"plot\", \"\")\n self.aired = get_child_data(episode, \"aired\")\n self.thumbnail = get_child_data(episode, \"thumb\")\n self.director = get_child_data(episode, \"director\")\n self.rating = try_float(get_child_data(episode, \"rating\"))\n self.episode_number = try_int(get_child_data(episode, \"episode\"))\n self.season_number = try_int(get_child_data(episode, \"season\"))\n\n credit = first_child(episode, \"credits\")\n while credit:\n if credit.firstChild and len(credit.firstChild.data) > 0:\n self.credits.append(credit.firstChild.data)\n credit = next_sibling(credit, \"credits\")\n\n actor = first_child(episode, \"actor\")\n while actor:\n actor_name = get_child_data(actor, \"name\")\n if actor_name is not None:\n self.actors.append(actor_name)\n actor = next_sibling(actor, \"actor\")\n\n dom.unlink()\n dom = None\n return",
"def get_results():\n # store info in a dictionary {name -> shortname}\n res = {}\n session = requests.Session()\n handle_url('http://www.creators.com/comics/cat-seeall.html', session, res)\n save_result(res, json_file)",
"def get_results():\n # store info in a dictionary {name -> shortname}\n res = {}\n session = requests.Session()\n handle_url('http://www.gocomics.com/features', session, res)\n handle_url('http://www.gocomics.com/explore/editorial_list', session, res)\n handle_url('http://www.gocomics.com/explore/sherpa_list', session, res)\n save_result(res, json_file)",
"def get(self, url):\n self.notifier.write('Parsing %s...' % url, DEBUG)\n soup = BeautifulSoup.BeautifulSoup(self.provider.get(url))\n\n people = []\n\n # Construct some regular expressions we'll need.\n r_electorate = re.compile('Electoral Division of .*')\n r_title = re.compile('.*Title.*')\n r_party = re.compile('.*Party.*')\n r_telephone = re.compile('.*Tel:.*')\n r_fax = re.compile('.*Fax:.*')\n r_telephone_tollfree = re.compile('.*Toll Free:.*')\n r_address_parliament = re.compile('.*Parliament House Contact.*')\n r_address_office = re.compile('.*(Location)|(Postal Address).*')\n r_email = re.compile('mailto:(?!web\\.reps@aph\\.gov\\.au)')\n\n for page in soup.findAll('a', href=re.compile(CONTACT_LINK)):\n self.notifier.write('Parsing %s (referenced by %s)...' \\\n % (page['href'], url), DEBUG)\n moresoup = BeautifulSoup.BeautifulSoup(self.provider.get(\\\n urlparse.urljoin(url, page['href'])))\n person = {}\n\n # Electorate\n elem = moresoup.findAll('p', text=r_electorate)\n if elem:\n person['electorate'] = \\\n elem[0].strip()[len('Electoral Division of '):]\n\n # Name\n elem = moresoup.findAll('h2')\n if elem:\n fullname = elem[0].string\n for p in VALID_PREFIX:\n if fullname.startswith(p):\n person['prefix'] = p\n fullname = fullname[len(p):]\n break\n parts = fullname.split()\n if len(parts) >= 2:\n person['firstname'] = parts[0]\n person['surname'] = parts[1]\n person['suffix'] = ' '.join(parts[2:])\n else:\n self.notifier.writeError(\\\n 'No name found for individual on %s' % page['href'], \\\n DEBUG)\n # Title\n elem = moresoup.findAll('p', text=r_title)\n if elem:\n try:\n elem = elem[0].next\n person['title'] = elem.string.strip()[1:-1].strip()\n except Exception as inst:\n self.notifier.writeError(\\\n '%s while determining title on page %s' % (str(inst), \\\n page['href']), DEBUG)\n\n # Party\n elem = moresoup.findAll('p', text=r_party)\n if elem:\n try:\n elem = elem[0].next\n person['party'] = elem.string.strip()[1:].strip()\n except Exception as inst:\n self.notifier.writeError(\\\n '%s while determining party on page %s' % (str(inst), \\\n page['href']), DEBUG)\n\n # Parliament house address\n elem = moresoup.findAll('p', text=r_address_parliament)\n if elem:\n try:\n person['address'] = '%s\\n%s\\n%s' % \\\n (elem[0].next.string.strip(), \\\n elem[0].next.next.next.string.strip(), \\\n elem[0].next.next.next.next.next.string.strip())\n elem = elem[0].next.next.next.next.next.next.next.next\n person['suburb'], person['state'], person['postcode'] = \\\n elem.string.split()[:3]\n except Exception as inst:\n self.notifier.writeError(\\\n '%s while determining address on page %s' % \\\n (str(inst), page['href']), DEBUG)\n\n # Telephone\n elem = moresoup.findAll('p', text=r_telephone)\n counter = 0\n for s in elem:\n try:\n person['telephone%s' % (counter or '')] = \\\n re.sub(r'[^0-9]', '', s.string.strip()[len('Tel:'):])\n counter = counter + 1\n except Exception as inst:\n self.notifier.writeError(\\\n '%s while determining phone number on page %s' % \\\n (str(inst), page['href']), DEBUG)\n\n # Toll free numbers\n elem = moresoup.findAll('p', text=r_telephone_tollfree)\n for s in elem:\n try:\n person['telephone%s' % (counter or '')] = \\\n re.sub(r'[^0-9]', '', \\\n s.string.strip()[len('Toll Free:'):])\n counter = counter + 1\n except Exception as inst:\n self.notifier.writeError(\\\n '%s while determining phone number on page %s' % \\\n (str(inst), page['href']), DEBUG)\n \n # Fax\n elem = moresoup.findAll('p', text=r_fax)\n counter = 0\n for s in elem:\n try:\n 
person['fax%s' % (counter or '')] = \\\n re.sub(r'[^0-9]', '', s.string.strip()[len('Fax:'):])\n counter = counter + 1\n except Exception as inst:\n self.notifier.writeError(\\\n '%s while determining fax number on page %s' % \\\n (str(inst), page['href']), DEBUG)\n\n # Office address(es)\n elem = moresoup.findAll('p', text=r_address_office)\n counter = 1\n for s in elem:\n try:\n s = s.next.next\n person['address%s' % counter] = s.string.strip()\n s = s.next.next\n person['suburb%s' % counter] = \\\n ' '.join(s.string.split()[:-2])\n person['state%s' % counter], person['postcode%s' % \\\n counter] = s.string.split()[-2:]\n counter = counter + 1\n except Exception as inst:\n self.notifier.writeError(\\\n '%s while determining address on page %s' % \\\n (str(inst), page['href']), DEBUG)\n\n # Email\n elem = moresoup.findAll('a', href=r_email)\n try:\n if elem:\n person['email'] = elem[0]['href'][len('mailto:'):]\n except Exception as inst:\n self.notifier.writeError(\\\n '%s while determining email on page %s' % (str(inst), \\\n page['href']), DEBUG)\n\n # URLs\n for (attribute, text) in [('biography', 'Biography'), \\\n ('firstspeech', 'First speech'), \\\n ('homepage', 'Personal Home Page')]:\n try:\n person['url_%s' % attribute] = urlparse.urljoin( \\\n url, moresoup.findAll('a', text=text)[0].parent['href'])\n except Exception as inst:\n self.notifier.writeError(\\\n '%s while determining %s on page %s' % \\\n (str(inst), attribute, page['href']), DEBUG)\n\n # General details\n person['level'] = 'federal'\n person['house'] = 'house of representatives'\n\n people.append(person)\n return people",
"def get_site_info(self, passed_url, options={}):\n uri = self.get_site_info_url(passed_url)\n params = self.get_site_info_query_params(options)\n response = requests.get(uri, params)\n return response.json()"
] | [
"0.70981526",
"0.6877993",
"0.6745803",
"0.67396754",
"0.66712624",
"0.66406703",
"0.6620889",
"0.6527329",
"0.65011233",
"0.64701736",
"0.6441623",
"0.6421658",
"0.6386159",
"0.63498217",
"0.6328521",
"0.6304512",
"0.62901115",
"0.625672",
"0.6245974",
"0.62436974",
"0.62268084",
"0.6207581",
"0.6149481",
"0.6138926",
"0.6135768",
"0.6113072",
"0.60782254",
"0.6061793",
"0.60499084",
"0.6038474"
] | 0.7324095 | 0 |
Swap the byte ordering in a packet with N=4 bytes per word | def byteswap(data, word_size=4):
return reduce(lambda x,y: x+''.join(reversed(y)), chunks(data, word_size), '') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def byte_swap(data, word_size):\n \n bs_data = [0]*len(data)\n for ii in range(0, len(data), word_size):\n bs_data[ii:ii+word_size] = data[ii:ii+4][::-1]\n return(bytes(bs_data))",
"def swapNibbles(inputByte):\n return (inputByte << 4 | inputByte >> 4) & 0xff",
"def swap_bytes(word_val):\n msb = word_val >> 8\n lsb = word_val % 256\n return (lsb << 8) + msb",
"def swap_nib(x):\n a = (x & 0xF) << 4\n b = (x & 0xF0) >> 4\n c = a + b\n return c",
"def swapbytes(bal):\n if len(bal) == 3:\n return [bal[1], bal[0], bal[2]]\n elif len(bal) == 2:\n return [bal[1], bal[0]]\n else:\n raise Exception(\"Can only swap 2 or 3 bytes\")",
"def swap(value: int, size: int):\n return unpack(pack(value, size, \">\"), size, \"<\")",
"def setPacketLength(self):\n self.packetLength = len(self) - PRIMARY_HEADER_BYTE_SIZE - 1",
"def ipv4_reassembly(packet, *, count=NotImplemented):\n if 'IP' in packet:\n ipv4 = packet['IP']\n if ipv4.flags.DF: # dismiss not fragmented packet\n return False, None\n data = dict(\n bufid=(\n ipaddress.ip_address(ipv4.src), # source IP address\n ipaddress.ip_address(ipv4.dst), # destination IP address\n ipv4.id, # identification\n TP_PROTO.get(ipv4.proto).name, # payload protocol type\n ),\n num=count, # original packet range number\n fo=ipv4.frag, # fragment offset\n ihl=ipv4.ihl, # internet header length\n mf=bool(ipv4.flags.MF), # more fragment flag\n tl=ipv4.len, # total length, header includes\n header=bytearray(ipv4.raw_packet_cache), # raw bytearray type header\n payload=bytearray(bytes(ipv4.payload)), # raw bytearray type payload\n )\n return True, data\n return False, None",
"def endian_swap(value):\n\n return (((value >> 24) & 0xff) | ((value >> 8) & 0xff00)\n | ((value << 8) & 0xff0000) | (value << 24))",
"def __udp_preprocess_packet(self, seq):\n return b'06' + seq.to_bytes(4, 'big') \\\n + self.packets_status[seq][\"size\"].to_bytes(2, 'big') \\\n + self.packets_status[seq][\"payload\"]",
"def swap32(x):\n return (((x << 24) & 0xFF000000) |\n ((x << 8) & 0x00FF0000) |\n ((x >> 8) & 0x0000FF00) |\n ((x >> 24) & 0x000000FF))",
"def next_n_bytes(packet, n):\n ret = packet[:n]\n remaining_packet = packet[n:]\n return bytes(ret), bytes(remaining_packet)",
"def sub_bytes(state, s_box=s_box):\n for i in range(4):\n for j in range(4):\n state[i][j] = s_box[state[i][j]]",
"def _transpose_by_2_vnchwconv(tik_inst, dst, src, sub_hw_size):\n\n # whether the sub_h_size is block align or not should be decided before transferring in\n sub_h_size, sub_w_size = sub_hw_size\n data_size_one_block = _get_elment_cnt_one_block(src.dtype)\n w_block_cnt = _ceil_div(sub_w_size, data_size_one_block)\n fp16_src = src.reinterpret_cast_to(\"float16\")\n fp16_dst = dst.reinterpret_cast_to(\"float16\")\n fp16_data_one_block = _get_elment_cnt_one_block(\"float16\")\n # vnchwconv get two bytes per time\n if src.dtype.lower() in (\"float32\", \"int32\", \"uint32\"):\n vnc_one_line_len = w_block_cnt * data_size_one_block * sub_h_size * 2\n elif src.dtype.lower() in (\"float16\", \"int16\", \"uint16\"):\n vnc_one_line_len = w_block_cnt * data_size_one_block * sub_h_size\n else:\n error_detail = \"not support the dtype\"\n error_manager_vector.raise_err_two_input_dtype_invalid(\"transpose_d\", \"in_dtype\",\n \"dst_dtype\", error_detail)\n\n # do 16hc to hc16 transfer\n src_addr_list = [fp16_src[vnc_one_line_len * i] for i in ADDR_IDX_LIST]\n dst_addr_list = [fp16_dst[fp16_data_one_block * i] for i in ADDR_IDX_LIST]\n repeat_cnt = _ceil_div(vnc_one_line_len, fp16_data_one_block)\n with tik_inst.new_stmt_scope():\n src_stride = tik_inst.Scalar(\"int64\")\n dst_stride = tik_inst.Scalar(\"int64\")\n with tik_inst.if_scope(repeat_cnt == 1):\n src_stride.set_as(0)\n dst_stride.set_as(0)\n with tik_inst.else_scope():\n src_stride.set_as(1)\n dst_stride.set_as(16)\n tik_inst.vnchwconv(False, False,\n dst_addr_list, src_addr_list,\n repeat_cnt, dst_stride, src_stride)\n\n # do hc16 to ch16 transfer\n with tik_inst.if_scope(sub_h_size > sub_w_size):\n with tik_inst.for_range(0, sub_w_size) as w_size_idx:\n tik_inst.data_move(\n fp16_src[w_size_idx * sub_h_size * fp16_data_one_block * 2],\n fp16_dst[w_size_idx * fp16_data_one_block * 2],\n 0, sub_h_size, 2, (w_block_cnt * data_size_one_block - 1) * 2, 0)\n with tik_inst.else_scope():\n with tik_inst.for_range(0, sub_h_size) as h_size_idx:\n tik_inst.data_move(\n fp16_src[h_size_idx * fp16_data_one_block * 2],\n fp16_dst[h_size_idx * w_block_cnt * data_size_one_block * fp16_data_one_block * 2],\n 0, sub_w_size, 2, 0, (sub_h_size - 1) * 2)\n\n # do ch16 to 16ch transfer\n src_addr_list = [fp16_src[fp16_data_one_block * i] for i in ADDR_IDX_LIST]\n dst_addr_list = [fp16_dst[vnc_one_line_len * i] for i in ADDR_IDX_LIST]\n repeat_cnt = _ceil_div(vnc_one_line_len, fp16_data_one_block)\n with tik_inst.new_stmt_scope():\n src_stride = tik_inst.Scalar(\"int64\")\n dst_stride = tik_inst.Scalar(\"int64\")\n with tik_inst.if_scope(repeat_cnt == 1):\n src_stride.set_as(0)\n dst_stride.set_as(0)\n with tik_inst.else_scope():\n src_stride.set_as(16)\n dst_stride.set_as(1)\n tik_inst.vnchwconv(False, False,\n dst_addr_list, src_addr_list,\n repeat_cnt, dst_stride, src_stride)",
"def swap_byte(byte_array, index):\n\n if byte_array[index] == 0:\n changed_byte_array = byte_array[0:index] + b\"\\xff\" + byte_array[index + 1 :]\n changed_byte_array = byte_array[0:index] + b\"\\x00\" + byte_array[index + 1 :]\n return changed_byte_array",
"def _decode_fixed_length(file_bytes, fields):\n # Setup a dictionary mapping a bit offset to each field. It is assumed\n # that the `fields` array contains entries for the secondary header.\n packet_nbytes = file_bytes[4] * 256 + file_bytes[5] + 7\n body_nbytes = sum(field._bit_length for field in fields) // 8\n counter = (packet_nbytes - body_nbytes) * 8\n \n bit_offset = {}\n\n for i, field in enumerate(fields):\n if i == 0 and field._bit_offset is not None:\n # case: using bit_offset to fix the start position\n bit_offset[field._name] = field._bit_offset\n counter = field._bit_offset + field._bit_length\n elif field._bit_offset is None:\n # case: floating start position such that packet def fills to\n # to end of packet. What's missing is assumed to be header at the beginning.\n bit_offset[field._name] = counter\n counter += field._bit_length\n elif field._bit_offset < counter:\n # case: bit_offset specifying to backtrack. This condition\n # seems odd and unlikely. Eg. one or more bits of a packet overlap?\n bit_offset[field._name] = field._bit_offset\n # don't update counter unless the the overlap goes past counter\n counter = max(field._bit_offset + field._bit_length, counter)\n elif field._bit_offset >= counter:\n # case: otherwise, bit_offset is ahead of counter and we're skipping \n # definition of 0 or more bits.\n bit_offset[field._name] = field._bit_offset\n counter = field._bit_offset + field._bit_length\n else:\n raise RuntimeError((\"Unexpected case: could not compare\"\n \" bit_offset {} with counter {} for field {}\"\n ).format(field._bit_offset, counter, field._name))\n\n if all(field._bit_offset is None for field in fields):\n assert counter == packet_nbytes * 8, \\\n 'Field definition != packet length'.format(n=counter-packet_nbytes*8)\n elif counter > packet_nbytes * 8:\n raise RuntimeError((\"Packet definition larger than packet length\"\n \" by {} bits\").format(counter-(packet_nbytes*8)))\n \n # Setup metadata for each field, consiting of where to look for the field in\n # the file and how to parse it.\n FieldMeta = namedtuple('Meta', ['nbytes_file', 'start_byte_file',\n 'nbytes_final', 'np_dtype'])\n field_meta = {}\n\n for field in fields:\n nbytes_file = np.ceil(field._bit_length/8.).astype(int)\n\n if (bit_offset[field._name] % 8 and\n bit_offset[field._name] % 8 + field._bit_length > 8):\n nbytes_file += 1\n\n nbytes_final = {3: 4, 5: 8, 6: 8, 7: 8}.get(nbytes_file, nbytes_file)\n start_byte_file = bit_offset[field._name] // 8\n\n # byte_order_symbol is only used to control float types here.\n # - uint and int byte order are handled with byteswap later\n # - fill is independent of byte order (all 1's)\n # - byte order is not applicable to str types\n byte_order_symbol = \"<\" if field._byte_order == \"little\" else \">\"\n np_dtype = {\n 'uint': '>u%d' % nbytes_final,\n 'int': '>i%d' % nbytes_final,\n 'fill': '>u%d' % nbytes_final,\n 'float': '%sf%d' % (byte_order_symbol, nbytes_final),\n 'str': 'S%d' % nbytes_final,\n }[field._data_type]\n \n field_meta[field] = FieldMeta(\n nbytes_file, start_byte_file, nbytes_final, np_dtype)\n\n # Read the file and calculate length of packet and number of packets in the\n # file. Trim extra bytes that may have occurred by a break in the downlink\n # while a packet was beign transferred.\n extra_bytes = file_bytes.size % packet_nbytes\n\n if extra_bytes > 0:\n file_bytes = file_bytes[:-extra_bytes]\n \n packet_count = file_bytes.size // packet_nbytes\n \n # Create byte arrays for each field. 
At the end of this method they are left\n # as the numpy uint8 type.\n field_bytes = {}\n\n for field in fields:\n meta = field_meta[field]\n arr = np.zeros(packet_count * meta.nbytes_final, 'u1')\n xbytes = meta.nbytes_final - meta.nbytes_file\n\n for i in range(xbytes, meta.nbytes_final):\n arr[i::meta.nbytes_final] = (\n file_bytes[meta.start_byte_file + i - xbytes::packet_nbytes]\n )\n field_bytes[field] = arr\n\n # Switch dtype of byte arrays to the final dtype, and apply masks and shifts\n # to interpret the correct bits.\n field_arrays = OrderedDict()\n\n for field in fields:\n meta = field_meta[field]\n arr = field_bytes[field]\n arr.dtype = meta.np_dtype\n\n if field._data_type in ('int', 'uint'):\n xbytes = meta.nbytes_final - meta.nbytes_file\n\n bitmask_left = (bit_offset[field._name]\n + 8 * xbytes\n - 8 * meta.start_byte_file)\n\n bitmask_right = (8 * meta.nbytes_final\n - bitmask_left\n - field._bit_length)\n \n bitmask_left, bitmask_right = (\n np.array([bitmask_left, bitmask_right]).astype(meta.np_dtype)\n )\n \n bitmask = np.zeros(arr.shape, meta.np_dtype)\n bitmask |= (1 << int(8 * meta.nbytes_final - bitmask_left)) - 1\n tmp = np.left_shift([1], bitmask_right)\n bitmask &= np.bitwise_not(tmp[0] - 1).astype(meta.np_dtype)\n \n arr &= bitmask\n arr >>= bitmask_right\n \n if field._byte_order == 'little':\n arr.byteswap(inplace=True)\n\n field_arrays[field._name] = arr\n\n return field_arrays",
"def byteswap(self, inplace=False):\n if inplace:\n if self.fragmented:\n (self[self._begin:].view(ndarray)).byteswap(inplace)\n (self[:self._end].view(ndarray)).byteswap(inplace)\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n (part.view(ndarray)).byteswap(inplace)\n\n return self.view(ndarray)\n else:\n out = empty_like(self)\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n out[:k] = (self[self._begin:].view(ndarray)).byteswap(inplace)\n out[k:] = (self[:self._end].view(ndarray)).byteswap(inplace)\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n out = (part.view(ndarray)).byteswap(inplace)\n\n return (out)",
"def Wang_fixA5_2(data, do_padding=True, endianness='little'):\n if do_padding:\n M = MD4_get_words(MD4_pad_data(data, endianness))\n else:\n if len(data) < 64:\n data += b'\\x00'*(64-len(data))\n M = MD4_get_words(data, endianness) \n \n A, B, C, D = MD4_get_IVs(M) \n \n #print(A[0:6],B[0:6],C[0:6],D[0:6],'\\n') \n \n A[5] = bitset(A[5], 18, bitget(C[4], 18))\n A[5] = bitset(A[5], 25, 1)\n A[5] = bitset(A[5], 26, 0)\n A[5] = bitset(A[5], 28, 1)\n A[5] = bitset(A[5], 31, 1)\n \n M[0] = (rrot_32(A[5], 3) - A[4] - G(B[4], C[4], D[4]) - MGK_1) % 2**32\n \n A[1] = lrot_32( (A[0] + F(B[0], C[0], D[0]) + M[0]) % 2**32, 3)\n \n # Modify M to be consistent with new A[5]...all message bytes prior to \n # calculation of A[5] contribute to A[5], and may need to be adjusted\n M[1] = rrot_32(D[1], 7) - D[0] - F(A[1], B[0], C[0]) % 2**32\n M[2] = rrot_32(C[1], 11) - C[0] - F(D[1], A[1], B[0]) % 2**32\n M[3] = rrot_32(B[1], 19) - B[0] - F(C[1], D[1], A[1]) % 2**32\n M[4] = rrot_32(A[2], 3) - A[1] - F(B[1], C[1], D[1]) % 2**32\n \n # Check constraints...a5;19 = c4;19, a5;26 = 1, a5;27 = 0, a5;29 = 1, \n # a5;32 = 1\n A,B,C,D = MD4_get_IVs(M)\n #print(A[0:6],B[0:6],C[0:6],D[0:6],'\\n')\n #assert(bitget(A[5], 18) == bitget(C[4], 18))\n #assert(bitget(A[5], 25) == 1)\n #assert(bitget(A[5], 26) == 0)\n #assert(bitget(A[5], 28) == 1) \n #assert(bitget(A[5], 31) == 1)\n \n return(MD4_get_data(M, endianness))",
"def _rank_diff_bytes(diff_bytes):\n mul = 1\n if diff_bytes < 0:\n mul = -1\n diff_bytes = -diff_bytes\n if diff_bytes < 2 * 1024:\n return 0\n if diff_bytes < 16 * 1024:\n return 1 * mul\n if diff_bytes < 128 * 1024:\n return 2 * mul\n return 3 * mul",
"def updateHeaderSizeFromDataLength( self ):\n self.updateHeaderSize( int( len( self.data ) * self.nNbrBitsPerSample / 8 ) )",
"def block2ns(data):\n data = bytearray(data)\n return (\n data[7] | data[6] << 8 | data[5] << 16 | data[4] << 24,\n data[3] | data[2] << 8 | data[1] << 16 | data[0] << 24,\n )",
"def _handle_ordered_packet(self, packet):\n pass",
"def _transpose_by_2_vnchwconv_not_last_dim(tik_inst, dst, src, sub_dim_size):\n\n # whether the sub_h_size is block align or not should be decided before transferring in\n sub_axis_1, sub_axis_0, axis_2 = sub_dim_size\n data_size_one_block = _get_elment_cnt_one_block(src.dtype)\n axis_2_block_cnt = _ceil_div(axis_2, data_size_one_block)\n fp16_src = src.reinterpret_cast_to(\"float16\")\n fp16_dst = dst.reinterpret_cast_to(\"float16\")\n fp16_data_one_block = _get_elment_cnt_one_block(\"float16\")\n # vnchwconv get two bytes per time\n if src.dtype.lower() in (\"float32\", \"int32\", \"uint32\"):\n vnc_one_line_len = axis_2_block_cnt * data_size_one_block * sub_axis_1 * sub_axis_0 * 2\n elif src.dtype.lower() in (\"float16\", \"int16\", \"uint16\"):\n vnc_one_line_len = axis_2_block_cnt * data_size_one_block * sub_axis_1 * sub_axis_0\n else:\n error_detail = \"not support the dtype\"\n error_manager_vector.raise_err_two_input_dtype_invalid(\"transpose_d\", \"in_dtype\",\n \"dst_dtype\", error_detail)\n\n # do 16hc to hc16 transfer\n src_addr_list = [fp16_src[vnc_one_line_len * i] for i in ADDR_IDX_LIST]\n dst_addr_list = [fp16_dst[fp16_data_one_block * i] for i in ADDR_IDX_LIST]\n repeat_cnt = _ceil_div(vnc_one_line_len, fp16_data_one_block)\n with tik_inst.new_stmt_scope():\n src_stride = tik_inst.Scalar(\"int64\")\n dst_stride = tik_inst.Scalar(\"int64\")\n with tik_inst.if_scope(repeat_cnt == 1):\n src_stride.set_as(0)\n dst_stride.set_as(0)\n with tik_inst.else_scope():\n src_stride.set_as(1)\n dst_stride.set_as(16)\n tik_inst.vnchwconv(False, False,\n dst_addr_list, src_addr_list,\n repeat_cnt, dst_stride, src_stride)\n\n # do sub_axis_1*sub_axis_0*16 to sub_axis_1*sub_axis_0*axis_2 transfer\n with tik_inst.for_range(0, sub_axis_1) as sub_axis_1_idx:\n tik_inst.data_move(\n fp16_src[sub_axis_1_idx * sub_axis_0 * axis_2 * fp16_data_one_block * 2],\n fp16_dst[sub_axis_1_idx * sub_axis_0 * fp16_data_one_block * fp16_data_one_block],\n 0, sub_axis_0, 2 * axis_2, fp16_data_one_block - 2 * axis_2, 0)\n\n # do ch16 to 16ch transfer\n src_addr_list = [fp16_src[fp16_data_one_block * i] for i in ADDR_IDX_LIST]\n dst_addr_list = [fp16_dst[vnc_one_line_len * i] for i in ADDR_IDX_LIST]\n repeat_cnt = _ceil_div(vnc_one_line_len, fp16_data_one_block)\n with tik_inst.new_stmt_scope():\n src_stride = tik_inst.Scalar(\"int64\")\n dst_stride = tik_inst.Scalar(\"int64\")\n with tik_inst.if_scope(repeat_cnt == 1):\n src_stride.set_as(0)\n dst_stride.set_as(0)\n with tik_inst.else_scope():\n src_stride.set_as(16)\n dst_stride.set_as(1)\n tik_inst.vnchwconv(False, False,\n dst_addr_list, src_addr_list,\n repeat_cnt, dst_stride, src_stride)",
"def _unpack_with_byteorder(self, fmt, data):\n return struct.unpack(self.struct_byteorder + fmt, data)",
"def fix(d):\n line=d\n n=2\n return ''.join([line[i:i+n]*2 for i in range(0, len(line), n)])\n shorts = struct.unpack('<' + 'h' * (len(d)/2), d)\n dbl = reduce(lambda x,y: x+y, zip(shorts, shorts))\n return struct.pack('<' + 'h' * len(d), *dbl)",
"def swap_buffers(self):\n raise NotImplementedError()",
"def endianness(self):",
"def spoof_packet(packet):",
"def read_packet(self):\n\n\t\t#self.debug(\"READ BUFFER SIZE: %d\" % len(self.buff))\n\t\tbackup = self.buff[:]\n\t\tpacket = Packet()\n\t\ttry:\n\t\t\tpacket.direction = self.node\n\t\t\tpacket.ident = self.unpack('ubyte')\n\t\t\t\n\t\t\t#Defined structs from huge dict\n\t\t\tfor datatype, name in self.get_struct(packet):\n\t\t\t\t# this populates packet.data with {name: value}\n\t\t\t\tpacket.data[name] = self.unpack(datatype)\n\n\t\t\t# I believe the following are packet-type specific fixes for variable-length packets.\n\n\t\t\t#0x17\n\t\t\tif packet.ident == 0x17:\n\t\t\t\tif packet.data['unknown'] > 0:\n\t\t\t\t\tpacket.data['x2'] = self.unpack('short')\n\t\t\t\t\tpacket.data['y2'] = self.unpack('short')\n\t\t\t\t\tpacket.data['z2'] = self.unpack('short')\n\t\t\n\t\t\t#0x33\n\t\t\tif packet.ident in (0x33, 0x34):\n\t\t\t\tpacket.data['data'] = self.unpack_array_fast('byte', packet.data['data_size'])\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n#\t\t\t#0x34\n#\t\t\tif packet.ident == 0x34:\n#\t\t\t\tcoords = self.unpack_array_fast('short', packet.data['data_size'])\n#\t\t\t\tbtype = self.unpack_array_fast('byte', packet.data['data_size'])\n#\t\t\t\tmetadata = self.unpack_array_fast('byte', packet.data['data_size'])\n#\t\t\t\tpacket.data[\"blocks\"] = []\n#\t\t\t\tfor i in zip(coords, btype, metadata):\n#\t\t\t\t\tblock = {}\n#\t\t\t\t\tblock[\"x\"] =\t\ti[0] >> 12\n#\t\t\t\t\tblock[\"z\"] = 0x0F & i[0] >> 8\n#\t\t\t\t\tblock[\"y\"] = 0xFF & i[0]\n#\t\t\t\t\tblock[\"type\"] = i[1]\n#\t\t\t\t\tblock[\"metadata\"] = i[2]\n#\t\t\t\t\tpacket.data[\"blocks\"].append(block)\n#\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n\t\t\t#0x3C\n\t\t\tif packet.ident == 0x3C:\n\t\t\t\trecords = self.unpack_array_fast('byte', packet.data['data_size']*3)\n\t\t\t\ti = 0\n\t\t\t\tpacket.data[\"blocks\"] = []\n\t\t\t\twhile i < packet.data['data_size']*3:\n\t\t\t\t\tpacket.data[\"blocks\"].append(dict(zip(('x','y','z'), records[i:i+3])))\n\t\t\t\t\ti+=3\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n\t\t\t#0x68\n\t\t\tif packet.ident == 0x68:\n\t\t\t\tpacket.data[\"slots_data\"] = self.unpack_array('slot', packet.data[\"data_size\"])\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\t#0x82:\n\t\t\tif packet.ident == 0x82:\n\t\t\t\tpacket.data[\"text\"] = []\n\t\t\t\tfor i in range(4):\n\t\t\t\t\tpacket.data[\"text\"].append(packet.data[\"line_%s\" % (i+1)])\n\t\t\t\t\t\n\t\t\t#0x83\n\t\t\tif packet.ident == 0x83:\n\t\t\t\tpacket.data[\"data\"] = self.unpack_array_fast('byte', packet.data['data_size'])\n\t\t\t\tdel packet.data[\"data_size\"]\n\n\t\t\t# Sets packet.original to the byte string that the packet was decoded from.\n\t\t\tpacket.original = backup[:len(backup) - len(self.buff)]\n\n\t\t\treturn packet\n\n\t\texcept IncompleteData:\n\t\t\tself.buff = backup\n\t\t\treturn None\n\t\texcept Exception, ex:\n\t\t\tself.buff = backup\n\t\t\tex.args += (self.buff[20:],)\n\t\t\traise",
"def intPackBytes(n, length, endian='big'):\r\n \r\n if length == 0:\r\n return ''\r\n h = '%x' % n\r\n # There must be a better way to do this\r\n s = unhexlify(str.zfill(('0'*(len(h) % 2) + h), length*2))\r\n if endian == 'big':\r\n return s\r\n else:\r\n #return s[::-1]\r\n \r\n return int.to_bytes(n, length, 'little')\r\n return n"
] | [
"0.6286982",
"0.6104455",
"0.57370377",
"0.5551212",
"0.5419383",
"0.541504",
"0.5307678",
"0.524828",
"0.5197568",
"0.51582634",
"0.5000781",
"0.49925607",
"0.49657536",
"0.49612567",
"0.49490094",
"0.49409774",
"0.49387702",
"0.48654178",
"0.48503458",
"0.48486102",
"0.484535",
"0.48414186",
"0.48406512",
"0.4810221",
"0.48075783",
"0.4803706",
"0.48024604",
"0.4760599",
"0.4738832",
"0.47316378"
] | 0.62880516 | 0 |
returns true if response is HTML | def is_good_response(self, resp):
content_type = resp.headers['Content-Type'].lower()
return (resp.status_code == 200 and content_type is not None and content_type.find('html') > -1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_good_response(self, resp):\r\n\t\tcontent_type = resp.headers['Content-Type'].lower()\r\n\t\treturn (resp.status_code == 200 and content_type is not None and content_type.find('html') > -1)",
"def is_html(self):\r\n return self.__content_type == html_ctype",
"def is_html(self):\n return self.__content_type == html_ctype",
"def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 and content_type is not None\n and content_type.find('html') > -1)",
"def isGoodResponse(resp):\r\n content_type = resp.headers['Content-Type'].lower()\r\n return (resp.status_code == 200\r\n and content_type is not None\r\n and content_type.find('html') > -1)",
"def is_good_response(resp) -> bool:\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)",
"def is_good_response(resp):\r\n content_type = resp.headers['Content-Type'].lower()\r\n return (resp.status_code == 200 \r\n and content_type is not None \r\n and content_type.find('html') > -1)",
"def is_good_response(resp):\n\tcontent_type = resp.headers['Content-Type'].lower()\n\treturn (resp.status_code == 200 \n\t\tand content_type is not None \n\t\tand content_type.find('html') > -1)",
"def is_good_response(resp):\n\tcontent_type = resp.headers['Content-Type'].lower()\n\treturn (resp.status_code == 200 and content_type is not None and content_type.find('html') > -1)",
"def is_good_response(resp):\r\n content_type = resp.headers['Content-Type'].lower()\r\n return (resp.status_code == 200\r\n and content_type is not None\r\n and content_type.find('html') > -1)",
"def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 \n and content_type is not None \n and content_type.find('html') > -1)",
"def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 \n and content_type is not None \n and content_type.find('html') > -1)",
"def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 \n and content_type is not None \n and content_type.find('html') > -1)",
"def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 \n and content_type is not None \n and content_type.find('html') > -1)",
"def is_good_response(res):\n content_type = res.headers['Content-Type'].lower()\n return (res.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)",
"def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)",
"def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)",
"def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)",
"def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)",
"def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)",
"def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)",
"def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)",
"def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)",
"def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)",
"def __is_html(self, response_headers):\n\n if 'Content-Type' in result.headers:\n content_type_data = Compare([\"text/html\"])\n match_result = content_type_data.eval(response_headers['Content-Type'])\n\n if match_result:\n similarity = float(match_result[0]['similarity'].strip(\"%\"))\n\n if similarity >= 50:\n return True\n \n return False",
"def is_good_response(resp):\n content_type = resp.headers[\"Content-Type\"].lower()\n return (\n resp.status_code == 200\n and content_type is not None\n and content_type.find(\"html\") > -1\n )",
"def is_html(self):\n return self._tag == 'html'",
"def is_html(self):\n return self._tag == 'html'",
"def isHTML(content):\n\n return '<html' in content or 'html>' in content",
"def IsHtml(data):\n # Remove banners and XML header. Convert to lower case for easy search.\n data = ''.join(data.split('\\n')).lower()\n pattern = re.compile('<html>.*?<body.*?>.*?</body>.*?</html>')\n if pattern.findall(data):\n return True\n else:\n return False"
] | [
"0.7978629",
"0.7896287",
"0.7848669",
"0.7823706",
"0.78176934",
"0.77728",
"0.7765719",
"0.77475446",
"0.7746658",
"0.77149415",
"0.76882756",
"0.76882756",
"0.76882756",
"0.76882756",
"0.7683412",
"0.7676108",
"0.7676108",
"0.7676108",
"0.7676108",
"0.7676108",
"0.7676108",
"0.7676108",
"0.7676108",
"0.7676108",
"0.76564074",
"0.76201236",
"0.7530529",
"0.7530529",
"0.73324645",
"0.7259585"
] | 0.80010843 | 0 |
Returns the Guide data used by the Rig Component to define the layout of the final rig. | def getRigBuildData(self):
data = super(SimpleControlComponentGuide, self).getRigBuildData()
data["ctrlSize"] = self.ctrlSizeInputAttr.getValue()
data["ctrlXfo"] = self.mainCtrl.xfo
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def saveData(self):\n data = super(SimpleControlComponentGuide, self).saveData()\n\n data[\"ctrlSize\"] = self.ctrlSizeInputAttr.getValue()\n data[\"ctrlXfo\"] = self.mainCtrl.xfo\n\n return data",
"def saveData(self):\n\n data = super(OSSMouthGuide, self).saveData()\n\n # this should live in the GuideClass - also should considere Inherited Types\n data = self.saveAllObjectData(data, \"Control\")\n data = self.saveAllObjectData(data, \"Transform\")\n\n return data",
"def getRigBuildData(self):\n\n # Values\n mouthPosition = self.jawCtrl.xfo.tr\n jawEndPosition = self.jawEndCtrl.xfo.tr\n mouthLen = mouthPosition.subtract(jawEndPosition).length()\n\n # Calculate Mouth Xfo\n\n # atVector\n # mouthUpV = Vec3(0.0, 1.0, 0.0)\n\n # rootToEnd = jawEndPosition.subtract(mouthPosition).unit()\n # rootToUpV = mouthUpV.subtract(mouthPosition).unit()\n # bone1ZAxis = rootToUpV.cross(rootToEnd).unit()\n # bone1Normal = bone1ZAxis.cross(rootToEnd).unit()\n\n jawXfo = self.jawEndCtrl.xfo\n # jawXfo.setFromVectors(rootToEnd, bone1Normal, bone1ZAxis, mouthPosition)\n\n\n\n data = super(OSSMouthGuide, self).getRigBuildData()\n\n # should include getCurveData\n data = self.saveAllObjectData(data, \"Control\")\n data = self.saveAllObjectData(data, \"Transform\")\n data['jawXfo'] = self.jawCtrl.xfo\n data['mouthLen'] = mouthLen\n return data",
"def getComponentType(cls):\n\n return 'Guide'",
"def getComponentType(cls):\n\n return 'Guide'",
"def get_guides(data):\n return data.groups[\"Frames\"].objects",
"def get_description(self):\n d = {}\n d[\"type\"] = \"DirectHomogeneousQuadrature\"\n d[\"qr\"] = self._QR.get_description()\n return d",
"def build_guide(self, **kwargs):\n\n # This builds your guide master and updates your options\n self.create_guide_master(**kwargs)\n\n prefix = self.prefix\n options = self.options\n mirror_value = self.mirror_value\n\n number_mid_ctrl = options.get('numberMidCtrls')\n num_joints = options.get('numberJoints')\n create_jaw = options.get('createJaw')\n create_skull = options.get('createReverseJaw')\n surface = options.get('createSurfaceDriver')\n create_fk_ctrls = options.get('createFKShaperCtrls')\n\n noxform_grp = self.guide_master + '_NOX'\n\n if mc.objExists ('drivenNeck_chest_Mid_bind'):\n mc.delete ('drivenNeck_chest_Mid_bind')\n\n\n pp = env.get_parts_paths()[-1]\n branch = r'BidepAutoRig\\part_joints\\neck_skel.mb'\n import_path = pp.replace('partsLibrary', branch)\n mc.file(import_path, i=1)\n\n if mc.objExists ('snap_chest_Mid_jnt'):\n mc.delete (mc.parentConstraint ('snap_chest_Mid_bind', 'drivenNeck_chest_Mid_bind'))\n\n\n snaps=[u'head_Mid_bind', u'headEnd_Mid_jnt', u'eye_Lt_bind', u'eye_Rt_bind', u'headTop_Mid_bind',\n u'headRear_Mid_bind', u'headSide_Lt_bind', u'headSide_Rt_bind', u'neck01_Mid_bind', u'neck02_Mid_bind',\n u'neck03_Mid_bind', u'neckEnd_Mid_jnt']\n\n for snap in snaps:\n target='snap_'+snap\n if mc.objExists (target):\n mc.delete (mc.parentConstraint (target, snap))\n\n\n\n\n # This finalizes your guide.\n self.finalize_guide()\n jnts_grp = self.guide_master + '_JNTS'\n mc.parent ('drivenNeck_chest_Mid_bind', jnts_grp)\n\n self.finalize_guide()",
"def get_guide(self,annular_fail=True,as_xml=True,print_xml=False):\n\n\n self.set_geometry()\n self.retrieve_guidecats()\n guides = self.select_target()\n if (type(guides) == type(None)) and (annular_fail == True):\n print 'No guide(s) found at fixed position - performing annular search'\n guides = self.annular_search()\n\n if type(guides) == type(None):\n print 'No guide star(s) found...'\n return None\n \n if as_xml:\n if self.lifu:\n return self.to_xml(guides)\n else:\n xmls = [self.to_xml(guide) for guide in guides]\n if print_xml:\n for x in xmls:\n print x.toxml()\n\n return xmls\n else:\n return guides",
"def _generate_layout(self):\n\n pass",
"def structure(self):\n return self.cluster_subspace.structure",
"def init_guides(Nx=120, Ny=120, Npml=20, space=10, wg_width=12, space_slice=8, wg_shift=9):\r\n bg_rho = np.zeros((Nx, Ny))\r\n \r\n # Input waveguide\r\n bg_rho[Nx//2-wg_width//2:Ny//2+wg_width//2,0:int(Npml+space)] = 1\r\n # Data waveguide 1\r\n bg_rho[0:int(Npml+space),Npml+space+wg_shift:Npml+space+wg_width+wg_shift] = 1\r\n # Data waveguide 2\r\n bg_rho[0:int(Npml+space),Ny-Npml-space-wg_width-wg_shift:Ny-Npml-space-wg_shift] = 1\r\n \r\n # Input probe slice\r\n input_slice = Slice(x=np.arange(Nx//2-wg_width//2-space_slice, Nx//2+wg_width//2+space_slice),\r\n y=np.array(Npml+1))\r\n # Data probe slice 1\r\n data_slice1 = Slice(x=np.array(Npml+1),\r\n y=np.arange(Npml+space-space_slice+wg_shift, Npml+space+wg_width+space_slice+wg_shift))\r\n # Data probe slice 2\r\n data_slice2 = Slice(x=np.array(Npml+1),\r\n y=np.arange(Ny-Npml-space-wg_width-wg_shift-space_slice, Ny-Npml-space-wg_shift+space_slice))\r\n input_slices = [input_slice, data_slice1, data_slice2]\r\n \r\n # Output waveguide\r\n bg_rho[int(Nx-Npml-space)::,Ny//2-wg_width//2:Ny//2+wg_width//2] = 1\r\n # Ground waveguide\r\n bg_rho[Nx//2-wg_width//2:Nx//2+wg_width//2,int(Ny-Npml-space)::] = 1\r\n \r\n # Output probe slice\r\n output_slice = Slice(x=np.array(Nx-Npml-1),\r\n y=np.arange(Ny//2-wg_width//2-space_slice, Ny//2+wg_width//2+space_slice))\r\n #Ground probe slice\r\n ground_slice = Slice(x=np.arange(Ny//2-wg_width//2-space_slice, Ny//2+wg_width//2+space_slice),\r\n y=np.array(Ny-Npml-1))\r\n output_slices = [output_slice, ground_slice]\r\n \r\n return bg_rho, input_slices, output_slices",
"def data_dict(self) -> dict:\n return self.design.renderers.gds.options",
"def get_comp_spanrels(self):",
"def retrieve_graph(self):\n\n g = self.g\n\n if 'grid' in g['name']:\n my_layout = g.layout(\"grid\")\n else:\n my_layout = g.layout(\"kk\")\n\n return g, my_layout",
"def design(self):\n return self[self.design_columns]",
"def design(self):\n return self[self.design_columns]",
"def constraintData(self):\n pass",
"def get_info(self):\n pattern = \"{}-{}-{}\".format(*self.diagram).replace(\"/\", \"|\")\n info = \"\"\n info += \"name: triangle group {}\\n\".format(pattern)\n info += \"cox_mat: {}\\n\".format(self.cox_mat)\n info += \"vertices: {}\\n\".format(self.num_vertices)\n info += \"edges: {}\\n\".format(self.num_edges)\n info += \"faces: {}\\n\".format(self.num_faces)\n info += \"states in the automaton: {}\\n\".format(self.G.dfa.num_states)\n info += \"reflection table:\\n{}\\n\".format(self.G.reftable)\n info += \"the automaton is saved as {}_dfa.png\".format(pattern)\n self.G.dfa.draw(pattern + \"_dfa.png\")\n return info",
"def build_guide(self, **kwargs):\n\n # This builds your guide master and updates your options\n self.create_guide_master(**kwargs)\n\n prefix = self.prefix\n options = self.options\n mirror_value = self.mirror_value\n\n num_joints = options.get('numberJoints')\n single_joint = options.get('singleJoint')\n pickWalk_parent = options.get('pickWalkParent')\n\n num_joints += 1\n if single_joint:\n num_joints = 1\n\n # Builde joints\n if single_joint:\n jnt_zero, plc, jnt = self.guide_joint(constraint_type='parent')\n zero, ctrl = self.guide_ctrl(shape='circle', color='light_blue', driver=jnt, axis='X')\n ctrls = [ctrl]\n zeros = [zero]\n\n else:\n jnt_zeros, plcs, jnts = self.guide_joint_chain('', num_joints=num_joints)\n zeros, ctrls = [], []\n for i, jnt in enumerate(jnts[:-1]):\n letter = utils.letters[i]\n zero, ctrl = self.guide_ctrl(name=letter, shape='circle',\n color='light_blue', driver=jnt, axis='X')\n zeros.append(zero)\n ctrls.append(ctrl)\n\n mc.xform(zeros, jnt_zeros, r=1, t=[-1*self.mirror_value, 0, 0])\n\n # lock stuff\n pivots = [mc.listRelatives(c, p=1)[0] for c in ctrls]\n utils.set_attrs(zeros, l=1, k=0)\n utils.set_attrs(pivots, 't s', l=1, k=0)\n\n mc.setAttr(self.guide_master+'.offsetTranslateX', -0.5*self.mirror_value)\n\n # This finalizes your guide.\n self.finalize_guide()",
"def layout(self):\n pass",
"def get_bnd_info(self):\n nbor = self.nbor\n lihbor, liubor, livbor, _, _, _, _, \\\n litbor, _, _, _, _ = self.bnd_info\n\n return (nbor, lihbor, liubor, livbor, litbor)",
"def serialize(self) -> dict:\n output = super().serialize()\n output[\"start\"] = self.start\n output[\"stride\"] = self.stride\n output[\"part_size\"] = self.part_size\n output[\"end\"] = self.end\n output[\"axis\"] = self.axis\n return output",
"def serialize(self) -> dict:\n output = super().serialize()\n output[\"start\"] = self.start\n output[\"stride\"] = self.stride\n output[\"part_size\"] = self.part_size\n output[\"end\"] = self.end\n output[\"axis\"] = self.axis\n return output",
"def get_layout(self):\n return self._layout",
"def _band_geometry(self, widget, guide):\n Guide = QGuideRose.Guide\n if guide == Guide.NoGuide:\n return QRect()\n\n # border hits\n border_size = self.border_size\n rect = widget.contentsRect()\n if guide == Guide.BorderNorth:\n rect.setHeight(border_size)\n elif guide == Guide.BorderEast:\n rect.setLeft(rect.right() + 1 - border_size)\n elif guide == Guide.BorderSouth:\n rect.setTop(rect.bottom() + 1 - border_size)\n elif guide == Guide.BorderWest:\n rect.setWidth(border_size)\n # For the next 4 conditions `widget` will be a QDockArea\n elif guide == Guide.BorderExNorth:\n bar_rect = widget.dockBarGeometry(QDockBar.North)\n if bar_rect.isValid():\n rect = bar_rect\n else:\n rect.setHeight(border_size / 2)\n elif guide == Guide.BorderExEast:\n bar_rect = widget.dockBarGeometry(QDockBar.East)\n if bar_rect.isValid():\n rect = bar_rect\n else:\n rect.setLeft(rect.right() + 1 - border_size / 2)\n elif guide == Guide.BorderExSouth:\n bar_rect = widget.dockBarGeometry(QDockBar.South)\n if bar_rect.isValid():\n rect = bar_rect\n else:\n rect.setTop(rect.bottom() + 1 - border_size / 2)\n elif guide == Guide.BorderExWest:\n bar_rect = widget.dockBarGeometry(QDockBar.West)\n if bar_rect.isValid():\n rect = bar_rect\n else:\n rect.setWidth(border_size / 2)\n\n # compass hits\n elif guide == Guide.CompassNorth:\n rect.setHeight(rect.height() / 3)\n elif guide == Guide.CompassEast:\n rect.setLeft(2 * rect.width() / 3)\n elif guide == Guide.CompassSouth:\n rect.setTop(2 * rect.height() / 3)\n elif guide == Guide.CompassWest:\n rect.setWidth(rect.width() / 3)\n elif guide == Guide.CompassCenter:\n pass # nothing to do\n elif guide == Guide.CompassExNorth:\n pass # nothing to do\n elif guide == Guide.CompassExEast:\n pass # nothing to do\n elif guide == Guide.CompassExSouth:\n pass # nothing to do\n elif guide == Guide.CompassExWest:\n pass # nothing to do\n\n # splitter handle hits\n elif guide == Guide.SplitHorizontal:\n wo, r = divmod(border_size - rect.width(), 2)\n rect.setWidth(2 * (wo + r) + rect.width())\n rect.moveLeft(rect.x() - (wo + r))\n elif guide == Guide.SplitVertical:\n ho, r = divmod(border_size - widget.height(), 2)\n rect.setHeight(2 * (ho + r) + rect.height())\n rect.moveTop(rect.y() - (ho + r))\n\n # single center\n elif guide == Guide.AreaCenter:\n pass # nothing to do\n\n # default no-op\n else:\n return QRect()\n\n pt = widget.mapToGlobal(rect.topLeft())\n return QRect(pt, rect.size())",
"def get_data(self):\n\n raise NotImplementedError('''\n Must Implement get_data. Call help() for details.\n ''')",
"def generate_guide_mesh():\n verts = [[0.0, 0.0, 0.0], [-0.01, -0.01, 0.1], [-0.01, 0.01, 0.1], [0.01, -0.01, 0.1], [0.01, 0.01, 0.1], [-0.03, -0.03, 0.1], [-0.03, 0.03, 0.1], [0.03, 0.03, 0.1], [0.03, -0.03, 0.1], [-0.01, -0.01, 0.2], [-0.01, 0.01, 0.2], [0.01, -0.01, 0.2], [0.01, 0.01, 0.2]]\n edges = [[0, 5], [5, 6], [6, 7], [7, 8], [8, 5], [1, 2], [2, 4], [4, 3], [3, 1], [5, 1], [2, 6], [4, 7], [3, 8], [9, 10], [10, 12], [12, 11], [11, 9], [3, 11], [9, 1], [2, 10], [12, 4], [6, 0], [7, 0], [8, 0]]\n faces = [[0, 5, 6], [0, 6, 7], [0, 7, 8], [0, 8, 5], [1, 3, 11, 9], [1, 2, 6, 5], [2, 4, 7, 6], [4, 3, 8, 7], [3, 1, 5, 8], [12, 10, 9, 11], [4, 2, 10, 12], [3, 4, 12, 11], [2, 1, 9, 10]]\n name = 'ModelingClothPinGuide'\n if 'ModelingClothPinGuide' in bpy.data.objects:\n mesh_ob = bpy.data.objects['ModelingClothPinGuide']\n else: \n mesh = bpy.data.meshes.new('ModelingClothPinGuide')\n mesh.from_pydata(verts, edges, faces) \n mesh.update()\n mesh_ob = bpy.data.objects.new(name, mesh)\n bpy.context.scene.objects.link(mesh_ob)\n mesh_ob.show_x_ray = True\n return mesh_ob",
"def layout(self) -> str:\n return self._layout",
"def build_guide(self, **kwargs):\n\n # This builds your guide master and updates your options\n self.create_guide_master(**kwargs)\n\n prefix = self.prefix # Naming prefix. Use this for every new node you create and there should be no name clashes.\n options = self.options # Build options\n mirror_value = self.mirror_value # 1.0 for left and center sided parts and -1.0 for right sided part.\n\n mc.setAttr(self.guide_master+'.offsetTranslateY', -0.2)\n\n l_prefix = prefix.replace('C','L', 1)\n r_prefix = prefix.replace('C','R', 1)\n mirror_values = [1, -1]\n enable_steering = options.get('enableSteering')\n\n colors = ['green', 'red']\n\n for mi, prefix in enumerate([l_prefix, r_prefix]):\n\n mirror_value = mirror_values[mi]\n color = colors[mi]\n\n l_main_zero, l_main_plc = self.guide_joint('main', alt_prefix=prefix, placer_only=1)\n\n # create hub\n hub_zero, hub_plc, hub_jnt = self.guide_joint('wheelhub', alt_prefix=prefix, constraint_type='point')\n hub_end_zero, hub_end_plc, hub_end_jnt = self.guide_joint('wheelhub_end', alt_prefix=prefix, constraint_type='point')\n\n mc.xform(hub_end_zero, r=1, t=[1,0,0])\n mc.parent(hub_end_jnt, hub_jnt)\n mc.aimConstraint(hub_end_plc, hub_jnt, aim=[mirror_value,0,0], u=[0,1,0], wu=[0,1,0], wut='vector')\n mc.parentConstraint(hub_plc, hub_end_zero , mo=1)\n\n # Create steering arm\n steer_zero, steer_plc, steer_jnt = self.guide_joint('steeringArm', alt_prefix=prefix, constraint_type='parent')\n mc.xform(steer_zero, r=1, t=[-1,0,0])\n mc.parent(hub_jnt, steer_jnt)\n\n # Create shocks\n shock_a_zero, shock_a_plc, shock_a_jnt = self.guide_joint('shock_A', alt_prefix=prefix, constraint_type='point')\n shock_b_zero, shock_b_plc, shock_b_jnt = self.guide_joint('shock_B', alt_prefix=prefix, constraint_type='point')\n\n mc.xform(shock_a_zero, ws=1, t=[-2,2,0])\n mc.xform(shock_b_zero, ws=1, t=[-0.5,0.25,0])\n\n mc.parent(shock_b_jnt, shock_a_jnt)\n\n mc.aimConstraint(shock_b_plc, shock_a_jnt, aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,1], wut='vector')\n mc.aimConstraint(shock_a_plc, shock_b_jnt, aim=[-mirror_value,0,0], u=[0,0,1], wu=[0,0,1], wut='vector')\n\n # upper arm\n up_arm_zero, up_arm_plc, up_arm_jnt = self.guide_joint('upperArm', alt_prefix=prefix, constraint_type='point')\n up_arm_end_zero, up_arm_end_plc, up_arm_end_jnt = self.guide_joint('upperArm_end', alt_prefix=prefix, constraint_type='point')\n\n mc.xform(up_arm_end_zero, r=1, t=[-3.5,1,0])\n mc.xform(up_arm_zero, r=1, t=[-1,0.5,0])\n mc.parent(up_arm_end_jnt, up_arm_jnt)\n mc.aimConstraint(up_arm_end_plc, up_arm_jnt, aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,mirror_value], wut='objectRotation', wuo=up_arm_plc)\n\n # lower arm\n lo_arm_zero, lo_arm_plc, lo_arm_jnt = self.guide_joint('lowerArm', alt_prefix=prefix, constraint_type='point')\n lo_arm_end_zero, lo_arm_end_plc, lo_arm_end_jnt = self.guide_joint('lowerArm_end', alt_prefix=prefix, constraint_type='point')\n\n mc.xform(lo_arm_end_zero, r=1, t=[-4,-0.5,0])\n mc.xform(lo_arm_zero, r=1, t=[-1,-0.5,0])\n mc.parent(lo_arm_end_jnt, lo_arm_jnt)\n mc.aimConstraint(lo_arm_end_plc, lo_arm_jnt, aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,mirror_value], wut='objectRotation', wuo=lo_arm_plc)\n\n # steeringArm\n if enable_steering:\n steeringArm_a_zero, steeringArm_a_plc, steeringArm_a_jnt = self.guide_joint('steeringArm_A', alt_prefix=prefix, constraint_type='point')\n steeringArm_b_zero, steeringArm_b_plc, steeringArm_b_jnt = self.guide_joint('steeringArm_B', alt_prefix=prefix, constraint_type='point')\n\n mc.xform(steeringArm_b_zero, r=1, 
t=[-1.5,0,1])\n mc.xform(steeringArm_a_zero, r=1, t=[-4,0,1])\n\n mc.parent(steeringArm_b_jnt, steeringArm_a_jnt)\n mc.aimConstraint(steeringArm_b_plc, steeringArm_a_jnt, aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,1], wut='vector')\n\n # Create control\n zero, ctrl = self.guide_ctrl('wheel', alt_prefix=prefix, driver=hub_end_jnt, color=color, shape='circle', axis='X', scale=[3]*3, create_pivot=0)\n mc.setAttr(ctrl+'.numOffsetCtrls', 1)\n mc.addAttr(ctrl+'.numOffsetCtrls', e=1, min=1)\n mc.xform(ctrl.replace('_CTL','_A_OFF_CTL.cv[*]'), r=1, s=[0.8]*3)\n\n control.create_shape('wheel', ctrl, axis='X', scale=[3]*3)\n\n #suspension_zero, suspension_ctrl = self.guide_ctrl('suspension', create_pivot=0, driver=shock_a_jnt, axis='X', shape='pyramid', color=color, scale=[1.5,1,1], alt_prefix=prefix)\n ground_zero, ground_ctrl = self.guide_ctrl('ground', create_pivot=0, shape='square', color='grass', alt_prefix=prefix)\n mc.delete(mc.pointConstraint(hub_jnt, ground_zero))\n\n # constraint to placer\n childs = [prefix+'_wheelhub_JNT_PLC_ZERO',\n prefix+'_steeringArm_JNT_PLC_ZERO',\n prefix+'_shock_A_JNT_PLC_ZERO',\n prefix+'_shock_B_JNT_PLC_ZERO',\n prefix+'_upperArm_JNT_PLC_ZERO',\n prefix+'_upperArm_end_JNT_PLC_ZERO',\n prefix+'_lowerArm_JNT_PLC_ZERO',\n prefix+'_lowerArm_end_JNT_PLC_ZERO']\n\n for c in childs:\n mc.parentConstraint(l_main_plc, c, mo=1)\n\n mc.setAttr(l_main_plc+'.offsetTranslateY', mirror_value*0.5)\n\n # ################3\n # Place it all\n hub_pos = mc.ls(options.get('hubCenter') or '')\n if hub_pos:\n loc = utils.snap_locator(hub_pos)\n mc.delete(mc.pointConstraint(loc, self.guide_master))\n mc.setAttr(self.guide_master+'.tx', 0)\n mc.delete(mc.pointConstraint(loc, l_main_plc), loc)\n\n hub_end_pos = mc.ls(options.get('hubEndCenter') or '')\n if hub_end_pos:\n loc = utils.snap_locator(hub_end_pos)\n mc.delete(mc.pointConstraint(loc, hub_end_plc), loc)\n\n else:\n mc.xform(self.guide_master, ws=1, t=[0,2,10])\n mc.xform(l_main_plc, r=1, t=[mirror_value*6,0,0])\n\n mc.setAttr(self.guide_master+'.jointAxisVis', 1)\n\n l = utils.snap_locator(hub_jnt)\n mc.setAttr(l+'.ty', 0)\n mc.delete(mc.pointConstraint(l, ground_zero), l)\n\n chassis_plc_zero, chassis_plc = self.guide_joint('chassis_driver', placer_only=1)\n mc.setAttr(chassis_plc+'.radius', 1)\n mc.setAttr(chassis_plc+'.color', 0.96, 0.71, .01)\n mc.setAttr(chassis_plc+'.otherType', 'Leg IK Driver', type='string');\n mc.setAttr(chassis_plc+'.type', 18)\n\n mc.pointConstraint(l_prefix+'_lowerArm_end_JNT_PLC', r_prefix+'_lowerArm_end_JNT_PLC', chassis_plc_zero)\n utils.set_attrs(chassis_plc, l=1, k=0)\n\n # This finalizes your guide.\n self.finalize_guide()\n self.mirror_guide()"
] | [
"0.60837376",
"0.57267016",
"0.5652258",
"0.55439556",
"0.55439556",
"0.5307513",
"0.5296474",
"0.52579033",
"0.52306396",
"0.5197011",
"0.51202613",
"0.5104202",
"0.50606996",
"0.50557256",
"0.5048612",
"0.5043005",
"0.5043005",
"0.50353324",
"0.50348055",
"0.50302404",
"0.50142854",
"0.4993066",
"0.49906528",
"0.49906528",
"0.4979432",
"0.49711528",
"0.49391887",
"0.49018213",
"0.48986924",
"0.48812717"
] | 0.6716383 | 0 |
Enables introspection of the class prior to construction to determine if it is a guide component. | def getComponentType(cls):
return 'Guide' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def abstract(self):\n return self._cls and not self._tool",
"def get_guide_type(guide):\n # Maintained by naming convention in the Blender files. Sub-optimal.\n try:\n return guide.name[guide.name.rindex(\".\") + 1:]\n except:\n return None",
"def setup_class(cls):\n cls.behaviour = MyScaffoldBehaviour(\"behaviour\", SkillContext())",
"def test_explainer_class(self):\n assert self.explainer.__class__.__bases__[0].__name__ == 'ABC'\n assert self.explainer.__class__.__name__ == 'Explainer'",
"def test_AsDocumentationHelperOnClassTool(self):\n class_tool = self.getPortal().portal_classes\n class_doc_helper = class_tool.asDocumentationHelper(class_id='Folder')\n self.assertNotEquals(class_doc_helper, None)\n # We simply check that we can access methods of the documentation helper\n self.assertNotEquals([], class_doc_helper.getStaticPropertyList())",
"def setup_class(self):\n pass",
"def class_is(cls: Class) -> bool:\n pass",
"def identify_class(self, cls):",
"def setup_class(cls):\n # ns.assert_true(False, \"setup_class run\")\n print('setup_class\\n')",
"def setup_class(cls):\n pass",
"def setup_class(cls):\n pass",
"def test_style_guide_manager():\n formatter = mock.create_autospec(base.BaseFormatter, instance=True)\n options = create_options()\n guide = style_guide.StyleGuideManager(options, formatter=formatter)\n assert guide.default_style_guide.options is options\n assert len(guide.style_guides) == 1",
"def setup_class(cls):",
"def setup_class(cls):",
"def setup_class(klass):",
"def setup_class(klass):",
"def predicate(cls: nodes.ClassDef) -> bool:\n if cls.name in CLASS_NAME_SKIPLIST:\n # class looks like an API model class, but it isn't.\n return False\n\n if not cls.name.endswith(\"API\") and \"schema\" not in cls.locals:\n # class does not look like an API model class.\n return False\n\n return True",
"def discoverable(_class):\n\n # Set the attribute to the class name, to prevent subclasses from also\n # being discoverable.\n setattr(_class, _get_discoverable_attribute(_class), True)\n return _class",
"def isKnownComponentClass(self, *args):\n return _libSALOME_LifeCycleCORBA.SALOME_LifeCycleCORBA_isKnownComponentClass(self, *args)",
"def _get_object_reference(self, config_details, package, components=[]):\n selected_class = config_details['@class']\n available_classes = self.__get_available_classes(package)\n attributes = self.__get_attributes(config_details)\n \n for available_class in available_classes:\n if available_class[0] == selected_class:\n kwargs = {}\n \n # Add all attributes to kwargs to pass to the constructor of the object.\n for attribute in attributes:\n if attribute['@is_argument']:\n kwargs[attribute['@name']] = attribute['@value']\n \n # For any component attributes (e.g. Topic, SearchContext)...add to kwargs!\n for attribute_reference in components:\n kwargs[attribute_reference[0]] = attribute_reference[1]\n \n reference = available_class[1](**kwargs)\n \n # If any attributes for the new object are required, now we pass them.\n for attribute in attributes:\n if not attribute['@is_argument']:\n setattr(reference, attribute['@name'], attribute['@value'])\n \n # The instance should be now instantiated!\n return reference\n \n raise ImportError(\"Specified class '{0}' could not be found.\".format(selected_class))",
"def is_class_discoverable(_class, default_discoverability=False):\n return bool(getattr(_class, _get_discoverable_attribute(_class),\n default_discoverability))",
"def test_cls(self):\n assert forge.cls == forge.FParameter(\n forge.FParameter.POSITIONAL_OR_KEYWORD,\n name='cls',\n interface_name='cls',\n contextual=True,\n )",
"def __init__(self):\n self.label = \"Check\"\n self.alias = \"Check Shapefiles\"\n\n # List of tool classes associated with this toolbox\n if core.get_pass():\n self.tools = [Dbound, Overlaps, Numbering, Geometry, Roads, Bld]\n else:\n self.tools = []",
"def _create_documenter(env: sphinx.environment.BuildEnvironment,\n documenter_cls: Type[sphinx.ext.autodoc.Documenter],\n name: str) -> sphinx.ext.autodoc.Documenter:\n bridge = _FakeBridge(env)\n documenter = documenter_cls(bridge, name)\n assert documenter.parse_name()\n assert documenter.import_object()\n if documenter_cls.objtype == 'class':\n bridge.genopt['special-members'] = [\n '__eq__',\n '__getitem__',\n '__setitem__',\n # '__hash__',\n '__init__',\n '__class_getitem__',\n '__call__',\n '__array__',\n ]\n try:\n documenter.analyzer = sphinx.pycode.ModuleAnalyzer.for_module(\n documenter.get_real_modname())\n # parse right now, to get PycodeErrors on parsing (results will\n # be cached anyway)\n documenter.analyzer.find_attr_docs()\n except sphinx.pycode.PycodeError:\n # no source file -- e.g. for builtin and C modules\n documenter.analyzer = None\n return documenter",
"def not_discoverable(_class):\n\n # Set the attribute to the class name, to prevent subclasses from also\n # being not discoverable.\n setattr(_class, _get_discoverable_attribute(_class), False)\n return _class",
"def test_get_component_descriptor_by_clazz_using_get(self):\n pass",
"def setup_class(cls):\n super().setup_class()\n cls.detector = cls.faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)\n cls.headPoseEstimator = cls.faceEngine.createHeadPoseEstimator()\n cls.livenessEstimator = cls.faceEngine.createLivenessV1Estimator()\n cls.detection = cls.detector.detectOne(VLImage.load(filename=CLEAN_ONE_FACE))",
"def __init__(self, enabled_plugins):\n self.plugins = enabled_plugins",
"def __init__(self, cls):\n super().__init__()\n self._cls = cls",
"def initialize(self) -> bool:\n raise NotImplementedError"
] | [
"0.57170075",
"0.5539905",
"0.5513718",
"0.5417464",
"0.53752244",
"0.5361038",
"0.53488845",
"0.5335014",
"0.5306125",
"0.5271901",
"0.5271901",
"0.5254923",
"0.5219103",
"0.5219103",
"0.5090291",
"0.5090291",
"0.5021101",
"0.5020824",
"0.49868792",
"0.49730074",
"0.49605048",
"0.49268454",
"0.49124122",
"0.49022582",
"0.48979834",
"0.487713",
"0.48676047",
"0.48454034",
"0.4825371",
"0.48248863"
] | 0.6441624 | 1 |
Returns the corresponding rig component class for this guide component class | def getRigComponentClass(cls):
return SimpleControlComponentRig | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getComponentType(cls):\n\n return 'Guide'",
"def getComponentType(cls):\n\n return 'Guide'",
"def getRigComponentClass(cls):\n\n return OSSMouthRig",
"def get_class(self):\n\t\treturn self.CLASS",
"def component(self):\n return self._component",
"def component(self):\n return self._component",
"def component_type(self):\n return self._component_type",
"def component_type(self):\n return 'ct'",
"def parentComponent(self):\n return fusion.Component()",
"def cls(self):\n return self.cls_index",
"def _class(self):\n return self.__class",
"def component(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"component\")",
"def component(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"component\")",
"def cls(self):\n return self.__class__.__name__",
"def get_class(self):\n return devices.get_class(self.type)",
"def component_type(self) -> ComponentType:\n return self.configuration.component_type",
"def class_ref(self):\n return self._class_ref",
"def getClass(self):\n return _libsbml.ASTNode_getClass(self)",
"def model_class(self):\n return self.prop.composite_class",
"def get_class(self, name):\n raise NotImplementedError",
"def cls(self):\n return self._cls",
"def get_class(cls):\n return '{}.{}'.format(cls.__module__, cls.__name__)",
"def class_name(self) -> str:\n return pulumi.get(self, \"class_name\")",
"def getReactantComponent(self):\n return _libsbml.SpeciesTypeComponentMapInProduct_getReactantComponent(self)",
"def get_base_comp(self):\n return self._main_model.get_base_comp()",
"def getClassObject(self):\n return self.device()",
"def type(cls):\n return cls.__name__",
"def get_component(self, name):\n for cmpt in self.components:\n if cmpt['name'] == name:\n return cmpt",
"def record_cls(self):\n return self._ELE_CLS",
"def get_class(self):\n return self.meta_model.get_class()"
] | [
"0.72734654",
"0.72734654",
"0.6762508",
"0.6674559",
"0.61377406",
"0.61377406",
"0.61340904",
"0.6081116",
"0.6031022",
"0.59476155",
"0.59440464",
"0.5913267",
"0.59066844",
"0.586445",
"0.583399",
"0.5800438",
"0.5774759",
"0.5768571",
"0.57600415",
"0.5756991",
"0.57568467",
"0.5697234",
"0.5694478",
"0.5680039",
"0.5659978",
"0.5640072",
"0.56190705",
"0.56016934",
"0.5601501",
"0.5579684"
] | 0.74637115 | 0 |
Load CSV file into the database. Add a `year` column | def from_csv_to_database():
for year, path in FileNamePath.items():
# load csv files
with open(path, encoding='cp1251') as dataset:
print(f"Download {year} data")
get_curr_data(dataset, year) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_catalog(self):\n self.catalog = pd.read_csv(self.catalog_path, \n index_col=0, parse_dates=True)\n self.unique_years = self.catalog.index.year.unique()\n return",
"def load_data(path):\n\n columns = ['Item Year', 'Original Value', 'Standard Value', 'Original Currency',\n 'Standard Currency', 'Orignal Measure', 'Standard Measure', 'Location',\n 'Commodity']\n col_type = [int, float, float, object, object, object, object, object]\n\n col_type_dict = dict(zip(columns, col_type))\n\n au_df = pd.read_csv(path, usecols=columns)\n au_df = au_df.astype(col_type_dict)\n au_df.name = 'AU_data'\n \n return au_df, columns",
"def load_csv(self):\n self.database = pd.read_csv(\n self.settings['database_path'],\n encoding='utf-8')",
"def import_year(year):\n\n if year == '2018':\n return import_track(src_location + 'Track/CoordinateCSVs/Track_2018.csv', arc_length_integral_step=1e-1, arc_length_map_resolution=6)\n\n elif year == '2019':\n return import_track(src_location + 'Track/CoordinateCSVs/Track_2019.csv')\n\n raise Exception('Could not find track for year ' + str(year))",
"def csv_to_db(db, filename):\n csv_table = open(filename, 'r')\n updater = [(int(dstr), int(hstr)) for (dstr, hstr) in csv.reader(csv_table)]\n db.bulk_update(updater)",
"def loadCSV(input_file):",
"def from_csv(self, path):\n for model, table in [(self.Dataset, 'dataset'),\n (self.Datarun, 'datarun'),\n (self.Hyperpartition, 'hyperpartition'),\n (self.Classifier, 'classifier')]:\n df = pd.read_csv(os.path.join(path, '%ss.csv' % table))\n\n # parse datetime columns. This is necessary because SQLAlchemy can't\n # interpret strings as datetimes on its own.\n # yes, this is the easiest way to do it\n for c in inspect(model).attrs:\n if type(c) != ColumnProperty:\n continue\n col = c.columns[0]\n if type(col.type) == DateTime:\n df[c.key] = pd.to_datetime(df[c.key],\n infer_datetime_format=True)\n\n for _, r in df.iterrows():\n # replace NaN and NaT with None\n for k, v in list(r.iteritems()):\n if pd.isnull(v):\n r[k] = None\n\n # insert the row into the database\n create_func = getattr(self, 'create_%s' % table)\n create_func(**r)",
"def populate_table_from_csv(csv_file, csv_encoding='iso-8859-15'):\n try:\n with open(file=csv_file, mode='r', encoding=csv_encoding) as input_file:\n # Could find a good place to add iterators/generators/comprehensions elsewhere, so made a new function\n # Also, yet another pylint false positive. The below line isn't supposed to be assigned to anything.\n [add_customer(*l.split(',')) for l in input_file if 'Id,Name,Last_name,' not in l] # pylint: disable=W0106\n except Exception as e:\n logger.error(\"Failed to load records from csv file %s into database %s: %s\", csv_file, customer_db.database, e)",
"def import_ag_data(data_csv):\n df = pd.read_csv(data_csv)\n col_to_drop = ['Program', 'Period', 'Week Ending', 'Geo Level', 'State',\n 'State ANSI', 'Zip Code', 'Region', 'watershed_code',\n 'Watershed', 'Data Item', 'Domain', 'Domain Category',\n 'Ag District', 'Ag District Code', 'CV (%)']\n df = df.drop(col_to_drop, axis=1)\n df = df[(df['Value'] != ' (D)') & (df['Value'] != ' (Z)')]\n df = df.replace(to_replace=r',', value='', regex=True)\n df['Value'] = df['Value'].astype('int')\n df = df.rename(columns={'Value': 'Yield'})\n df['Year'] = pd.to_datetime(df['Year'], format='%Y')\n return df",
"def load_records():\n\n with open('seed_data/records.csv', 'rb') as csvfile:\n data = csv.reader(csvfile)\n for row in data:\n record_id, user_id, common_name, date_time, latitude, longitude, notes, seen, num_birds = row\n\n record = Record(record_id=record_id, user_id=user_id, common_name=common_name,\n date_time=date_time, latitude=latitude, longitude=longitude, \n notes=notes, seen=seen, num_birds=num_birds)\n\n db.session.add(record)\n\n db.session.commit()",
"def import_from_csv(self) -> None:\n logging.info('import_from_csv')\n if self.target_table and str(self.target_table).lower() in [\"issue\", \"version\"]:\n if self.file_path and exists(self.file_path):\n # Read CSV file\n csv_data = pd.read_csv(self.file_path).to_dict('records')\n\n # Import Version\n if str(self.target_table).capitalize() == \"Version\":\n # Overwrite option\n if self.overwrite:\n self.session.query(Version).delete()\n click.echo('Overwrite Version table')\n\n for version in csv_data:\n if all(item in list(version.keys()) for item in ['tag', 'start_date', 'end_date']):\n newVersion=Version(\n project_id=version['project_id'],\n name=version[\"name\"], \n tag=version[\"tag\"], \n start_date=datetime.strptime(version[\"start_date\"], '%Y-%m-%d %H:%M:%S.%f'), \n end_date=datetime.strptime(version[\"end_date\"], '%Y-%m-%d %H:%M:%S.%f'), \n )\n \n try:\n self.session.add(newVersion)\n compute_version_metrics(self.session, self.configuration.current_branch, newVersion.project_id)\n click.echo('Importing ' + str(len(csv_data)) + ' version(s) on database')\n except Exception:\n logging.error(Exception)\n else:\n logging.error(\"CSV file no contain minimal mandatory fields\")\n sys.exit('CSV file no contain minimal mandatory fields')\n\n # Import Issue\n if str(self.target_table).capitalize() == \"Issue\":\n # Overwrite option\n if self.overwrite:\n self.session.query(Issue).delete()\n click.echo('Overwrite Issue table')\n\n for issue in csv_data:\n if all(item in list(issue.keys()) for item in ['number', 'created_at', 'updated_at']):\n newIssue=Issue(\n project_id=issue['project_id'],\n number=issue[\"number\"],\n title=issue[\"title\"],\n created_at=datetime.strptime(issue[\"created_at\"], '%Y-%m-%d %H:%M:%S.%f'),\n updated_at=datetime.strptime(issue[\"updated_at\"], '%Y-%m-%d %H:%M:%S.%f'))\n\n try:\n self.session.add(newIssue)\n click.echo('Importing ' + str(len(csv_data)) + ' issue(s) on database')\n except Exception:\n logging.error(Exception)\n else:\n logging.error(\"CSV file no contain minimal mandatory fields\")\n sys.exit('CSV file no contain minimal mandatory fields') \n\n self.session.commit()\n else:\n logging.error('File not found')\n sys.exit('File not found')\n else:\n logging.error('Target table not found')\n sys.exit('Target table not found')",
"def load_file(self):\n\n self.df = self.sqlContext.read.csv(self.source, sep=self.sep, header=True, inferSchema=True)",
"def csv(self, file, table=None):\n\n if table:\n table.import_from_csv_file(file)\n else:\n db = self.db\n # This is the preferred method as it updates reference fields\n db.import_from_csv_file(file)\n db.commit()",
"def load_data(path=None, dbtable=None, headers=None):\n\n DF = dd.read_csv(\n urlpath=path,\n names=headers,\n dtype='unicode')\n\n dd.to_sql(\n DF,\n name=dbtable,\n uri=data_store,\n if_exists='append',\n index=False\n )",
"def data_from_csv(self, filepath):\n self.dataframe = pd.load_csv(filepath, separator='')",
"def get_loc_year_csv(csv_name):\n fname = (csv_name.split('.'))[0].split('-')\n return fname[0], fname[1]",
"def from_csv(self,path):\n self.csv_path = path\n\n try:\n fh = open(self.csv_path, \"r\")\n except IOError:\n print(\"Error: no such file or directory\") \n\n self.csv_dataframe = pd.DataFrame(pd.read_csv(self.csv_path, header=0, keep_default_na=False)).dropna(axis=0, how='any')\n test = pd.DataFrame(pd.read_csv(self.csv_path)).dropna(axis=0, how='any')\n types = [0 for i in range(len(test.dtypes))]\n a = fh.readline()\n a = a[:-1] # remove '\\n'\n x = a.split(',') # x stores the name of each column\n fh.close()\n\n #type transformation\n for i in range(len(test.dtypes)):\n if test.dtypes[i].name[0:3] == 'int' or test.dtypes[i].name[0:5] == 'float':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if not (j == 0 or (j > 1000 and j < 2100)):\n types[i] = test.dtypes[i].name[0:5]\n break\n else:\n types[i] = 'year'\n elif test.dtypes[i].name[0:6] == 'object':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if j != 0 and not(re.search(r'\\d+[/-]\\d+[/-]\\d+', j)):\n types[i] = 'varchar'\n break\n else:\n types[i] = 'date'\n \n name = path.rsplit('/', 1)[-1][:-4]\n self.table_info(name, x, types)\n self.import_method = methods_of_import[2] # = 'csv'\n\n self.show_csv_info()",
"def load_data(filename):\n # Load the necessary columns from the csv into pandas\n data = pd.read_csv(filename, sep=';')\n\n # Cleans the data\n data = data[[\"Perioden\", \"Regio's\",\\\n \"Kerkelijke gezindte/Geen kerkelijke gezindte (% van de bevolking)\",\\\n \"Kerkelijke gezindte/Totaal kerkelijke gezindte (% van de bevolking)\",\\\n \"Kerkelijke gezindte/Rooms-Katholiek (% van de bevolking)\",\\\n \"Kerkelijke gezindte/Protestantse Kerk in Nederland (% van de bevolking)\",\\\n \"Kerkelijke gezindte/Nederlands Hervormd (% van de bevolking)\",\\\n \"Kerkelijke gezindte/Gereformeerd (% van de bevolking)\",\\\n \"Kerkelijke gezindte/Islam (% van de bevolking)\",\\\n \"Kerkelijke gezindte/Overige gezindte (% van de bevolking)\"]]\n\n # Creates new columns for renaming purposes\n data[\"Year\"] = data[\"Perioden\"]\n data[\"Region\"] = data[\"Regio's\"]\n data[\"Athiest\"] = data[\"Kerkelijke gezindte/Geen kerkelijke gezindte (% van de bevolking)\"]\n data[\"Total\"] = data[\"Kerkelijke gezindte/Totaal kerkelijke gezindte (% van de bevolking)\"]\n data[\"Roman Catholic\"] = data[\"Kerkelijke gezindte/Rooms-Katholiek (% van de bevolking)\"]\n data[\"Protestant\"] = data[\"Kerkelijke gezindte/Protestantse Kerk in Nederland (% van de bevolking)\"]\n data[\"Dutch Reformed\"] = data[\"Kerkelijke gezindte/Nederlands Hervormd (% van de bevolking)\"]\n data[\"Reformed\"] = data[\"Kerkelijke gezindte/Gereformeerd (% van de bevolking)\"]\n data[\"Islam\"] = data[\"Kerkelijke gezindte/Islam (% van de bevolking)\"]\n data[\"Other\"] = data[\"Kerkelijke gezindte/Overige gezindte (% van de bevolking)\"]\n\n # Deletes doubles\n data.drop(data.columns[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]], axis = 1, inplace=True)\n\n data = data.set_index(\"Region\")\n\n print(data)\n return data",
"def read_csv():",
"def add_csv(filename):\n with open(f'{filename}') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n new_entry = False\n name = row['product_name']\n price = clean_price(row['product_price'])\n quantity = int(row['product_quantity'])\n date = clean_date(row['date_updated'])\n query = session.query(Product).filter_by(product_name=row['product_name'])\n\n if query.count() == 0:\n new_entry = True\n else:\n for item in query:\n if date > item.date_updated:\n item.product_price = price\n item.product_quantity = quantity\n item.date_updated = date\n session.add(item)\n\n if new_entry:\n product = Product(product_name=name, product_price=price,\n product_quantity=quantity, date_updated=date)\n session.add(product)\n session.commit()",
"def add_new_data_to_combine_df(year, filename):\n\n year_dict = collect_data_by_year(year) # Collect new data\n df = open_csv_to_df(filename, index='idNum') # Retrieve other data\n df.set_index('playerCode', inplace=True)\n full_dict = df.to_dict(orient='index')\n full_dict.update(year_dict) # Merge data\n # Create unified df\n full_df = pd.DataFrame.from_dict(full_dict, orient='index')\n\n full_df.reset_index(inplace=True)\n full_df = full_df.rename(columns = {'index':'playerCode'})\n save_df_to_csv(full_df, filename, col_headers=True, index=True,\n index_label='idNum', mode='w')\n\n return full_df",
"def csv_to_df(filename, target_year):\r\n df = pd.read_csv(\"raw_data/\" + filename + \".csv\", index_col=0) # index is column 0 country, column names are years (first row)\r\n # delete all columns but the target year\r\n for col_name in df.columns:\r\n if col_name != str(target_year):\r\n del df[col_name]\r\n return df",
"def insert_books_data():\n # Get data from csv file\n print(\"Getting data from csv..\")\n file = open(\"books.csv\")\n reader = csv.reader(file)\n\n # Insert csv data into table\n print(\"Inserting data into 'books' table..\")\n for isbn, title, author, year in reader:\n try:\n db.execute(\"INSERT INTO books (isbn, title, author, year)\\\n VALUES (:isbn, :title, :author, :year)\", {\n \"isbn\": isbn, \"title\": title, \"author\": author, \"year\": year })\n except exc.DataError as err:\n print(\"Invalid entry in csv file\")\n db.commit()\n print(\"Data inserted\")",
"def add_data(self, year, month):\n data = _download_to_df(self.url, self.table_name, year, month)\n if 'INTERVENTION' in data.columns:\n data = data[data['INTERVENTION'] == 0]\n data = data.loc[:, self.table_columns]\n with self.con:\n data.to_sql(self.table_name, con=self.con, if_exists='append', index=False)\n self.con.commit()",
"def ingest_rental_csv(csv_path):\n # Create a CSV import generator (next yields one db row)\n import_generator = import_csv_gen(csv_path)\n # Skip over the title row\n next(import_generator)\n # Iterate over all other rows\n while True:\n try:\n data = next(import_generator)\n if len(data) != 2:\n logger.error(f'Data with incorrect item count: {len(data)}')\n continue\n # extract items from list and add document to database\n with Connection():\n rental = Rental(\n product_id=data[RENTAL_PROD_ID],\n user_id=data[RENTAL_USER_ID]\n )\n rental.save() # This will perform an insert\n except StopIteration:\n break",
"def load_ratings_data(connection, csvfile):\n insert_sql = 'insert into ratings (userId, movieId, rating, timestamp) ' \\\n 'values (%s, %s, %s, %s)'\n load_data(connection, insert_sql, get_data_from_file(csvfile))",
"def load_csv(filename: str, solr_url: typing.Optional[str]):\n\n solr_client = Solr(solr_url, always_commit=True) if solr_url else Solr(\"\")\n\n csv_data = { row[\"Item ARK\"]: row for row in csv.DictReader(open(filename)) }\n\n config = {\n \"collection_names\": {\n row[\"Item ARK\"]: row[\"Title\"] for row in csv_data.values() if row[\"Object Type\"] == \"Collection\"\n },\n \"controlled_fields\": load_field_config(\"./fields\"),\n \"child_works\": collate_child_works(csv_data),\n }\n\n controlled_fields = load_field_config(\"./fields\")\n\n mapped_records = []\n for row in rich.progress.track(csv_data.values(), description=f\"Importing {filename}...\"):\n if row[\"Object Type\"] not in (\"ChildWork\", \"Page\"):\n mapped_records.append(map_record(row, solr_client, config=config))\n\n if solr_url:\n solr_client.add(mapped_records)\n else:\n print(json.dumps(mapped_records))",
"def load(self, path, separator=\",\", decoder=lambda j,v: v):\n # Date objects are saved and loaded as strings, but it is easy to convert these back to dates:\n # Table.columns[x].map(lambda s: date(s))\n data = open(path, \"rb\").read().lstrip(BOM_UTF8)\n data = StringIO(data)\n data = [row for row in csv.reader(data, delimiter=separator)]\n data = [[_eval(decoder(j,v)) for j,v in enumerate(row)] for row in data]\n return Table(data)",
"def import_csv(self, csvfileobject):\n # Clear previously stored info\n self._tracks = []\n self._selected = None\n\n for row in csvfileobject:\n if row[0] == \"T\":\n track = self.addTrack()\n track.properties = row\n elif row[0] == \"P\":\n period = self.addPeriod([0,1,'-'])\n period.properties = row",
"def load(csv_stream, strip_spaces=True, skip_blank_lines=True,\n encoding=\"utf-8\", delimiter=\",\", force_unique_col_names=False):\n def _force_unique(col_headers):\n seen_names = set()\n unique_col_headers = list()\n for i, col_name in enumerate(col_headers):\n if col_name in seen_names:\n col_name += \"_%s\" % i\n seen_names.add(col_name)\n unique_col_headers.append(col_name)\n return unique_col_headers\n\n def _pad_row(row):\n if len(row) < num_cols:\n for i in range(num_cols - len(row)):\n row.append('')\n return row\n\n def _process_row(row):\n if strip_spaces:\n return _pad_row([value.strip() for value in row])\n else:\n return _pad_row(row)\n\n csv_reader = csv.reader(csv_stream, delimiter=delimiter)\n\n column_headers = [header.strip() for header in csv_reader.next()]\n if force_unique_col_names:\n column_headers = _force_unique(column_headers)\n num_cols = len(column_headers)\n\n # Make a list to gather entries for each column in the data file...\n raw_text_cols = [list() for i in range(num_cols)]\n for row in csv_reader:\n processed_row = _process_row(row)\n # Add this new row if we either allow blank lines or if any field\n # in the line is not blank. We do this to the processed row,\n # because spaces may or may not be significant, depending on\n # whether strip_spaces is True.\n if (not skip_blank_lines) or any(processed_row):\n for i in range(num_cols):\n raw_text_cols[i].append(processed_row[i].decode(encoding))\n\n # Now take the raw data and put it into our Column...\n cols = [Column(raw_col) for raw_col in raw_text_cols]\n\n return Document(zip(column_headers, cols))"
] | [
"0.6482594",
"0.6402999",
"0.6367218",
"0.6283168",
"0.62296",
"0.6219771",
"0.6213001",
"0.6185162",
"0.6111619",
"0.6011145",
"0.60105246",
"0.5978613",
"0.597643",
"0.5974852",
"0.5945671",
"0.59363544",
"0.593171",
"0.5929072",
"0.5925405",
"0.5903456",
"0.5902019",
"0.58859855",
"0.5877367",
"0.58696663",
"0.5849839",
"0.5835613",
"0.5825964",
"0.58169353",
"0.5807664",
"0.58046496"
] | 0.6820516 | 0 |
Save a CSV file with the given header and rows into the output folder | def to_csv(header, rows):
with open('result.csv', 'w') as result:
result_writer = csv.writer(result, delimiter=';')
result_writer.writerow(header)
result_writer.writerows(rows) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_csv(self, out_file_name, header):\n\n with open(out_file_name, 'wb') as outf:\n writer = csv.writer(outf, quoting=csv.QUOTE_ALL)\n writer.writerow(header)\n writer.writerows(self.records)",
"def write_csv(header_row, data_rows, filename, course_id):\n shared.ensure_directory_exists(utils.ANSWERS_DISTRIBUTION_REPORTS_DIRECTORY,\n course_id.org, course_id.course)\n\n\n path = shared.get_safe_file_path(utils.ANSWERS_DISTRIBUTION_REPORTS_DIRECTORY,\n course_id.org, course_id.course,\n filename)\n ## need to encode the unico path in order to open the file in prod env\n path = path.encode('utf-8')\n\n with open(path, \"wb\") as ofile:\n writer = csv.writer(ofile, quoting=csv.QUOTE_ALL)\n writer.writerow(header_row)\n for datarow in data_rows:\n encoded_row = [cleanup_newlines(unicode(s).encode('utf-8'))\n for s in datarow]\n writer.writerow(encoded_row)",
"def createFileCSV(table, path=\"./prediction\"):\t\n\tif len(table) < 1:\n\t\traise NameError('Empty Table!')\n\telse:\n\t\tfile = open(path + '.csv', 'w+')\n\n\t\tfile.write(table[0].toStringHeaders() + \"\\n\")\n\n\t\tfor row in table:\n\t\t\tfile.write(row.toStringCSV() + '\\n')\n\t\tfile.close()",
"def generate_csv(table, header):\n with open(\"%s.csv\" % header, \"w\") as csvfile:\n for i in range(len(table)):\n for j in range(len(table[i])):\n if j != len(table[i])-1:\n tmp = table[i][j] + \",\"\n else:\n tmp = table[i][j] + \"\\n\"\n csvfile.write(tmp)",
"def write_csv(outputfile, delimiter, newline, qchar, encoding, header, rows):\n with open(outputfile, 'w', newline=newline, encoding=encoding) as csvfile:\n writer = csv.writer(csvfile, delimiter=delimiter,\n quotechar=qchar, quoting=csv.QUOTE_MINIMAL)\n writer.writerow(header)\n for row in rows:\n writer.writerow(row)",
"def save_results_csv(fname, results, header=0):\n\n new_rows = []\n if not os.path.isfile(fname):\n args = fname.split('/')[:-1]\n directory = os.path.join(*args)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n with open(fname, 'wt') as f:\n writer = csv.writer(f)\n if header == 0:\n writer.writerows(\n [['Model', 'Dataset', 'Method', 'Weight', 'Label', \n 'Step', 'AUPRC', 'AUROC', 'Precision', 'Recall',\n 'F1 score', 'Random Seed', 'Date']])\n if header == 1:\n writer.writerows(\n [['Precision', 'Recall', 'F1 score', 'Random Seed']])\n elif header ==2:\n writer.writerows(\n [['Step', 'AUPRC', 'AUROC', 'Precision', 'Recall',\n 'F1 score', 'Random Seed']])\n\n elif header == 5:\n writer.writerows(\n [['Model', 'Dataset', 'Method', 'Weight', 'Label', \n 'Step', 'Scores']])\n\n with open(fname, 'at') as f:\n # Overwrite the old file with the modified rows\n writer = csv.writer(f)\n new_rows.append(results) # add the modified rows\n writer.writerows(new_rows)",
"def write_csv_file(filepath, fieldnames, rows):\n headers = [{'label': field} for field in fieldnames]\n with open(filepath, 'w') as f_buf:\n outfile = CsvWriter()\n outfile.set_headers(headers)\n outfile._datas = rows\n outfile.render(f_buf)",
"def save_results_csv(fname, results, header=0):\n\n new_rows = []\n if not os.path.isfile(fname):\n args = fname.split('/')[:-1]\n directory = os.path.join(*args)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n with open(fname, 'wt') as f:\n writer = csv.writer(f)\n if header == 0:\n writer.writerows(\n [['Model', 'Dataset', 'Method', 'Weight', 'Label', \n 'Step', 'AUROC', 'Precision', 'Recall',\n 'F1 score', 'Random Seed', 'Date']])\n if header == 1:\n writer.writerows(\n [['Precision', 'Recall', 'F1 score', 'Random Seed']])\n elif header ==2:\n writer.writerows(\n [['Step', 'AUROC', 'Precision', 'Recall',\n 'F1 score', 'Random Seed']])\n\n elif header == 5:\n writer.writerows(\n [['Model', 'Dataset', 'Method', 'Weight', 'Label', \n 'Step', 'Scores']])\n\n with open(fname, 'at') as f:\n # Overwrite the old file with the modified rows\n writer = csv.writer(f)\n new_rows.append(results) # add the modified rows\n writer.writerows(new_rows)",
"def write_csv(row_list,out_name,*header_strings : str):\n with open(out_name,'w',newline='') as result_file:\n wr = csv.writer(result_file, delimiter='\\t')\n if header_strings:\n wr.writerow([name for name in header_strings])\n if type(row_list[0]) is list:\n wr.writerows(row_list)\n else:\n for row in row_list:\n wr.writerow([row])",
"def export_csv(self, path):\r\n\r\n with open(path, 'w') as f:\r\n f.write('# h,hr,m')\r\n\r\n if self.rho is not None:\r\n f.write(',rho')\r\n if self.temperature is not None:\r\n f.write(',temperature')\r\n\r\n f.write('\\n')\r\n for i in range(self.shape[0]):\r\n for j in range(self.shape[1]):\r\n f.write(f'{self.h[i, j]},{self.hr[i, j]},{self.m[i, j]}')\r\n if self.rho is not None:\r\n f.write(f',{self.rho[i, j]}')\r\n if self.temperature is not None:\r\n f.write(f',{self.temperature[i, j]}')\r\n f.write('\\n')\r\n return",
"def init_csv_file(csv_path):\n with open(csv_path, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file)\n header = ['file_name', 'chart_in_file',\n 'year', 'month', 'row_no', 'bird_species']\n header += list(range(1, 32))\n writer.writerow(header)",
"def write_csv(path, lines, headers):\n print \"Opening %s for score output\" % base_name(path)\n\n try:\n f = open(path, 'wb')\n writer = csv.writer(f)\n writer.writerow(headers)\n writer.writerows(lines)\n except IOError:\n print \"Cannot open %s\" % path\n else:\n print \"Scores successfully written to %s\" % path\n f.close()",
"def save_csv_file():\n global output_on_display, import_lst, column_names, data\n if data_base == '':\n mistake_load_table()\n else:\n column_names = data[0]\n save_name = asksaveasfilename(title=\"Select file\", filetypes=((\"CSV\", \"*.csv\"), (\"all files\", \"*.*\")),\n confirmoverwrite=True, defaultextension='.csv')\n step = len(column_names)\n data_csv = import_lst\n if len(data_csv[0]) == step:\n pass\n else:\n data_csv = import_lst[step::]\n\n with open(save_name, 'w+') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(column_names)\n csv_writer.writerows(data_csv)",
"def csv_file(data,output_dir,filename,order = [],head = True):\n with open(output_dir + filename + '.csv', 'w') as f:\n write = csv.writer(f)\n write.writerows(manip.dic_to_list(data,order,head),)\n return None",
"def serialise(rows: Iterable[Dict], path: pathlib.Path, **kwargs):\n\n fieldnames = settings.OUTPUT_HEADERS\n\n LOGGER.info(\"Writing CSV with headers: %s\", fieldnames)\n\n with path.open('w', newline='') as file:\n writer = csv.DictWriter(file, fieldnames=fieldnames, dialect=UrbanDialect, **kwargs)\n\n row_count = 0\n for row in rows:\n writer.writerow(row)\n\n row_count += 1\n\n if row_count:\n LOGGER.info(\"Wrote %s rows to '%s'\", row_count, file.name)\n else:\n path.unlink()\n LOGGER.info(\"Deleted '%s'\", file.name)",
"def prepare_out_csv(output_dir, filename):\n out_columns_pi = ['fasta_file', 'acc.code',\n 'organism', 'EC.code', 'species',\n 'note', 'pi', 'modification', 'category']\n string = ''\n for i in out_columns_pi:\n if i == out_columns_pi[-1]:\n string += i\n else:\n string += i+','\n string += '\\n'\n with open(output_dir+filename, 'w') as f:\n f.write(string)",
"def save_csv(filename, rows):\n with open(filename, 'w', newline='', encoding='utf-8') as f:\n writer = csv.writer(f)\n writer.writerow([\n 'title', 'runtime', 'genre(s)', 'director(s)', 'writer(s)',\n 'actor(s)', 'rating(s)', 'number of rating(s)'\n ])\n\n writer.writerows(rows)",
"def create_csv(output_file, y, tx, ids, header, is_test):\n print('\\nCreate new csv file named ' + str(output_file) + '...')\n with open(output_file, 'w') as csvfile:\n writer = csv.DictWriter(csvfile, delimiter = ',', fieldnames = header)\n writer.writeheader()\n for idx, y_row, tx_row in zip(ids, y, tx):\n if is_test:\n prediction = '?'\n else:\n prediction = 'b' if y_row == -1 else 's'\n dictionary = {'Id': int(idx),'Prediction': prediction}\n for index in range(len(tx_row)):\n dictionary[header[index + 2]] = float(tx_row[index])\n writer.writerow(dictionary)\n print('\\n... finished.')",
"def csv_writer(data, path):\n\twith open(path, \"wb\") as csv_file:\n\t\twriter= csv.writer(csv_file, delimiter=',')\n\t\twriter.writerows(data)",
"def create_csv(csv_path, headers):\n with open(csv_path, 'w') as csv_file:\n writer = DictWriter(csv_file, fieldnames=headers)\n writer.writeheader()",
"def writeDataCSV(data,outpath,outfile,out_head=None,message='data'):\n if (out_head is not None):\n #nhead = out_head.count(',') + 1\n nhead = len(out_head.split(',')) # Split header at every comma\n if (data.shape[1] != nhead):\n print('Warning: No. of fields does not match number of headings in', \n 'output file:',outfile+'.csv')\n print('No. fields =',data.shape[1],', No. headings =',nhead)\n filename = join(outpath, outfile + '.csv')\n print('Saving',message,'in file:',filename)\n np.savetxt(filename,data,delimiter=',',header=out_head) \n return None",
"def save_csv(self):\n path, _ = QtWidgets.QFileDialog.getSaveFileName(self, '保存数据', '', 'CSV(*.csv)')\n\n if not path:\n return\n\n with open(path, 'w') as f:\n writer = csv.writer(f, lineterminator='\\n')\n\n writer.writerow(self.headers.keys())\n\n for row in range(self.rowCount()):\n row_data = []\n for column in range(self.columnCount()):\n item = self.item(row, column)\n if item:\n row_data.append(str(item.text()))\n else:\n row_data.append('')\n writer.writerow(row_data)",
"def export_csv(header, data):\n with StringIO() as tmp:\n writer = csv.DictWriter(tmp, fieldnames=header)\n writer.writeheader()\n writer.writerows(data)\n data = tmp.getvalue()\n\n return data",
"def csv_writer(data, path):\n with open(path, \"wb\") as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n for line in data:\n writer.writerow(line)",
"def csv_file_creator(path, list_of_jobs):\n with open(path, \"wb\") as out_file:\n writer = UnicodeWriter(out_file, delimiter=',')\n for row in list_of_jobs:\n writer.writerow(row)",
"def write_data_files(self):\n # build our strings\n header_string = \"\"\n data_string = \"\"\n for value in self.data.values():\n header_string += value[2] + \",\"\n if value[0] != None:\n data_string += value[1].format(value[0])\n else:\n data_string += \",\"\n # remove the extra comma and replace with a newline\n header_string = header_string[:-1]\n header_string += \"\\n\"\n data_string = data_string[:-1]\n data_string += \"\\n\"\n \n # show what we built\n #print(header_string)\n #print(data_string)\n \n # open a temp file\n with open(\"{:s}\\\\VWSInput\\\\temp_data.csv\".format(self.path), \"w\") as temp_file:\n #temp_file.write(header_string)\n temp_file.write(data_string)\n \n # move to the input file\n filetools.mv(\"{:s}\\\\VWSInput\\\\temp_data.csv\".format(self.path), \"{:s}\\\\VWSInput\\\\data.csv\".format(self.path))\n \n return",
"def save_csv(net, wires, net_id, chip_id, chip):\n with open('output/output.csv', 'w') as file:\n # Write first line\n output = csv.writer(file)\n output.writerow([\"net\", \"wires\"])\n\n # Index and fill the body\n for step in range(len(wires)):\n output.writerow([net[step],wires[step]])\n\n # End of file\n output.writerow([f\"chip_{chip_id}_net_{net_id}\", chip.cost])",
"def write_csv(tmp_path):\n lines = [\n ('NFLX,3,99.66,319,998.1,957,-41.1,-0.041\\r\\n'),\n ('XRX,40,33.94,30,1357.6,1200,-157.6,-0.116\\r\\n'),\n ]\n\n filename = tmp_path / 'report1.csv'\n with open(filename, 'w', newline='') as file:\n file.writelines(lines)\n\n return filename",
"def _write_csv(self):\n\n # add the label to the header\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self.header.append('Date')\n else:\n self.header.append('sample id')\n\n key_list = []\n\n for i, cube in enumerate(self.cube_list):\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self._write_sample_with_date(cube, i, key_list)\n else:\n self._write_sample(cube, i, key_list)\n\n output_data_file_path = self._get_full_file_name()\n self._write_data_dict(output_data_file_path, key_list)\n\n return [output_data_file_path]",
"def write_csv(settings, row, mode):\n with open(settings.output_file_path, mode=mode) as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n csv_writer.writerow(row)"
] | [
"0.73305476",
"0.7274633",
"0.7113233",
"0.69982344",
"0.68883383",
"0.6880275",
"0.68772936",
"0.6867271",
"0.6855233",
"0.68267447",
"0.67927784",
"0.6781188",
"0.67231953",
"0.67214197",
"0.666982",
"0.6666694",
"0.663893",
"0.6626239",
"0.6602342",
"0.65913993",
"0.65737796",
"0.6561133",
"0.6550545",
"0.6523665",
"0.6514395",
"0.6509307",
"0.65011615",
"0.65007037",
"0.64969134",
"0.64718145"
] | 0.74616706 | 0 |
Return age and the averages of size and intensity. | def calculate(data, data_top):
size, intensity, age = np.array([data["Size"]]), np.array([data["Intensity"]]), data_top.iat[1,0]
size_avg, intensity_avg = np.average(size), np.average(intensity)
return size_avg, intensity_avg, age | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_average_age(self):\n return np.mean([agent.age for agent in self.agents])",
"def average_age():\n df = pd.read_csv(config.META_FQN, sep=\"\\t\")\n ages = []\n for _, row in df.iterrows():\n if row[\"asr_test\"]:\n age = row[\"Age_ses1\"]\n if not math.isnan(age):\n ages.append(age)\n\n print(\"------ Age ------\")\n print_stats(ages)",
"def _get_average_age(self):\n sql = \"\"\"\n SELECT AVG(age) as avg_age\n FROM(\n SELECT DATE_PART('year', AGE(now(), birth_date)) as age\n FROM {schema}.participant_match\n WHERE birth_date is not null\n ) x\n \"\"\".format(schema=self.database.schema)\n df = pd.read_sql(sql, self.database.connection)\n avg_age = None\n if len(df) > 0:\n avg_age = df.loc[0]['avg_age']\n return avg_age",
"def _find_average_age():\r\n count, total = 0, 0\r\n for resource in resources:\r\n patient = resource[\"resource\"]\r\n if \"birthDate\" in patient:\r\n count += 1\r\n dob = patient[\"birthDate\"].split(\"-\")\r\n dob = datetime(int(dob[0]), int(dob[1]), int(dob[2]), 0, 0, 0, 0)\r\n if \"deceasedDateTime\" in patient:\r\n death_time = patient[\"deceasedDateTime\"].split(\"T\")[0].split(\r\n \"-\")\r\n death_time = datetime(int(death_time[0]), int(death_time[1]),\r\n int(death_time[2]), 0, 0, 0, 0)\r\n else:\r\n death_time = datetime.now()\r\n age = relativedelta(death_time, dob).years\r\n total += age\r\n if count == 0:\r\n return count, count\r\n return total / count, count",
"def age_avg_adopt():\n \n import numpy as np\n import pandas as pd\n import matplotlib.pyplot as plt\n\n train = pd.read_csv('./data/train.csv')\n \n # Convert age from months to years\n train.loc[train['Age'] > -1, 'Age'] = (train['Age']//12)\n \n # Divide by dog (Type = 1) and cat (Type = 2)\n dog_df = train.loc[train['Type'] == 1, ['State','Type', 'Age', 'AdoptionSpeed']]\n cat_df = train.loc[train['Type'] == 2, ['State','Type', 'Age', 'AdoptionSpeed']]\n \n dog_max_age = max(dog_df.loc[:, 'Age'])\n dog_min_age = min(dog_df.loc[:, 'Age'])\n \n cat_max_age = max(cat_df.loc[:, 'Age'])\n cat_min_age = min(cat_df.loc[:, 'Age'])\n \n dog_age_labels = []\n dog_avg = []\n \n cat_age_labels = [] \n cat_avg = []\n \n\n # Find dog average adoption speed by age\n for i in range(dog_min_age, dog_max_age + 1) :\n \n count = (dog_df.Age == i).sum()\n if(count > 0) :\n dog_avg.append(dog_df.loc[dog_df['Age'] == i, ['AdoptionSpeed']].mean()[0])\n dog_age_labels.append(i)\n\n # Plot bar graphs\n yticks_index = list(range(5))\n \n plt.figure(num = None, figsize=(6.5,4.5),dpi=80, facecolor = 'w', edgecolor='k')\n index = np.arange(len(dog_age_labels))\n \n index = index[0:13]\n dog_age_labels = dog_age_labels[0:13]\n dog_avg = dog_avg[0:13]\n \n plt.bar(index, dog_avg)\n plt.xlabel('Age in Years')\n plt.xticks(index, dog_age_labels)\n plt.ylabel('Adoption Speed')\n plt.yticks(yticks_index)\n plt.title('Dog Average Adoption Speed for Each Age')\n plt.savefig('bardogAvg.png', bbox_inches='tight')\n\n # Find cat average adoption speed by age\n for i in range(cat_min_age, cat_max_age + 1) :\n \n count = (cat_df.Age == i).sum()\n if(count > 0) :\n cat_avg.append(cat_df.loc[cat_df['Age'] == i, ['AdoptionSpeed']].mean()[0])\n cat_age_labels.append(i)\n\n plt.figure(num = None, figsize=(6,4),dpi=80, facecolor = 'w', edgecolor='k')\n index = np.arange(len(cat_age_labels))\n \n \n index = index[0:11]\n cat_age_labels = cat_age_labels[0:11]\n cat_avg = cat_avg[0:11]\n \n plt.bar(index, cat_avg)\n plt.xlabel('Age in Years')\n plt.xticks(index, cat_age_labels)\n plt.ylabel('Adoption Speed')\n plt.yticks(yticks_index)\n plt.title('Cat Average Adoption Speed for Each Age')\n plt.savefig('barcatAvg.png', bbox_inches='tight')",
"def get_age(self):\n return self.glb[iage]",
"def average_age(self, start=1, end=None):\n picks = self.pick_set.filter(number__gte=start)\n if end is not None:\n picks = picks.filter(number__lte=end)\n\n dt = datetime.date(self.year, 1, 1)\n ages = [e.player.age(dt) for e in picks]\n ages = [e for e in ages if e]\n average = sum(ages) / len(ages)\n return average",
"def get_data():\n\n size, intensity, age = [], [], []\n def calculate(data, data_top):\n \"\"\"Return age and the averages of size and intensity.\"\"\"\n size, intensity, age = np.array([data[\"Size\"]]), np.array([data[\"Intensity\"]]), data_top.iat[1,0]\n size_avg, intensity_avg = np.average(size), np.average(intensity)\n return size_avg, intensity_avg, age\n \n with os.scandir(\"imgdata/\") as files:\n for entry in files:\n data = pd.read_csv(entry, header=3, index_col=0)\n data_top = pd.read_csv(entry, index_col=0, nrows=2, header=None)\n result = calculate(data, data_top)\n size.append(result[0])\n intensity.append(result[1])\n age.append(result[2])\n return size, intensity, age",
"def average_age_dc(all_profile_dict: dict) -> float:\n \"\"\"Param:all_profile_dc: Dictionary containing all profiles\"\"\"\n today = date.today()\n value = sum(map(lambda v: today.year - v['birthdate'].year - ((today.month, today.day) < (\n v['birthdate'].month, v['birthdate'].day)), all_profile_dict.values())) / len(all_profile_dict)\n return value",
"def animal_ages(self):\n herb_ages = []\n carn_ages = []\n for cell in self.land_cells.values():\n for herb in cell.herbivores:\n herb_ages.append(herb.age)\n for carn in cell.carnivores:\n carn_ages.append(carn.age)\n if not herb_ages:\n return [carn_ages]\n elif not carn_ages:\n return [herb_ages]\n else:\n return [herb_ages, carn_ages]",
"def average_age_nt(all_profile_nt: namedtuple) -> tuple:\n \"\"\"Param: all_profile_nt: Named tuple containing all profiles\"\"\"\n today = date.today()\n value = sum(map(lambda v: today.year - v[-1].year - ((today.month, today.day) < (\n v[-1].month, v[-1].day)), all_profile_nt))/len(all_profile_nt)\n return value",
"def average(self):\n return self.summation() / self.count()",
"def mean_height(data):",
"def mean_height(data):",
"def age_avg_adopt():\n \n import numpy as np\n import pandas as pd\n import matplotlib.pyplot as plt\n\n train = pd.read_csv('./data/train.csv')\n \n train_12_months = train.loc[train['Age'] < 13, ['State','Type', 'Age', 'AdoptionSpeed']]\n \n dog_df = train_12_months.loc[train_12_months['Type'] == 1, :]\n cat_df = train_12_months.loc[train_12_months['Type'] == 2, :]\n \n \n dog_max_age = max(dog_df.loc[:, 'Age'])\n dog_min_age = min(dog_df.loc[:, 'Age'])\n \n cat_max_age = max(cat_df.loc[:, 'Age'])\n cat_min_age = min(cat_df.loc[:, 'Age'])\n \n dog_age_labels = []\n dog_avg = []\n \n cat_age_labels = [] \n cat_avg = []\n \n \n # Bar Graphs\n a = list(range(5))\n # Find dog average adoption speed by age\n for i in range(dog_min_age, dog_max_age + 1) :\n \n count = (dog_df.Age == i).sum()\n if(count > 0) :\n dog_avg.append(dog_df.loc[dog_df['Age'] == i, ['AdoptionSpeed']].mean()[0])\n dog_age_labels.append(i)\n\n plt.figure(num = None, figsize=(6,4),dpi=80, facecolor = 'w', edgecolor='k')\n index = np.arange(len(dog_age_labels))\n plt.bar(index, dog_avg)\n plt.xlabel('Age in Months')\n plt.xticks(index, dog_age_labels)\n plt.ylabel('Adoption Speed')\n plt.yticks(a)\n plt.title('Dog Average Adoption Speed Up to 12 Months of Age')\n plt.savefig('bardog12avg.png', bbox_inches='tight')\n \n\n # Find cat average adoption speed by age\n for i in range(cat_min_age, cat_max_age + 1) :\n \n count = (cat_df.Age == i).sum()\n if(count > 0) :\n cat_avg.append(cat_df.loc[cat_df['Age'] == i, ['AdoptionSpeed']].mean()[0])\n cat_age_labels.append(i)\n\n plt.figure(num = None, figsize=(6,4),dpi=80, facecolor = 'w', edgecolor='k')\n index = np.arange(len(cat_age_labels))\n plt.bar(index, cat_avg)\n plt.xlabel('Age in Months')\n plt.xticks(index, cat_age_labels)\n plt.ylabel('Adoption Speed')\n plt.yticks(a)\n plt.title('Cat Average Adoption Speed Up to 12 Months of Age')\n plt.savefig('barcat12avg.png', bbox_inches='tight')",
"def get_average(self):\n self.avg = math.floor((self.maths + self.phy + self.che) / 3, )\n self.assign_grade()\n return self.avg\n # End of method get_average",
"def getAge(self):\r\n return self.age",
"def average(self,start_window, end_window):\n query = f\"select avg(age) from `{self.table_id}` where timestamp between {start_window} and {end_window}\"\n query_job = self.client.query(query)\n return query_job.result",
"def getAvg(self):\r\n\t\treturn self.data['avg']",
"def print_avg():",
"def _get_mean(self):\n return (0.485, 0.456, 0.406)",
"def get_mean_and_variance(self):\n self._set_statistics()\n return self.statistics_object.get_mean(), self.statistics_object.get_variance()",
"def mean(self) -> Dict:\n raise NotImplementedError",
"def get_age(self):\n\t\treturn self.age",
"def get_means(self):\n if self.metadata is None:\n self.get_metadata()\n\n # we want only the numerical features\n df = self.metadata.select_dtypes(include=['int64', 'float64'])\n return df.mean()",
"def getAge(self):\n return self.age",
"def average(self):\n return self.properties.get('average')",
"def load_average(self):\n return _favg(self.load_samples)",
"def get_age(self):\r\n return self.age",
"def average_city(g):\n average = 0\n ctr = 0\n \n for key in g.city_dict:\n average = average + g.city_dict[key].get_population()\n ctr = ctr + 1\n \n \n return (average / ctr)"
] | [
"0.69467896",
"0.6716017",
"0.64098644",
"0.6313252",
"0.62349457",
"0.60789764",
"0.603415",
"0.5983959",
"0.5971684",
"0.5971354",
"0.596329",
"0.5948338",
"0.59151775",
"0.59151775",
"0.5871947",
"0.5868544",
"0.58433545",
"0.57856715",
"0.5773423",
"0.57728165",
"0.57656837",
"0.57019424",
"0.56956106",
"0.56415325",
"0.5629768",
"0.5629329",
"0.5626965",
"0.562118",
"0.56196946",
"0.5616322"
] | 0.74258214 | 0 |
Locates the flags in the resource. Calls the LineFinder class in order | def getting_flags_locations(self):
print(self.flags)
self.line_finder.find_line(self.html) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _linesearch(self):\n pass",
"def setup_flags(self):\n self.io_args.color = self.io_args.color_full\n self.io_args.rig_in = self.io_args.rig\n self.io_args.matches = os.path.join(self.io_args.output_root, \"matches.json\")\n self.io_args.rig_out = os.path.join(self.io_args.output_root, \"rig.json\")",
"def read_flags():\n return flag_args",
"def test_get_all_flags(self):\n include_prefixes = ['-I']\n db = CppProperties(include_prefixes)\n\n expected = [\n Flag('-I', path.normpath('/folder/include/path')),\n Flag('-I', path.normpath('/another/file/path')),\n ]\n\n scope = SearchScope(from_folder=_get_test_folder('simple'))\n self.assertEqual(expected, db.get_flags(search_scope=scope))",
"def find_offsets(self):\r\n\r\n #### Begin functionality here\r\n\r\n return()",
"def user_iflags_find(*args):\n return _ida_hexrays.user_iflags_find(*args)",
"def get_flags(args):\r\n\r\n flags = 0\r\n\r\n if args.regexfilepattern is not None:\r\n flags |= pygrep.FILE_REGEX_MATCH\r\n\r\n if not args.regexp:\r\n flags |= pygrep.LITERAL\r\n elif args.dotall:\r\n flags |= pygrep.DOTALL\r\n\r\n if args.ignore_case:\r\n flags |= pygrep.IGNORECASE\r\n\r\n if args.recursive:\r\n flags |= pygrep.RECURSIVE\r\n\r\n if args.regexdirpattern:\r\n flags |= pygrep.DIR_REGEX_MATCH\r\n\r\n return flags",
"def use_LineSearch(self,use_ls):\n if type(use_ls).__name__ == 'bool':\n self._use_ls = use_ls\n else:\n raise KINSOL_Exception(\"The variable sent to 'use_LineSearch' must be a boolean.\")",
"def _find_processing_instructions(self):\n pass",
"def search_in(self, file_object):\n for line_num, line in enumerate(file_object.readlines()):\n line = line.replace(\"\\n\", \"\").replace(\"\\r\", \"\") # remove new line char\n if re.match(self.regex, line):\n result = f\"~{os.path.abspath(file_object.name)}: {line} (line {line_num})\"\n if self.colored:\n result = self.highlight_phrase(result)\n print(result, file=sys.stdout)",
"def findInLine(self) -> str:\n raise NotImplementedError",
"def grep(syntax_dictonary,file,flag):\n\n with open(file,'r') as inFile:\n for line in inFile:\n for syntax in syntax_dictonary:\n matches=re.finditer(syntax,line)\n if matches:\n for to_color in matches:\n if flag:\n start_code = \"\\033[{}m\".format(94)\n end_code=\"\\033[0m\"\n change=start_code + to_color.group() + end_code\n line=re.sub(syntax,change,line)\n print(line)\n else:\n print(line)",
"def _GetFlags(self, lines, build_dir):\n is_win = sys.platform == 'win32'\n flags_by_output = {}\n for line in lines:\n command_line = shlex.split(line.strip(), posix=not is_win)[1:]\n\n output_name = _FindAndRemoveArgWithValue(command_line, '-o')\n dep_name = _FindAndRemoveArgWithValue(command_line, '-MF')\n\n command_line = _MergeSpacedArgs(command_line, '-Xclang')\n\n cc_file = [x for x in command_line if x.endswith('.cc') or\n x.endswith('.c') or\n x.endswith('.cpp') or\n x.endswith('.mm') or\n x.endswith('.m')]\n if len(cc_file) != 1:\n self._skipped.append(command_line)\n continue\n assert len(cc_file) == 1\n\n if is_win:\n rsp_file = [x for x in command_line if x.endswith('.rsp')]\n assert len(rsp_file) <= 1\n if rsp_file:\n rsp_file = os.path.join(build_dir, rsp_file[0][1:])\n with open(rsp_file, \"r\") as open_rsp_file:\n command_line = shlex.split(open_rsp_file, posix=False)\n\n defines = [x for x in command_line if x.startswith('-D')]\n include_dirs = [x for x in command_line if x.startswith('-I')]\n dash_f = [x for x in command_line if x.startswith('-f')]\n warnings = \\\n [x for x in command_line if x.startswith('/wd' if is_win else '-W')]\n others = [x for x in command_line if x not in defines and \\\n x not in include_dirs and \\\n x not in dash_f and \\\n x not in warnings and \\\n x not in cc_file]\n\n for index, value in enumerate(include_dirs):\n if value == '-Igen':\n continue\n path = value[2:]\n if not os.path.isabs(path):\n path = os.path.join(build_dir, path)\n include_dirs[index] = '-I' + os.path.normpath(path)\n\n # GYP supports paths above the source root like <(DEPTH)/../foo while such\n # paths are unsupported by gn. But gn allows to use system-absolute paths\n # instead (paths that start with single '/'). Normalize all paths.\n cc_file = [os.path.normpath(os.path.join(build_dir, cc_file[0]))]\n\n # Filter for libFindBadConstructs.so having a relative path in one and\n # absolute path in the other.\n others_filtered = []\n for x in others:\n if x.startswith('-Xclang ') and \\\n (x.endswith('libFindBadConstructs.so') or \\\n x.endswith('libFindBadConstructs.dylib')):\n others_filtered.append(\n '-Xclang ' +\n os.path.join(os.getcwd(), os.path.normpath(\n os.path.join('out/gn_flags', x.split(' ', 1)[1]))))\n elif x.startswith('-B'):\n others_filtered.append(\n '-B' +\n os.path.join(os.getcwd(), os.path.normpath(\n os.path.join('out/gn_flags', x[2:]))))\n else:\n others_filtered.append(x)\n others = others_filtered\n\n flags_by_output[cc_file[0]] = {\n 'output': output_name,\n 'depname': dep_name,\n 'defines': sorted(defines),\n 'include_dirs': sorted(include_dirs), # TODO(scottmg): This is wrong.\n 'dash_f': sorted(dash_f),\n 'warnings': sorted(warnings),\n 'other': sorted(others),\n }\n return flags_by_output",
"def test_flags(self):\n self.check_search(\n dict(flag_contact=u'yes'),\n [u'Tackle', u'DoubleSlap', u'Ice Punch', u'Bite', u'Fly'],\n 'flimsy search by flag',\n )\n\n self.check_search(\n dict(flag_mirror=u'no'),\n [u'Counter', u'Curse', u'Focus Punch', u'Sunny Day'],\n 'better search by flag',\n )\n\n self.check_search(\n dict(flag_contact=u'no', name=u'punch'),\n [],\n 'searching by nega-flag',\n exact=True,\n )",
"def flags(self) -> UserFlag:",
"def __init__(self, resource, *args):\n self.args = list(args)\n self.flags = OrderedDict()\n self.additional_flags = []\n self._AddCommonFlags(resource)",
"def flags(self):\n data = struct.pack('=I', self.FileFlags & self.FileFlagsMask)\n addr_space = addrspace.BufferAddressSpace(self.obj_vm.get_config(), 0, data)\n bitmap = {'Debug': 0,\n 'Prerelease': 1,\n 'Patched': 2,\n 'Private Build': 3,\n 'Info Inferred': 4,\n 'Special Build' : 5,\n }\n return obj.Object('Flags', offset = 0, vm = addr_space, bitmap = bitmap)",
"def flags(self, f):\n if f.is_inlined:\n return \" (inlined)\"\n return \"\"",
"def _collect_line_info(self, dso, real_path, addr_shifts):\n # 1. Collect addrs to send to addr2line.\n addr_set = set()\n for addr in dso.addrs:\n addr_obj = dso.addrs[addr]\n if addr_obj.source_lines: # already has source line, no need to search.\n continue\n for shift in addr_shifts:\n # The addr after shift shouldn't change to another function.\n shifted_addr = max(addr + shift, addr_obj.func_addr)\n addr_set.add(shifted_addr)\n if shifted_addr == addr_obj.func_addr:\n break\n if not addr_set:\n return\n addr_request = '\\n'.join(['%x' % addr for addr in sorted(addr_set)])\n\n # 2. Use addr2line to collect line info.\n try:\n subproc = subprocess.Popen([self.addr2line_path, '-ai', '-e', real_path],\n stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n (stdoutdata, _) = subproc.communicate(str_to_bytes(addr_request))\n stdoutdata = bytes_to_str(stdoutdata)\n except:\n return\n addr_map = {}\n cur_line_list = None\n for line in stdoutdata.strip().split('\\n'):\n if line[:2] == '0x':\n # a new address\n cur_line_list = addr_map[int(line, 16)] = []\n else:\n # a file:line.\n if cur_line_list is None:\n continue\n # Handle lines like \"C:\\Users\\...\\file:32\".\n items = line.rsplit(':', 1)\n if len(items) != 2:\n continue\n if '?' in line:\n # if ? in line, it doesn't have a valid line info.\n # An addr can have a list of (file, line), when the addr belongs to an inlined\n # function. Sometimes only part of the list has ? mark. In this case, we think\n # the line info is valid if the first line doesn't have ? mark.\n if not cur_line_list:\n cur_line_list = None\n continue\n (file_path, line_number) = items\n line_number = line_number.split()[0] # Remove comments after line number\n try:\n line_number = int(line_number)\n except ValueError:\n continue\n file_id = self._get_file_id(file_path)\n cur_line_list.append((file_id, line_number))\n\n # 3. Fill line info in dso.addrs.\n for addr in dso.addrs:\n addr_obj = dso.addrs[addr]\n if addr_obj.source_lines:\n continue\n for shift in addr_shifts:\n shifted_addr = max(addr + shift, addr_obj.func_addr)\n lines = addr_map.get(shifted_addr)\n if lines:\n addr_obj.source_lines = lines\n break\n if shifted_addr == addr_obj.func_addr:\n break",
"def search(path, f):\n\n started = False\n\n for count, line in enumerate(f):\n number = count + 1\n if search_line(line):\n if not started:\n print config.term.highlight(relpath(path), 'GREEN')\n if config.filenames:\n break\n started = True\n if len(line) <= config.output_limit:\n print '%d:%s' % (number,\n config.term.highlight(line.rstrip('\\n\\r'),\n ('BLACK', 'BG_YELLOW'),\n config.search))\n else:\n print '%d:LINE IS TOO LONG (>%d)' % (number, config.output_limit)\n if started:\n print",
"def __init__(self, runway_type):\n self.primary_surface_length = 200\n self.primary_surface_width = 0\n self.approach_surface_extendedwidth = 0\n self.first_section_length = 0\n self.first_section_slope = 0\n self.second_section_length = 0\n self.second_section_slope = 0\n self.horizontal_surface_height = 150\n self.horizontal_surface_radius = 0\n self.conical_surface_slope = 20\n self.conical_surface_offset = 4000\n self.transitional_surface_slope = 7\n \n # The runway types listed in the documentation for FAA FAR 77 do not \n # match what appears when you actually run the tool in ArcMap.\n # These regular expressions should match either version. \n if re.match(\"Visual\\s*(?:Runway)?\\s*Visual\\sApproach\", runway_type, re.I):\n self.primary_surface_width = 500\n self.approach_surface_extendedwidth = 1500\n self.first_section_length = 5000\n self.first_section_slope = 20\n self.horizontal_surface_radius = 5000\n elif re.match(\"Utility\\s*(?:Runway)?\\s*Visual Approach\", runway_type, re.I):\n self.primary_surface_width = 250\n self.approach_surface_extendedwidth = 1250\n self.first_section_length = 5000\n self.first_section_slope = 20\n self.horizontal_surface_radius = 5000\n elif re.match(\"Utility\\s*(?:Runway)?\\s*Non[\\s\\-]*Precision Instrument Approach\", runway_type, re.I):\n self.primary_surface_width = 500\n self.approach_surface_extendedwidth = 2000\n self.first_section_length = 5000\n self.first_section_slope = 20\n self.horizontal_surface_radius = 5000\n elif re.match(\"Precision Instrument\\s*(?:Runway)?\", runway_type, re.I):\n self.primary_surface_width = 1000\n self.approach_surface_extendedwidth = 16000\n self.first_section_length = 10000\n self.first_section_slope = 50\n self.second_section_length = 40000\n self.second_section_slope = 40\n self.horizontal_surface_radius = 10000\n elif re.match(\"Non Precision Instrument\\s*(?:Runway)?\\s*(?:(?:High)|(?:Greater)) Visibility\", runway_type, re.I):\n self.primary_surface_width = 500\n self.approach_surface_extendedwidth = 3500\n self.first_section_length = 10000\n self.first_section_slope = 34\n self.horizontal_surface_radius = 10000\n elif re.match(\"Non Precision Instrument\\s*(?:Runway)\\s*Approach Low Visibility\", runway_type, re.I):\n self.primary_surface_width = 1000\n self.approach_surface_extendedwidth = 4000\n self.first_section_length = 10000\n self.first_section_slope = 34\n self.horizontal_surface_radius = 10000",
"def _get_relevant_line(self):\n # () -> (Phi.Line)\n line_name = self._get_line_name()\n print(\"looking for \"+str(line_name))\n return Phi.findLine(line_name)",
"def get_file_flag(self):\n flag_list = os.listdir(self.path)\n temp_flag_list = []\n for flag in flag_list[:5]:\n result = re.match('^(\\w{2}\\d{6}\\_)(\\d{8})', flag)\n if result:\n temp_flag_list.append(result[2])\n self.flag_list = list(set(temp_flag_list))",
"def __init__(self, entries: ghidra.program.model.address.AddressSetView, findEntryPoint: bool):\n ...",
"def flags(self,index):\n return self._flags",
"def extractFlag(str):\n \n\tflag = \"\"\n\tlines = str.split(\"\\n\")\n\tfor line in lines:\n\t\tm = r2.search(line)\n\t\tif m:\n\t\t\t#print \"DEBUG: matched %s %s %s %s\" % \\\n\t\t\t# (m.group(4), m.group(3), m.group(2), m.group(1))\n\t\t\tbyte0 = int(\"0x\" + m.group(4), 16)\n\t\t\tbyte1 = int(\"0x\" + m.group(3), 16)\n\t\t\tbyte2 = int(\"0x\" + m.group(2), 16)\n\t\t\tbyte3 = int(\"0x\" + m.group(1), 16)\n\t\t\tflag += chr(byte0) + chr(byte1) + chr(byte2) + chr(byte3)\n \n\treturn flag",
"def _readline_ins(self):\n if self._ins_filehandle is None:\n if not os.path.exists(self._ins_filename):\n raise Exception(\n \"instruction file '{0}' not found\".format(self._ins_filename)\n )\n self._ins_filehandle = open(self._ins_filename, \"r\")\n line = self._ins_filehandle.readline()\n self._ins_linecount += 1\n if line == \"\":\n return None\n self._last_line = line\n # check for spaces in between the markers - this gets ugly\n line = line.lower()\n if self._marker is not None and self._marker in line:\n\n # def find_all(a_str, sub):\n # start = 0\n # while True:\n # start = a_str.find(sub, start)\n # if start == -1:\n # return\n # yield start\n # start += len(sub)\n # poss speedup using regex\n midx = [m.start() for m in re.finditer(re.escape(self._marker), line)]\n # midx = list(find_all(line, self._marker))\n midx.append(len(line))\n first = line[: midx[0]].strip()\n tokens = []\n if len(first) > 0:\n # tokens.append(first)\n tokens.extend([f.strip() for f in first.split()])\n for idx in range(1, len(midx) - 1, 2):\n mstr = line[midx[idx - 1] : midx[idx] + 1]\n ostr = line[midx[idx] + 1 : midx[idx + 1]]\n tokens.append(mstr)\n tokens.extend(ostr.split())\n else:\n tokens = line.strip().split()\n return tokens",
"def getLineInformation(line):\n \n pass",
"def flags(self) -> Optional[int]:\n return self.get(\"/Ff\")",
"def eflags(vdb, line):\n trace = vdb.getTrace()\n argv = shlex.split(line)\n if len(argv) not in (0, 1):\n return vdb.do_help('eflags')\n\n if len(argv) > 0:\n flag = argv[0].upper()\n valid_flags = list(trace.getStatusFlags().keys())\n if flag not in valid_flags:\n raise Exception('invalid flag: %s, valid flags %s' % (flag, valid_flags))\n value = trace.getRegisterByName(flag)\n trace.setRegisterByName(flag, not bool(value))\n # TODO: this is not plumbed through to flags gui due to new gui\n # eventing coming soon.\n vdb.vdbUIEvent('vdb:setflags')\n return\n\n ef = trace.getRegisterByName('eflags')\n vdb.vprint('%16s: %s' % ('Carry', bool(ef & e_i386.EFLAGS_CF)))\n vdb.vprint('%16s: %s' % ('Parity', bool(ef & e_i386.EFLAGS_PF)))\n vdb.vprint('%16s: %s' % ('Adjust', bool(ef & e_i386.EFLAGS_AF)))\n vdb.vprint('%16s: %s' % ('Zero', bool(ef & e_i386.EFLAGS_ZF)))\n vdb.vprint('%16s: %s' % ('Sign', bool(ef & e_i386.EFLAGS_SF)))\n vdb.vprint('%16s: %s' % ('Trap', bool(ef & e_i386.EFLAGS_TF)))\n vdb.vprint('%16s: %s' % ('Interrupt', bool(ef & e_i386.EFLAGS_IF)))\n vdb.vprint('%16s: %s' % ('Direction', bool(ef & e_i386.EFLAGS_DF)))\n vdb.vprint('%16s: %s' % ('Overflow', bool(ef & e_i386.EFLAGS_OF)))"
] | [
"0.6099659",
"0.573128",
"0.5506411",
"0.54494226",
"0.52455074",
"0.52231914",
"0.5129431",
"0.51100206",
"0.5072955",
"0.5024129",
"0.5009258",
"0.4999141",
"0.49835676",
"0.4975226",
"0.49732998",
"0.49641412",
"0.49461514",
"0.4915233",
"0.49087882",
"0.48647398",
"0.48466185",
"0.48400354",
"0.48023334",
"0.4792176",
"0.47918516",
"0.47764072",
"0.47582942",
"0.47530657",
"0.47445574",
"0.47309846"
] | 0.76073164 | 0 |
Add url domain field to each tweet in each user data object. Url domain field contains list of domains corresponding to list of urls. | def modify_user_data(user_d_list):
for user in user_d_list:
for tweet in user['tweets']:
domains = [get_domain_of_url(url) for url in tweet['urls']]
tweet['domains'] = domains
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_domain():\n\n for e in Expr.search() + User.search(): e.set_tld(config.server_name)",
"def fixURLS():\n url_re = re.compile(r'http t co \\S+')\n tweets = Tweet.objects.all()\n for tweet in tweets:\n tweet.text = url_re.sub(' ', tweet.text)\n tweet.text = ' '.join(tweet.text.split())\n tweet.save()",
"def get_urls(listObjofPics, userName):\n\n with open('%s_pic_tweets.csv' %userName, newline='') as csvfile:\n # skipinitialspace=True in order to avoid ',' delimiter issues in row[2] from tweet text\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"', skipinitialspace=True)\n \n for row in reader:\n listObjofPics.append(row[3])\n \n return listObjofPics",
"def domains(self, domains):\n\n self._domains = domains",
"def add_all_friends(twitter, users):\n for u_dict in users:\n u_dict['friends'] = get_friends(twitter,u_dict['screen_name'])",
"def data(urls):\r\n for url in urls:\r\n d = dict(url)\r\n d['url'] = url.hashed.url\r\n yield d",
"def clean_urls(self, tweet):\n self.urls = re.findall(self.regexpForURLs, tweet)\n\n for url in self.urls:\n tweet = tweet.replace(url, '')\n\n tweet = self.clean_unnecessary_whitespaces(tweet)\n return tweet",
"def get_layer_urls(self):\n urls = []\n\n if getattr(self, 'additional_domains'):\n map(urls.append, (domain for domain in self.additional_domains.split(\";\") if domain))\n\n return urls",
"def by_domains(self):\n\t\t\n\t\t# TODO: use urllib instead\n\t\turl_format = r'^\\s*(?:(?P<protocol>\\w+)://)?(?P<domain>[\\w\\d\\-\\.]+)(?::(?P<port>\\d+))?/?(?P<everything_else>.*)$'\n\t\tsites = {}\n\t\tfor line in self.source.lines:\n\t\t\ttry:\n\t\t\t\tif self.filter(line):\n\t\t\t\t\tresult = re.match(url_format, line.content.url)\n\t\t\t\t\tif result.group('domain') not in sites.keys():\n\t\t\t\t\t\tsites[result.group('domain')] = 0\n\t\t\t\t\tsites[result.group('domain')] += int(line.content.size)\n\t\t\texcept AttributeError:\n\t\t\t\tpass\n\t\t\n\t\t# TODO: sort; convert to lists is even better\n\t\t\n\t\treturn sites",
"def get_links_from_tweet(tweet):\n if tweet.has_key('entities'):\n if tweet['entities'].has_key('urls'):\n if tweet['entities']['urls']:\n return [t['expanded_url'] for t in tweet['entities']['urls']]\n\n return None",
"def load_users(self):\n for user_type in self.user_types:\n url_string = \"%s_url\" % user_type\n try:\n url = self.lookup(url_string)\n users = self._fetcher.get_entities(url)\n except AttributeError as ate:\n logger.err(str(ate))\n continue\n user_list = []\n for user in users:\n if 'username' in user:\n user_list.append({'name': user['username']})\n if len(user_list) > 0:\n setattr(self, user_type, user_list)",
"def get_url():\n urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',\n new_tweet)\n return urls",
"def URLs(self, default=[{}]):\n tmp = self.data.get('urls', default)\n return [HEP.URLObject(i) for i in tmp]",
"def extract_domains(self, resp):\n return",
"def URLs(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('urls', default)\n return [HEP.URLObject(i) for i in tmp]",
"def remove_urls(lista_tweets):\n\n novos_tweets = []\n\n for tweet in lista_tweets:\n texto = re.sub(r\"http\\S+\", \"\", tweet[\"text\"])\n novos_tweets.append(texto)\n\n return novos_tweets",
"def add_all_friends(twitter, users):\n ###TODO-- Completed\n\n #calling get_friends here to receive friends ID's for all the values of screen_name,\n # limiting the values to receive to 5000\n for user in users:\n user['friends'] = get_friends(twitter, user['screen_name'])[:5000]\n #print(len(user['friends']))",
"def get_clean_urls(text_list, list_to_exclude=['twitter']):\n ans_ls = []\n for x in text_list:\n rex = re.findall(\n '(?:http:|https:)\\/\\/.*\\/.*?(?:\\.cms|\\.[a-zA-Z]*|\\/[a-zA-Z0-9-\\ ]+[a-zA-z0-9])', x[1])\n for rx in rex:\n if rx and not any(z in rx for z in\n list_to_exclude) and not rx == 'http://' and not rx == 'https://' and not rx.endswith(\n '.') and 't.c' not in rx:\n if '\\xa0' in x[1]:\n for y in x[1].split('\\xa0'):\n # print(x[0],y)\n ans_ls.append((x[0], y.replace(' ', '')))\n elif '@' in x[1]:\n ans_ls.append((x[0], y.split('@')[0].replace(' ', '')))\n\n else:\n ans_ls.append((x[0], x[1].replace(' ', '')))\n return (ans_ls)",
"def load_users(self):\n for user_type in self.user_types:\n url = \"%s_url\" % user_type\n try:\n self.lookup(url)\n except AttributeError:\n continue\n users = self._fetcher.get_entities(self.lookup(url))\n user_list = []\n for user in users:\n if 'username' in user:\n user_list.append({'name': user['username']})\n if len(user_list) > 0:\n setattr(self, user_type, user_list)",
"def get_source_from_twitter(self, source_word_strs, content_dir):\n # Consider each source word string\n name = []\n description = []\n screen_name = []\n created_at = []\n statuses_count = []\n followers_count = []\n for source_word_str in source_word_strs:\n\n # Create and dump, or load, the TwitterSource pickle\n ts = TwitterSource(self, source_word_str, content_dir)\n ts.set_source(do_purge=self.source_do_purge)\n\n # Accumulate created atributes\n for i_src in range(len(ts.screen_name)):\n if not ts.screen_name[i_src] in screen_name:\n name.append(ts.name[i_src])\n description.append(ts.description[i_src])\n screen_name.append(ts.screen_name[i_src])\n created_at.append(ts.created_at[i_src])\n statuses_count.append(ts.statuses_count[i_src])\n followers_count.append(ts.followers_count[i_src])\n\n # Assign number of statuses, number of followers, and compute\n # the followers to statuses ratio\n n_statuses = np.array(statuses_count)\n n_followers = np.array(followers_count)\n n_trusting = np.divide(n_followers, n_statuses)\n\n # Convert the numeric scores to string scores\n s_statuses = ts.n_to_s(n_statuses)\n s_followers = ts.n_to_s(n_followers)\n s_trusting = ts.n_to_s(n_trusting)\n\n # Create a dictionary of users in order to print a JSON document\n # to a file\n users = []\n n_usr = len(name)\n for i_usr in range(n_usr):\n user = {}\n user['name'] = name[i_usr]\n user['description'] = description[i_usr]\n user['screen_name'] = screen_name[i_usr]\n user['created_at'] = created_at[i_usr]\n user['statuses_count'] = statuses_count[i_usr]\n user['followers_count'] = followers_count[i_usr]\n user['statuses'] = n_statuses[i_usr]\n user['followers'] = n_followers[i_usr]\n user['trusting'] = n_trusting[i_usr]\n user['score'] = s_statuses[i_usr] + s_followers[i_usr] + s_trusting[i_usr]\n if user['score'] == \"+++\":\n user['include'] = True\n else:\n user['include'] = False\n users.append(user)\n\n return users",
"def build_end_url_list(url):\n http_types = [\"http://\", \"https://\"]\n dub_types = [\"www.\", \"\"] # this order needs to preserved for testing at www.hgdatascience.com\n http_dub_urls = [\"{}{}{}\".format(h_type, dub_type, url) for dub_type in dub_types for h_type in http_types]\n return http_dub_urls",
"def _make_links(tweet):\n for pattern, repl in (USER_SUB, KEYWORD_SUB):\n tweet = re.sub(pattern, repl, tweet)\n return tweet",
"def set_data(self, data):\r\n self.tweets = data",
"def urls(self, urls):\n\n self._urls = urls",
"def get_urls(self):\n # Use functools.reduce for speed\n # see https://stackoverflow.com/questions/10461531/merge-and-sum-of-two-dictionaries\n def reducer(accumulator, dictionary):\n for key, value in dictionary.items():\n accumulator[key] = accumulator.get(key, []) + value\n return accumulator\n\n list_of_dicts = []\n for (year, quarter, f) in self.quarterly_date_list:\n self.quarterly.year = year\n self.quarterly.quarter = quarter\n self.quarterly.entry_filter = lambda x: f(x) and self.entry_filter(x)\n list_of_dicts.append(self.quarterly.get_urls())\n\n for d in self.daily_date_list:\n self.daily.date = d\n try:\n list_of_dicts.append(self.daily.get_urls())\n except EDGARQueryError:\n pass\n\n complete_dictionary = reduce(reducer, list_of_dicts, {})\n return complete_dictionary",
"def _insert_urls(self, urls):\n UrlsBase = UrlsBaseModel()\n pid = Registry().get('pData')['id']\n\n host_id = HostsModel().get_id_by_name(pid, self.options['host'].value)\n Urls = UrlsModel()\n\n added = 0\n for url in urls:\n if Urls.add(pid, host_id, url['url'], '', url['code'], url['time'], 'dafs'):\n added += 1\n\n paths = urlparse(url['url']).path.split(\"/\")\n while len(paths) != 1:\n del paths[-1]\n if Urls.add(pid, host_id, \"/\".join(paths) + \"/\", '', 0, 0, 'dafs'):\n added += 1\n UrlsBase.add_url(host_id, url['url'])\n\n return added",
"def get_urls(self, data):\n data = json.loads(data)\n urls = []\n for article in data['articles']:\n urls.append(article['url'])\n return urls",
"def get_tweets():\n\n\tuser ='kaiserkumars'\n\t# api = twitter.Api(consumer_key='iJoZZuV7etVrJfE4K9ir8sIqa',\n\t# consumer_secret='uyJyWoP05z2MUKnggW7vHnIG2sckmM1aHRMgGveZLyrz8401Xs',\n\t# access_token_key='622588040-TYDgG1UlGUvA1hW8PA7mOG5CiMw0WiuPZlkoP8cc',\n\t# access_token_secret='laAmFjeLhWzOK7Y524VevdMdeLeNpnmCUmjee1AQU7osj')\n\tapi = twitter.Api(consumer_key=get_secret('consumer_key'),\n\t consumer_secret=get_secret('consumer_secret'),\n\t access_token_key=get_secret('access_token_key'),\n\t access_token_secret=get_secret('access_token_secret'))\n\n\tstatuses = api.GetUserTimeline(user_id=622588040,count=0)\n\t# print(statuses)\n\t# duplicate='UNIQUE constraint failed: mtwitter_weatherdata.location, core_weatherdata.metric, core_weatherdata.date'\n\tbulk_insert=[]\n\t# print(dir(TwitterData))\n\tfor s in statuses:\n\t\t# print(s)\n\t\tdt = parse(s.created_at)\n\t\t# print(dt)\n\t\tdata = TwitterData(org_name=s.user.name,profile_url=s.user.profile_image_url,tweet_id =s.id,screen_name=s.user.screen_name, tweet = s.text, date= dt, favCount =0)\n\t\tbulk_insert.append(data)\n\ttry:\n\t\tTwitterData.objects.bulk_create(bulk_insert)\n\t\tprint(\"Success.\")\n\texcept Exception as e:\n\t\t# if(str(e)==duplicate):\n\t\t# \tprint('Duplicate Data')\n\t\t# else:\n\t\tprint(str(e))\n\n\treturn statuses",
"def importSites(self,sites_list):\n \"\"\" Append these sites objects to a sample \"\"\" \n self.sites = []\n for s in sites_list:\n mySite = Site(s)\n self._addSite(mySite)",
"def fill_tweet(self, t, data):\n t.text=data[\"text\"]\n #\n # update the hashtags cache\n #\n try:\n t.hashtags=data[\"entities\"][\"hashtags\"] \n for htag in t.hashtags:\n #print(\"adding to hashtags: {} to cache:\".format(htag[\"text\"], ))\n if htag[\"text\"] in hash_cache:\n hash_cache[htag[\"text\"]] += 1\n else:\n hash_cache[htag[\"text\"]] = 1\n except:\n t.hashtags=[]\n #\n # update the country cache\n #\n try:\n # see: https://bitbucket.org/richardpenman/reverse_geocode/src/default/\n #country = reverse_geocode.search(data[\"coordinates\"][\"coordinates\"][0])[\"country\"]\n country = data[\"place\"][\"country_code\"]\n if country in country_cache:\n country_cache[country] += 1\n else:\n country_cache[country] = 1\n except:\n print(\" .... Could not identify county by coordinates\")\n \n #\n # update the user cache\n #\n try:\n user_id = \"@\" + data[\"user\"][\"screen_name\"]\n if user_id in user_cache:\n user_cache[user_id] += 1\n else:\n user_cache[user_id] = 1\n except:\n print(\" ERR No User: should never happen\")\n #\n # update the tweets per minute cache\n # \n\n #tweets_descending = OrderedDict(sorted(self.application.tweet_cache.items(), key=lambda kv: kv[1], reverse=True))\n #hash_descending = OrderedDict(sorted(hash_cache.items(), key=lambda kv: kv[1], reverse=True))\n #for counter, elem in enumerate(hash_descending):\n # if counter < 9:\n # print(\"hash top #{} : {} : {}\".format(counter, elem, str(hash_descending[elem])))\n # else:\n # break\n try:\n t.user_screenname=data[\"user\"][\"screen_name\"]\n except:\n t.user_screenname=\"\"\n try:\n t.profile_image_url_https = data[\"user\"][\"profile_image_url_https\"]\n except:\n t.profile_image_url_https = \"\"\n #\n # update the tweets cache\n #\n try:\n t.timestamp = dateutil.parser.parse(data[\"created_at\"])\n except:\n t.timestamp = datetime.datetime.utcnow()\n return t"
] | [
"0.5635619",
"0.55407476",
"0.5532279",
"0.5374472",
"0.537326",
"0.53537875",
"0.53423244",
"0.53104246",
"0.5303713",
"0.5294134",
"0.52771896",
"0.527685",
"0.52617246",
"0.524754",
"0.52349997",
"0.52061516",
"0.5133119",
"0.5130348",
"0.5129113",
"0.5128528",
"0.5121948",
"0.50812286",
"0.50580233",
"0.5037108",
"0.50327027",
"0.5023949",
"0.5013952",
"0.499278",
"0.49817136",
"0.4963723"
] | 0.8328926 | 0 |
This function takes in all paths that are represented as lists of consecutive nodes [node1, node2,...,nodeN] and converted to paths represented as lists of consecutive relations [rel1, rel2,...,relM] if self.include_entity is false, or as lists of nodes and relations [node1, rel1, node2, rel2,...,relM, nodeN] if self.include_entity is true. | def expand_paths_by_nodes(self, paths):
paths_formatted = set()
# Expand each path
for path in paths:
if len(path) < 2:
continue
expanded_paths = set()
if self.include_entity:
relations_for_each_step = [[path[0]]]
else:
relations_for_each_step = []
for index in range(1, len(path)):
node1 = path[index-1]
node2 = path[index]
if (node1, node2) in self.pair_to_relations:
relations = self.pair_to_relations[(node1, node2)]
else:
print(node1, node2)
relations_for_each_step.append(relations)
if self.include_entity:
relations_for_each_step.append([node2])
expanded_paths.update(list(itertools.product(*relations_for_each_step)))
paths_formatted.update(expanded_paths)
return paths_formatted | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_paths(self):\n # convert to node sequences, dropping s'\n self.nodeseq_paths = []\n for path in self.paths:\n node_seq = [] # don't include s'\n for arc in path:\n node_seq.append(self.arc_info[arc]['destin'])\n self.nodeseq_paths.append(node_seq)\n # convert to og graph\n self.converted_paths = []\n for path in self.nodeseq_paths:\n this_path = []\n add_next_node = True\n for i in range(len(path) - 1):\n print(\"This path is\", this_path)\n node1 = path[i]\n node2 = path[i + 1]\n print(\"node1={}, node2={}\".format(node1, node2))\n if (node1, node2) in self.mapping:\n sc = self.mapping[(node1, node2)]\n print(\"uses sc edge for {}\".format(sc))\n print(\"should add {}, but also need to check for overlaps\".\n format(sc[1:-1]))\n if sc[1] in this_path:\n # we have an overlap\n start = len(this_path) - this_path.index(sc[1])\n this_path.extend(sc[start:-1])\n else:\n this_path.extend(sc[1:-1])\n add_next_node = False # next node is second of sc edge\n elif add_next_node:\n this_path.append(node1)\n else:\n add_next_node = True\n this_path.append(path[-1])\n self.converted_paths.append(this_path)",
"def filter_paths(self, paths):\n formatted_paths = set()\n for path in paths:\n formatted_path = []\n if self.include_entity:\n if len(path) == 3:\n continue\n formatted_path.append(self.idx_to_node[path[0]].get_name())\n for rdx in range(0, (len(path)-1)/2):\n formatted_path.append(self.idx_to_relation[path[rdx*2+1]])\n formatted_path.append(self.idx_to_node[path[rdx*2+2]].get_name())\n else:\n if len(path) == 1:\n continue\n for rel_idx in path:\n formatted_path.append(self.idx_to_relation[rel_idx])\n formatted_paths.add(tuple(formatted_path))\n return formatted_paths",
"def neo4j_to_relations(neo4j_path: neo4j.graph.Path) -> List[Relation]:\n relations = []\n for neo4j_relation in neo4j_path.relationships:\n rel_type = neo4j_relation.type\n props = dict(neo4j_relation)\n source_ns, source_id = process_identifier(neo4j_relation.start_node[\"id\"])\n target_ns, target_id = process_identifier(neo4j_relation.end_node[\"id\"])\n rel = Relation(source_ns, source_id, target_ns, target_id, rel_type, props)\n relations.append(rel)\n return relations",
"def reconstruct_path(goal: Vector2D, prev_node: dict) -> list:\n path = []\n prev = prev_node[goal] # remove 'goal' from path\n \n while prev != None:\n path.append(prev)\n prev = prev_node[prev]\n \n path = path[:-1] # remove 'start' from path\n path.reverse()\n return path",
"def get_entity_rel_set(self,\n step_idxs: List[int],\n use_entity: bool = False,\n ) -> Set[Tuple[int, str, str, int, str]]:\n if len(step_idxs) <= 1:\n return []\n\n entity_mapping = defaultdict(set)\n for i, step_idx in enumerate(step_idxs):\n participants = self.steps[step_idx].participants\n for participant in participants:\n if use_entity:\n for value in participant.values:\n entity_mapping[value.entity].add(\n (i, participant.role))\n else:\n entity_mapping[participant.at_id].add(\n (i, participant.role))\n\n entity_rel_set = set()\n for entity_relation in self.entity_relations:\n subject_id = entity_relation.relation_subject\n if type(entity_relation.relations) is list:\n # relations is a list: before v0.9\n relations = entity_relation.relations\n else:\n # relations is a dict: v0.9\n relations = [entity_relation.relations]\n for relation in relations:\n predicate = relation.relation_predicate\n object_id = relation.relation_object\n # TODO: Add support for relation before v0.9\n if type(entity_relation.relations) is list:\n object_id = object_id[0]\n for subject_info, object_info in product(entity_mapping[subject_id], entity_mapping[object_id]):\n entity_rel_set.add((*subject_info, predicate, *object_info))\n\n return entity_rel_set",
"def listRelatives(*args, allDescendents: bool=True, allParents: bool=True, children: bool=True,\n fullPath: bool=True, noIntermediate: bool=True, parent: bool=True, path:\n bool=True, shapes: bool=True, type: Union[AnyStr, List[AnyStr]]=\"\",\n **kwargs)->List[AnyStr]:\n pass",
"def closed_paths(entities, vertices):\n # get a networkx graph of entities\n graph, closed = vertex_graph(entities)\n # add entities that are closed as single- entity paths\n entity_paths = np.reshape(closed, (-1, 1)).tolist()\n # look for cycles in the graph, or closed loops\n vertex_paths = nx.cycles.cycle_basis(graph)\n\n # loop through every vertex cycle\n for vertex_path in vertex_paths:\n # a path has no length if it has fewer than 2 vertices\n if len(vertex_path) < 2:\n continue\n # convert vertex indices to entity indices\n entity_paths.append(\n vertex_to_entity_path(vertex_path,\n graph,\n entities,\n vertices))\n\n return entity_paths",
"def _find_all_paths(self, start_vertex: str, end_vertex: str, path=[]):\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n\n paths = []\n for vertex in self.graph[start_vertex]:\n if vertex not in path:\n extended_paths = self._find_all_paths(vertex,\n end_vertex,\n path)\n for p in extended_paths:\n paths.append(p)\n return paths",
"def transition_path(self, str_representation = True):\n node, path_back = self, []\n while node:\n modification_name = 'None'\n if node.action:\n modification_name = node.action.__str__()\n if modification_name is not 'None':\n if(str_representation):\n path_back.append(modification_name)\n else:\n path_back.append(node.action)\n node = node.parent\n return list(reversed(path_back))",
"def _find_relations(self, node, depth=0):\n depth += 1\n\n model = node.model\n opts = model._meta\n\n # determine relational fields to determine paths\n forward_fields = opts.fields\n reverse_fields = opts.get_all_related_objects()\n\n forward_o2o = filter(self._filter_one2one, forward_fields)\n reverse_o2o = filter(self._filter_related_one2one, reverse_fields)\n\n forward_fk = filter(self._filter_fk, forward_fields)\n reverse_fk = filter(self._filter_related_fk, reverse_fields)\n\n forward_m2m = filter(self._filter_m2m, opts.many_to_many)\n reverse_m2m = filter(self._filter_related_m2m,\n opts.get_all_related_many_to_many_objects())\n\n # iterate m2m relations\n for f in forward_m2m:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'manytomany',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related m2m fields\n for r in reverse_m2m:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'manytomany',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over one2one fields\n for f in forward_o2o:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'onetoone',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': False,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related one2one fields\n for r in reverse_o2o:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'onetoone',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': False,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over fk fields\n for f in forward_fk:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'foreignkey',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': f.null,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related foreign keys\n for r in reverse_fk:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'foreignkey',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n return node",
"def find_all_path(self, start_vertex, end_vertex, path=[]):\n\n graph = self.__graph_dict\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n\n if start_vertex not in graph:\n return []\n\n paths = []\n for vertex in graph[start_vertex]:\n if vertex not in path:\n extended_paths = self.find_all_path(vertex, end_vertex,path)\n for p in extended_paths:\n paths.append(p)\n return paths",
"def extract_relations(self, ne_tagged_line, dependency_tree, pos_tagged_line):\n # Normalize resources\n aligned_ne_tagged_line = self._align_tagged_sentence(ne_tagged_line)\n aligned_pos_tagged_line = self._align_tagged_sentence(pos_tagged_line)\n normalized_dependency_tree = self._normalize_node_addresses(dependency_tree)\n\n verb_nodes = self._extract_verb_nodes(normalized_dependency_tree, aligned_pos_tagged_line)\n extracted_relations = []\n\n for verb_node in verb_nodes:\n subj_node, obj_node = self._get_subj_and_obj(verb_node, normalized_dependency_tree)\n\n expanded_subj_node = self._expand_node(subj_node, normalized_dependency_tree)\n expanded_obj_node = self._expand_node(obj_node, normalized_dependency_tree)\n\n # TODO (FEATURE): Extend definition of verb nodes? (Allow more patterns) [DU 18.04.17]\n # At the moment, the simple extraction heuristic ist just the following:\n # 1.) Find all verb nodes in a dependency tree\n # 2.) Find subject and object of that verb\n # 3.) Check if they are tagged with a Named Entity Tag\n # 4.) If one of them is tagged, extract the hold phrase as a relation triplet\n #\n # Possible improvements\n # - Use Machine Learning to learn patterns from pre-annotated corpus\n # - Alternatively, come up with more sophisticated rules manually\n # - Only extract relevant relationships\n # - Only extract the relevant parts of a relationship\n\n if self._expanded_node_is_ne_tagged(expanded_subj_node, aligned_ne_tagged_line) or \\\n self._expanded_node_is_ne_tagged(expanded_obj_node, aligned_ne_tagged_line):\n subj_phrase = self._join_expanded_node(expanded_subj_node)\n obj_phrase = self._join_expanded_node(expanded_obj_node)\n extracted_relations.append((subj_phrase, verb_node[\"word\"], obj_phrase))\n\n return extracted_relations",
"def vertex_to_entity_path(vertex_path,\n graph,\n entities,\n vertices=None):\n def edge_direction(a, b):\n \"\"\"\n Given two edges, figure out if the first needs to be\n reversed to keep the progression forward.\n\n [1,0] [1,2] -1 1\n [1,0] [2,1] -1 -1\n [0,1] [1,2] 1 1\n [0,1] [2,1] 1 -1\n\n Parameters\n ------------\n a : (2,) int\n b : (2,) int\n\n Returns\n ------------\n a_direction : int\n b_direction : int\n \"\"\"\n if a[0] == b[0]:\n return -1, 1\n elif a[0] == b[1]:\n return -1, -1\n elif a[1] == b[0]:\n return 1, 1\n elif a[1] == b[1]:\n return 1, -1\n else:\n constants.log.debug('\\n'.join([\n 'edges not connected!',\n 'vertex path %s',\n 'entity path: %s',\n 'entity[a]: %s,',\n 'entity[b]: %s']),\n vertex_path,\n entity_path,\n entities[ea].points,\n entities[eb].points)\n\n return None, None\n\n if vertices is None or vertices.shape[1] != 2:\n ccw_direction = 1\n else:\n ccw_check = is_ccw(vertices[np.append(vertex_path,\n vertex_path[0])])\n ccw_direction = (ccw_check * 2) - 1\n\n # make sure vertex path is correct type\n vertex_path = np.asanyarray(vertex_path, dtype=np.int64)\n # we will be saving entity indexes\n entity_path = []\n # loop through pairs of vertices\n for i in np.arange(len(vertex_path) + 1):\n # get two wrapped vertex positions\n vertex_path_pos = np.mod(np.arange(2) + i, len(vertex_path))\n vertex_index = vertex_path[vertex_path_pos]\n entity_index = graph.get_edge_data(*vertex_index)['entity_index']\n entity_path.append(entity_index)\n # remove duplicate entities and order CCW\n entity_path = grouping.unique_ordered(entity_path)[::ccw_direction]\n # check to make sure there is more than one entity\n if len(entity_path) == 1:\n # apply CCW reverse in place if necessary\n if ccw_direction < 0:\n index = entity_path[0]\n entities[index].reverse()\n\n return entity_path\n # traverse the entity path and reverse entities in place to\n # align with this path ordering\n round_trip = np.append(entity_path, entity_path[0])\n round_trip = zip(round_trip[:-1], round_trip[1:])\n for ea, eb in round_trip:\n da, db = edge_direction(entities[ea].end_points,\n entities[eb].end_points)\n if da is not None:\n entities[ea].reverse(direction=da)\n entities[eb].reverse(direction=db)\n\n entity_path = np.array(entity_path)\n\n return entity_path",
"def relations_from(self, start_node):",
"def get_relations(self):\n triples = list(self.get_triples())\n\n for s, p, o in triples:\n if not p.startswith(\"rel\"):\n s, o = int(s.id), int(o.id)\n yield {\"predicate\": p,\n \"subject\": s,\n \"subject_nodes\": list(self.get_descendants(s, triples)),\n \"object\": o,\n \"object_nodes\": list(self.get_descendants(o, triples)),\n }",
"def find_all_paths(parents_to_children, start, end, path=[]):\r\n path = path + [start]\r\n if start == end:\r\n return [path]\r\n if start not in parents_to_children.keys():\r\n return []\r\n paths = []\r\n for node in parents_to_children[start]:\r\n if node not in path:\r\n newpaths = find_all_paths(parents_to_children, node, end, path)\r\n for newpath in newpaths:\r\n paths.append(tuple(newpath))\r\n return paths",
"def get_diagram(self):\n self_nodes=self.nodes.all()\n self_arrows=self.arrows.all()\n \n \n if len(self_nodes)==0:\n return False\n \n nodes = [n.get_icon_obj() for n in self_nodes]\n node_liens = [n.liens.all() for n in self_nodes]\n \n pairs = []\n for n,n_liens in zip(nodes,node_liens):\n if len(n_liens)==0:\n liens = Lien.objects.filter(cause__id=n.target_id).all()\n liens = [l.consequence.id for l in liens]\n temp = [(n,target) for target in nodes if target.target_id in liens]\n pairs.extend(temp)\n else:\n ids=set([(i.cause.id,i.consequence.id) for i in n_liens])\n for n in nodes:\n pairs.extend([(n,i) for i in nodes if i is not n and \n (n.target_id,i.target_id) in ids])\n ids = set([(i.cause.id,i.consequence.id) for i in self_arrows])\n pairs = [p for p in pairs if (p[0].target_id,p[1].target_id) not in ids]\n \n lines=[]\n arrows=[]\n for obj in self_arrows:\n \n n0=[i for i in nodes if i.node_id==obj.cause.id]\n n1=[i for i in nodes if i.node_id==obj.consequence.id]\n if len(n0)!=1 or len(n1)!=1:\n continue\n n0=n0[0]\n n1=n1[0]\n \n pt=[(obj.X0, obj.Y0), (obj.X1,obj.Y1)]\n pt=[np.array(i) for i in pt if None not in i]\n if len(pt)==0:\n pairs.append((n0,n1))\n continue\n pairs = [p for p in pairs if (p[0].node_id,p[1].node_id)!=(n0.node_id,n1.node_id)]\n vect = pt[0]-np.array(n0.pos)\n first_pt = np.array(n0.pos)+vect*n0.size/np.sqrt(sum(vect*vect))\n vect = np.array(n1.pos) - pt[-1]\n last_pt = np.array(n1.pos)-vect*n1.size/np.sqrt(sum(vect*vect))\n pt=[first_pt,*pt,last_pt]\n \n lines.extend([((*i,*j),n0.color) for i,j in zip(pt[:-1],pt[1:])])\n arrows.append(((*pt[-2],*pt[-1]),n0.color))\n \n \n margin=10\n line_width=2\n \n diagram=DiagramObj(self.id,nodes,pairs,margin,\n self.width,self.height,line_width)\n diagram.add_arrows(arrows,lines)\n print(diagram.lines)\n return diagram",
"def path(self):\r\n node, p = self, []\r\n while node:\r\n p.append(node)\r\n node = node.parent\r\n yield from reversed(p)",
"def get_relatives(\n self, reltypes=None, relfilter=None, fetch_objects=True, ignore_missing=True\n ):\n ret = defaultdict(set)\n relations = self.icalendar_component.get(\"RELATED-TO\", [])\n if not isinstance(relations, list):\n relations = [relations]\n for rel in relations:\n if relfilter and not relfilter(rel):\n continue\n reltype = rel.params.get(\"RELTYPE\", \"PARENT\")\n if reltypes and not reltype in reltypes:\n continue\n ret[reltype].add(str(rel))\n\n if fetch_objects:\n for reltype in ret:\n uids = ret[reltype]\n ret[reltype] = []\n for obj in uids:\n try:\n ret[reltype].append(self.parent.object_by_uid(obj))\n except error.NotFoundError:\n if not ignore_missing:\n raise\n return ret",
"def compute_relations(nodes: List[Node]) -> None:\n # Calculate parents\n for node in nodes:\n node.parents = []\n for node in nodes:\n for child in node.children():\n child.parents.append(node)\n\n def compute_dominators(\n entry: Node,\n parents: Callable[[Node], List[Node]],\n dominators: Callable[[Node], Set[Node]],\n immediately_dominates: Callable[[Node], List[Node]],\n set_immediate_dominator: Callable[[Node, Optional[Node]], None],\n ) -> None:\n # See https://en.wikipedia.org/wiki/Dominator_(graph_theory)#Algorithms\n # Note: if `n` is unreachable from `entry`, then *every* node will\n # vacuously belong to `n`'s dominator set.\n for n in nodes:\n dominators(n).clear()\n if n == entry:\n dominators(n).add(n)\n else:\n dominators(n).update(nodes)\n\n changes = True\n while changes:\n changes = False\n for node in nodes:\n if node == entry:\n continue\n nset = dominators(node)\n for parent in parents(node):\n nset = nset.intersection(dominators(parent))\n nset.add(node)\n if len(nset) < len(dominators(node)):\n assert nset.issubset(dominators(node))\n dominators(node).intersection_update(nset)\n changes = True\n\n # Compute immediate dominator, and the inverse relation\n for node in nodes:\n immediately_dominates(node).clear()\n for node in nodes:\n doms = dominators(node).difference({node})\n # If `node == entry` or the flow graph is not reducible, `doms` may be empty.\n # TODO: Infinite loops could be made reducible by introducing\n # branches like `if (false) { return; }` without breaking semantics\n if doms:\n # There should be a unique max `len(dominators(d))` if the flowgraph\n # is reducible. Fall back to largest index for irreducible graphs.\n imdom = max(doms, key=lambda d: (len(dominators(d)), d.block.index))\n immediately_dominates(imdom).append(node)\n set_immediate_dominator(node, imdom)\n else:\n set_immediate_dominator(node, None)\n for node in nodes:\n immediately_dominates(node).sort(key=lambda x: x.block.index)\n\n def _set_immediate_dominator(node: Node, imdom: Optional[Node]) -> None:\n node.immediate_dominator = imdom\n\n def _set_immediate_postdominator(node: Node, impdom: Optional[Node]) -> None:\n node.immediate_postdominator = impdom\n\n entry = nodes[0]\n terminal = nodes[-1]\n assert isinstance(terminal, TerminalNode)\n\n # Compute dominators & immediate dominators\n compute_dominators(\n entry=entry,\n parents=lambda n: n.parents,\n dominators=lambda n: n.dominators,\n immediately_dominates=lambda n: n.immediately_dominates,\n set_immediate_dominator=_set_immediate_dominator,\n )\n\n # Compute postdominators & immediate postdominators\n # This uses the same algorithm as above, but with edges reversed\n compute_dominators(\n entry=terminal,\n parents=lambda n: n.children(),\n dominators=lambda n: n.postdominators,\n immediately_dominates=lambda n: n.immediately_postdominates,\n set_immediate_dominator=_set_immediate_postdominator,\n )\n\n # Iterate over all edges n -> c and check for backedges, which define natural loops\n for node in nodes:\n for child in node.children():\n if child not in node.dominators:\n continue\n # Found a backedge node -> child where child dominates node; child is the \"head\" of the loop\n if child.loop is None:\n child.loop = NaturalLoop(child)\n child.loop.nodes |= {child, node}\n child.loop.backedges.add(node)\n for parent in nodes:\n if reachable_without(parent, node, child):\n child.loop.nodes.add(parent)",
"def dfs_paths_dict_recur(\n graph: Mapping[Node, set[Node]],\n start: Node,\n goal: Node,\n path: Optional[list[Node]] = None\n) -> Iterable[list[Node]]:\n if path is None:\n path = [start]\n if start == goal:\n yield path\n else:\n for next_node in graph[start].difference(path):\n next_path = path + [next_node]\n yield from dfs_paths_dict_recur(graph, next_node, goal, next_path)",
"def find_all_paths(graph, start, end, path=[]):\n path = path + [start]\n if start == end:\n return [path]\n paths = []\n for node in graph[start]:\n newpaths = find_all_paths(graph, node, end, path)\n paths += newpaths\n return paths",
"def decompose_paths(self):\n if self.child_nodes == {}:\n return []\n\n import numpy as np\n\n def decompose_paths_rec(node_inner, path):\n \"\"\"\n This function does the recursive create_path of the decomposition\n :param node_inner:\n :param path:\n \"\"\"\n if node_inner.is_leaf():\n path = np.append(path, str(node_inner.value))\n return path[None]\n else:\n paths = np.array([])\n for edge_name in node_inner.child_nodes:\n new_path = np.append(path, str(edge_name))\n paths = np.append(paths, decompose_paths_rec(node_inner.child_nodes[edge_name], new_path))\n return paths\n\n decomposition = decompose_paths_rec(self, np.array([]))\n return decomposition.reshape((decomposition.shape[0]/(self.d+1), self.d+1))",
"def path(self):\n node, return_path = self, []\n while node:\n # Add the nodes in reverse order to a list until you reach the\n # root parent node which will terminate the loop\n return_path.append(node)\n node = node.parent\n # Reverse the list to get the proper path back\n return list(reversed(return_path))",
"def reconstruct_path(current):\r\n path = [current.coord]\r\n parent = current.parent\r\n while parent:\r\n path = [parent.coord] + path\r\n parent = parent.parent\r\n path = path[1:]\r\n return path",
"def reconstruct_path(cameFrom, current):\n total_path = np.array([[current.x],[current.y]])\n while current_in_cameFrom(current,cameFrom):\n current = current.father\n node_x = current.x\n node_y = current.y\n node_pos = np.array([[node_x],[node_y]])\n total_path = np.hstack((total_path,node_pos))\n\n l1 = total_path[0,:]\n l1 = l1[::-1]\n l2 = total_path[1,:]\n l2 = l2[::-1]\n total_path = np.vstack((l1,l2))\n return total_path",
"def GetInOutRelationsForList(self, G, node, relations=[]):\n res = {gc.InputRelations: {}, gc.OutgoingRelations : {}}\n if len(relations) > 0:\n outEdges = [edge for edge in G.out_edges([node], data = True) if Strings.rel in edge[2]]\n inEdges = [edge for edge in G.in_edges([node], data = True) if Strings.rel in edge[2]]\n \n for rel in relations:\n outRelations = [r for r in outEdges if (Strings.rel, rel) in list(r[2].items())]\n res[gc.OutgoingRelations][rel] = outRelations\n inRelations = [r for r in inEdges if (Strings.rel, rel) in list(r[2].items())]\n res[gc.InputRelations][rel] = inRelations\n return res",
"def _deconstruct_path(predecessors, end):\n if end not in predecessors:\n return None\n current = end\n path = []\n while current:\n path.append(current)\n current = predecessors.get(current)\n return list(reversed(path))",
"def encode_with_relatives(entity, **kwargs):\n exclude_models = kwargs.pop('exclude_models', None)\n entities, models = collect_related_instanses(entity, exclude_models)\n parsed_data = serializers.serialize('json', entities, use_natural_keys=True, **kwargs)\n parsed_data = json.loads(parsed_data)\n return create_encoded_entity(parsed_data, entity)",
"def find_all_path(self, start, end, path=[]):\n path = path+[start]\n if start == end:\n return path\n paths = []\n for node in self.graph[start]:\n if node not in path:\n newpaths = self.find_path(node, end, path)\n paths.append(newpaths)\n return paths"
] | [
"0.6200332",
"0.5851866",
"0.571507",
"0.56179786",
"0.5481076",
"0.5464558",
"0.5461847",
"0.5296667",
"0.5288939",
"0.5283457",
"0.5277893",
"0.5269692",
"0.52677166",
"0.52636945",
"0.5238955",
"0.5237788",
"0.5216618",
"0.52149516",
"0.5190407",
"0.51791054",
"0.5176276",
"0.5171295",
"0.51322275",
"0.51202583",
"0.51186204",
"0.50996196",
"0.5097509",
"0.50821847",
"0.50636387",
"0.50467736"
] | 0.6647005 | 0 |
This function is used to write all paths between any two entities that are connected by the input relation to a file. Because this function will go through all paths node by node, this function will also used to filter paths to save computation. | def write_and_filter_paths(self, source, target, relation, label, paths):
file_dir = os.path.join(self.save_dir, relation + "_" + str(self.maximum_length) + "_" + str(self.remaining_percentage) + "_" + str(self.random_seed) + ".txt")
with open(file_dir, "a") as fh:
fh.write(str(label) + "\t" + str(source) + "\t" + str(target) + "\t")
for pdx, path in enumerate(paths):
if not self.include_entity:
if len(path) == 1:
continue
for rdx, rel_idx in enumerate(path):
fh.write(self.idx_to_relation[rel_idx])
if rdx != len(path)-1:
fh.write("|")
if pdx != len(paths)-1:
fh.write("###")
else:
if len(path) == 3:
continue
fh.write(self.idx_to_node[path[0]].get_name())
fh.write("|")
for rdx in range(0, (len(path)-1)/2):
fh.write(self.idx_to_relation[path[rdx*2+1]])
fh.write("|")
fh.write(self.idx_to_node[path[rdx*2+2]].get_name())
if rdx*2+2 != len(path)-1:
fh.write("|")
if pdx != len(paths)-1:
fh.write("###")
fh.write("\n") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writePathways( self ):\n\n self.logger.info( 'writePathways: START' )\n\n # Generate inserts for meabolic pathways.\n self.importerPathway.writePathways()\n\n self.logger.info( 'writePathways: DONE' )",
"def filter_paths(self, paths):\n formatted_paths = set()\n for path in paths:\n formatted_path = []\n if self.include_entity:\n if len(path) == 3:\n continue\n formatted_path.append(self.idx_to_node[path[0]].get_name())\n for rdx in range(0, (len(path)-1)/2):\n formatted_path.append(self.idx_to_relation[path[rdx*2+1]])\n formatted_path.append(self.idx_to_node[path[rdx*2+2]].get_name())\n else:\n if len(path) == 1:\n continue\n for rel_idx in path:\n formatted_path.append(self.idx_to_relation[rel_idx])\n formatted_paths.add(tuple(formatted_path))\n return formatted_paths",
"def convert_paths(self):\n # convert to node sequences, dropping s'\n self.nodeseq_paths = []\n for path in self.paths:\n node_seq = [] # don't include s'\n for arc in path:\n node_seq.append(self.arc_info[arc]['destin'])\n self.nodeseq_paths.append(node_seq)\n # convert to og graph\n self.converted_paths = []\n for path in self.nodeseq_paths:\n this_path = []\n add_next_node = True\n for i in range(len(path) - 1):\n print(\"This path is\", this_path)\n node1 = path[i]\n node2 = path[i + 1]\n print(\"node1={}, node2={}\".format(node1, node2))\n if (node1, node2) in self.mapping:\n sc = self.mapping[(node1, node2)]\n print(\"uses sc edge for {}\".format(sc))\n print(\"should add {}, but also need to check for overlaps\".\n format(sc[1:-1]))\n if sc[1] in this_path:\n # we have an overlap\n start = len(this_path) - this_path.index(sc[1])\n this_path.extend(sc[start:-1])\n else:\n this_path.extend(sc[1:-1])\n add_next_node = False # next node is second of sc edge\n elif add_next_node:\n this_path.append(node1)\n else:\n add_next_node = True\n this_path.append(path[-1])\n self.converted_paths.append(this_path)",
"def create_all(graph,first_last_fn):\n trip_id = 1\n line_num = 0\n num_trips = 0\n trip_id2model = {}\n #paths = {}\n p = Path(trip_id,graph,line_num=line_num)\n trip_id2model[trip_id] = p.edges\n num_trips += 1\n #paths[trip_id] = p\n while p.next_line != len(graph.lines):#file_length:\n graph.trip_id2line_num[trip_id] = line_num\n line_num = p.next_line\n trip_id = normalize_simple(graph.lines[line_num])[0]\n #trip_id = dg.normalize(lines[line_num])[0]\n p = Path(trip_id,graph,line_num=line_num)\n trip_id2model[trip_id] = p.edges\n num_trips += 1\n # paths[trip_id] = p\n graph.trip_id2line_num[trip_id] = line_num\n graph.num_trips = num_trips\n\n\n with open(first_last_fn,'wb') as output:\n pickle.dump(graph.first_last2trip_ids,output)\n\n with open('pickles/trip_id2model.pickle','wb') as output:\n pickle.dump(trip_id2model,output)\n #return paths",
"def expand_paths_by_nodes(self, paths):\n paths_formatted = set()\n # Expand each path\n for path in paths:\n if len(path) < 2:\n continue\n expanded_paths = set()\n if self.include_entity:\n relations_for_each_step = [[path[0]]]\n else:\n relations_for_each_step = []\n for index in range(1, len(path)):\n node1 = path[index-1]\n node2 = path[index]\n if (node1, node2) in self.pair_to_relations:\n relations = self.pair_to_relations[(node1, node2)]\n else:\n print(node1, node2)\n relations_for_each_step.append(relations)\n if self.include_entity:\n relations_for_each_step.append([node2])\n expanded_paths.update(list(itertools.product(*relations_for_each_step)))\n paths_formatted.update(expanded_paths)\n return paths_formatted",
"def _generate_file_paths(self):\n for table_name in self.tables:\n logger.info(f\"Generating input and output paths for table '{table_name}'...\")\n self.input_paths[table_name] = os.path.join(self.pipeline['input_dir'], f'{table_name}.xml')\n logger.info(f\"Input path for table '{table_name}': {self.input_paths[table_name]}\")\n self.output_paths[table_name] = os.path.join(self.pipeline['output_dir'], f'{table_name}.jsonl')\n logger.info(f\"Output path for table '{table_name}': {self.output_paths[table_name]}\")\n logger.info(f\"Generated {len(self.input_paths)} input paths and {len(self.output_paths)} output paths.\")",
"def visited_nodes_to_file(self):\r\n # Create and write file only if we have something to write\r\n if len(self.visited_node) > 0:\r\n with open('{}'.format(self.path), mode='w') as f:\r\n # Writing line by line to the file\r\n for node, val in self.visited_node:\r\n f.write('{} {}\\n'.format(self.convert_matrix_rastor(node), val))",
"def filter_paths(self, blobs):\n # check against one map for read, one for write\n # if check fails, figure out if it was the view map or the protects\n # that caused the problem and report accordingly\n self.author_denied = []\n self.pusher_denied = []\n self.foruser_denied = []\n self.fusion_denied = []\n self.unmapped = []\n c2d = P4.Map.RIGHT2LEFT\n\n LOG.debug('filter_paths() write_filter: %s', self.write_filter)\n for blob in blobs:\n gwt_path = self.ctx.gwt_path(blob['path'])\n topath_c = gwt_path.to_client()\n topath_d = gwt_path.to_depot()\n\n LOG.debug('filter_paths() topath_d: %s', topath_d)\n # for all actions, need to check write access for dest path\n result = \" \" # zum loggen\n if topath_d and P4GF_DEPOT_OBJECTS_RE.match(topath_d):\n LOG.debug('filter_paths() topath_d in //.git-fusion/objects')\n continue\n # do not require user write access to //.git-fusion/branches\n if topath_d and P4GF_DEPOT_BRANCHES_RE.match(topath_d):\n LOG.debug('filter_paths() topath_d in //.git-fusion/branches')\n continue\n if not self.write_filter.includes(topath_c, c2d):\n if not self.view_map.includes(topath_c, c2d):\n self.unmapped.append(topath_c)\n result = NTR('unmapped')\n elif not (self.ignore_author_perms or\n self.write_protect_author.includes(topath_d)):\n self.author_denied.append(topath_c)\n result = NTR('author denied')\n elif (self.write_protect_pusher and\n not self.write_protect_pusher.includes(topath_d)):\n self.pusher_denied.append(topath_c)\n result = NTR('pusher denied')\n elif (self.write_protect_foruser and\n not self.write_protect_foruser.includes(topath_d)):\n self.foruser_denied.append(topath_c)\n result = NTR('foruser denied')\n elif not self.write_protect_fusion.includes(topath_d):\n self.fusion_denied.append(topath_c)\n result = NTR('Git Fusion denied')\n else:\n result = \"?\"\n LOG.error('filter_paths() {:<13} {}, {}, {}'\n .format(result, blob['path'], topath_d, topath_c))\n elif LOG.isEnabledFor(logging.DEBUG):\n LOG.debug('filter_paths() topath_c in write_filter: %s', topath_c)",
"def get_all_path_facts(self, question_entities, answer_entities, passage_entities, seed_weighting=True, fp=None):\n\n if FLAGS.verbose_logging:\n print('Getting subgraph')\n tf.logging.info('Getting subgraph')\n question_entity_ids = [\n int(self.data.ent2id[x]) for x in question_entities if x in self.data.ent2id\n ]\n question_entity_names = str([self.data.entity_names['e'][str(x)]['name'] for x in question_entity_ids\n ])\n #if fp is not None:\n # fp.write(str(question_entities)+\"\\t\"+question_entity_names+\"\\t\")\n if FLAGS.verbose_logging:\n print('Question Entities')\n tf.logging.info('Question Entities')\n print(question_entities)\n print(question_entity_names)\n tf.logging.info(question_entity_names)\n\n answer_entity_ids = [\n int(self.data.ent2id[x]) for x in answer_entities if x in self.data.ent2id\n ]\n answer_entity_names = str([self.data.entity_names['e'][str(x)]['name'] for x in answer_entity_ids\n ])\n #if fp is not None:\n # fp.write(str(answer_entities)+\"\\t\"+answer_entity_names+\"\\t\")\n if FLAGS.verbose_logging:\n print('Answer Entities')\n tf.logging.info('Answer Entities')\n print(answer_entities)\n print(answer_entity_names)\n tf.logging.info(answer_entity_names)\n passage_entity_ids = [\n int(self.data.ent2id[x]) for x in passage_entities if x in self.data.ent2id\n ]\n passage_entity_names = str([self.data.entity_names['e'][str(x)]['name'] for x in passage_entity_ids\n ])\n if FLAGS.verbose_logging:\n print('Passage Entities')\n tf.logging.info('Passage Entities')\n print(passage_entity_names)\n tf.logging.info(passage_entity_names)\n\n freq_dict = {x: question_entity_ids.count(x) for x in question_entity_ids}\n\n extracted_paths, num_hops = csr_get_all_paths(question_entity_ids, self.data.adj_mat_t_csr, answer_entity_ids, self.data.rel_dict, k_hop=FLAGS.k_hop)\n augmented_facts = self.get_all_path_augmented_facts(extracted_paths, self.data.entity_names)\n\n if FLAGS.verbose_logging:\n print('All path Extracted facts: ')\n print(str(augmented_facts))\n tf.logging.info('All path Extracted facts: ')\n tf.logging.info(str(augmented_facts))\n print(\"Num hops: \"+str(num_hops))\n return augmented_facts, num_hops",
"def test_find_all_paths():\n g = Graph()\n node_1 = Node({'A':['B','C']})\n g.add(node_1)\n node_2 = Node({'B':['C','D']})\n g.add(node_2)\n node_3 = Node({'C':['D']})\n g.add(node_3)\n node_4 = Node({'D':['C']})\n g.add(node_4)\n node_5 = Node({'E':['C']})\n g.add(node_5)\n\n # zero path between node_1 and node_5\n paths_0 = g.find_all_paths(node_1, node_5)\n assert len(paths_0) == 0\n # only one path between node_5 and node_4\n paths_1 = g.find_all_paths(node_5, node_4)\n assert len(paths_1) == 1\n assert [ node.name for node in paths_1[0] ] == [ node_5.name, node_3.name, node_4.name ]\n # three paths between node_1 and node_3, verify all the three paths are returned\n paths_3 = g.find_all_paths(node_1, node_3)\n assert len(paths_3) == 3\n for path in paths_3:\n assert [ node.name for node in path ] == [ node_1.name, node_2.name, node_3.name ] or \\\n [ node.name for node in path ] == [ node_1.name, node_2.name, node_4.name, node_3.name ] or \\\n [ node.name for node in path ] == [ node_1.name, node_3.name ]",
"def just_create_paths(graph):\n trip_id = 1\n line_num = 0\n num_trips = 0\n trip_id2model = {}\n p = Path(trip_id,graph,line_num=line_num)\n trip_id2model[trip_id] = p.edges\n num_trips += 1\n fl2t = p\n id2bad = {}\n while p.next_line != len(graph.lines):\n #if trip_id > 30:\n # return\n print trip_id\n line_num = p.next_line\n trip_id = normalize_simple(graph.lines[line_num])[0]\n p = Path(trip_id,graph,line_num=line_num)\n first,last = p.first_last\n \"\"\"\n simple = graph.is_simple(p.edges[:],first,last)\n if not simple or p.edges.count(1) == 0:\n #print \"%d: (%d,%d)\" % (trip_id,first,last)\n #graph.draw_grid(p.edges)\n id2bad[trip_id] = True\n \"\"\"\n trip_id2model[trip_id] = p.edges\n num_trips += 1\n\n #print len(id2bad.keys())\n #with open('pickles/trip_id2bad-%d-%d.pickle' % (graph.rows,graph.cols),'wb') as output:\n # pickle.dump(id2bad,output)\n with open('psdd/better_pickles/trip_id2model.pickle','wb') as output:\n pickle.dump(trip_id2model,output)",
"def find_path_to(output_var, input_var):\r\n\r\n #If output and input are the same we have a singleton path\r\n if output_var is input_var:\r\n return [output_var]\r\n\r\n #If output has no inputs then there is no path\r\n owner = output_var.owner\r\n\r\n if owner is None:\r\n return None\r\n\r\n #If input_var is an input to the output node, there is a\r\n #simple two element path\r\n inputs = owner.inputs\r\n\r\n if input_var in inputs:\r\n return [input_var, output_var]\r\n\r\n #Otherwise we must recurse by searching for a path to one\r\n #of our inputs, then appending the output to that path\r\n for ipt in inputs:\r\n path = find_path_to(ipt, input_var)\r\n\r\n if path is not None:\r\n path.append(output_var)\r\n\r\n return path\r\n\r\n #Since none of the above methods returned a path, there is none\r\n return None",
"def __saveEdges(self, edges):",
"def write_graph_to_file(self, path):\n graph = nx.Graph()\n for node in self.graph.nodes(data=True):\n new_node = deepcopy(node)\n new_node[1]['blocks'] = list(new_node[1]['blocks'])\n graph.add_node(*new_node)\n graph.add_edges_from(self.graph.edges())\n json.dump(json_graph.node_link_data(graph), open(path, 'w'))",
"def save_connections(self, path):\n\n print(\"Saving connections...\")\n\n # Iterate over layers to save each projection in a separate txt file.\n for projection in self.connections:\n filepath = os.path.join(path, projection.label.partition('→')[-1])\n if self.config.getboolean('output', 'overwrite') or \\\n confirm_overwrite(filepath):\n projection.save('connections', filepath)",
"def get_path(input_dictionary, output_dictionary,\n input_species_list, output_species_list):\n\n input_operon_list = []\n path_queue = [(input_operon_list, input_species_list) ]\n\n final_operon_path_list = []\n final_species_path_list = []\n\n while path_queue != []:\n\n ###print \"\\nget_path: path queue:\",path_queue\n\n path_queue,\\\n final_operon_path_list,\\\n final_species_path_list = traverse(input_dictionary,\n output_dictionary,\n input_species_list,\n output_species_list,\n path_queue,\n final_operon_path_list,\n final_species_path_list)\n\n return final_operon_path_list, final_species_path_list",
"def generate_output(input_filename: str, output_filename: str, goal_node: Node,\n generated: set) -> None:\n\n input_stream = io.open(input_filename, 'r', encoding='utf-8', errors='ignore',\n newline='\\n')\n with open(output_filename, 'w') as out_file:\n for i in range(0, 10):\n out_file.write(input_stream.readline().rstrip())\n out_file.write('\\n')\n \"\"\" The first ten lines of the output file are identical to those in the \n input file. The tenth line should be skipped because it's blank.\"\"\"\n out_file.write(str(goal_node.path_cost) + '\\n')\n # Line 11 of the output, the depth level d\n out_file.write(str(len(generated)) + '\\n')\n # Line 12 of the output, the total number of nodes generated\n\n # Writing Line 13 of the output, the sequence of moves\n length = len(goal_node.path_history)\n for i in range(length - 1):\n out_file.write(goal_node.path_history[i] + ' ')\n out_file.write(goal_node.path_history[length - 1] + '\\n')\n\n # Writing Line 14 of the output, the f(n) values\n f_line = str(goal_node.f) + ' '\n parent = goal_node.parent\n while parent: # Loop stops when parent == None\n f_line += (str(parent.f) + ' ')\n parent = parent.parent\n f_list = f_line.split(' ')\n # Breaks down the string to the integers it contains\n reverse = ''\n for i in range(len(f_list) - 2, -1, -1):\n # f_line[len(f_line)-1] is an extra whitespace character and\n # thus shouldn't be copied\n reverse += str(f_list[i])\n if i != 0:\n reverse += ' '\n \"\"\" The order of the f(n) values in f_line is from goal node \n to root node. The four lines above reverse the order, which \n is what the output format expects.\"\"\"\n out_file.write(reverse)\n\n out_file.close()",
"def manage_paths(node, paths) :\r\n\r\n #Getting the nodes neighbouring the given node\r\n neighbours = get_neighbouring_nodes(node) \r\n\r\n #Creating a new path branch\r\n new_path = [] #The new path\r\n path_found = False #Indicates whether the path to which the node belongs has been found\r\n\r\n #Looping through the neighbours\r\n for neighbour in neighbours :\r\n for path in paths :\r\n #Checking whether the path contains the neighbour\r\n if(neighbour in path) :\r\n index = path.index(neighbour)\r\n #Checking if the branch belongs to the current path\r\n if(path[index].gn_value == neighbour.gn_value) :\r\n new_path = path[:index + 1] + [node] #Creating a new path branch\r\n new_path[-1].gn_value = new_path.__len__() - 1 #Updating the node's g(n) value\r\n path_found = True\r\n break\r\n if(path_found) :\r\n break\r\n \r\n if(not path_found) :\r\n raise Exception(\"No branch junction found\")\r\n\r\n #Setting the new path as the current path\r\n return new_path",
"def save(self, path=\"\"):\n path = path + \"model_\" + str(self.name) + \".txt\"\n if os.path.isfile(path):\n os.remove(path)\n f = open(path, \"w+\")\n for ident in self.networks:\n f.write(ident + \"_\" + self.networks[ident].descriptor.codify_components() + \"_\" + str(self.networks[ident].taking.size) + \",\" + self.networks[ident].taking.type + \"_\" + str(self.networks[ident].producing.size) + \",\" + self.networks[ident].producing.type + \"_\" +\n str(self.networks[ident].depth) + \"_\" + \",\".join(self.reachable[ident]) + \"_\" + \",\".join(self.comps_below[ident]) + \"\\n\")\n f.write(\"\\n\")\n\n for ident in self.inputs:\n f.write(ident + \"_\" + str(self.inputs[ident].producing.size) + \"_\" + self.inputs[ident].producing.type + \"_\" + str(self.inputs[ident].depth) + \"\\n\")\n f.write(\"\\n\")\n\n for ident in self.outputs:\n f.write(ident + \"_\" + str(self.outputs[ident].taking.size) + \"_\" + self.outputs[ident].taking.type + \"_\" + str(self.outputs[ident].depth) + \"_\" + \",\".join(self.comps_below[ident]) + \"\\n\")\n f.write(\"\\n\")\n\n for con in self.connections:\n f.write(self.connections[con].codify() + \"\\n\")\n #f.write(\"\\n\")\n\n f.close()\n\n return path",
"def _mutate_file(self, node, visited = set([])):\n for ch in self._get_children(node):\n\n if ch not in visited:\n visited.add(ch)\n\n try:\n self._mutate_node(ch)\n except Exception as e:\n print(e)\n\n # Recursion is a bitch\n self._mutate_file(ch, visited)",
"def all_routing_tree_2(G, tors1, tors2, table_file_name):\n \n table = OrderedDict({})\n for s in G.nodes():\n table[s] = OrderedDict({})\n for s in tors1:\n for d in tors2:\n if s != d:\n routing(G, s, d, table)\n for d in tors1:\n for s in tors2:\n if s != d:\n routing(G, s, d, table)\n\n with open(table_file_name, 'w') as file:\n file.write(json.dumps(table))\n return table",
"def _find_all_paths(self, start_vertex: str, end_vertex: str, path=[]):\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n\n paths = []\n for vertex in self.graph[start_vertex]:\n if vertex not in path:\n extended_paths = self._find_all_paths(vertex,\n end_vertex,\n path)\n for p in extended_paths:\n paths.append(p)\n return paths",
"def update_trip_path(trip_mpois, paths, graph):\n n_nodes = len(trip_mpois)\n # adjacency matrix\n new_paths = np.zeros(shape=(n_nodes, n_nodes))\n\n # iterate through all the nodes and create a list of nodes with sequential id\n for i, node1 in enumerate(trip_mpois):\n for j, node2 in enumerate(trip_mpois):\n new_paths[i, j] = paths[node1, node2]\n\n # new_paths = new_paths/np.max(new_paths[new_paths < _INF])\n # new_paths[np.isinf(new_paths)] = _INF\n\n # create a dummy edge between end and start node with weight 0\n new_paths[1,0] = -_INF\n # new_paths[0,1] = _INF\n\n shortest_path = None\n if n_nodes > 5:\n shortest_path, dist = tsp.solve(n_nodes, new_paths)\n # shortest_path = range(n_nodes)\n else:\n shortest_path = range(n_nodes)\n\n trip_path = np.array(trip_mpois)[shortest_path]\n\n if ___DEBUG:\n fname = 'dump/' + str(n_nodes) + '.dist'\n np.savetxt(fname, new_paths, fmt='%.6f')\n \n mpoi_pos = np.zeros(shape=(n_nodes,2))\n \n for i, node in enumerate(trip_mpois):\n pos_3d = graph.vs[node]['position']\n assert node == graph.vs[node].index\n mpoi_pos[i,:] = pos_3d[:2]\n\n fname = 'dump/' + str(n_nodes) + '.pos'\n np.savetxt(fname, mpoi_pos)\n \n # print trip_mpois, trip_path\n\n return trip_path",
"def find_all_path(self, start_vertex, end_vertex, path=[]):\n\n graph = self.__graph_dict\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n\n if start_vertex not in graph:\n return []\n\n paths = []\n for vertex in graph[start_vertex]:\n if vertex not in path:\n extended_paths = self.find_all_path(vertex, end_vertex,path)\n for p in extended_paths:\n paths.append(p)\n return paths",
"def _path(from_object, to_object):\n\n if from_object._root != to_object._root:\n raise ValueError(\"No connecting path found between \" +\n str(from_object) + \" and \" + str(to_object))\n\n other_path = []\n obj = to_object\n while obj._parent is not None:\n other_path.append(obj)\n obj = obj._parent\n other_path.append(obj)\n object_set = set(other_path)\n from_path = []\n obj = from_object\n while obj not in object_set:\n from_path.append(obj)\n obj = obj._parent\n index = len(from_path)\n i = other_path.index(obj)\n while i >= 0:\n from_path.append(other_path[i])\n i -= 1\n return index, from_path",
"def export_blend_connections():\n selection_list = pm.ls(tr=1, sl=1, l=1)\n\n dialog_return = pm.fileDialog2(cap=\"Save As\", fm=0, ff='Text Files(*.txt)')\n\n filename = dialog_return[0]\n print(filename)\n\n print(\"\\n\\nFiles written:\\n--------------------------------------------\\n\")\n\n with open(filename, 'w') as fileId:\n for i in range(0, len(selection_list)):\n shapes = pm.listRelatives(selection_list[i], s=True, f=True)\n\n main_shape = \"\"\n for j in range(0, len(shapes)):\n if pm.getAttr(shapes[j] + '.intermediateObject') == 0:\n main_shape = shapes\n break\n if main_shape == \"\":\n main_shape = shapes[0]\n\n con = pm.listConnections(main_shape, t=\"blendShape\", c=1, s=1, p=1)\n\n cmd = \"connectAttr -f %s.worldMesh[0] %s;\" % (\n ''.join(map(str, main_shape)),\n ''.join(map(str, con[0].name()))\n )\n print (cmd + \"\\n\")\n fileId.write(\"%s\\n\" % cmd)\n\n print(\"\\n------------------------------------------------------\\n\")\n print(\"filename: %s ...done\\n\" % filename)",
"def get_path(self,first_node,last_node):\n edge_pattern=re.compile('edge_(?P<begin_node>\\w+)_(?P<end_node>\\w+)_(?P<iterator>\\w+)')\n exit_paths=self.get_exiting_edges(first_node)\n next_nodes=self.get_exiting_nodes(first_node)\n #be careful here using the wrong assignment statement breaks this function\n possible_paths=[]\n for exit_path in exit_paths:\n possible_paths.append([exit_path])\n #print(\"{0} is {1}\".format('possible_paths',possible_paths))\n for i in range(len(self.node_names)):\n for index,path in enumerate(possible_paths):\n last_edge=path[-1]\n match=re.match(edge_pattern,last_edge)\n begin_node=match.groupdict()['begin_node']\n end_node=match.groupdict()['end_node']\n #print next_node\n if end_node==last_node:\n #print(\"The path found is {0}\".format(path))\n return path\n next_possible_paths=[]\n next_edges=self.get_exiting_edges(end_node)\n next_nodes=self.get_exiting_nodes(end_node)\n #print(\"{0} is {1}\".format('next_edges',next_edges))\n for index,next_edge in enumerate(next_edges):\n #be careful here using the wrong assignment statement breaks this function\n #next_path=path is a deal breaker!!\n next_path=[]\n for edge in path:\n next_path.append(edge)\n #print(\"{0} is {1}\".format('next_path',next_path))\n #print(\"{0} is {1}\".format('next_edge',next_edge))\n #next_node=next_nodes[index]\n #print next_node\n next_match=re.match(edge_pattern,next_edge)\n next_node=next_match.groupdict()[\"end_node\"]\n begin_node_next_edge=next_match.groupdict()[\"begin_node\"]\n #print(\"{0} is {1}\".format('next_node',next_node))\n #print(\"{0} is {1}\".format('begin_node_next_edge',begin_node_next_edge))\n\n if next_node==last_node and begin_node_next_edge==end_node:\n next_path.append(next_edge)\n #print(\"The path found is {0}\".format(next_path))\n return next_path\n elif begin_node_next_edge==end_node:\n next_path.append(next_edge)\n next_possible_paths.append(next_path)\n #print(\"{0} is {1}\".format('next_possible_paths',next_possible_paths))\n else:\n pass\n #print(\"{0} is {1}\".format('next_possible_paths',next_possible_paths))\n possible_paths=next_possible_paths\n #print(\"{0} is {1}\".format('possible_paths',possible_paths))",
"def path(g): #g: graph\n marked = set()\n nodes = set(g.nodes) \n output = list()\n def recursive(g):\n for i in nodes.copy():\n d = dependents(g,i)\n if (not d) or all(dd in marked for dd in d):\n output.append((i,g.nodes[i]['word']))\n marked.add(i)\n nodes.remove(i)\n if nodes==set([0]):\n break\n recursive(g)\n break\n recursive(g)\n return output",
"def savegraph(self, path):\n\n raise NotImplementedError",
"def writeImports2File(self, file, indent = \" \"):\r\n # import each entity and its associated graphical file\r\n for obj in self.listNodes.keys():\r\n file.write(indent+\"from \"+obj+\" import \"+obj+\"\\n\")\r\n if not obj[0:4] == \"ASG_\":\r\n file.write(indent+\"from graph_\"+obj+\" import graph_\"+obj+\"\\n\")"
] | [
"0.6408844",
"0.6286845",
"0.62742597",
"0.619009",
"0.6096018",
"0.5878812",
"0.5735827",
"0.5685394",
"0.56852794",
"0.5661119",
"0.5632934",
"0.5616579",
"0.5599253",
"0.55716807",
"0.556647",
"0.5563934",
"0.5434351",
"0.54200315",
"0.537365",
"0.5366517",
"0.53606457",
"0.533113",
"0.530668",
"0.53060406",
"0.5296164",
"0.52914524",
"0.5289586",
"0.52885544",
"0.527888",
"0.52771676"
] | 0.74876946 | 0 |
Filter all paths, converting paths represented by relation and entity indices into paths represented by relation and entity names. | def filter_paths(self, paths):
formatted_paths = set()
for path in paths:
formatted_path = []
if self.include_entity:
if len(path) == 3:
continue
formatted_path.append(self.idx_to_node[path[0]].get_name())
for rdx in range(0, (len(path)-1)/2):
formatted_path.append(self.idx_to_relation[path[rdx*2+1]])
formatted_path.append(self.idx_to_node[path[rdx*2+2]].get_name())
else:
if len(path) == 1:
continue
for rel_idx in path:
formatted_path.append(self.idx_to_relation[rel_idx])
formatted_paths.add(tuple(formatted_path))
return formatted_paths | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_and_filter_paths(self, source, target, relation, label, paths):\n file_dir = os.path.join(self.save_dir, relation + \"_\" + str(self.maximum_length) + \"_\" + str(self.remaining_percentage) + \"_\" + str(self.random_seed) + \".txt\")\n with open(file_dir, \"a\") as fh:\n fh.write(str(label) + \"\\t\" + str(source) + \"\\t\" + str(target) + \"\\t\")\n for pdx, path in enumerate(paths):\n if not self.include_entity:\n if len(path) == 1:\n continue\n for rdx, rel_idx in enumerate(path):\n fh.write(self.idx_to_relation[rel_idx])\n if rdx != len(path)-1:\n fh.write(\"|\")\n if pdx != len(paths)-1:\n fh.write(\"###\")\n else:\n if len(path) == 3:\n continue\n fh.write(self.idx_to_node[path[0]].get_name())\n fh.write(\"|\")\n for rdx in range(0, (len(path)-1)/2):\n fh.write(self.idx_to_relation[path[rdx*2+1]])\n fh.write(\"|\")\n fh.write(self.idx_to_node[path[rdx*2+2]].get_name())\n if rdx*2+2 != len(path)-1:\n fh.write(\"|\")\n if pdx != len(paths)-1:\n fh.write(\"###\")\n fh.write(\"\\n\")",
"def filter_paths(self, blobs):\n # check against one map for read, one for write\n # if check fails, figure out if it was the view map or the protects\n # that caused the problem and report accordingly\n self.author_denied = []\n self.pusher_denied = []\n self.foruser_denied = []\n self.fusion_denied = []\n self.unmapped = []\n c2d = P4.Map.RIGHT2LEFT\n\n LOG.debug('filter_paths() write_filter: %s', self.write_filter)\n for blob in blobs:\n gwt_path = self.ctx.gwt_path(blob['path'])\n topath_c = gwt_path.to_client()\n topath_d = gwt_path.to_depot()\n\n LOG.debug('filter_paths() topath_d: %s', topath_d)\n # for all actions, need to check write access for dest path\n result = \" \" # zum loggen\n if topath_d and P4GF_DEPOT_OBJECTS_RE.match(topath_d):\n LOG.debug('filter_paths() topath_d in //.git-fusion/objects')\n continue\n # do not require user write access to //.git-fusion/branches\n if topath_d and P4GF_DEPOT_BRANCHES_RE.match(topath_d):\n LOG.debug('filter_paths() topath_d in //.git-fusion/branches')\n continue\n if not self.write_filter.includes(topath_c, c2d):\n if not self.view_map.includes(topath_c, c2d):\n self.unmapped.append(topath_c)\n result = NTR('unmapped')\n elif not (self.ignore_author_perms or\n self.write_protect_author.includes(topath_d)):\n self.author_denied.append(topath_c)\n result = NTR('author denied')\n elif (self.write_protect_pusher and\n not self.write_protect_pusher.includes(topath_d)):\n self.pusher_denied.append(topath_c)\n result = NTR('pusher denied')\n elif (self.write_protect_foruser and\n not self.write_protect_foruser.includes(topath_d)):\n self.foruser_denied.append(topath_c)\n result = NTR('foruser denied')\n elif not self.write_protect_fusion.includes(topath_d):\n self.fusion_denied.append(topath_c)\n result = NTR('Git Fusion denied')\n else:\n result = \"?\"\n LOG.error('filter_paths() {:<13} {}, {}, {}'\n .format(result, blob['path'], topath_d, topath_c))\n elif LOG.isEnabledFor(logging.DEBUG):\n LOG.debug('filter_paths() topath_c in write_filter: %s', topath_c)",
"def expand_paths_by_nodes(self, paths):\n paths_formatted = set()\n # Expand each path\n for path in paths:\n if len(path) < 2:\n continue\n expanded_paths = set()\n if self.include_entity:\n relations_for_each_step = [[path[0]]]\n else:\n relations_for_each_step = []\n for index in range(1, len(path)):\n node1 = path[index-1]\n node2 = path[index]\n if (node1, node2) in self.pair_to_relations:\n relations = self.pair_to_relations[(node1, node2)]\n else:\n print(node1, node2)\n relations_for_each_step.append(relations)\n if self.include_entity:\n relations_for_each_step.append([node2])\n expanded_paths.update(list(itertools.product(*relations_for_each_step)))\n paths_formatted.update(expanded_paths)\n return paths_formatted",
"def reindex(self):\n self.index.drop_db()\n objectpath = os.path.join(self.rootpath, self.OBJECTPATH)\n for root, dirs, files in os.walk(objectpath, topdown=False):\n for name in files:\n blob_uuid = name\n self.index.update_from_metadata(self.load_blob_metadata(blob_uuid))",
"def _merge_new_into_all_paths(self):\n self.all_source_paths.update(self.new_source_paths)",
"def _rewrite_filter(self, node: saldag.Filter):\n\n out_rel_cols = node.out_rel.columns\n\n for in_col, out_col in zip(node.get_in_rel().columns, out_rel_cols):\n out_col.coll_sets |= copy.deepcopy(in_col.coll_sets)",
"def get_all_path_facts(self, question_entities, answer_entities, passage_entities, seed_weighting=True, fp=None):\n\n if FLAGS.verbose_logging:\n print('Getting subgraph')\n tf.logging.info('Getting subgraph')\n question_entity_ids = [\n int(self.data.ent2id[x]) for x in question_entities if x in self.data.ent2id\n ]\n question_entity_names = str([self.data.entity_names['e'][str(x)]['name'] for x in question_entity_ids\n ])\n #if fp is not None:\n # fp.write(str(question_entities)+\"\\t\"+question_entity_names+\"\\t\")\n if FLAGS.verbose_logging:\n print('Question Entities')\n tf.logging.info('Question Entities')\n print(question_entities)\n print(question_entity_names)\n tf.logging.info(question_entity_names)\n\n answer_entity_ids = [\n int(self.data.ent2id[x]) for x in answer_entities if x in self.data.ent2id\n ]\n answer_entity_names = str([self.data.entity_names['e'][str(x)]['name'] for x in answer_entity_ids\n ])\n #if fp is not None:\n # fp.write(str(answer_entities)+\"\\t\"+answer_entity_names+\"\\t\")\n if FLAGS.verbose_logging:\n print('Answer Entities')\n tf.logging.info('Answer Entities')\n print(answer_entities)\n print(answer_entity_names)\n tf.logging.info(answer_entity_names)\n passage_entity_ids = [\n int(self.data.ent2id[x]) for x in passage_entities if x in self.data.ent2id\n ]\n passage_entity_names = str([self.data.entity_names['e'][str(x)]['name'] for x in passage_entity_ids\n ])\n if FLAGS.verbose_logging:\n print('Passage Entities')\n tf.logging.info('Passage Entities')\n print(passage_entity_names)\n tf.logging.info(passage_entity_names)\n\n freq_dict = {x: question_entity_ids.count(x) for x in question_entity_ids}\n\n extracted_paths, num_hops = csr_get_all_paths(question_entity_ids, self.data.adj_mat_t_csr, answer_entity_ids, self.data.rel_dict, k_hop=FLAGS.k_hop)\n augmented_facts = self.get_all_path_augmented_facts(extracted_paths, self.data.entity_names)\n\n if FLAGS.verbose_logging:\n print('All path Extracted facts: ')\n print(str(augmented_facts))\n tf.logging.info('All path Extracted facts: ')\n tf.logging.info(str(augmented_facts))\n print(\"Num hops: \"+str(num_hops))\n return augmented_facts, num_hops",
"def vertex_to_entity_path(vertex_path,\n graph,\n entities,\n vertices=None):\n def edge_direction(a, b):\n \"\"\"\n Given two edges, figure out if the first needs to be\n reversed to keep the progression forward.\n\n [1,0] [1,2] -1 1\n [1,0] [2,1] -1 -1\n [0,1] [1,2] 1 1\n [0,1] [2,1] 1 -1\n\n Parameters\n ------------\n a : (2,) int\n b : (2,) int\n\n Returns\n ------------\n a_direction : int\n b_direction : int\n \"\"\"\n if a[0] == b[0]:\n return -1, 1\n elif a[0] == b[1]:\n return -1, -1\n elif a[1] == b[0]:\n return 1, 1\n elif a[1] == b[1]:\n return 1, -1\n else:\n constants.log.debug('\\n'.join([\n 'edges not connected!',\n 'vertex path %s',\n 'entity path: %s',\n 'entity[a]: %s,',\n 'entity[b]: %s']),\n vertex_path,\n entity_path,\n entities[ea].points,\n entities[eb].points)\n\n return None, None\n\n if vertices is None or vertices.shape[1] != 2:\n ccw_direction = 1\n else:\n ccw_check = is_ccw(vertices[np.append(vertex_path,\n vertex_path[0])])\n ccw_direction = (ccw_check * 2) - 1\n\n # make sure vertex path is correct type\n vertex_path = np.asanyarray(vertex_path, dtype=np.int64)\n # we will be saving entity indexes\n entity_path = []\n # loop through pairs of vertices\n for i in np.arange(len(vertex_path) + 1):\n # get two wrapped vertex positions\n vertex_path_pos = np.mod(np.arange(2) + i, len(vertex_path))\n vertex_index = vertex_path[vertex_path_pos]\n entity_index = graph.get_edge_data(*vertex_index)['entity_index']\n entity_path.append(entity_index)\n # remove duplicate entities and order CCW\n entity_path = grouping.unique_ordered(entity_path)[::ccw_direction]\n # check to make sure there is more than one entity\n if len(entity_path) == 1:\n # apply CCW reverse in place if necessary\n if ccw_direction < 0:\n index = entity_path[0]\n entities[index].reverse()\n\n return entity_path\n # traverse the entity path and reverse entities in place to\n # align with this path ordering\n round_trip = np.append(entity_path, entity_path[0])\n round_trip = zip(round_trip[:-1], round_trip[1:])\n for ea, eb in round_trip:\n da, db = edge_direction(entities[ea].end_points,\n entities[eb].end_points)\n if da is not None:\n entities[ea].reverse(direction=da)\n entities[eb].reverse(direction=db)\n\n entity_path = np.array(entity_path)\n\n return entity_path",
"def optimize_path_filter():\n print(\"optimize_path_filter...\")\n if len(gCodeBlocks) == 0:\n print(\"no gcode loaded: cannot apply filter\")\n return\n block_to_filter = gCodeBlocks[-1]\n\n g01blocks = block_to_filter.g01blocks\n ng01 = len(g01blocks)\n\n print(block_to_filter)\n\n for ri in range(ng01-1):\n if ri % 10 == 0:\n print(ri, end='\\r')\n next_block_index = ri + 1\n idx_shortest = g01blocks[ri].shortestPathToStart2(g01blocks, next_block_index)\n if idx_shortest is not None:\n if idx_shortest != next_block_index:\n g01blocks[next_block_index], g01blocks[idx_shortest] = \\\n g01blocks[idx_shortest], g01blocks[next_block_index]\n\n print()\n # rearrange original lines\n block_to_filter.lines = []\n for g01block in block_to_filter.g01blocks:\n for line in g01block.lines:\n block_to_filter.lines.append(line)\n\n print(\"optimize_path_filter done.\")",
"def create_index(filenames: list, raw_data_dir: str,\n processed_data_dir: str) -> None:\n entities, relations = set(), set()\n\n for filename in filenames:\n file_path = path.join(raw_data_dir, filename)\n with open(file_path, 'r') as file:\n for line in file.readlines():\n mid1, relation, mid2 = line.strip().split('\\t')\n entities.add(mid1)\n entities.add(mid2)\n relations.add(relation)\n\n logger = logging.getLogger(__name__)\n logger.info(\"Found %i different entities\", len(entities))\n logger.info(\"Found %i different relations\", len(relations))\n\n entity_to_id = {entity: i for (i, entity) in enumerate(sorted(entities))}\n relation_to_id = {relation: i for (i, relation) in\n enumerate(sorted(relations))}\n\n id_to_entity = {i: entity for entity, i in entity_to_id.items()}\n id_to_relation = {i: relation for relation, i in relation_to_id.items()}\n\n e2i_path = processed_data_dir + \"/entity_to_id.pickle\"\n filename_relation_to_id = processed_data_dir + \"/relation_to_id.pickle\"\n i2e_path = processed_data_dir + \"/id_to_entity.pickle\"\n filename_id_to_relation = processed_data_dir + \"/id_to_relation.pickle\"\n\n os.makedirs(processed_data_dir, exist_ok=True)\n\n with open(e2i_path, \"wb\") as handle1:\n pickle.dump(entity_to_id, handle1, protocol=pickle.HIGHEST_PROTOCOL)\n\n with open(filename_relation_to_id, \"wb\") as handle2:\n pickle.dump(relation_to_id, handle2, protocol=pickle.HIGHEST_PROTOCOL)\n\n with open(i2e_path, \"wb\") as handle3:\n pickle.dump(id_to_entity, handle3, protocol=pickle.HIGHEST_PROTOCOL)\n\n with open(filename_id_to_relation, \"wb\") as handle4:\n pickle.dump(id_to_relation, handle4, protocol=pickle.HIGHEST_PROTOCOL)",
"def reindex_subcomponent_taxa():\n pass",
"def path_entries(self):",
"def update_rels(fl_list, tmp_loc, dict_1):\n old_files = natsort.natsorted([i for i in dict_1.keys()])\n path = f'{output_path}/ppt'\n for i in fl_list:\n root, tree = gen_tree(f'{path}/{i}')\n for relation in root:\n attrib = relation.attrib\n if attrib.get('Target')[3:] in old_files:\n relation.set('Target', f\"../{dict_1[attrib.get('Target')[3:]]}\")\n tree.write(f'{path}/{i}', pretty_print=True, xml_declaration=True, encoding='UTF-8', standalone=True)\n return",
"def preprocess_entities_by_mapping(entities, objects_alias_mapping, predicates_alias_mapping):\n\n for entity in entities:\n\n for object in entity.objects:\n candidate_object = object.names[0].lower()\n\n # Update object name according to the objects_alias_mapping or just save it lower-case\n if candidate_object in objects_alias_mapping:\n object.names[0] = objects_alias_mapping[candidate_object]\n else:\n object.names[0] = candidate_object\n\n for relation in entity.relationships:\n candidate_predicate = relation.predicate.lower()\n\n # Update object name according to the predicates_to_be_used or just save it lower-case\n if candidate_predicate in predicates_alias_mapping:\n relation.predicate = predicates_alias_mapping[candidate_predicate]\n else:\n relation.predicate = candidate_predicate",
"def get_paths(self, *paths, **kwargs):\n filter_fn = kwargs.get(\"filter_fn\", None)\n\n def default_filter_fn(obj):\n return True\n\n def default_transport(x):\n return x\n\n if filter_fn is None:\n filter_fn = default_filter_fn\n key = kwargs.get(\"key\", None)\n if key is None:\n objs = self._analyzer_objects\n else:\n objs = sorted(self._analyzer_objects, key=key)\n\n res = []\n for obj_name in objs:\n obj = objs[obj_name]\n if not filter_fn(obj):\n continue\n obj_res = []\n for i in range(len(paths)):\n path = paths[i]\n if isinstance(path, dict):\n transport = path[\"transport\"]\n path = path[\"path\"]\n else:\n transport = default_transport\n traverse = obj[path[0]]\n for p in path[1:]:\n if traverse is not None:\n if p in traverse:\n traverse = traverse[p]\n elif p.upper() in traverse:\n traverse = traverse[p.upper()]\n else:\n traverse = None\n obj_res.append(transport(traverse))\n if len(obj_res) == len(paths):\n res.append(obj_res)\n return res",
"def transition_path(self, str_representation = True):\n node, path_back = self, []\n while node:\n modification_name = 'None'\n if node.action:\n modification_name = node.action.__str__()\n if modification_name is not 'None':\n if(str_representation):\n path_back.append(modification_name)\n else:\n path_back.append(node.action)\n node = node.parent\n return list(reversed(path_back))",
"def convert_paths(self):\n # convert to node sequences, dropping s'\n self.nodeseq_paths = []\n for path in self.paths:\n node_seq = [] # don't include s'\n for arc in path:\n node_seq.append(self.arc_info[arc]['destin'])\n self.nodeseq_paths.append(node_seq)\n # convert to og graph\n self.converted_paths = []\n for path in self.nodeseq_paths:\n this_path = []\n add_next_node = True\n for i in range(len(path) - 1):\n print(\"This path is\", this_path)\n node1 = path[i]\n node2 = path[i + 1]\n print(\"node1={}, node2={}\".format(node1, node2))\n if (node1, node2) in self.mapping:\n sc = self.mapping[(node1, node2)]\n print(\"uses sc edge for {}\".format(sc))\n print(\"should add {}, but also need to check for overlaps\".\n format(sc[1:-1]))\n if sc[1] in this_path:\n # we have an overlap\n start = len(this_path) - this_path.index(sc[1])\n this_path.extend(sc[start:-1])\n else:\n this_path.extend(sc[1:-1])\n add_next_node = False # next node is second of sc edge\n elif add_next_node:\n this_path.append(node1)\n else:\n add_next_node = True\n this_path.append(path[-1])\n self.converted_paths.append(this_path)",
"def extract_all_relations(self):\n\n caption = 'all relations in %s' % self.partition\n for xmi_path in tqdm(self.xmi_paths, desc=caption):\n\n # does this xmi belong to the sought partition?\n xmi_file_name = xmi_path.split('/')[-1]\n id = int(xmi_file_name.split('_')[0][-3:])\n if id % 8 not in self.splits[self.partition]:\n continue\n\n xmi_file = open(xmi_path, 'rb')\n cas = load_cas_from_xmi(xmi_file, typesystem=self.type_system)\n gold_view = cas.get_view('GoldView')\n sys_view = cas.get_view('_InitialView')\n\n rel_lookup = Data.index_relations(gold_view)\n\n # iterate over sentences extracting relations\n for sent in sys_view.select(sent_type):\n sent_text = sent.get_covered_text().replace('\\n', '')\n self.inputs.append('Relation extraction: ' + sent_text)\n\n # event-time relations in this sentence\n et_rels_in_sent = []\n\n for event in gold_view.select_covered(event_type, sent):\n for time in gold_view.select_covered(time_type, sent):\n\n if (time, event) in rel_lookup:\n label = rel_lookup[(time, event)]\n time_text = time.get_covered_text()\n event_text = event.get_covered_text()\n rel_string = '%s(%s, %s)' % (label, time_text, event_text)\n et_rels_in_sent.append(rel_string)\n\n if (event, time) in rel_lookup:\n label = rel_lookup[(event, time)]\n time_text = time.get_covered_text()\n event_text = event.get_covered_text()\n rel_string = '%s(%s, %s)' % (label, event_text, time_text)\n et_rels_in_sent.append(rel_string)\n\n et_output = 'event-time relations: '\n if len(et_rels_in_sent) == 0:\n et_output = et_output + 'none'\n else:\n et_output = et_output + ' '.join(et_rels_in_sent)\n\n # event-event relations in this sentence\n ee_rels_in_sent = []\n\n events_in_sent = list(gold_view.select_covered(event_type, sent))\n for i in range(0, len(events_in_sent)):\n for j in range(i + 1, len(events_in_sent)):\n\n event1 = events_in_sent[i]\n event2 = events_in_sent[j]\n\n if (event1, event2) in rel_lookup:\n label = rel_lookup[(event1, event2)]\n event1_text = event1.get_covered_text()\n event2_text = event2.get_covered_text()\n rel_string = '%s(%s, %s)' % (label, event1_text, event2_text)\n ee_rels_in_sent.append(rel_string)\n\n if (event2, event1) in rel_lookup:\n label = rel_lookup[(event2, event1)]\n event1_text = event1.get_covered_text()\n event2_text = event2.get_covered_text()\n rel_string = '%s(%s, %s)' % (label, event2_text, event1_text)\n ee_rels_in_sent.append(rel_string)\n\n ee_output = 'event-event relations: '\n if len(ee_rels_in_sent) == 0:\n ee_output = ee_output + 'none'\n else:\n ee_output = ee_output + ' '.join(ee_rels_in_sent)\n\n self.outputs.append(et_output + '; ' + ee_output)",
"def traverse(name, furtherPath):",
"def test_RelationIndex(self):\n from quotationtool.site.site import QuotationtoolSite\n from zope.container.btree import BTreeContainer\n root = BTreeContainer()\n root['quotationtool'] = site = QuotationtoolSite()\n from zc.relation.interfaces import ICatalog\n cat = zope.component.getUtility(\n ICatalog, context = site)\n #self.assertTrue('ifigure-reference' in list(cat.iterSearchIndexes()))",
"def rebuild_index_old(self):\n logging.debug(\"updating detailed information for {}\".format(self))\n\n with get_db_connection() as db:\n c = db.cursor()\n c.execute(\"\"\"DELETE FROM observable_mapping WHERE alert_id = %s\"\"\", ( self.id, ))\n c.execute(\"\"\"DELETE FROM tag_mapping WHERE alert_id = %s\"\"\", ( self.id, ))\n db.commit()\n\n self.build_index()",
"def update_forbidden_paths(self, prefix, as_path, sdx_set, ingress_participant, egress_participant):\n\n change = dict()\n\n if not as_path:\n route = self.rib.get_route(prefix, egress_participant)\n if route:\n as_path = route[\"as_path\"]\n else:\n if egress_participant in self.forbidden_paths[ingress_participant][prefix]:\n self.forbidden_paths[ingress_participant][prefix].remove(egress_participant)\n self.logger.debug(\"update forbidden paths for \" + str(ingress_participant) + \" - \" + str(prefix) +\n \" results in \" + str(self.forbidden_paths[ingress_participant][prefix]))\n\n change[\"participant\"] = ingress_participant\n change[\"prefix\"] = prefix\n\n return change\n as_path_sdxes = self.get_sdxes_on_path([int(v) for v in as_path.split(\" \")])\n if not sdx_set:\n cl_entry = self.cib.get_cl_entry(prefix, ingress_participant)\n if cl_entry:\n sdx_set = set([int(v) for v in cl_entry[\"sdx_set\"].split(\";\")])\n else:\n sdx_set = set()\n\n intersection = sdx_set.intersection(as_path_sdxes)\n\n if len(intersection) > 0:\n if egress_participant not in self.forbidden_paths[ingress_participant][prefix]:\n self.forbidden_paths[ingress_participant][prefix].append(egress_participant)\n elif egress_participant in self.forbidden_paths[ingress_participant][prefix]:\n self.forbidden_paths[ingress_participant][prefix].remove(egress_participant)\n\n self.logger.debug(\"update forbidden paths for \" + str(ingress_participant) + \" - \" +\n str(prefix) + \" results in \" + str(self.forbidden_paths[ingress_participant][prefix]))\n\n change[\"participant\"] = ingress_participant\n change[\"prefix\"] = prefix\n\n return change",
"def paths_for_od(self, r, s):\n pass",
"def fix_genindex(self, tree: list[tuple[str, list[tuple[str, Any]]]]) -> None:\n # XXX: modifies tree inline\n # Logic modeled from themes/basic/genindex.html\n for _key, columns in tree:\n for _entryname, (links, subitems, _key) in columns:\n for (i, (ismain, link)) in enumerate(links):\n m = self.refuri_re.match(link)\n if m:\n links[i] = (ismain,\n self.fix_fragment(m.group(1), m.group(2)))\n for _subentryname, subentrylinks in subitems:\n for (i, (ismain, link)) in enumerate(subentrylinks):\n m = self.refuri_re.match(link)\n if m:\n subentrylinks[i] = (ismain,\n self.fix_fragment(m.group(1), m.group(2)))",
"def set(self, new_path):\n\n for i in range(self.depth):\n self.path[i] = new_path[self.max_input*i:self.max_input*(i + 1)]",
"def rebuild_index():\n print('Building indexes...')\n print(data_fldr)\n ndx = []\n for root, _, files in os.walk(data_fldr):\n for f in files:\n if f[-3:].upper() in ['CSV','TXT']:\n ndx.extend(get_index_terms(root + os.sep + f))\n with open(ndx_file, 'w') as fio:\n for i in ndx:\n fio.write(i + '\\n')",
"def manage_paths(node, paths) :\r\n\r\n #Getting the nodes neighbouring the given node\r\n neighbours = get_neighbouring_nodes(node) \r\n\r\n #Creating a new path branch\r\n new_path = [] #The new path\r\n path_found = False #Indicates whether the path to which the node belongs has been found\r\n\r\n #Looping through the neighbours\r\n for neighbour in neighbours :\r\n for path in paths :\r\n #Checking whether the path contains the neighbour\r\n if(neighbour in path) :\r\n index = path.index(neighbour)\r\n #Checking if the branch belongs to the current path\r\n if(path[index].gn_value == neighbour.gn_value) :\r\n new_path = path[:index + 1] + [node] #Creating a new path branch\r\n new_path[-1].gn_value = new_path.__len__() - 1 #Updating the node's g(n) value\r\n path_found = True\r\n break\r\n if(path_found) :\r\n break\r\n \r\n if(not path_found) :\r\n raise Exception(\"No branch junction found\")\r\n\r\n #Setting the new path as the current path\r\n return new_path",
"def _find_all_paths(self, start_vertex: str, end_vertex: str, path=[]):\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n\n paths = []\n for vertex in self.graph[start_vertex]:\n if vertex not in path:\n extended_paths = self._find_all_paths(vertex,\n end_vertex,\n path)\n for p in extended_paths:\n paths.append(p)\n return paths",
"def _walk_dirs(self):\n for project_name in self.new_source_paths.keys():\n # print \"-------- Now mapping ---- \" + project_name\n search_path = self.root + project_name + '\\\\Data'\n for dirpath, subdirs, files in os.walk(search_path):\n for file in files:\n self.new_source_paths[project_name][file] = dirpath\n # print \"------------ Finished mapping ------- \" + project_name\n return self.new_source_paths",
"def construct_paths(data, relation_level_words, qald=False,goldorpred='gold'):\n abstract_question = data[goldorpred]['abstract_question'].replace('<e>', 'entity1').replace('<l>', 'literal1')\n question = ei.vocabularize(nlutils.tokenize(abstract_question))\n\n \"\"\"======\"\"\"\n question_dep = []\n if 'abstract_question_deppath' in data['gold']:\n for abstract_question_deppath_simple in data['gold']['abstract_question_deppath']:\n abstract_question_deppath_simple = abstract_question_deppath_simple.replace('<E0>', 'entity1')\n abstract_question_deppath_simple = abstract_question_deppath_simple.replace('<E1>', 'entity2')\n abstract_question_deppath_simple = abstract_question_deppath_simple.replace('<E2>', 'entity3')\n abstract_question_deppath_simple = abstract_question_deppath_simple.replace('<L1>', 'literal1')\n abstract_question_deppath_simple = abstract_question_deppath_simple.replace('<L2>', 'literal2')\n abstract_question_deppath_simple = abstract_question_deppath_simple.replace('<L3>', 'literal3')\n question_dep.append([int(id_) for id_ in list(\n ei.vocabularize(nlutils.tokenize(abstract_question_deppath_simple.strip())))])\n if len(question_dep) == 0:\n question_dep.append([int(id_) for id_ in list(ei.vocabularize(nlutils.tokenize(abstract_question.strip())))])\n question_dep_mask_matrix = 1.0*np.ones((1, len(question_dep)))\n\n \"\"\"======\"\"\"\n\n '''goldpathindex 可能要用于mrr计算,有了goldpathindex其实就不需要no_positive_path'''\n candidates=[]\n for key in ['hop4','hop3_2','hop3_1','hop3_0','hop3','hop2','hop1']:\n if key in data[goldorpred]:\n candidates+=data[goldorpred][key]\n\n ####get gold path####\n goldpathindex = -1\n for index,candidate in enumerate(candidates):\n if np.array_equal(candidate, data['gold']['path']):\n goldpathindex=index\n break\n\n ##########get candidate path#####\n candidate_paths = []\n candidate_paths_words = []\n for cand_path in candidates:\n candidate_path=[]\n candidate_path_words=[]\n add=True\n for p in cand_path:\n # p = p.lower() lcquad\n if p in embeddings_interface.SPECIAL_CHARACTERS:\n candidate_path.extend( vocabularize_relation(p))\n else:\n if p not in relation_level_words:\n # add=False\n # break\n candidate_path.extend( ei.vocabularize([p.replace(\"http://dbpedia.org/property/\", \"\")]))\n candidate_path_words.extend( ei.vocabularize([p.replace(\"http://dbpedia.org/property/\", \"\")]))\n else:\n if \"0\" not in relation_level_words[p]:\n # add=False\n # break\n # print('pppp', p, p.replace(\"http://dbpedia.org/property/\", \"\"), relation_level_words[p])\n candidate_path.extend( ei.vocabularize([p.replace(\"http://dbpedia.org/property/\", \"\")]))\n # print('before',candidate_path_words)\n candidate_path_words.extend( ei.vocabularize([p.replace(\"http://dbpedia.org/property/\", \"\")]))\n # print('end',ei.vocabularize([p.replace(\"http://dbpedia.org/property/\", \"\")]),candidate_path_words)\n else:\n candidate_path.extend( ei.vocabularize(relation_level_words[p]['0']).tolist())\n candidate_path_words.extend( ei.vocabularize(relation_level_words[p]['0']).tolist())\n if add:\n candidate_paths.append(np.asarray(candidate_path))\n candidate_paths_words.append(np.asarray(candidate_path_words))\n\n return question,\\\n np.asarray(question_dep), np.asarray(question_dep_mask_matrix),\\\n np.asarray(candidate_paths), np.asarray(candidate_paths_words),\\\n goldpathindex, candidates"
] | [
"0.58268344",
"0.52432096",
"0.5228353",
"0.51687616",
"0.5044035",
"0.5042572",
"0.4956355",
"0.49512407",
"0.4946628",
"0.4930969",
"0.49182546",
"0.49161366",
"0.4869944",
"0.48368976",
"0.4831286",
"0.48303708",
"0.48242262",
"0.48204356",
"0.48135132",
"0.48054898",
"0.4790159",
"0.47758484",
"0.47570205",
"0.4751328",
"0.47392616",
"0.47261882",
"0.4722561",
"0.4710717",
"0.47016332",
"0.47003925"
] | 0.6383274 | 0 |
Convert a single line into an Instruction instance. | def process_line(line: str) -> Instruction:
register, op, value, _, base, check, limit = line.split()
return Instruction(register, op, int(value), base, check, int(limit)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def trans_line(line: str, progname):\r\n splitline = line.split()\r\n command = splitline[0]\r\n if command == 'push':\r\n segment = splitline[1]\r\n index = splitline[2]\r\n out = mem.push(segment, index, progname)\r\n elif command == 'pop':\r\n segment = splitline[1]\r\n index = splitline[2]\r\n out = mem.pop(segment, index, progname)\r\n else:\r\n out = arithmetic.generate(command)\r\n return out",
"def translate_line_by_line(self):\n for command in self.commands:\n if command.startswith('@'): # is A instruction\n number = command.split('@')[-1]\n instruction = self.convert_A_instruction(number)\n\n else: # is C instruction\n instruction = self.convert_C_instruction(command)\n\n self.hack_instructions.append(instruction)",
"def read_line(self, line):\n line_in_asm_array = list()\n documentation = [\"//\" + line]\n line_in_asm_array.extend(documentation)\n command_type = Parser.commandType(line)\n\n # not supposed to happen:\n # if command_type is None:\n # print(\"problem\")\n\n self.update_last_function(line, command_type)\n translated_lines = self.get_correct_lines(command_type, line)\n\n self.label_counter = self.label_counter + 1\n line_in_asm_array.extend(translated_lines)\n self.assembler_lines.append(line_in_asm_array)",
"def from_line(self, line: str):\n raise NotImplementedError()",
"def get_disasm_line( ea ):\r\n\top1 = ua_outop2( ea, 0, 0 )\t\r\n\top2 = ua_outop2( ea, 1, 0 )\r\n\top3 = ua_outop2( ea, 2, 0 )\r\n\tif op1 == None:\r\n\t\top1 = \"\"\r\n\telse:\r\n\t\top1 = idaline_to_string( op1 )\r\n\tif op2 == None:\r\n\t\top2 = \"\"\r\n\telse:\r\n\t\top2 = idaline_to_string( op2 )\r\n\tif op3 == None:\r\n\t\top3 = \"\"\r\n\telse:\r\n\t\top3 = idaline_to_string( op3 )\r\n\tret = [ ea, ua_mnem( ea ), op1, op2, op3 ]\r\n\treturn ret",
"def eval(self, line):\n self.eval(line)",
"def Assemble(ea, line):\n if type(line) in ([bytes] + list(ida_idaapi.string_types)):\n lines = [line]\n else:\n lines = line\n ret = []\n for line in lines:\n seg = ida_segment.getseg(ea)\n if not seg:\n return (False, \"No segment at ea\")\n ip = ea - (ida_segment.sel2para(seg.sel) << 4)\n buf = ida_idp.AssembleLine(ea, seg.sel, ip, seg.bitness, line)\n if not buf:\n return (False, \"Assembler failed: \" + line)\n ea += len(buf)\n ret.append(buf)\n\n if len(ret) == 1:\n ret = ret[0]\n return (True, ret)",
"def _translate_line_to_handle(self, line):",
"def interpret_line(self, line, source=None, lineno=None):\n\n pline = self.parser.parse_line(line, source=source, lineno=lineno)\n return self.execute(pline)",
"def parse_line(self, line):\n raise NotImplementedError",
"def to_instruction(self):\n return self.to_circuit().to_gate()",
"def compileInstruction(self, ins):\n pass",
"def ConvertToSingleLine(lines):\n state = []\n total_length = 0\n for l in lines:\n total_length += len(l)\n # TODO: Use a tuple instead.\n state.append({'pos': total_length, # the line split point\n 'blocks': [], # blocks which belong to this line\n })\n result = \"\".join(lines)\n assert len(state) == len(lines)\n return (result, state)",
"def decode(self, line):\n try:\n commands = self.tokenize(line)\n for command in commands:\n self.delegate.output(str(command))\n self.execute(command)\n except EmptyStackException as e:\n self.delegate.error(str(e))\n except SmyrkRuntimeError as e:\n self.delegate.error(str(e))\n except KeyError as e:\n self.delegate.error('{0} is not defined'.format(str(e)))",
"def parse(cls, line):\r\n raise NotImplementedError",
"def do(self, line): \n self.interface.onecmd(line)",
"def xx(self, line=''):\r\n ## line in this context is one ipython line which may have line breaks in it\r\n line = self.xxFixLine(line)\r\n return self.shell.getoutput(line)",
"def ins(self, line=None):\n self.inspect(line=line)",
"def GetLine(line):\r\n pass",
"def _execute_ins_line(self, ins_line, ins_lcount):\n cursor_pos = 0 # starting cursor position\n val_dict = {} # storage dict for obsname: obsval pairs in line\n # for ii,ins in enumerate(ins_line):\n ii = 0 # counter over instruction entries\n all_markers = True\n line_seps = set([\",\", \" \", \"\\t\"])\n n_ins = len(ins_line) # number of instructions on line\n maxsearch = 500 # maximum number of characters to search when slicing line\n while True:\n if ii >= n_ins:\n break\n ins = ins_line[ii] # extract instruction\n i1 = ins[:1] # first char in instruction\n # primary marker\n if ii == 0 and i1 == self._marker:\n # if first and instruction starts with primary marker\n # search for presence of primary marker e.g. ~start~\n mstr = ins.replace(self._marker, \"\")\n while True:\n # loop over lines until primary marker is found\n line = self._readline_output() # read line from output\n if line is None:\n self.throw_out_error(\n \"EOF when trying to find primary marker '{0}' from \"\n \"instruction file line {1}\".format(mstr, ins_lcount)\n )\n if mstr in line: # when marker is found break and update\n # cursor position in current line\n break\n # copy a version of line commas replaced\n # (to support comma sep strings)\n rline = line.replace(\",\", \" \").replace(\"\\t\",\"\")\n\n cursor_pos = line.index(mstr) + len(mstr)\n\n # line advance\n elif i1 == \"l\": # if start of instruction is line advance\n try:\n nlines = int(ins[1:]) # try and get advance number\n except Exception as e:\n self.throw_ins_error(\n \"casting line advance to int for \"\n \"instruction '{0}'\".format(ins),\n ins_lcount,\n )\n for i in range(nlines):\n line = self._readline_output()\n if line is None:\n self.throw_out_error(\n \"EOF when trying to read {0} lines for line \"\n \"advance instruction '{1}', from instruction \"\n \"file line number {2}\".format(nlines, ins, ins_lcount)\n )\n # copy a version of line commas replaced\n # (to support comma sep strings)\n rline = line.replace(\",\", \" \")\n elif ins == \"w\": # whole string comparison\n raw = rline[cursor_pos : cursor_pos + maxsearch].split(\n None, 2\n ) # TODO: maybe slow for long strings -- hopefuly maxsearch helps\n if line[cursor_pos] in line_seps:\n raw.insert(0, \"\")\n if len(raw) == 1:\n self.throw_out_error(\n \"no whitespaces found on output line {0} past {1}\".format(\n line, cursor_pos\n )\n )\n # step over current value\n cursor_pos = rline.replace(\"\\t\",\" \").find(\" \", cursor_pos)\n # now find position of next entry\n cursor_pos = rline.find(raw[1], cursor_pos)\n # raw[1]\n # )\n\n elif i1 == \"!\": # indicates obs instruction folows\n oname = ins.replace(\"!\", \"\")\n # look a head for a second/closing marker\n if ii < n_ins - 1 and ins_line[ii + 1] == self._marker:\n # if penultimate instruction and last instruction is\n # primary marker, look for that marker in line\n m = ins_line[ii + 1].replace(self._marker, \"\")\n es = line.find(m, cursor_pos)\n if es == -1: # m not in rest of line\n self.throw_out_error(\n \"secondary marker '{0}' not found from cursor_pos {1}\".format(\n m, cursor_pos\n )\n )\n # read to closing marker\n val_str = line[cursor_pos:es]\n else:\n # find next space in (r)line -- signifies end of entry\n es = rline.find(\" \", cursor_pos)\n if es == -1 or es == cursor_pos:\n # if no space or current position is space\n # use old fashioned split to get value\n # -- this will happen if there are leading blanks before\n # vals in output file (e.g. 
formatted)\n val_str = rline[cursor_pos : cursor_pos + maxsearch].split(\n None, 1\n )[0]\n else:\n # read val (constrained slice is faster for big strings)\n val_str = rline[cursor_pos:es]\n try:\n val = float(val_str)\n except Exception as e:\n if oname != \"dum\":\n self.throw_out_error(\n \"casting string '{0}' to float for instruction '{1}'\".format(\n val_str, ins\n )\n )\n\n if oname != \"dum\":\n val_dict[oname] = val\n ipos = line.find(val_str.strip(), cursor_pos)\n # val_len = len(val_str)\n cursor_pos = ipos + len(val_str) # update cursor\n all_markers = False\n\n elif i1 == self._marker:\n m = ins.replace(self._marker, \"\") # extract just primary marker\n # find position of primary marker in line\n es = line.find(m, cursor_pos)\n if es == -1: # m not in rest of line\n if all_markers:\n ii = 0\n continue\n else:\n self.throw_out_error(\n \"secondary marker '{0}' not found from \"\n \"cursor_pos {1}\".format(m, cursor_pos)\n )\n cursor_pos = es + len(m)\n\n elif i1 == \"(\":\n if \")\" not in ins:\n self.throw_ins_error(\"unmatched ')'\", self._instruction_lcount)\n oname = ins[1:].split(\")\", 1)[0].lower()\n raw = ins.split(\")\")[1]\n if \":\" not in raw:\n self.throw_ins_error(\n \"couldnt find ':' in semi-fixed instruction: '{0}'\".format(ins),\n lcount=self._instruction_lcount,\n )\n raw = raw.split(\":\")\n try:\n s_idx = int(raw[0]) - 1\n except Exception as e:\n self.throw_ins_error(\n \"error converting '{0}' to integer in semi-fixed instruction: '{1}'\".format(\n raw[0], ins\n ),\n lcount=self._instruction_lcount,\n )\n try:\n e_idx = int(raw[1])\n except Exception as e:\n self.throw_ins_error(\n \"error converting '{0}' to integer in semi-fixed instruction: '{1}'\".format(\n raw[1], ins\n ),\n lcount=self._instruction_lcount,\n )\n\n if len(line) < e_idx:\n self.throw_out_error(\n \"output line only {0} chars long, semi-fixed ending col {1}\".format(\n len(line), e_idx\n )\n )\n\n if cursor_pos > e_idx:\n self.throw_out_error(\n \"cursor at {0} has already read past semi-fixed ending col {1}\".format(\n cursor_pos, e_idx\n )\n )\n\n ss_idx = max(cursor_pos, s_idx)\n raw = line[ss_idx : ss_idx + maxsearch].split(\n None, 1\n ) # slpitting only 1 might be margin faster\n rs_idx = line.index(raw[0])\n if rs_idx > e_idx:\n self.throw_out_error(\n \"no non-whitespace chars found in semi-fixed observation {0}\".format(\n ins\n )\n )\n re_idx = rs_idx + len(raw[0])\n val_str = line[rs_idx:re_idx]\n try:\n val = float(val_str)\n except Exception as e:\n if oname != \"dum\":\n self.throw_out_error(\n \"casting string '{0}' to float for instruction '{1}'\".format(\n val_str, ins\n )\n )\n\n if oname != \"dum\":\n val_dict[oname] = val\n cursor_pos = re_idx\n\n elif i1 == \"[\":\n if \"]\" not in ins:\n self.throw_ins_error(\"unmatched ']'\", self._instruction_lcount)\n oname = ins[1:].split(\"]\", 1)[0].lower()\n raw = ins.split(\"]\")[1]\n if \":\" not in raw:\n self.throw_ins_error(\n \"couldnt find ':' in fixed instruction: '{0}'\".format(ins),\n lcount=self._instruction_lcount,\n )\n raw = raw.split(\":\")\n try:\n s_idx = int(raw[0]) - 1\n except Exception as e:\n self.throw_ins_error(\n \"error converting '{0}' to integer in fixed instruction: '{1}'\".format(\n raw[0], ins\n ),\n lcount=self._instruction_lcount,\n )\n try:\n e_idx = int(raw[1])\n except Exception as e:\n self.throw_ins_error(\n \"error converting '{0}' to integer in fixed instruction: '{1}'\".format(\n raw[1], ins\n ),\n lcount=self._instruction_lcount,\n )\n\n if len(line) < e_idx:\n 
self.throw_out_error(\n \"output line only {0} chars long, fixed ending col {1}\".format(\n len(line), e_idx\n )\n )\n\n if cursor_pos > s_idx:\n self.throw_out_error(\n \"cursor at {0} has already read past fixed starting col {1}\".format(\n cursor_pos, e_idx\n )\n )\n\n val_str = line[s_idx:e_idx]\n try:\n val = float(val_str)\n except Exception as e:\n if oname != \"dum\":\n self.throw_out_error(\n \"casting string '{0}' to float for instruction '{1}'\".format(\n val_str, ins\n )\n )\n\n if oname != \"dum\":\n val_dict[oname] = val\n cursor_pos = e_idx\n\n else:\n self.throw_out_error(\n \"unrecognized instruction '{0}' on ins file line {1}\".format(\n ins, ins_lcount\n )\n )\n ii += 1\n return val_dict",
"def push(self, line):\n if transforms.FROM_NONSTANDARD.match(line):\n transforms.add_transformers(line)\n self.buffer.append(\"\\n\")\n else:\n self.buffer.append(line)\n\n add_pass = False\n if line.rstrip(' ').endswith(\":\"):\n add_pass = True\n source = \"\\n\".join(self.buffer)\n if add_pass:\n source += \"pass\"\n source = transforms.transform(source)\n if add_pass:\n source = source.rstrip(' ')[:-4]\n\n # some transformations may strip an empty line meant to end a block\n if not self.buffer[-1]:\n source += \"\\n\"\n more = self.runsource(source, self.filename)\n\n if not more:\n self.resetbuffer()\n return more",
"def parse(self, ins):\n if type(ins)!=Instr:\n raise Exception(\"You are parsing object that isn't a instruction\")\n self.type = ins.instr\n if ins.instr in control_instructions:\n self.parse_control(ins)\n elif ins.instr in loadstore_instructions:\n self.parse_ls(ins) \n elif ins.instr in intarithm_instructions :\n self.parse_int(ins)\n elif ins.instr in floatarithm_instructions:\n self.parse_float(ins)\n elif ins.instr in misc_instructions:\n self.parse_misc(ins)\n else:\n self.parse_unknown(ins)",
"def parse_line(line):\n if len(line) < 11:\n exit(\"invalid Intel HEX line: %s\" % line)\n if line[0] != ':':\n exit(\"invalid Intel HEX line: %s\" % line)\n line = line[1:]\n try:\n int(line, 16)\n except ValueError:\n exit(\"invalid Intel HEX line: %s\" % line)\n line = line.decode('hex')\n return Line(line[0], line[1:3], line[3], line[4:-1], line[-1])",
"def __edit_line(self, line, code, code_obj): # pylint: disable=R0201\r\n try:\r\n result = eval(code_obj, globals(), locals())\r\n except TypeError as ex:\r\n message = \"failed to execute {}: {}\".format(code, ex)\r\n logger.warning(message)\r\n raise EditorError(message)\r\n if result is None:\r\n raise EditorError(\"cannot process line '{}' with {}\".format(\r\n line, code))\r\n elif isinstance(result, list) or isinstance(result, tuple):\r\n line = ' '.join([str(res_element) for res_element in result])\r\n else:\r\n line = str(result)\r\n return line",
"def process_line(self, line, data):\n return data",
"def encode(self: object, line: str) -> tuple[bool, str]:\n if not self._parsed:\n return (False, '')\n if len(line) != sum(self.offsets()):\n return (False, '')\n\n values = []\n offsets = self.offsets()\n start = 0\n for offset in offsets:\n end = start + offset\n values.append(line[start:end])\n start = end\n encoder = self.get_delimit_encoding()\n enc_values = [encoder.to_out_type(v) for v in values]\n return True, \",\".join(enc_values)",
"def parseLine(self, line):\n # Removes surrounding whitespace\n line = self.separateElements(line)\n if len(line) == 0: return\n # Checks if the line is a label declaration\n if line[0].lower() == \"label\":\n # --Validates the line\n if len(line) != 2: raise Exception(\"Invalid Label\")\n if len(line[1]) < 2: raise Exception(\"Invalid Label\") \n if line[1][-1] != ':': raise Exception(\"Invalid Label\")\n # Gets the label name\n labelName = line[1][:-1]\n\n # Creates a new symbol entry for the label, the pointer refers to the memory location of the label\n # It defaults to the location of the label in the instruction sequence\n self.symbolTable.append({ \"type\": \"LABEL\", \"name\": labelName, \"pointer\": len(self.instructionList) * 4})\n # Checks if the line is data declaration\n elif line[0].lower() == \"data\" or (line[0].lower()[:4] == \"data\" and line[0][4] == \"[\"):\n # Removes the DATA tag from the data\n line[0] = line[0][4:]\n # --Validates the line\n if len(line) < 2: raise Exception(\"Invalid DATA\")\n # Gets the data name\n dataName = line[1]\n # Stores the data length\n dataLength = 4 # A word\n # Gets any default data\n defaultData = 0\n # Stores the data type\n dataType = \"int\"\n if len(line) == 3:\n if line[2][0] == \"\\\"\" and line[2][-1] == \"\\\"\":\n dataType = \"string\"\n defaultData = line[2][1:-1]\n dataLength = len(defaultData)\n elif line[2].isnumeric():\n defaultData = line[2]\n elif line[2][-1] == 'f' and line[2][:-1].isnumeric():\n dataType = \"float\"\n defaultData = line[2][0]\n # Checks if a data length was given\n if len(line[0]) > 2 and (line[0][0] == \"[\" and line[0][-1] == \"]\"):\n data = line[0][1:-1]\n if not data.isnumeric(): raise TypeError(\"Invalid data length type\")\n dataLength = int(data)\n\n # Creates a new symbol entry for the data\n self.symbolTable.append({ \"type\": \"DATA\", \"name\": dataName, \"default\": defaultData, \"dataType\": dataType, \"length\": dataLength})\n # The line is most likely an instruction\n else:\n # --Validates the line\n #Stores the control bits\n controlBits = 1 << 5 # Sets it to 0b100000\n # Checks if the first element is control bits\n if line[0][0] == \"{\" and line[0][-1] == \"}\": # First element is control bits\n # Separates the two sections of the control bits\n controlSections = line[0].split(':')\n #Goes through the characters and constructs the control bits for the instruction\n carryBits = controlSections[0].lower()\n carryFlag = int('c' in carryBits)\n zeroFlag = int('z' in carryBits)\n negativeFlag = int('n' in carryBits)\n signedOverflowFlag = int('s' in carryBits)\n #Gets the conditions bits\n if len(controlSections) == 2:\n conditionBits = controlSections[1].lower()\n isAnd = int('x' in conditionBits)\n isOne = int('1' in conditionBits)\n #Sets the last two bits on controlBits to the conditionBits\n controlBits ^= isAnd << 1\n controlBits ^= isOne\n # Constructs the control bits section\n controlBits ^= carryFlag << 5\n controlBits ^= zeroFlag << 4\n controlBits ^= negativeFlag << 3\n controlBits ^= signedOverflowFlag << 2\n # Removes the control bits section from the line\n line.pop(0)\n # Performs this check as the controlbits element gets removed (if it existed) and so the length of the elments could be zerp\n if len(line) == 0: raise Exception(\"Invalid Instruction\")\n # --The first element is the instruction\n # Identifies the instruction from the mnemonic using the lookup table\n if line[0] in self.InstructionLookupTable:\n ins = self.InstructionLookupTable[line[0]]\n insCode = 
ins[\"code\"]\n insControlBits = ins['controlBits'] if ins['controlBits'] else controlBits\n # Creates a representation of the instruction, this is stored in the instructionList and is assembled later\n instrucitonRepr = {\n \"code\": insCode,\n \"controlBits\": insControlBits,\n }\n # Parses the arguments given and stores the operandStruct returned in the instruciton representation\n if len(line) > 1: instrucitonRepr[\"operand\"] = self.parseArgs(line[1:], insCode)\n self.instructionList.append(instrucitonRepr)",
"def lineToList(self, line):\n raise NotImplementedError",
"def to_line(self):\n v = self.vertices + self.vertices[0]\n return Line(v, properties=self.properties, crs=self.crs)",
"def do_Intermediate (self, line):\r\n GenIntermediate(self.stdin, self.tracking).do_cmdloop()"
] | [
"0.6149348",
"0.60882974",
"0.60344046",
"0.59739935",
"0.5775252",
"0.5676443",
"0.56726754",
"0.56274086",
"0.5606504",
"0.5577222",
"0.55470794",
"0.5473916",
"0.5454122",
"0.54540783",
"0.54350394",
"0.5411006",
"0.5390487",
"0.5379209",
"0.53752047",
"0.53626865",
"0.53533787",
"0.5322384",
"0.53202766",
"0.52887964",
"0.52822727",
"0.51979923",
"0.5177468",
"0.51587284",
"0.5145955",
"0.51417893"
] | 0.6650071 | 0 |
Convert raw data in the easy-to-use list of Instruction instances. | def process_data(data: str) -> list[Instruction]:
instructions = []
for line in data.strip().split("\n"):
instruction = process_line(line)
instructions.append(instruction)
return instructions | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extract(input_data: str) -> list:\n instructions = list()\n for instruction in input_data.split('\\n'):\n op, arg = instruction.split(' ')\n arg = int(arg)\n assert op in ('acc', 'jmp', 'nop')\n instructions.append(Instruction(op, arg))\n return instructions",
"def _perform_data_conversion(self):\n self.data = []\n items = 0\n for value in self.elements_to_convert:\n try:\n location = parse_int(value.get('location_id'), nullable=False)\n if not value.get('list', []):\n continue\n for obs in value['list']:\n items += 1\n # Setting timezone to pytz.UTC FIXES [BUG-039].\n timestamp = parse_date_utc(obs.get('dt') * 1000)\n date = timestamp.date()\n time = timestamp.time()\n temperature = parse_int(obs['main'].get('temp'))\n pressure = parse_float(obs['main'].get('pressure'))\n humidity = parse_int(obs['main'].get('humidity'))\n wind_speed = parse_int(obs.get('wind', {}).get('speed'))\n wind_degrees = parse_int(obs.get('wind', {}).get('deg'))\n wind_direction = compute_wind_direction(wind_degrees)\n weather = obs.get('weather', [{}])[0]\n if weather.get('icon') and weather.get('id'):\n weather = - parse_int(weather.get('id'), nullable=False) if 'n' in weather['icon'] else \\\n parse_int(weather.get('id'), nullable=False)\n self.data.append(WeatherForecastObservation(location_id=location, date=date, time=time,\n temperature=temperature, pressure=pressure, humidity=humidity, wind_speed=wind_speed,\n wind_degrees=wind_degrees, wind_direction=wind_direction, weather_id=weather))\n except (ValueError, AttributeError, KeyError, IndexError, TypeError):\n _id = value.get('_id', 'Unknown ID')\n self.logger.exception('An error occurred while parsing data. WeatherForecastObservation with ID \"%s\" '\n 'will not be converted.' % _id)\n self.state['elements_to_convert'] = items",
"def transform(self, data):",
"def assemble(self):\n machineCodeLength = len(self.instructionList)\n # Adds all of the data lengths to the length\n for symbol in self.symbolTable:\n if symbol[\"type\"] == \"DATA\":\n machineCodeLength += symbol[\"length\"]\n # Stores the machine code instructions\n machineCode = [0 for i in range(machineCodeLength)]\n # Adds all DATA symbols to the machineCode\n dataOffset = len(self.instructionList) # Stores the offset into the machine code for the current data symbol\n for symbol in self.symbolTable:\n if symbol[\"type\"] == \"DATA\":\n # Stores the operand into the memory\n\n # Stores the memory location of the data\n symbol[\"pointer\"] = dataOffset\n dataOffset += symbol[\"length\"]\n\n # Assembles every instruction\n for i in range(len(self.instructionList)):\n ins = self.instructionList[i]\n # Constructs the machine code instruction\n machineCode[i] |= (ins['controlBits'] & 0x3F) << 26\n machineCode[i] |= (ins['code'] & 0xFF) << 18\n # Looks through all of the awaiting in the operand and fills in the output for each\n for sym in ins['operand']['awaiting']:\n symType = \"DATA\" if \"DATA\" in sym else \"LABEL\"\n symbolName = sym[symType]['symbol']\n destination = sym[symType]['output']\n # Searches through the symbol table for the symbol\n for symbol in self.symbolTable:\n # Checks if it is a valid symbol\n if symbol[\"type\"] == symType and symbol[\"name\"] == symbolName:\n if symbol[\"type\"] == \"LABEL\":\n ins[\"operand\"][destination] = symbol[\"pointer\"]\n elif symbol[\"type\"] == \"DATA\":\n ins[\"operand\"][destination] = symbol[\"pointer\"]\n ins['operand']['awaiting'] = []\n print(ins)\n # Gets the main operand value\n if ins['operand']:\n if 'operand' in ins['operand']:\n if ins['operand']['operandType'] == 'int':\n machineCode[i] |= (1 << 18) # Sets value mode for the operand\n value = ins['operand']['operand'].to_bytes(4, \"big\")\n machineCode[i] |= value[0] << 12\n machineCode[i] |= value[1] << 8\n machineCode[i] |= value[2] << 4\n machineCode[i] |= value[3]\n elif ins['operand']['operandType'] == 'float':\n machineCode[i] |= (1 << 18) # Sets value mode for the operand\n value = struct.pack('>f', ins['operand']['operand'])\n machineCode[i] |= value[0] << 12\n machineCode[i] |= value[1] << 8\n machineCode[i] |= value[2] << 4\n machineCode[i] |= value[3]\n elif ins['operand']['operandType'] == 'register':\n machineCode[i] |= (ins['operand']['operand'] & 0xF) << 4\n if 'Rin' in ins['operand']: \n # Clears the bits at the location\n machineCode[i] &= 0xFFFFF0FF\n machineCode[i] |= (ins['operand']['Rin'] & 0xF) << 8\n elif 'address' in ins['operand']:\n if ins['operand']['addressingMode'] == \"Absolute\" or ins['operand']['addressingMode'] == \"Indirect\":\n addr = ins['operand']['address'].to_bytes(4, \"big\")\n machineCode[i] |= addr[0] << 12\n machineCode[i] |= addr[1] << 8\n machineCode[i] |= addr[2] << 4\n machineCode[i] |= addr[3]\n if ins['operand']['addressingMode'] == \"Absolute\": machineCode[i] |= 0x0 << 16\n elif ins['operand']['addressingMode'] == \"Indirect\": machineCode[i] |= 0x1 << 16\n\n if ins['operand']['addressingMode'] == \"Register\":\n machineCode[i] |= 0x2 << 16\n machineCode[i] |= ins['operand']['offset']\n if 'Rout' in ins['operand']:\n # Clears the bits at the location\n machineCode[i] &= 0xFFFFF0FF\n machineCode[i] |= (ins['operand']['Rin'] & 0xF) << 8\n else:\n # Clears the bits at the location\n machineCode[i] &= 0xFFFF0FFF\n machineCode[i] |= (ins['operand']['Rin'] & 0xF) << 12\n elif ins['operand']['addressingMode'] == 
\"RegisterOffset\": \n machineCode[i] |= 0x3 << 16\n\n if 'Rout' in ins['operand']:\n # Clears the bits at the location\n machineCode[i] &= 0xFFFF0FFF\n machineCode[i] |= (ins['operand']['Rout'] & 0xF) << 12\n print(machineCode[i])",
"def dis_to_instructions(disasm):\n\tline_num = None\n\tinstructions = []\n\tfor line in disasm.split(\"\\n\"):\n\t\tmatch = re.search(\n\t\t\tr\"( ?(?P<line_num>\\d+)[ >]+)?(?P<offset>\\d+) (?P<opname>[A-Z_]+)(?:\\s+(?P<arg>\\d+)(?: \\((?P<argval>.+)\\))?)?\",\n\t\t\tline\n\t\t)\n\t\tif match is not None:\n\t\t\tif match[\"line_num\"]:\n\t\t\t\tline_num = int(match[\"line_num\"])\n\t\t\toffset = int(match[\"offset\"])\n\t\t\topname = match[\"opname\"]\n\t\t\tif match[\"arg\"] is not None:\n\t\t\t\targ = int(match[\"arg\"])\n\t\t\telse:\n\t\t\t\targ = None\n\t\t\tif opname == \"EXTENDED_ARG\":\n\t\t\t\tcontinue\n\t\t\targval = match[\"argval\"]\n\t\t\tinstructions.append(Instruction(line_num, offset, opname, arg, argval))\n\treturn instructions",
"def _translate(self, data):\n pass\n return [i*2 for i in data]",
"def convertData(data):\n\n return data",
"def translate(self, instruction):\n trans_instrs = []\n\n try:\n src_read_instrs = []\n dst_write_instrs = []\n\n src_regs, src_read_instrs = self._translate_src_oprnds(instruction)\n dst_regs, dst_write_instrs = self._translate_dst_oprnds(instruction)\n\n trans_instrs = self.instr_translator.translate(instruction, src_regs, dst_regs)\n except NotImplementedError as err:\n src_read_instrs = []\n dst_write_instrs = []\n\n trans_instrs = [self.ir_builder.gen_unkn()]\n\n logger.debug(\"[E] x86 Translator :: Instruction not supported : '%s' (%s)\" % (instruction, instruction.mnemonic))\n except Exception as err:\n print_translation_exception(instruction, err)\n\n translation = src_read_instrs + trans_instrs + dst_write_instrs\n\n self._translate_instr_addresses(instruction.address, translation)\n\n return translation",
"def _convert_all(self, ast, label, idlnode_ctor):\n res = []\n found = self._find_all(ast, label)\n if not found:\n return res\n if not isinstance(found, list):\n raise RuntimeError(\"Expected list but %s found\" % type(found))\n for childAst in found:\n converted = idlnode_ctor(childAst)\n res.append(converted)\n return res",
"def instruction_iter(self):\n for ins in self.instructions:\n yield ins",
"def ingest_many(self, data):\n raise NotImplementedError()",
"def decode(self) -> Iterable:\r\n if self.data[0:1] not in (b'd', b'l'):\r\n return self.__wrap_with_tuple()\r\n return self.__parse()",
"def disassemble(\n raw_data: bytes, count: int = -1, base: int = DISASSEMBLY_DEFAULT_BASE_ADDRESS\n) -> list[Instruction]:\n arch = cemu.core.context.architecture\n insns: list[Instruction] = []\n for idx, ins in enumerate(arch.cs.disasm(raw_data, base)):\n insn = Instruction(ins.address, ins.mnemonic, ins.op_str, ins.bytes)\n insns.append(insn)\n if idx == count:\n break\n\n dbg(f\"{insns=}\")\n return insns",
"def data_to(self, data, name):\r\n return self.interpreter(name)(data)",
"def convert_raw_configuration_to_input_instances(self) -> List[Input]:\n\n return [\n self.get_or_create_input_instance_from_raw(key, value)[0]\n for key, value in self.raw_configuration.items()\n ]",
"def preprocess(self, ir: str) -> List[str]:\n lines = [[x] for x in ir.split(\"\\n\")]\n try:\n structs = inst2vec_preprocess.GetStructTypes(ir)\n for line in lines:\n for struct, definition in structs.items():\n line[0] = line[0].replace(struct, definition)\n except ValueError:\n pass\n\n preprocessed_lines, _ = inst2vec_preprocess.preprocess(lines)\n preprocessed_texts = [\n inst2vec_preprocess.PreprocessStatement(x[0]) if len(x) else \"\"\n for x in preprocessed_lines\n ]\n return [x for x in preprocessed_texts if x]",
"def convertData(img):\n dataset = []\n for i in img:\n dataset.append(format(ord(i), '08b'))\n return dataset",
"def processData(data):\n ids, instances, labels = [], [], []\n for i in data:\n idField = int(i[0])\n instance = i[1:-1]\n label = i[-1]\n ids.append(idField)\n instances.append(instance)\n labels.append(label)\n\n ids = np.array(ids)\n instances = np.array(instances)\n labels = np.array(labels)\n\n return (ids, instances, labels)",
"def transmogrify_inputs(self, plates):\n ichips, aliquots = [], []\n # A single plate run must be converted to a list of plates\n if isinstance(plates, dict):\n plates = [plates]\n for plate in plates:\n for chip_nr in range(1, 5):\n for well_nr in range(1, 9):\n key = \"chip-{}_well-{}\".format(chip_nr, well_nr)\n if plate.get(key, False):\n brains = find(object_provides=IAliquot.__identifier__,\n Title=plate[key])\n plate[key] = brains[0].UID\n aliquots.append(brains[0].getObject())\n key = \"chip-id-{}\".format(chip_nr)\n if plate.get(key, False):\n brains = find(object_provides=IiChip.__identifier__,\n Title=plate[key])\n plate[key] = brains[0].UID\n ichips.append(brains[0].getObject())\n\n return plates, ichips, aliquots",
"def get_instructions(file_input: TextIO) -> List[str]:\n instructions = []\n for instruction in file_input:\n instruction = instruction.strip()\n instructions.append(instruction)\n\n return instructions",
"def example_to_data(self, example):\n raise NotImplementedError",
"def i(target: QubitSetInput) -> Iterable[Instruction]:\n return [Instruction(I(), target=qubit) for qubit in QubitSet(target)]",
"def _convert_data(self, data):\n if isinstance(data, Tensor):\n data = data.asnumpy()\n elif isinstance(data, list):\n data = np.array(data)\n elif isinstance(data, np.ndarray):\n pass\n else:\n raise TypeError('Input data type must be tensor, list or numpy.ndarray')\n return data",
"def compile_as_obj(self, data):\r\n data = self.compile_as_list(data)\r\n print(data)\r\n fut = []\r\n for i in data:\r\n obj = Record(**i)\r\n obj._primary_key = self.primary_key\r\n fut.append(obj)\r\n return fut",
"def generator(self, data):\n for instance in data:\n yield (0, [str(instance.string)])",
"def decode(data: bytes) -> Iterable:\r\n decoder = Decoder(data)\r\n return decoder.decode()",
"def soft_list_eval(data):\n out = []\n for x in data:\n try:\n out.append(eval(x, {}))\n except:\n try:\n out.append(x.decode())\n except (AttributeError, SyntaxError):\n out.append(x)\n \n return out",
"def GetInstructionList():\n return [i[0] for i in ida_idp.ph_get_instruc() if i[0]]",
"def consumeData(self, data):\n ret = []\n\n soup = BeautifulSoup(StringIO(data))\n ingredientses = soup.find_all(None, itemprop='ingredients')\n for ing in ingredientses:\n separateByClass(soup, ing, \"ingredient\")\n separateByTag(soup, ing, ['br', 'tr', 'li'])\n instructionses = soup.find_all(None, itemprop=\"recipeInstructions\")\n for ins in instructionses:\n separateByClass(soup, ins, \"instruction\")\n separateByTag(soup, ins, ['br', 'tr', 'li'])\n workingDocument = StringIO(soup.encode('utf-8'))\n\n items = microdata.get_items(workingDocument)\n for i in items:\n for typ in i.itemtype:\n if typ.string == MICROFORMAT_RECIPE:\n ret.append(i.json())\n break\n return map(json.loads, ret)",
"def parse_instr_bin_list(lines, instructions, instruction_names):\r\n for line_num in range(len(lines)):\r\n if line_num % 2 == 1:\r\n # format the binary instruction (remove spaces and expend if neeeded to 96 bits)\r\n instruction_to_add = lines[line_num].replace(\"\\t\", \"\").rstrip()[6:]\r\n while len(instruction_to_add) < 96:\r\n instruction_to_add += \"0\"\r\n instructions.append(instruction_to_add)\r\n else:\r\n instruction_names.append(lines[line_num].partition(\"\\t\")[0])"
] | [
"0.5894083",
"0.580195",
"0.56562746",
"0.5608601",
"0.55650634",
"0.55505216",
"0.55473304",
"0.5376276",
"0.53636885",
"0.5328439",
"0.5309102",
"0.52925634",
"0.52925086",
"0.5272285",
"0.5267069",
"0.5258318",
"0.5257359",
"0.5243541",
"0.5228365",
"0.5214779",
"0.5211774",
"0.52006686",
"0.5193949",
"0.51849824",
"0.5168488",
"0.5163527",
"0.51582235",
"0.515117",
"0.51354444",
"0.5133858"
] | 0.63322127 | 0 |
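The process_data document above calls a process_line helper and returns Instruction objects, neither of which is included in this record. A minimal sketch, assuming the field names used by the following perform_instructions record (register, op, value, base, check, limit) and assuming input lines of the form "b inc 5 if a > 1"; both assumptions are illustrative and not part of the dataset row.

    from typing import NamedTuple

    class Instruction(NamedTuple):
        register: str   # register to update
        op: str         # update operation name, e.g. "inc" or "dec"
        value: int      # amount passed to the update operation
        base: str       # register read by the condition
        check: str      # comparison operator, e.g. ">" or "=="
        limit: int      # right-hand side of the comparison

    def process_line(line: str) -> Instruction:
        # "b inc 5 if a > 1" -> Instruction("b", "inc", 5, "a", ">", 1)
        register, op, value, _if, base, check, limit = line.split()
        return Instruction(register, op, int(value), base, check, int(limit))

With the record's process_data in scope, process_data("b inc 5 if a > 1\na inc 1 if b < 5") would then yield a list of two Instruction instances.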
Apply all instructions and return registers + the biggest value seen. | def perform_instructions(
instructions: list[Instruction],
) -> tuple[DefaultDict[str, int], int]:
registers: DefaultDict[str, int] = defaultdict(int)
biggest = 0
for instruction in instructions:
update = OPERATORS[instruction.op]
check = OPERATORS[instruction.check]
register = instruction.register
old_value = registers[register]
base = registers[instruction.base]
if check(base, instruction.limit):
registers[register] = update(old_value, instruction.value)
if registers[register] > biggest:
biggest = registers[register]
return registers, biggest | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_most_valuable(self):\n return self.most_valuable",
"def computeActionFromValues(self, state):\n \"*** YOUR CODE HERE ***\"\n maxvalue = -100000000\n bestaction = None\n for action in self.mdp.getPossibleActions(state):\n valueforthisaction = self.getQValue(state, action) # is this right? \n if valueforthisaction > maxvalue:\n bestaction = action\n maxvalue = valueforthisaction\n return bestaction",
"def return_the_maximum(self):\n\n return self.__max_stack[-1]",
"def get_max(self):\n # 0(1)\n return self.max_stack.peek()\n\n # Don't need find_max we returned max_stack.peek()",
"def max_val(board):\n v = -math.inf\n if terminal(board):\n return utility(board)\n for action in actions(board):\n v = max(v,min_val(result(board,action)))\n return v",
"def calculate_greatest(self):\n greatest = 0\n for resourceList in self.loading.values():\n for time, use in resourceList:\n if use > greatest:\n greatest = use\n self.emit(\"greatest_calculated\",greatest)\n return greatest",
"def max(self):\n assert self.__stack\n return self.__max_values[-1]",
"def fastMaxVal(toConsider, avail, memo ={}):\n \n if(len(toConsider), avail) in memo:\n result = memo[(len(toConsider), avail)]\n elif toConsider == [] or avail == 0:\n result =(0,())\n elif toConsider[0].getCost()>avail:\n #explore right branch\n result = fastMaxVal(toConsider[1:], avail, memo)\n else:\n nextItem = toConsider[0]\n #Explore left branch\n withVal, withToTake =\\\n fastMaxVal(toConsider[1:],\n avail-nextItem.getCost(),memo)\n withVal += nextItem.getValue()\n #explore right branch\n withoutVal, withoutToTake = fastMaxVal(toConsider[1:], avail, memo)\n \n #Choose better branch\n if withVal > withoutVal:\n result = (withVal,withToTake + (next.Item,))\n else:\n result = (withoutVal, withoutToTake)\n memo[(len(toConsider), avail)] = result\n return result",
"def _max(self, board: Board) -> (float, int):\n\n #\n # First we check if we have seen this board position before, and if yes just return the cached value\n #\n board_hash = board.hash_value()\n if board_hash in self.cache:\n return self.cache[board_hash]\n\n #\n # Init the min value as well as action. Min value is set to DRAW as this value will pass through in case\n # of a draw\n #\n max_value = self.DRAW_VALUE\n action = -1\n\n #\n # If the game has already finished we return. Otherwise we look at possible continuations\n #\n winner = board.who_won()\n if winner == self.side:\n max_value = self.WIN_VALUE\n action = -1\n elif winner == board.other_side(self.side):\n max_value = self.LOSS_VALUE\n action = -1\n else:\n for index in [i for i, e in enumerate(board.state) if board.state[i] == EMPTY]:\n b = Board(board.state)\n b.move(index, self.side)\n\n res, _ = self._min(b)\n if res > max_value or action == -1:\n max_value = res\n action = index\n\n # Shortcut: Can't get better than that, so abort here and return this move\n if max_value == self.WIN_VALUE:\n self.cache[board_hash] = (max_value, action)\n return max_value, action\n\n self.cache[board_hash] = (max_value, action)\n return max_value, action",
"def find_max(self):\n if self.right:\n return self.right.find_max()\n return self.data",
"def globalMaximum(self):\n # The global maximum is at one peak's position\n potential_max = list()\n for func, pos, height, width in zip(self.peaks_function,\n self.peaks_position,\n self.peaks_height,\n self.peaks_width):\n potential_max.append((func(pos, pos, height, width), pos))\n return max(potential_max)",
"def computeActionFromValues(self, state):\n \n State_actions = self.mdp.getPossibleActions(state)\n max_Action=util.Counter()\n for k in State_actions:\n max_Action[k] = self.getQValue(state,k)\n return max_Action.argMax()\n \n util.raiseNotDefined()",
"def get_max(self):\n current = self\n while current.hasRight(): # This is the belief that the max has to be to the right. If you can't go right either in the begining or any more\n # if current has a right this line will be set and will keep going from line 129 to 130 until there are no more rights.\n current = current.right\n # this line returns as soon there is no more rights. breaking out of the loop.\n return current.value",
"def _compute_best_value(self):\n reduced_cs = []\n concerned_vars = set()\n\n for c in self.utilities:\n asgt = filter_assignment_dict(self._neighbors_values, c.dimensions)\n reduced_cs.append(c.slice(asgt))\n concerned_vars.update(c.dimensions)\n var_val, rel_val = find_arg_optimal(\n self.variable,\n lambda x: functools.reduce(operator.add, [f(x) for f in reduced_cs]),\n self._mode,\n )\n # Add the cost for each variable value if any\n for var in concerned_vars:\n if var.name == self.name:\n rel_val += var.cost_for_val(self.current_value)\n else:\n rel_val += var.cost_for_val(self._neighbors_values[var.name])\n\n return var_val, rel_val",
"def find_max(self):\n\n if self.right:\n return self.right.find_max()\n\n return self.data",
"def get_highest_value_action(self, state):\n a = self.sess.run(self.network.maxOutputNode, feed_dict={self.network.inputs: [state]})\n return a[0]",
"def max_value(board): # the X player wants to maximize the score\n if terminal(board):\n return utility(board), None\n else:\n v = -math.inf\n move = None\n for action in actions(board):\n val, _ = min_value(result(board, action))\n # Check if returned Value is less than v if not return v and current action\n if val > v:\n # Assign v the maximum value for future evaluation\n v = max(v,val)\n # Keep track of action\n move = action\n # If best move then return it\n if v == 1:\n return v, move\n return v, move",
"def __mini_max(self, board: Board, depth: int, is_max: bool, states: List[Board]) -> Tuple[int, Board]:\n self.nodes_count += 1\n if depth == 0:\n return self.__moves_available(board), states[0]\n all_moves = self.get_all_moves(board, self.player_color)\n\n if self.get_num_of_moves(board, self.opponent_color) == 0:\n return 9999, states[0]\n\n func = max if is_max else min\n return func([self.__mini_max(m, depth - 1, not is_max, states + [m]) for m in all_moves], key=lambda x: x[0])",
"def _single_value_max(self, maps, threshold):\r\n max_vec = np.max(maps, axis=1)\r\n cmin = np.min(max_vec)\r\n cmax = np.max(max_vec)\r\n limit = cmax - (cmax - cmin) * threshold\r\n max_mask = max_vec > limit\r\n argmax = np.argmax(maps, axis=1)\r\n return (argmax + 1) * max_mask",
"def _compute_best_value(self):\n asgt = self._neighbors_values.copy()\n best_cost, best_val = None, []\n\n for v in self._variable.domain:\n asgt[self.variable.name] = v\n c = self._compute_cost(**asgt)\n if (\n best_cost is None\n or (best_cost > c and self._mode == \"min\")\n or (best_cost < c and self._mode == \"max\")\n ):\n best_cost = c\n best_val = [v]\n elif best_cost == c:\n best_val.append(v)\n\n return best_val, best_cost",
"def __call__(self, individual, count=True):\n possible_values = []\n \n for func, pos, height, width in zip(self.peaks_function,\n self.peaks_position,\n self.peaks_height,\n self.peaks_width):\n possible_values.append(func(individual, pos, height, width))\n \n if self.basis_function:\n possible_values.append(self.basis_function(individual))\n\n fitness = max(possible_values)\n\n if count:\n # Compute the offline error\n self.nevals += 1\n if self._optimum is None:\n self._optimum = self.globalMaximum()[0]\n self._error = abs(fitness - self._optimum)\n self._error = min(self._error, abs(fitness - self._optimum))\n self._offline_error += self._error\n\n # We exausted the number of evaluation, change peaks for the next one.\n if self.period > 0 and self.nevals % self.period == 0:\n self.changePeaks()\n \n return fitness,",
"def solve_bruteforce(self):\n max_value = -1\n for z in range(0, self.k):\n max_value = -1\n max_index = -1\n for i, v in enumerate(self.numbers):\n if v > max_value:\n max_index = i\n max_value = v\n del self.numbers[max_index]\n\n return max_value",
"def max_apply(x): \n if len(x) == 1:\n return x[0]\n else:\n return x[1]",
"def maximum_inplace(a, b):",
"def max_value(board, max_util, min_util, depth):\r\n \r\n global nodes_generated \r\n global min_prune\r\n global max_prune\r\n global max_depth\r\n \r\n nodes_generated += 1\r\n max_depth = max(max_depth,depth)\r\n \r\n if cutoff_search(board, depth):\r\n return evaluation(board)\r\n v = -1000\r\n moves = legal_moves(board,1)\r\n for move in moves:\r\n temp_board = camelot_board.Camelot(list(board.white),list(board.black))\r\n state = action(temp_board, move, 1)\r\n v = max(v, min_value(state, max_util, min_util, depth + 1))\r\n if v >= min_util:\r\n max_prune += 1\r\n return v\r\n max_util = max(max_util, v)\r\n return v",
"def max_value (self, new_state):\n \n ##create a list to save reward information\n return_list = []\n \n ##get each values from Q based on the new_state and its possible actions\n for s, a in self.Q.keys():\n if s == new_state:\n return_list.append(self.Q[s,a])\n \n ##return the maximum value based on new_state\n return max(return_list)",
"def computeActionFromValues(self, state):\n \"*** YOUR CODE HERE ***\"\n actions = self.mdp.getPossibleActions(state)\n # Initialize max_value as - infinity\n # Initialize best action as None, choose max_value action\n max_value = float(\"-inf\")\n computed_action = None\n\n for action in actions:\n # Find q value of specified action\n q_value = self.computeQValueFromValues(state, action)\n # Update action if it's the best so far\n if q_value > max_value:\n max_value = q_value\n computed_action = action\n return computed_action",
"def computeValueFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n max_next_qvalue = None\n for nextAction in self.legalActions:\n next_qvalue = self.getQValue(state, nextAction)\n if max_next_qvalue is None or max_next_qvalue < next_qvalue:\n max_next_qvalue = next_qvalue\n if max_next_qvalue is None:\n max_next_qvalue = 0.0\n\n return max_next_qvalue",
"def computeValueFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n max_next_qvalue = None\n for nextAction in self.legalActions:\n next_qvalue = self.getQValue(state, nextAction)\n if max_next_qvalue is None or max_next_qvalue < next_qvalue:\n max_next_qvalue = next_qvalue\n if max_next_qvalue is None:\n max_next_qvalue = 0.0\n\n return max_next_qvalue",
"def reduce_run():"
] | [
"0.5996123",
"0.5894548",
"0.5865219",
"0.5787033",
"0.5779433",
"0.57666355",
"0.56985855",
"0.56887287",
"0.56882644",
"0.5670334",
"0.56451887",
"0.56444883",
"0.5636384",
"0.56336486",
"0.56252307",
"0.55974555",
"0.55864197",
"0.5554566",
"0.55422884",
"0.5532165",
"0.55254763",
"0.55068505",
"0.547662",
"0.54602927",
"0.54357463",
"0.5398923",
"0.53932625",
"0.5381545",
"0.5381545",
"0.53535044"
] | 0.65974396 | 0 |
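The perform_instructions document above looks up callables in an OPERATORS table that the record does not define. A minimal sketch, assuming operator-module callables and reusing the hypothetical Instruction/process_line sketch shown after the previous record:

    import operator

    # Assumed mapping, consistent with how update(old_value, value) and
    # check(base, limit) are called in the document above.
    OPERATORS = {
        "inc": operator.add, "dec": operator.sub,   # update operations
        ">": operator.gt, "<": operator.lt,         # condition checks
        ">=": operator.ge, "<=": operator.le,
        "==": operator.eq, "!=": operator.ne,
    }

    # Assuming the records' process_data and perform_instructions are in scope:
    regs, biggest = perform_instructions(
        process_data("b inc 5 if a > 1\na inc 1 if b < 5")
    )
    # The first line's condition fails (a starts at 0), the second sets a to 1,
    # so biggest ends up as 1.
    print(dict(regs), biggest)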
Convert a URL to IDN notation | def _convert_to_idn(url):
# this function should only be called with a unicode string
# strategy: if the host cannot be encoded in ascii, then
# it'll be necessary to encode it in idn form
parts = list(urllib.parse.urlsplit(url))
try:
parts[1].encode('ascii')
except UnicodeEncodeError:
# the url needs to be converted to idn notation
host = parts[1].rsplit(':', 1)
newhost = []
port = ''
if len(host) == 2:
port = host.pop()
for h in host[0].split('.'):
newhost.append(h.encode('idna').decode('utf-8'))
parts[1] = '.'.join(newhost)
if port:
parts[1] += ':' + port
return urllib.parse.urlunsplit(parts)
else:
return url | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def url_to_doi(url):\n return url[url.index(prefix):].rstrip(url_suffix).rstrip(INT_URL_SUFFIX)",
"def iri2uri(uri): \r\n if isinstance(uri ,unicode):\r\n (scheme, authority, path, query, fragment) = urlparse.urlsplit(uri)\r\n authority = authority.encode('idna')\r\n # For each character in 'ucschar' or 'iprivate'\r\n # 1. encode as utf-8\r\n # 2. then %-encode each octet of that utf-8 \r\n uri = urlparse.urlunsplit((scheme, authority, path, query, fragment))\r\n uri = \"\".join([encode(c) for c in uri])\r\n return uri",
"def format_url(url):\n no_scheme = url.split('://', 1)[-1]\n return '[{0}]({1})'.format(no_scheme, url)",
"def format_internal_url(url):\n\n url = url.split('\"')[-2]\n\n if not url.startswith('https:'):\n url = (\n 'https://medium.com{}'.format(url) if not url.startswith('//medium.com')\n else 'https:{}'.format(url))\n\n return url",
"def asinGeturl(url):\n asin = url.split('/')\n for i in asin:\n asinNum = i.strip()\n if len(asinNum) != 10:\n continue\n else:\n asinN = asinNum\n\n return asinN",
"def encode_url(url):\n\treturn url.replace(' ', '_')",
"def test_idna():\n assert (normalize_url(\"http://ドメイン.テスト\") ==\n \"http://xn--eckwd4c7c.xn--zckzah/\")\n assert (normalize_url(\"http://Яндекс.рф\") ==\n \"http://xn--d1acpjx3f.xn--p1ai/\")",
"def encodeUrl(self, id):\n characters = \"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n # base = 62\n base = len(characters)\n ret = []\n while id > 0:\n val = id % base\n ret.append(characters[val])\n id = id // base\n # reverse and return\n return \"\".join(ret[::-1])",
"def make_cm_url(url):\n protocol, address = url.split('//')\n address_parts = address.split('/')\n new_address_parts = []\n for i, part in enumerate(address_parts):\n if part == 'api':\n continue\n if i == 0 and '-gk-' in part:\n new_address_parts.append(part.replace('-gk-', '-cm-'))\n elif part.endswith('s'):\n new_address_parts.append(part[:-1])\n else:\n new_address_parts.append(part)\n return protocol + '//' + '/'.join(new_address_parts)",
"def format_url(url):\n if not (url.startswith(\"//\") or url.startswith(\"http\")):\n url = \"http://\" + url\n return url",
"def normalize_url(url):\n # print(url)\n if not url.startswith('http://') and not url.startswith('https://'):\n return 'https://{}/{}'.format(zone_name, url.replace('//', '/'))\n return url",
"def formatURL(self, url):\n pattern = r'(imdb\\.com\\/title\\/(.*/))'\n urls = re.findall(pattern, url)\n urls = urls[0]\n new_url = urls[0]\n new_url = \"https://www.\" + new_url\n title_code = urls[1].replace(\"/\", \"\")\n return new_url",
"def makeXnatUrl(host, _url):\n\n if isinstance(_url, bytes):\n _url = _url.decode(sys.getdefaultencoding())\n \n if _url.startswith('/'):\n _url = _url[1:]\n\n if not _url.startswith(host):\n if _url.startswith('data/'):\n _url = requests.compat.urljoin(host, _url)\n else:\n prefixUri = requests.compat.urljoin(host, 'data/archive/')\n _url = requests.compat.urljoin(prefixUri, _url)\n\n\n #--------------------\n # Remove double slashes\n #--------------------\n _url = _url.replace('//', '/')\n if 'http:/' in _url:\n _url = _url.replace('http:/', 'http://')\n elif 'https:/' in _url:\n _url = _url.replace('https:/', 'https://')\n\n return _url",
"def format_url(url: str) -> str:\n return urljoin(url.replace('https://app', 'https://api'), '')",
"def _anonymize_url(url: str, ip_dict: Dict[str, int]) -> str:\n regex_match = re.match(r\"(?i)(^https?://)(.*?)([/:].*$)\", url)\n ip = regex_match.group(2)\n\n try:\n num = ip_dict[ip]\n except KeyError:\n ip_dict[ip] = len(ip_dict.values()) + 1\n num = ip_dict[ip]\n\n return f\"{regex_match.group(1)}ip-{num:05d}{regex_match.group(3)}\"",
"def convert_to_dl_url(_id, ext):\n result = list(urlparse(base_url))\n result[4] = urlencode({\n \"M\": \"d\",\n \"P\": \"{0}.{1}\".format(_id, ext)})\n return urlunparse(result)",
"def _format_api_url(self, url):\n user_name = self._get_user_name()\n # format and return url\n return url.format(\n user_name = user_name,\n element = urllib.quote(self.qnet_element.encode('utf-8'), safe=''),\n token = self._md5(\"%s:%s:%s\" % (user_name, self.iteration_id, self._secret_key))\n )",
"def _convert_url(url, website):\n\n if website == 'xinhua':\n page_url = url.replace('\"', '')\n page_url = page_url.encode('ascii')\n elif website == 'upi':\n page_url = url.encode('ascii')\n elif website == 'zaman':\n # Find the weird thing. They tend to be ap or reuters, but generalized\n # just in case\n com = url.find('.com')\n slash = url[com + 4:].find('/')\n replaced_url = url.replace(url[com + 4:com + slash + 4], '')\n split = replaced_url.split('/')\n # This is nasty and hackish but it gets the jobs done.\n page_url = '/'.join(['/'.join(split[0:3]), 'world_' + split[-1]])\n else:\n page_url = url.encode('utf-8')\n\n return page_url",
"def to_url(val, scheme, url_scheme=\"http\"):\n pid = normalize_pid(val, scheme)\n if scheme in LANDING_URLS:\n if scheme == \"gnd\" and pid.startswith(\"gnd:\"):\n pid = pid[len(\"gnd:\") :]\n if scheme == \"urn\" and not pid.lower().startswith(\"urn:nbn:\"):\n return \"\"\n if scheme == \"ascl\":\n pid = val.split(\":\")[1]\n if scheme == \"viaf\" and pid.startswith(\"viaf:\"):\n pid = pid[len(\"viaf:\") :]\n url_scheme = \"https\"\n return LANDING_URLS[scheme].format(scheme=url_scheme, pid=pid)\n elif scheme in [\"purl\", \"url\"]:\n return pid\n\n return \"\"",
"def correct_url(self, url: str) -> str:\n # check if url has \"http://\" prefix\n if \"http://\" not in url:\n if \"https://\" not in url:\n url = \"http://\" + url\n url_split = url.split(\"/\")\n # correct URL as needed for script\n if url_split[4] == '':\n raise URLError('No Story ID given')\n if len(url_split) == 5:\n url_split.append('')\n else:\n raise URLError('Unknown URL format')\n url = '/'.join(url_split)\n url = urljoin(url, ' ')[0:-2]\n return url",
"def _format_id(ns, id):\n label = '%s:%s' % (ns, id)\n label = label.replace(' ', '_')\n url = get_identifiers_url(ns, id)\n return (label, url)",
"def shorten_id(id):\n if id.startswith('CN'):\n id = id[2:]\n if not id[-1].isdigit():\n id = id[:-1]\n return id",
"def doi_to_url(doi, plos_network=False):\n URL_TMP = INT_URL_TMP if plos_network else EXT_URL_TMP\n return URL_TMP.format(doi)",
"def encode_url(self, url):\n # turn string into unicode\n if not isinstance(url, unicode):\n url = url.decode('utf8')\n\n # parse it\n parsed = urlsplit(url)\n\n # divide the netloc further\n netloc_pattern = re.compile(r\"\"\"\n (?:(?P<user>[^:@]+)(?::(?P<password>[^:@]+))?@)?\n (?P<host>[^:]+)\n (?::(?P<port>[0-9]+))?\n \"\"\", re.X | re.U)\n netloc_parsed = netloc_pattern.match(parsed.netloc).groupdict()\n\n # encode each component\n scheme = parsed.scheme\n user = netloc_parsed['user'] and quote(netloc_parsed['user'])\n password = (netloc_parsed['password'] and\n quote(netloc_parsed['password']))\n host = netloc_parsed['host']\n port = netloc_parsed['port'] and netloc_parsed['port']\n path = '/'.join( # could be encoded slashes!\n quote(unquote(pce).encode('utf8'), '')\n for pce in parsed.path.split('/')\n )\n query = quote(unquote(parsed.query), '=&?/')\n fragment = quote(unquote(parsed.fragment))\n\n # put it back together\n netloc = ''\n if user:\n netloc += user\n if password:\n netloc += ':' + password\n netloc += '@'\n netloc += host\n if port:\n netloc += ':'+port\n return urlunsplit((scheme, netloc, path, query, fragment))",
"def extract_id(url):\n trail_id = url.replace('https://www.trailforks.com/trails/','').replace('/','')\n return trail_id",
"def shortURLToId(self, shortURL):\n id = 0\n for i in shortURL: \n val_i = ord(i) \n if(val_i >= ord('a') and val_i <= ord('z')): \n id = id*62 + val_i - ord('a') \n elif(val_i >= ord('A') and val_i <= ord('Z')): \n id = id*62 + val_i - ord('Z') + 26\n else: \n id = id*62 + val_i - ord('0') + 52\n return id",
"def create_key_from_url(raw_url):\n org_url = urllib2.urlparse.urlparse(raw_url)\n new_key = ''\n net_location = org_url.netloc\n netloc_list = net_location.split(\".\")\n netloc_list.reverse()\n for part in netloc_list:\n new_key += '%s.' % part\n new_key = new_key[:-1] # Removes trailing period\n new_key = new_key + org_url.path \n return new_key",
"def _transform_identifier(self, identifier, scheme):\n urlize = self.context.get(\"urlize_identifiers\", True)\n prefix_scheme = self.context.get(\"prefix_identifier_schemes\", True)\n result = None\n\n if urlize:\n result = idutils.to_url(identifier, scheme, url_scheme=\"https\")\n\n if not result and prefix_scheme and not identifier.startswith(scheme):\n result = f\"{scheme}:{identifier}\"\n\n return result or identifier",
"def parse_url(url):\n url = urllib.parse.urlparse(url)\n query = urllib.parse.parse_qs(url.query)\n query_ = query.get('dn', query.get('title', ''))[0]\n if url.scheme == \"magnet\":\n return \"magnet:?xt={}\".format(query['xt'][0]), query_\n return \"http://{}{}{}\".format(*url[0:3]), query_",
"def convert_single_relation_url_to_simplified_format(relation_url):\n relation_url = relation_url.strip()\n prefix = 'www.freebase.com/'\n if not relation_url.startswith(prefix):\n raise Exception(\"Invalid format of relation '{}', expected prefix '{}'\".format(relation_url, prefix))\n return relation_url[len(prefix):].replace('/', '.').strip()"
] | [
"0.7021145",
"0.65356356",
"0.6437324",
"0.6412383",
"0.6395479",
"0.6311997",
"0.61961514",
"0.6158848",
"0.61555415",
"0.6119481",
"0.6110008",
"0.61025923",
"0.60946435",
"0.6066466",
"0.6030167",
"0.6021279",
"0.6020139",
"0.6007491",
"0.60052556",
"0.5983566",
"0.5945938",
"0.5928175",
"0.5927752",
"0.59249055",
"0.5898025",
"0.5870845",
"0.58703",
"0.5847327",
"0.58330554",
"0.5827036"
] | 0.8388875 | 0 |
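A short usage sketch for the _convert_to_idn document above; the expected punycode labels match the test_idna negative included in this record.

    # ASCII hosts pass through unchanged; non-ASCII hosts are re-encoded with
    # the built-in "idna" codec, label by label, preserving any explicit port.
    print(_convert_to_idn("https://example.com:8080/path"))
    # -> https://example.com:8080/path
    print(_convert_to_idn("http://ドメイン.テスト/path"))
    # -> http://xn--eckwd4c7c.xn--zckzah/path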
One epoch is a single tournament here | def one_epoch(self, tournament_id: int, epoch=0):
# TODO: tournament pre-fetcher
tournament = Tournament(tournament_id, cache=self.cache)
# Measure correlation before to see whether gradient update took effect
correlation_before = self.get_prediction_correlation(tournament)
correlation_after = 0
# Prepare Trainer
self.model.train()
# For optimizer, keep embedding LR the same, but scale head LR by number of teams (more teams -> larger LR)
# self.optimizer.lr = self.optimizer.lr * something
self.optimizer.zero_grad()
# collate_fn = lambda x: collate_match(x, tournament.max_members)
dl_match = DataLoader(tournament.matches, num_workers=self.jobs, batch_size=self.bs, shuffle=True)
iterator = tqdm(dl_match, position=0, desc=f'epoch {epoch+1:04d}/{self.total} id{tournament_id}')
cum_loss = 0
for i, (team_1, team_2, result) in enumerate(iterator):
# Calculate the loss based on match results
loss = self.model(team_1.to(self.device), team_2.to(self.device), result.to(self.device))
# Scale the loss by number of updates per team
# loss /= (tournament.matches.n_pairs - 1)
# Do backward step, accumulate loss and gradients
loss.backward()
cum_loss += loss.item()
# This condition is needed to update tqdm
if i == (len(dl_match) - 1):
# Perform optimizer step once in an epoch (we consider all the matches simultaneous)
self.optimizer.step()
# Clip weights if necessary
if self.clip_zero:
self.model.emb.apply(self.model.clipper)
# Scale head so the output would always be a weighted average
with torch.no_grad():
self.model.head.weight.div_(torch.sum(self.model.head.weight))
# self.model.head.weight = torch.nn.Parameter(self.model.head.weight /
# torch.sum(self.model.head.weight), requires_grad=True)
# Print difference in correlation
correlation_after = self.get_prediction_correlation(tournament)
postfix = {'loss': f'{cum_loss / (len(dl_match) + 1):.4f}',
'corr': f'{correlation_before:.4f} -> {correlation_after:.4f}',
}
else:
postfix = {'loss': f'{cum_loss / (i + 1):.4f}'}
iterator.set_postfix(postfix)
return cum_loss / len(dl_match), correlation_before, correlation_after | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tournament(self):\n pass",
"def train_one_epoch(self):\n raise NotImplementedError",
"def train_epoch(self) -> None:\n ct = self.config.training\n total_games = self._get_total_games()\n print(f\"Total Games: {total_games}\")\n train_size = int(0.9 * total_games)\n dataset_wrapper = DatasetWrapper(self.config)\n self.agent.model.fit(\n dataset_wrapper.get_dataset(train_size),\n epochs=ct.epoch_to_checkpoint,\n validation_data=dataset_wrapper.get_dataset(train_size, is_training=False),\n )",
"def TrainOneStep(self):\n pass",
"def train_on_history(self, history):\n \n # Split into episodes\n n_episodes = history[-1][\"episode\"] \n episodes = [list(filter(lambda h: h[\"episode\"]==e , history)\n ) for e in range(n_episodes)\n ]\n\n # Split into game lives\n for episode in episodes:\n \n \n game_lives = [\n list(filter(lambda h: h.get('info').get('ale.lives')==l, episode)\n ) for l in range(5)\n ]\n \n for life in game_lives:\n if life:\n self.train(life)\n else:\n print(\"No ocurrance\")\n return",
"def learn(self):\n epochswin = [] # count the number of wins at every epoch of the network against the preceding version\n epochdraw = [] # count the number of draws at every epoch of the network against the preceding version\n epochswingreedy = [] # count the number of wins against greedy at every epoch\n epochswinrandom = [] # count the number of wins against random at every epoch\n epochsdrawgreedy = [] # count the number of draws against greedy at every epoch\n epochsdrawrandom = [] # count the number of wins against random at every epoch\n epochswinminmax = [] # count the number of wins against minmax at every epoch\n epochsdrawminmax = [] # count the number of draws against minmax at every epoch\n\n\n if self.args.load_model == True:\n file = open(self.args.trainExampleCheckpoint + \"graphwins:iter\" + str(self.args.numIters) + \":eps\" + str(\n self.args.numEps) + \":dim\" + str(self.game.n) + \".txt\", \"r+\")\n lines = file.readlines()\n for index, line in enumerate(lines):\n for word in line.split():\n if index == 0:\n epochswin.append(word)\n elif index == 1:\n epochdraw.append(word)\n file.close()\n\n file = open(self.args.trainExampleCheckpoint + \"graphwins:iter\" + str(self.args.numIters) + \":eps\" + str(\n self.args.numEps) + \":dim\" + str(self.game.n) + \":greedyrandom.txt\", \"r+\")\n lines = file.readlines()\n for index, line in enumerate(lines):\n for word in line.split():\n if index == 0:\n epochswingreedy.append(word)\n elif index == 1:\n epochsdrawgreedy.append(word)\n elif index == 2:\n epochswinrandom.append(word)\n elif index == 3:\n epochsdrawrandom.append(word)\n elif index == 4:\n epochswinminmax.append(word)\n elif index == 5:\n epochsdrawminmax.append(word)\n file.close()\n self.loadTrainExamples()\n\n for i in range(1, self.args.numIters + 1):\n # bookkeeping\n print('------ITER ' + str(i) + '------')\n # examples of the iteration\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n eps_time = AverageMeter()\n bar = Bar('Self Play', max=self.args.numEps)\n end = time.time()\n\n for eps in range(self.args.numEps):\n iterationTrainExamples += self.executeEpisode()\n\n # bookkeeping + plot progress\n eps_time.update(time.time() - end)\n end = time.time()\n bar.suffix = '({eps}/{maxeps}) Eps Time: {et:.3f}s | Total: {total:} | ETA: {eta:}'.format(eps=eps + 1,\n maxeps=self.args.numEps,\n et=eps_time.avg,\n total=bar.elapsed_td,\n eta=bar.eta_td)\n bar.next()\n bar.finish()\n\n # save the iteration examples to the history\n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n print(\"len(trainExamplesHistory) =\", len(self.trainExamplesHistory),\n \" => remove the oldest trainExamples\")\n self.trainExamplesHistory.pop(0)\n # backup history to a file\n # NB! 
the examples were collected using the model from the previous iteration, so (i-1)\n self.saveTrainExamples(i - 1)\n\n # shuffle examlpes before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n shuffle(trainExamples)\n\n # training new network, keeping a copy of the old one\n\n filename = \"curent\"+str(i)+\"temp:iter\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n filenameBest = \"best\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=filename)\n exists = os.path.isfile(filenameBest)\n if exists:\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename=filenameBest)\n else:\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename=filename)\n pmcts = MCTS(self.game, self.pnet, self.args)\n\n self.nnet.train(trainExamples)\n nmcts = MCTS(self.game, self.nnet, self.args)\n\n print('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena(lambda x: np.argmax(pmcts.getActionProb(x, temp=0)),\n lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game,nmcts,pmcts,evaluate=True)\n\n pwins, nwins, draws = arena.playGames(self.args.arenaCompare, False)\n\n pmcts.clear()\n nmcts.clear()\n del pmcts\n del nmcts\n\n print(' ')\n print('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if i == 1:\n epochswin.append(pwins)\n epochdraw.append(0)\n\n epochswin.append(nwins)\n epochdraw.append(draws)\n self.writeLogsToFile(epochswin, epochdraw)\n\n ''' Get all the players and then pit them against the network. You need to modify here if you implement \n more players\n '''\n (gp, rp, mp) = self.decidePlayers()\n\n if self.args.parallel == 0:\n\n\n nmcts1 = MCTS(self.game, self.nnet, self.args)\n nmcts2 = MCTS(self.game, self.nnet, self.args)\n nmcts3 = MCTS(self.game, self.nnet, self.args)\n\n arenagreedy = Arena(lambda x: np.argmax(nmcts1.getActionProb(x, temp=0)), gp, self.game,nmcts1)\n arenarandom = Arena(lambda x: np.argmax(nmcts2.getActionProb(x, temp=0)), rp, self.game,nmcts2)\n arenaminmax = Arena(lambda x: np.argmax(nmcts3.getActionProb(x, temp=0)), mp, self.game,nmcts3,evaluate=True)\n\n pwinsminmax, nwinsminmax, drawsminmax = arenaminmax.playGames(self.args.arenaCompare)\n print(\"minmax - \"+str(pwinsminmax)+\" \"+str(nwinsminmax)+\" \"+str(drawsminmax))\n pwinsgreedy, nwinsgreedy, drawsgreedy = arenagreedy.playGames(self.args.arenaCompare)\n print(\"greedy - \"+str(pwinsgreedy)+\" \"+str(nwinsgreedy)+\" \"+str(drawsgreedy))\n pwinsreandom, nwinsrandom, drawsrandom = arenarandom.playGames(self.args.arenaCompare)\n print(\"random - \"+str(pwinsreandom)+\" \"+str(nwinsrandom)+\" \"+str(drawsrandom))\n\n nmcts1.clear()\n nmcts2.clear()\n nmcts3.clear()\n del nmcts1\n del nmcts2\n del nmcts3\n\n else:\n '''\n This will be used if you want to evaluate the network against the benchmarks in a parallel way\n '''\n\n self.args.update({'index': str(i)})\n\n p = self.parallel(self.args.arenaCompare)\n (pwinsminmax, nwinsminmax, drawsminmax) = p[0] # self.parallel(\"minmax\", self.args.arenaCompare)\n (pwinsgreedy, nwinsgreedy, drawsgreedy) = p[1] # self.parallel(\"greedy\",self.args.arenaCompare)\n (pwinsreandom, nwinsrandom, drawsrandom) = p[2] # self.parallel(\"random\",self.args.arenaCompare)\n\n epochsdrawgreedy.append(drawsgreedy)\n epochsdrawrandom.append(drawsrandom)\n epochswinrandom.append(pwinsreandom)\n 
epochswingreedy.append(pwinsgreedy)\n epochswinminmax.append(pwinsminmax)\n epochsdrawminmax.append(drawsminmax)\n\n self.writeLogsToFile(epochswingreedy, epochsdrawgreedy, epochswinrandom, epochsdrawrandom, epochswinminmax,\n epochsdrawminmax, training=False)\n\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) <= self.args.updateThreshold:\n print('REJECTING NEW MODEL')\n filename = \"curent\"+str(i)+\"temp:iter\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n filenameBest = \"best\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n exists = os.path.isfile(filenameBest)\n if exists:\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename=filenameBest)\n else:\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename=filename)\n\n else:\n print('ACCEPTING NEW MODEL')\n\n filename = \"best\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=filename)\n self.mcts.clear()\n del self.mcts\n self.mcts = MCTS(self.game, self.nnet, self.args, mcts=True) # reset search tree\n print(self.tracker.print_diff())\n self.writeLogsToFile(epochswin, epochdraw, training=True)",
"def test(self, test_iter, step, corpus_type, id):\n\n self.model.eval()\n stats = Statistics()\n if not os.path.exists(self.args.result_path):\n os.makedirs(self.args.result_path)\n if not os.path.exists(self.args.story_path):\n os.makedirs(self.args.story_path)\n can_path = self.args.result_path + corpus_type + '.' + id + '_step%d.candidate' % step\n gold_path = self.args.result_path + corpus_type + '.' + id + '_step%d.gold' % step\n story_path = self.args.story_path + corpus_type + '.' + id + '.story'\n with open(story_path, 'w') as save_story:\n with open(can_path, 'w') as save_pred:\n with open(gold_path, 'w') as save_gold:\n with torch.no_grad():\n for batch in test_iter:\n src = batch.src\n labels = batch.labels\n segs = batch.segs\n clss = batch.clss\n mask = batch.mask\n mask_cls = batch.mask_cls\n weight = batch.weight\n index = batch.index\n\n pred = []\n\n sents_vec, sent_scores, mask, cluster_weight = self.model(src, segs, clss, mask, mask_cls)\n loss = self.loss(sent_scores, labels.float())\n weight_loss = self.weight_loss(cluster_weight, weight)\n loss = (loss * mask.float()).sum()\n total_loss = loss + weight_loss * 10\n batch_stats = Statistics(float(total_loss.cpu().data.numpy()), len(labels))\n stats.update(batch_stats)\n\n sent_scores = sent_scores + mask.float()\n sent_scores = sent_scores.cpu().data.numpy()\n cluster_weight = cluster_weight.cpu().data.numpy()\n selected_ids = np.argsort(-sent_scores, 1)\n cluster_weight = np.argsort(cluster_weight)\n # print(selected_ids)\n # selected_ids = np.sort(selected_ids,1)\n cluster_num = len(cluster_weight)\n for i, idx in enumerate(selected_ids):\n rank = np.where(cluster_weight == i)[0][0]\n\n if rank <= max(cluster_num // 6, 6):\n for j in range(5):\n sen_ind = selected_ids[i][j]\n _pred = batch.src_str[i][sen_ind].strip()\n pred.append((index[i][sen_ind], _pred))\n elif rank <= max(cluster_num // 3, 10):\n for j in range(3):\n sen_ind = selected_ids[i][j]\n _pred = batch.src_str[i][sen_ind].strip()\n pred.append((index[i][sen_ind], _pred))\n elif rank <= max(cluster_num * 2 // 3, 15):\n for j in range(2):\n sen_ind = selected_ids[i][j]\n _pred = batch.src_str[i][sen_ind].strip()\n pred.append((index[i][sen_ind], _pred))\n else:\n sen_ind = selected_ids[i][0]\n _pred = batch.src_str[i][sen_ind].strip()\n pred.append((index[i][sen_ind], _pred))\n\n gold_summary = (batch.tgt_str[0].strip())\n pred.sort(key=lambda x: x[0])\n for i in range(len(pred)):\n save_story.write(pred[i][1].strip() + '\\n')\n if i == 0:\n save_pred.write(pred[i][1].strip())\n else:\n save_pred.write('<q> ' + pred[i][1].strip())\n save_gold.write(gold_summary)\n for sent in gold_summary.split('<q>'):\n save_story.write('@highlight {}\\n'.format(sent))\n if self.args.test_txt:\n return stats\n else:\n rouges = calculate_rouge(can_path, gold_path)\n logger.info('Rouges at step %d \\n%s' % (step, rouge_results_to_str(rouges)))\n self._report_step(0, step, valid_stats=stats)\n return stats, rouges",
"def train_epoch(self):\n # We can't validate a winner for submissions generated by the learner,\n # so we will use a winner-less match when getting rewards for such states\n blank_match = {\"winner\":None}\n\n learner_submitted_actions = 0\n null_actions = 0\n\n # Shuffle match presentation order\n if(self.N_TEMP_TRAIN_MATCHES):\n path_to_db = \"../data/competitiveMatchData.db\"\n sources = {\"patches\":self.TEMP_TRAIN_PATCHES, \"tournaments\":[]}\n print(\"Adding {} matches to training pool from {}.\".format(self.N_TEMP_TRAIN_MATCHES, path_to_db))\n temp_matches = pool.match_pool(self.N_TEMP_TRAIN_MATCHES, path_to_db, randomize=True, match_sources=sources)[\"matches\"]\n else:\n temp_matches = []\n data = self.training_data + temp_matches\n\n shuffled_matches = random.sample(data, len(data))\n for match in shuffled_matches:\n for team in self.teams:\n # Process match into individual experiences\n experiences = mp.process_match(match, team)\n for pick_id, experience in enumerate(experiences):\n # Some experiences include NULL submissions (usually missing bans)\n # The learner isn't allowed to submit NULL picks so skip adding these\n # to the buffer.\n state,actual,_,_ = experience\n (cid,pos) = actual\n if cid is None:\n null_actions += 1\n continue\n # Store original experience\n self.replay.store([experience])\n self.step_count += 1\n\n # Give model feedback on current estimations\n if(self.step_count > self.observations):\n # Let the network predict the next action\n feed_dict = {self.ddq_net.online_ops[\"input\"]:[state.format_state()],\n self.ddq_net.online_ops[\"valid_actions\"]:[state.get_valid_actions()]}\n q_vals = self.ddq_net.sess.run(self.ddq_net.online_ops[\"valid_outQ\"], feed_dict=feed_dict)\n sorted_actions = q_vals[0,:].argsort()[::-1]\n top_actions = sorted_actions[0:4]\n\n if(random.random() < self.epsilon):\n pred_act = random.sample(list(top_actions), 1)\n else:\n # Use model's top prediction\n pred_act = [sorted_actions[0]]\n\n for action in pred_act:\n (cid,pos) = state.format_action(action)\n if((cid,pos)!=actual):\n pred_state = deepcopy(state)\n pred_state.update(cid,pos)\n r = get_reward(pred_state, blank_match, (cid,pos), actual)\n new_experience = (state, (cid,pos), r, pred_state)\n\n self.replay.store([new_experience])\n learner_submitted_actions += 1\n\n if(self.epsilon > 0.1):\n # Reduce epsilon over time\n self.epsilon -= self.eps_decay_rate\n\n # Use minibatch sample to update online network\n if(self.step_count > self.pre_training_steps):\n self.train_step()\n\n if(self.step_count % self.target_update_frequency == 0):\n # After the online network has been updated, update target network\n _ = self.ddq_net.sess.run(self.ddq_net.target_ops[\"target_update\"])\n\n # Get training loss, training_acc, and val_acc to return\n loss, train_acc = self.validate_model(self.training_data)\n _, val_acc = self.validate_model(self.validation_data)\n return (loss, train_acc, val_acc)",
"def TrainEpoch(ss):\n ss.StopNow = False\n curEpc = ss.TrainEnv.Epoch.Cur\n while True:\n ss.TrainTrial()\n if ss.StopNow or ss.TrainEnv.Epoch.Cur != curEpc:\n break\n ss.Stopped()",
"def train(self)->None:",
"def train():\n pass",
"def nn_vs_random(nn_batch, game_num):\n uniform_net = UniformPredictionNet(path_to_model = '/', board_dimension = BOARD_DIM)\n utils = GoUtils()\n count_nn_winning = 0\n count_random_winning = 0\n alphago0 = AlphaGoZero(model_path=\"../models/batch_\" + str(nn_batch), restored=True)\n \n for i in range(game_num):\n print()\n print(\"game number \", i)\n game_over = False\n board = GoBoard(board_dimension=BOARD_DIM, player=PLAYER_BLACK)\n while not game_over:\n #Raw NN plays black \n if board.player == PLAYER_BLACK:\n print(\"Raw NN plays\")\n move, _ = alphago0.play_with_raw_nn(board)\n else:\n print(\"Random plays\")\n p, _ = uniform_net.predict(board)\n move = random.choice([move for move in p.keys() if p[move] > 0])\n\n print(\"\\t move is\", move)\n\n _, board = utils.make_move(board=board, move=move)\n\n if utils.is_game_finished(board) or len(board.game_history) > BOARD_DIM**2*2:\n game_over = True\n winner, winning_by_points = utils.evaluate_winner(board.board_grid)\n if winning_by_points > 0:\n if winner == 1:\n count_nn_winning += 1\n elif winner == -1:\n count_random_winning += 1\n print(\"winner is \", winner)\n print(\"winning by points\", winning_by_points)\n print(board)\n\n return count_nn_winning, count_random_winning",
"def fit_epoch_single(self, num_games: int = 1, worker_idx: int = 0) -> None:\n states = []\n policies = []\n values = []\n\n for game in range(num_games):\n start_state = self.env.random_state()\n s, pi, r = self.play(worker_idx, start_state, clear=True)\n\n states.append(s)\n policies.append(pi)\n values.append(r)\n\n states = np.concatenate(states)\n policies = np.concatenate(policies)\n values = np.concatenate(values)\n\n self.network_manager.fit(states, policies, values)",
"def run_one_epoch(self, dataset, phase, lr=None):\n epoch_loss = []\n epoch_predictions = []\n for x_input in dataset.get_batch_data():\n loss, prediction = self.model_wrapper.run_batch(x_input,\n lr,\n phase=phase)\n epoch_loss.append(loss)\n epoch_predictions.append(prediction)\n\n epoch_loss = np.array(epoch_loss)\n\n epoch_predictions = self.concat_element(epoch_predictions)\n\n if phase == RunnerPhase.PREDICT:\n epoch_predictions = dataset.get_last_inversed_pred(epoch_predictions)\n return epoch_loss, epoch_predictions\n else:\n epoch_predictions, epoch_labels = dataset.get_masked_inversed_pred_and_label(epoch_predictions)\n return epoch_loss, epoch_predictions, epoch_labels",
"def train(self, num_batches: int):",
"def train_step(self):\n pass",
"def eval_epoch(self, final=False, save_predictions=False):\n t1 = time()\n output = {'tp': [], 'fp': [], 'fn': [], 'tn': [], 'loss': [], 'preds': [],'truth': [], 'true': 0,'true_sep':np.zeros(self.rel_size)}\n test_info = []\n test_result = []\n self.model.eval()\n test_iter = self.iterator(self.data['test'], batch_size=self.params['batch'], shuffle_=False)\n # preds=[]\n # truths=[]\n for batch_idx, batch in enumerate(test_iter):\n batch = self.convert_batch(batch, istrain=False, save=True)\n\n with torch.no_grad():\n loss, stats, predictions, select, pred_pairs, multi_truths, mask, _ = self.model(\n batch) # pred_pairs <#pair, relations_num>\n pred_pairs = torch.sigmoid(pred_pairs)\n\n output['loss'] += [loss.item()]\n output['tp'] += [stats['tp'].to('cpu').data.numpy()]\n output['fp'] += [stats['fp'].to('cpu').data.numpy()]\n output['fn'] += [stats['fn'].to('cpu').data.numpy()]\n output['tn'] += [stats['tn'].to('cpu').data.numpy()]\n output['preds'] += [predictions.to('cpu').data.numpy()]\n # preds.extend(predictions.to('cpu').data.numpy())\n # truths.extend(truth.to('cpu').data.numpy())\n\n if True:\n test_infos = batch['info'][select[0].to('cpu').data.numpy(),\n select[1].to('cpu').data.numpy(),\n select[2].to('cpu').data.numpy()][mask.to('cpu').data.numpy()]\n test_info += [test_infos]\n\n pred_pairs = pred_pairs.data.cpu().numpy()\n multi_truths = multi_truths.data.cpu().numpy()\n output['true'] += multi_truths.sum() - multi_truths[:, self.loader.label2ignore].sum()\n output['true_sep'] = output['true_sep'] +multi_truths.sum(axis=0)\n if save_predictions:\n assert test_infos.shape[0] == len(pred_pairs), print(\n \"test info=%d, pred_pair=%d\" % (len(test_infos.shape[0]), len(pred_pairs)))\n for pair_id in range(len(pred_pairs)):\n multi_truth = multi_truths[pair_id] #第pair_id个实体对的true\n for r in range(0, self.rel_size):\n if r == self.loader.label2ignore:\n continue\n\n test_result.append((int(multi_truth[r]) == 1, float(pred_pairs[pair_id][r]),\n test_infos[pair_id]['intrain'],test_infos[pair_id]['cross'], self.loader.index2rel[r], r,\n len(test_info) - 1, pair_id))\n\n\n # estimate performance\n total_loss, scores = self.performance(output)\n # pairs*rel_size*batch\n test_result.sort(key=lambda x: x[1], reverse=True)\n\n input_theta, w, f1,p,r,scores_class = self.tune_f1_theta(test_result, output['true'],output['true_sep'], self.params['input_theta'], isTest=save_predictions)\n\n t2 = time()\n if not final:\n self.test_res['loss'] += [total_loss]\n # self.test_res['score'] += [scores[self.primary_metric]]\n self.test_res['score'] += [f1]\n self.test_res['p'] = p\n self.test_res['r'] = r\n print(' TEST | LOSS = {:.05f}, '.format(total_loss), end=\"\")\n print_results(scores, scores_class, self.show_class, t2 - t1)\n # print(\"不同类别:\")\n # t = classification_report(truths, preds,target_names=[\"NA\",\"父母子女\", \"祖孙\", \"兄弟姐妹\", \"叔伯姑舅姨\", \"夫妻\", \"其他亲戚\", \"好友\", \"上下级\", \"师生\", \"合作\", \"情侣\", \"对立\", \"共现\", \"同学\", \"同门\"])\n # print(t)\n\n if save_predictions:\n\n test_result = test_result[: w + 1]\n test_result_pred = []\n test_result_info = []\n for item in test_result:\n test_result_pred.append([(item[-3], item[1])]) #预测的关系是的概率\n test_result_info.append([test_info[item[-2]][item[-1]]])\n assert (item[-3] in test_info[item[-2]][item[-1]]['rel']) == item[0], print(\"item\\n\", item, \"\\n\",\n test_info[item[-2]][\n item[-1]])\n write_errors(test_result_pred, test_result_info, self.preds_file, map_=self.loader.index2rel, type=\"theta\")\n write_preds(test_result_pred, 
test_result_info, self.preds_file, map_=self.loader.index2rel)\n # f1_score_t=f1_score(truths, preds, average='micro')\n # print(f1, scores['micro_f'], f1_score_t)\n\n return f1, scores['micro_f'],input_theta,p,r",
"def first_round_history(self):\n self.ts_dict = self.get_tourney_slots()\n self.tsr_dict = self.match_seeds()\n first_seed_win = 0\n second_seed_win = 0\n third_seed_win = 0\n fourth_seed_win = 0\n fifth_seed_win = 0\n sixth_seed_win = 0\n seventh_seed_win = 0\n eighth_seed_win = 0\n total_games = 128\n\n for year1 in self.ts_dict: \n for slot, match_up in self.ts_dict[year1].items():\n if slot[:2] == \"R1\":\n for year2 in self.tsr_dict:\n if year1 == year2:\n for winning, losing in self.tsr_dict[year2].items():\n if winning[5:] == match_up[:3]:\n seed = winning[6:] \n if seed == \"01\":\n first_seed_win += 1\n elif seed == \"02\":\n second_seed_win += 1\n elif seed == \"03\":\n third_seed_win += 1\n elif seed == \"04\":\n fourth_seed_win += 1\n elif seed == \"05\":\n fifth_seed_win += 1\n elif seed == \"06\":\n sixth_seed_win += 1\n elif seed == \"07\":\n seventh_seed_win += 1\n elif seed == \"08\":\n eighth_seed_win += 1 \n \n #print(first_seed_win, second_seed_win, third_seed_win, fourth_seed_win, fifth_seed_win, sixth_seed_win, seventh_seed_win, eighth_seed_win, total_games)\n\n gauge = pygal.SolidGauge(inner_radius=0.70, title=\"NCAA First Round Results\")\n ratio_first_seed = int(first_seed_win / total_games * 100)\n ratio_second_seed = int(second_seed_win / total_games * 100)\n ratio_third_seed = int(third_seed_win / total_games * 100)\n ratio_fourth_seed = int(fourth_seed_win / total_games * 100)\n ratio_fifth_seed = int(fifth_seed_win / total_games * 100)\n ratio_sixth_seed = int(sixth_seed_win / total_games * 100)\n ratio_seventh_seed = int(seventh_seed_win / total_games * 100)\n ratio_eighth_seed = int(eighth_seed_win / total_games * 100) \n\n percent_formatter = lambda x: '{:.10g}%'.format(x)\n gauge.value_formatter = percent_formatter\n gauge.add('1 vs. 16', [{'value': ratio_first_seed, 'max_value': 100}])\n gauge.add('2 vs. 15', [{'value': ratio_second_seed, 'max_value': 100}])\n gauge.add('3 vs. 14', [{'value': ratio_third_seed, 'max_value': 100}])\n gauge.add('4 vs. 13', [{'value': ratio_fourth_seed, 'max_value': 100}])\n gauge.add('5 vs. 12', [{'value': ratio_fifth_seed, 'max_value': 100}])\n gauge.add('6 vs. 11', [{'value': ratio_sixth_seed, 'max_value': 100}])\n gauge.add('7 vs. 10', [{'value': ratio_seventh_seed, 'max_value': 100}])\n gauge.add('8 vs. 9', [{'value': ratio_eighth_seed, 'max_value': 100}])\n \n gauge.render_to_file('chart.svg')",
"def train(self):\n pass",
"def train(self):\n pass",
"def train(self):\n pass",
"def train(self):\n pass",
"def train(self):\n pass",
"def run_epoch(model, data, id_2_word, is_train=False, is_test=False, lr=1.0):\n if is_train:\n model.train() # train the model\n else:\n model.eval() # test or validate the model\n\n future_word_num = args.future_word_num\n epoch_size = ((len(data) // model.module.batch_size) - future_word_num) // model.module.num_steps\n start_time = time.time()\n hidden = model.module.init_hidden()\n\n costs = 0.0\n iters = 0\n # total = 0\n # correct = 0\n # total_train = 0\n # correct_train = 0\n true_pos = 0\n false_pos = 0\n false_neg = 0\n\n for step, (x, y) in enumerate(reader.ptb_iterator(data, model.module.batch_size, model.module.num_steps, future_word_num)):\n\n inputs = Variable(torch.from_numpy(x.astype(np.int64)).transpose(0,1).contiguous()).cuda()\n #print(inputs.size())\n #print(inputs)\n # model.zero_grad() # clear the gradient in previous step\n\n hidden = repackage_hidden(hidden) # type(hidden) is 'tuple'\n outputs, hidden = model(inputs, hidden)\n\n # outputs = F.sigmoid(outputs);\n\n # targets = Variable(torch.from_numpy(y.astype(np.int64)).transpose(0,1).contiguous()).cuda()\n \n\n # tt = torch.squeeze(targets.view(-1, model.module.batch_size * model.module.num_steps))\n # reshape y into a 1-d tensor\n\n index = []\n for j in range(y.shape[1]-future_word_num+1):\n pair = y[:, j:j+future_word_num]\n index.append(pair)\n\n index_ = np.asarray(index)\n target_loss = []\n for i in range(model.module.num_steps):\n t = index_[i]\n for j in range(model.module.batch_size):\n t_ = t[j]\n tt = np.zeros(vocab_size, dtype=np.int64)\n tt[t_] = 1\n target_loss.append(tt)\n\n targetLoss = np.asarray(target_loss)\n targetLoss = Variable(torch.from_numpy(targetLoss).contiguous()).float().cuda()\n\n # outputs.view(-1, model.vocab_size).size() = 700 x 10000\n # tt.size() = 700\n # inp = torch.squeeze(inputs.view(-1, model.batch_size * model.num_steps))\n out_loss = outputs.view(-1, model.module.vocab_size)\n # max_val, index = torch.max(out_loss, dim=1)\n\n # ######\n # word_inp = []\n # word_pred = []\n # word_tt = []\n # word_id_pred = []\n # word_id_tt = []\n\n # for i in range(list(index.size())[0]):\n # ind_inp = inp.data[i]\n # w_inp = id_2_word[ind_inp]\n # word_inp.append(w_inp)\n\n # ind_pred = list(index.data[i])[0]\n # w_pred = id_2_word[ind_pred]\n # word_pred.append(w_pred)\n # word_id_pred.append(ind_pred)\n\n # ind_tt = tt.data[i]\n # w_tt = id_2_word[ind_tt]\n # word_tt.append(w_tt)\n # word_id_tt.append(ind_tt)\n \n # word_inp_print = np.reshape(word_inp, (model.num_steps, model.batch_size)).T\n # word_pred_print = np.reshape(word_pred, (model.num_steps, model.batch_size)).T\n # word_tt_print = np.reshape(word_tt, (model.num_steps, model.batch_size)).T\n # word_id_pred_ = np.reshape(word_id_pred, (model.num_steps, model.batch_size)).T\n # word_id_tt_ = np.reshape(word_id_tt, (model.num_steps, model.batch_size)).T\n # pred_word_id = np.asarray(word_id_pred_)\n # target_word_id = np.asarray(word_id_tt_)\n ######\n\n # loss = criterion(out_loss, tt)\n loss = criterion(out_loss, targetLoss)\n # loss.data[0] -> get the loss value\n\n costs += loss.data[0] * model.module.num_steps\n iters += model.module.num_steps\n\n if is_train:\n optimizer.zero_grad()\n loss.backward() # backward propagation\n torch.nn.utils.clip_grad_norm(model.parameters(), 0.25) # prevent gradient exploding\n optimizer.step()\n #for name, p in model.named_parameters():\n # \"\"\"if p.requires_grad:\n # print(name, p.data.size()) \"\"\"\n #p.data.add_(-lr, p.grad.data) # update the weight and bias\n if step % 
(epoch_size // 10) == 10:\n print(\"{} loss: {:8.5f}\".format(step * 1.0 / epoch_size, (costs/iters)))\n # print(\"{} perplexity: {:8.2f} speed: {} wps\".format(step * 1.0 / epoch_size, np.exp(costs / iters),\n # iters * model.batch_size / (time.time() - start_time)))\n \n # print(\"input:\")\n # print(word_inp_print)\n # print(\"----------------------\")\n # print(\"predict:\")\n # print(word_pred_print)\n # print(\"----------------------\")\n # print(\"target:\")\n # print(word_tt_print)\n\n # savewords(word_inp_print, 'input_train')\n # savewords(word_pred_print, 'predict_train')\n # savewords(word_tt_print, 'target_train')\n # elif is_test:\n # savewords(word_inp_print, 'input_test')\n # savewords(word_pred_print, 'predict_test')\n # savewords(word_tt_print, 'target_test')\n\n if is_train: \n diff_train = (torch.sign(out_loss) - targetLoss).data.cpu().numpy()\n tp = (diff_train == 0).sum()\n fp = (diff_train == 1).sum()\n fn = (diff_train == -2).sum()\n true_pos += tp\n false_pos += fp\n false_neg += fn\n\n if (is_train == False):\n diff_ = (torch.sign(out_loss) - targetLoss).data.cpu().numpy()\n tp = (diff_ == 0).sum()\n fp = (diff_ == 1).sum()\n fn = (diff_ == -2).sum()\n true_pos += tp\n false_pos += fp\n false_neg += fn\n\n if is_train:\n precision = true_pos / (true_pos + false_pos)\n recall = true_pos / (true_pos + false_neg)\n f1_score = 2 * precision * recall / (precision + recall)\n\n print(\"Training Precision: {:8.5f}\".format(precision))\n print(\"Training Recall: {:8.5f}\".format(recall))\n print(\"Training F1 score: {:8.5f}\".format(f1_score))\n\n if (is_train == False):\n precision = true_pos / (true_pos + false_pos)\n recall = true_pos / (true_pos + false_neg)\n f1_score = 2 * precision * recall / (precision + recall)\n\n print(\"Precision: {:8.5f}\".format(precision))\n print(\"Recall: {:8.5f}\".format(recall))\n print(\"F1 score: {:8.5f}\".format(f1_score))\n\n\n # if is_train:\n # total_train += model.batch_size \n # last = pred_word_id.shape[1]-1\n\n # for i in range(pred_word_id.shape[0]):\n # if (pred_word_id[i][last]==target_word_id[i][last]):\n # correct_train += 1\n\n # if (is_train == False):\n # total += model.batch_size\n # last = pred_word_id.shape[1]-1\n\n # for i in range(pred_word_id.shape[0]):\n # if (pred_word_id[i][last]==target_word_id[i][last]):\n # correct += 1\n\n\n # if is_train:\n # train_accuracy = correct_train / total_train * 100\n # print(\"accuracy: {:8.2f}\".format(train_accuracy))\n\n # if (is_train == False):\n # accuracy = correct / total * 100\n # print(\"accuracy: {:8.2f}\".format(accuracy))\n\n return (costs / iters)\n # return np.exp(costs / iters) ",
"def step(self):\n fit_default_config = {\"verbose\": self.verbose}\n fit_default_config.update(self.config.get(\"fit_config\", {}))\n\n history = self.model.fit(self.train_dataset, **fit_default_config)\n if history is None:\n stats = {}\n else:\n stats = {\"train_\" + k: v[-1] for k, v in history.history.items()}\n\n self.epoch += 1\n return stats",
"def train(self, training_steps=10):",
"def train(self, batch):\n pass",
"def ai_vs_random(nn_batch, ai_simulation_num, game_num):\n uniform_net = UniformPredictionNet(path_to_model = '/', board_dimension = BOARD_DIM)\n utils = GoUtils()\n count_nn_winning = 0\n count_random_winning = 0\n alphago0 = AlphaGoZero(model_path=\"../models/batch_\" + str(nn_batch), restored=True)\n \n for i in range(game_num):\n print()\n print(\"game number \", i)\n game_over = False\n board = GoBoard(board_dimension=BOARD_DIM, player=PLAYER_BLACK)\n while not game_over:\n #AlphaGo with MCTS plays black \n if board.player == PLAYER_BLACK:\n print(\"AlphaGo Zero plays\")\n move = alphago0.play_with_mcts(board, simulation_number=mcts_simulation_num)\n else:\n print(\"Random plays\")\n p, _ = uniform_net.predict(board)\n move = random.choice([move for move in p.keys() if p[move] > 0])\n \n print(\"\\t move is\", move)\n\n _, board = utils.make_move(board=board, move=move)\n\n if utils.is_game_finished(board) or len(board.game_history) > BOARD_DIM**2*2:\n game_over = True\n winner, winning_by_points = utils.evaluate_winner(board.board_grid)\n if winning_by_points > 0:\n if winner == 1:\n count_nn_winning += 1\n elif winner == -1:\n count_random_winning += 1\n print(\"winner is \", winner)\n print(\"winning by points\", winning_by_points)\n print(board)\n\n return count_nn_winning, count_random_winning",
"def eval_teams(sess, model):\n game_to_teams=load_obj(\"game_to_teams\")\n team_q_values={}\n game_number = 0\n global_counter = 0\n converge_flag = False\n\n # loading network\n saver = tf.train.Saver()\n merge = tf.summary.merge_all()\n\n sess.run(tf.global_variables_initializer())\n\n ## Preload and resume training\n if model_train_continue:\n checkpoint = tf.train.get_checkpoint_state(SAVED_NETWORK)\n if checkpoint and checkpoint.model_checkpoint_path:\n check_point_game_number = int((checkpoint.model_checkpoint_path.split(\"-\"))[-1])\n game_number_checkpoint = check_point_game_number % number_of_total_game\n game_number = check_point_game_number\n game_starting_point = 0\n saver.restore(sess, checkpoint.model_checkpoint_path)\n print(\"Successfully loaded:\", checkpoint.model_checkpoint_path)\n else:\n print(\"Could not find old network weights\")\n\n iteration_now=0\n ## Training loop\n iteration_now +=1\n \n num_teams=200\n ##Read in reward, state, and trace from files\n game_files = os.listdir(DATA_STORE)\n game_info_list=[]\n teams=[]\n for filename in game_files:\n game_info_list.append(np.load(\"./pickles/\"+filename[:],allow_pickle=True)) \n print(\"same Length?:\",len(game_info_list)==len(game_files))\n for game_number,game in enumerate(game_info_list[-num_teams:]):\n print(game_number)\n # try:\n home_team=game_to_teams[\"./pickles/\"+game_files[-num_teams+game_number][:-4]][0]\n away_team=game_to_teams[\"./pickles/\"+game_files[-num_teams+game_number][:-4]][1]\n if home_team not in team_q_values:\n team_q_values[home_team]={\"games\":0,\"possesions\":0,\"total_value\":0,\"movements\":0}\n if away_team not in team_q_values:\n team_q_values[away_team]={\"games\":0,\"possesions\":0,\"total_value\":0,\"movements\":0}\n team_q_values[home_team][\"games\"]+=1\n team_q_values[away_team][\"games\"]+=1\n for reward, episode, episode_length,event_type,final_tl,possession in game:\n # s_t0 = observations[train_number]\n team_q_values[home_team][\"possesions\"]+=1\n team_q_values[away_team][\"possesions\"]+=1\n possession_number=0\n s_t0 = episode[possession_number]\n possession_number+=1\n \n while possession_number<len(episode):\n # try:\n batch_return, possession_number, s_tl = get_nba_possessesion_batch(s_t0,episode,reward,possession_number,final_tl,1,event_type,BATCH_SIZE)\n\n # get the batch variables\n s_t0_batch = [d[0] for d in batch_return]\n s_t1_batch = [d[1] for d in batch_return]\n r_t_batch = [d[2] for d in batch_return]\n trace_t0_batch=[1 for i in s_t0_batch]\n trace_t1_batch=[1 for i in s_t1_batch]\n # trace_t0_batch = [d[3] for d in batch_return]\n # trace_t1_batch = [d[4] for d in batch_return]\n y_batch = []\n\n [outputs_t1, readout_t1_batch] = sess.run([model.outputs, model.read_out],\n feed_dict={model.trace_lengths: trace_t0_batch,\n model.rnn_input: s_t0_batch})\n home_values=0\n away_values=0\n movements=len(readout_t1_batch)\n for home,away in readout_t1_batch:\n home_values+=home\n away_values+=away\n\n team_q_values[home_team][\"total_value\"]+=home_values\n team_q_values[home_team][\"movements\"]+=movements\n\n team_q_values[away_team][\"total_value\"]+=away_values\n team_q_values[away_team][\"movements\"]+=movements\n # except:\n # print(\"errored\")\n return team_q_values",
"def run_tournament(genes):\n return _run_tournament(genes) / (len(genes) - 1)"
] | [
"0.6938331",
"0.6872798",
"0.6221495",
"0.61553615",
"0.6041733",
"0.5884427",
"0.58615357",
"0.5858718",
"0.58503413",
"0.584667",
"0.58107626",
"0.5793915",
"0.57814837",
"0.5771732",
"0.57555825",
"0.57402873",
"0.5730085",
"0.5728672",
"0.57039595",
"0.57039595",
"0.57039595",
"0.57039595",
"0.57039595",
"0.5701828",
"0.5697149",
"0.56890666",
"0.56638396",
"0.566164",
"0.56405735",
"0.5621132"
] | 0.7137898 | 0 |
Get scores for all the teams | def get_scores(self, tournament: Tournament):
self.model.eval()
# collate_fn = lambda x: collate_teams(x, tournament.max_members)
dl_rank = DataLoader(tournament.ranking, num_workers=self.jobs, batch_size=self.bs, shuffle=False)
iterator = tqdm(dl_rank, position=0, desc=f'{tournament.tournament_id} ranking', disable=True)
scores = []
for i, team in enumerate(iterator):
score = self.model.get_team_score(team.to(self.device))
scores.append(score.cpu().numpy())
scores = np.concatenate(scores)
return scores.flatten() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_list_team_scores(self):\n scores = defaultdict(lambda: {\n \"scored_xg\": [],\n \"conceded_xg\": [],\n \"home_adv\": 0,\n \"expected_points\": 0\n })\n\n for g in self.games:\n scores[g.HomeTeam][\"scored_xg\"].append(g.FTHG)\n scores[g.HomeTeam][\"conceded_xg\"].append(g.FTAG)\n scores[g.AwayTeam][\"scored_xg\"].append(g.FTAG)\n scores[g.AwayTeam][\"conceded_xg\"].append(g.FTHG)\n\n for team in scores.keys():\n scores[team][\"expected_points\"] = (self.get_table(metric='points')[team] /\n len(scores[team][\"scored_xg\"]))\n\n return scores",
"def get_teams():",
"def get_team_scores(self, team, include_home=True, include_away=True):\n if include_away:\n away_games = list(filter(lambda g: team == g.AwayTeam, self.games))\n else:\n away_games = []\n\n if include_home:\n home_games = list(filter(lambda g: team == g.HomeTeam, self.games))\n else:\n home_games = []\n\n scored_h = [g.FTHG for g in home_games]\n scored_a = [g.FTAG for g in away_games]\n\n conceded_h = [g.FTAG for g in home_games]\n conceded_a = [g.FTHG for g in away_games]\n\n try:\n mean_gd = mean(scored_h + scored_a) - mean(conceded_h + conceded_a)\n home_gd = mean(scored_h) - mean(conceded_h)\n home_adv = home_gd - mean_gd\n except Exception:\n home_adv = 0\n\n return {\n \"scored_xg\": scored_h + scored_a,\n \"conceded_xg\": conceded_h + conceded_a,\n \"home_adv\": home_adv,\n \"expected_points\": self.get_table(metric='points')[team] /\n len(home_games + away_games)\n }",
"def get_scores(self):\n return [(self.players[p.get_color()], p.get_score()) for p in self.state.get_players()]",
"def _get_current_teams_score(self):\n for game in self._get_live_games():\n teams_playing = [x['abbreviation'] for index, x in game['teams'].items()]\n if self.team in teams_playing:\n # Our team is playing in this game, get the score \n return int(game['scores'][self.team])",
"def getScores():\r\n results = \"\"\r\n with sqlite3.connect(database_file) as conn:\r\n cursor = conn.cursor()\r\n team_scores = cursor.execute(\"\"\" SELECT * FROM scores;\"\"\")\r\n\r\n for row in team_scores.fetchall():\r\n teamname, auto, rc, spirit, video = row\r\n results += result_string.format(teamname, auto, rc, spirit, video) + \"\\n\"\r\n return results",
"def calculate_scores():\n all_people = models.Leaderboard.query.order_by(\n models.Leaderboard.score.desc()).all()\n print(all_people)\n users = []\n scores = []\n for person in all_people:\n users.append(person.username)\n scores.append(person.score)\n return users, scores",
"def perform_get_scores(responder, options):\n match = options['<match-id>']\n all_scores = scores.get_match_scores(match)\n\n if options.get(yaml_opt, False):\n responder(yaml.dump({'scores': all_scores}))\n else:\n if all_scores is None:\n responder('No scores available for match {0}'.format(match))\n else:\n for tla, score in all_scores.iteritems():\n responder('Team {0} scored {1} in match {2}'.format(tla, score, match))",
"def calculate_scores(players):\n scores = {}\n for player in players.tuple_:\n scores[player.id_] = player.score()\n return scores",
"def fetch_teams_stats():\n teams_scraper = TeamStatsScraper(API_URL, API_HEADERS)\n result = teams_scraper.save_objects()\n return result",
"def get_scores(self):\n return self.score",
"def get_fb_team_rankings(self):\n\n ranks = []\n self._logger.debug(\"Getting foosball team rankings\")\n\n try:\n self.check_if_db_connected()\n cursor = self._db_conn.cursor()\n cursor.execute(\"SELECT team_id, team_name FROM team\")\n teams = cursor.fetchall()\n\n for team_id, team_name in teams:\n cursor.execute(\"SELECT fb_team_rating FROM \\\nteam WHERE team_id = {0}\".format(team_id))\n team_rating = cursor.fetchall()[0]\n\n cursor.execute(\"SELECT mu, sigma FROM rating WHERE rating_id \\\n= {0}\".format(team_rating[0]))\n mu, sigma = cursor.fetchall()[0]\n\n team_rank = float(mu) - (3 * float(sigma))\n\n # get player_ids\n cursor.execute(\"SELECT player from player_team_xref \\\nWHERE team = {0}\".format(team_id))\n players = cursor.fetchall()\n player_one = players[0]\n player_two = players[1]\n\n cursor.execute(\"SELECT first_name FROM player WHERE \\\nplayer_id = {0}\".format(player_one[0]))\n player_one_name = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT first_name FROM player WHERE \\\nplayer_id = {0}\".format(player_two[0]))\n player_two_name = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT COUNT(result_id) FROM fb_result WHERE \\\n(offense_winner = {0} AND defense_winner = {1}) OR (offense_winner = {1} \\\nAND defense_winner = {0})\".format(player_one[0], player_two[0]))\n team_win_count = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT COUNT(result_id) FROM fb_result WHERE \\\n(offense_loser = {0} AND defense_loser = {1}) OR (offense_loser = {1} \\\nAND defense_loser = {0})\".format(player_one[0], player_two[0]))\n team_loss_count = cursor.fetchone()[0]\n\n intermediate_rank = (team_name, round(team_rank, 4),\n team_win_count, team_loss_count, player_one_name,\n player_two_name)\n ranks.append(intermediate_rank)\n del intermediate_rank\n\n except MySQLdb.OperationalError:\n self._logger.error(\"MySQL operational error occured\")\n traceback.print_exc()\n raise exceptions.DBConnectionError(\"Cannot connect to MySQL server\")\n\n except MySQLdb.ProgrammingError:\n self._logger.error(\"MySQL programming error\")\n traceback.print_exc()\n raise exceptions.DBSyntaxError(\"MySQL syntax error\")\n\n else:\n return ranks",
"def find_all(self):\n cursor = self._connection.cursor()\n cursor.execute('SELECT * FROM scores ORDER BY level')\n all_scores = cursor.fetchall()\n return all_scores",
"def get_teams(self):\n url = 'teams'\n result = self.get(url)\n return result.get('teams', result)",
"def getScores(self,query):\n pass",
"def getAllTeams(self):\n return []",
"def get(self):\n for team in api.team.get_all_teams():\n team_id = team[\"tid\"]\n team_members = api.team.get_team_members(tid=team_id, show_disabled=False)\n all_scoreboards = api.scoreboards.get_all_scoreboards()\n member_eligibilities = dict()\n for member in team_members:\n member_eligibilities[member[\"uid\"]] = {\n scoreboard[\"sid\"]\n for scoreboard in all_scoreboards\n if api.scoreboards.is_eligible(member, scoreboard)\n }\n\n team_eligibilities = list(set.intersection(*member_eligibilities.values()))\n db = api.db.get_conn()\n db.teams.find_one_and_update(\n {\"tid\": team_id}, {\"$set\": {\"eligibilities\": team_eligibilities}}\n )\n return jsonify({\"success\": True})",
"def scoreboard(year, month, day):\n # Get data from mlbgame library\n data = mlbgame.data.get_scoreboard(year, month, day)\n # Parse through returned data\n parsed = etree.parse(data)\n root = parsed.getroot()\n output = []\n # Loop through the list of games that are returned\n for game in root:\n if game.tag == 'data':\n return []\n # Get the Team Names\n teams = game.findall('team')\n home_name = teams[0].attrib['name']\n away_name = teams[1].attrib['name']\n # Building a dictionary\n # I am really only interested in the scores.... not sure if\n # game_id is actually necessary....but here it stays\n game_data = game.find('game')\n game_id = game_data.attrib['id']\n home_team_data = teams[0].find('gameteam')\n home_team = home_name\n home_team_runs = int(home_team_data.attrib['R'])\n away_team_data = teams[1].find('gameteam')\n away_team = away_name\n away_team_runs = int(away_team_data.attrib['R'])\n score = {\n 'home_team': home_team,\n 'home_team_runs': home_team_runs,\n 'away_team': away_team,\n 'away_team_runs': away_team_runs\n }\n output.append(score)\n return output",
"def get_all_teams():\n # Try to get all teams from database\n query = Team.query\n\n try:\n teams = query.all()\n\n # If query returns no teams, return erorr\n if len(teams) == 0:\n return jsonify({'error': 'No results found!'}), 404\n\n # If no result found, return error\n except NoResultFound:\n return jsonify({'error': 'No result found!'}), 404\n\n # If some other sqlalchemy error is thrown, return error\n except SQLAlchemyError:\n return jsonify({'error': 'Some problem occurred!'}), 400\n\n # Serialize array of teams\n team_schema = TeamSchema(many=True)\n output = team_schema.dump(teams).data\n\n # Return json response\n return jsonify(\n {\n 'num_results': str(len(output)),\n 'success': 'Successfully retrieved teams!',\n 'teams': output,\n }\n ), 200",
"def get_teams():\n teams = []\n for teamId in range(1, 68):\n t = requests.get(TEAMS_URL.format(teamId)).json()\n team_list = t.get('teams')\n if team_list is None or len(team_list) == 0:\n continue\n teams.append(Team.from_json(team_list[0]))\n return teams",
"def scoreTeams(curTeams, oppTeam, pokedex, league, minDistWanted):\n battleData, htmlData = loadBattleData(league)\n similarities = loadSims() \n \n #If not given an opponent team then simply randomly choose losers from the dataset to compare to.\n if len(oppTeam) == 0:\n picks = set([])\n while (len(picks) < NUMLOSINGTEAMS and (not len(picks) == len(battleData))):\n picks.add(random.randint(0,len(battleData)-1))\n\n losers = []\n loserDict = {}\n for i in picks:\n entry = battleData[i]\n winner,loser = determineWinner(entry)\n loserDict[str(loser)] = [winner]\n losers.append( (loser,0) )\n\n #Given opponent team then find similar teams\n else:\n oppTeam = [getSimPokemon(opp,similarities) for opp in oppTeam]\n\n #create dictionary from losers team to the team that beat them.\n loserDict = {}\n sims = []\n for d in battleData:\n winner, loser = determineWinner(d)\n\n wTeam = teamToArray(winner,pokedex)\n lTeam = np.array(teamToArray(loser, pokedex))\n\n score = 0\n for oppNp in oppTeam:\n score+= np.amax(lTeam*oppNp) \n\n if str(loser) in loserDict:\n loserDict[str(loser)].append(winner)\n else:\n #new to dictonary\n loserDict[str(loser)] = [winner]\n\n sims.append((loser, score))\n\n\n sims = sorted(sims, key = lambda x : x[1], reverse = True)\n\n cutoff = min(len(sims),NUMLOSINGTEAMS)\n losers = sims[:cutoff]\n\n #Gather winners to losing teams\n winnersComp = []\n for loser,_ in losers:\n for winner in loserDict[str(loser)]:\n winnersComp.append(teamToArray(winner,pokedex))\n \n topScore = len(winnersComp)*6 #pkmn team size\n\n results = []\n inverted_idx = {}\n\n existsSet = []\n\n #Creates inverted index for teams, while simoultaneously weeding out any teams that are exactly similar.\n for i in range(len(curTeams)):\n team = curTeams[i]\n results.append((team,0))\n sTeam = set(team)\n if not (sTeam in existsSet):\n existsSet.append(sTeam)\n for pkm in team:\n if pkm != EMPTY:\n if pkm in inverted_idx:\n inverted_idx[pkm].append(i)\n else:\n inverted_idx[pkm] = [i]\n \n #Giving the similiarity scores to the winners based off of the inverted index.\n for pkm in inverted_idx:\n for winner in winnersComp:\n wArr = np.array(winner)\n #tArr = getSimPokemon(pkm,similarities)\n tArr = similarities[pkm]\n \n vals = wArr * tArr\n\n score = np.amax(vals)\n\n for i in inverted_idx[pkm]:\n results[i] = (results[i][0],results[i][1]+(score/topScore))\n\n results = sorted(results, key = lambda x : x[1], reverse = True)\n\n if len(results) < NUMTEAMSRETURN:\n if len(results) == 0:\n returnTeams = [[] for x in range(NUMTEAMSRETURN)]\n teamScores = [0 for x in range(NUMTEAMSRETURN)]\n\n else:\n returnTeams = [result[0] for result in results]\n teamScores = [result[1] for result in results]\n else:\n firstResult, firstScore = results[0]\n returnTeams = [firstResult]\n teamScores = [round(firstScore*100,1)]\n returnSets = [set(firstResult)]\n \n i = 1\n\n #Loops through results and adds teams with the proper edit distance away.\n while(len(returnTeams) < NUMTEAMSRETURN and minDistWanted > 0):\n teamToConsider,teamToConsiderScore = results[i]\n \n considerSet = set(teamToConsider)\n add = True\n ##checks the edit distance of teams is above wanted\n for team in returnSets:\n if len(team.union(considerSet)) < len(team)+minDistWanted:\n add = False\n\n ##If indeed above wanted levels then add\n if add:\n returnTeams.append(teamToConsider)\n returnSets.append(considerSet)\n teamScores.append(round(teamToConsiderScore*100,1))\n \n i+=1\n\n if i >= len(results):\n i = 1\n minDistWanted -= 1 \n \n winHtmls = []\n 
if htmlData != None:\n for team,_ in losers:\n for winner in loserDict[str(team)]:\n winHtmls.extend(htmlData[str(sorted(winner))])\n \n\n return returnTeams, teamScores, winHtmls",
"def get_all_matches_by_league(self):\n raise NotImplementedError",
"def get_matches_with_teams():\n\tf = open(\"raw_tba.json\")\n\tjsonvar = json.loads(f.read())\n\n\treturn_val = []\n\tfor i in jsonvar:\n\t\t# print i\n\t\tif \"score_breakdown\" in i and i[\"score_breakdown\"] != None:\n\t\t\treturn_val.append(FullTBAMatch(i))\n\n\treturn return_val",
"def get_team_stats(self, team_name, year):\n \n base_url = 'http://www.sports-reference.com/cbb/schools/' + \\\n team_name + '/' + str(year) + '.html'\n\n response = urllib2.urlopen(base_url)\n content = response.read()\n soup = BeautifulSoup(content)\n soup_results = soup.find('td', text='Team')\n team_stats = []\n \n if soup_results:\n soup_results = soup_results.parent()\n \n for result in soup_results[1::]:\n if result.string:\n team_stats.append(float(result.string))\n else:\n team_stats.append(None)\n else:\n team_stats += [None]*21\n\n return team_stats",
"def get_all_teams(self):\n return self._db.Teams.find({})",
"def matchscore(self):\n print(self.team1.name + \" \" + str(self.team1score) + \" - \" + str(self.team2score) + \" \" + self.team2.name)",
"def get_people(team):",
"def calculate_matches(teams: Dict[int, Team]) -> Dict[int, Match]:\r\n match_urls = TCS_Scraper.scrape_matches(end_round=CURRENT_ROUND)\r\n matches = {}\r\n for match in match_urls:\r\n print(\"Scraping\", match)\r\n team_1id, results, team_2id \\\r\n = TCS_Scraper.scrape_match(match, teams)\r\n # If nothing happened on this match page, skip it\r\n if not results:\r\n continue\r\n team_1 = teams[team_1id]\r\n team_2 = teams[team_2id]\r\n\r\n team_1elos = [team_1.elo]\r\n team_2elos = [team_2.elo]\r\n for result in results:\r\n # Calculate new elo for each team\r\n e1p, e2p = Team.calculate_elo(team_1.elo, team_2.elo, result[0])\r\n\r\n # Print elo changes for each team\r\n print(team_1.name, str(e1p - team_1.elo))\r\n print(team_2.name, str(e2p - team_2.elo))\r\n\r\n # Store the elo changes\r\n team_1elos.append(e1p)\r\n team_2elos.append(e2p)\r\n\r\n # Set new elo values\r\n team_1.elo = e1p\r\n team_2.elo = e2p\r\n\r\n # Create a new match object and append it to the list of matches\r\n new_match = Match(\r\n match,\r\n team_1id,\r\n team_2id,\r\n team_1elos,\r\n team_2elos,\r\n results\r\n )\r\n matches[new_match.id] = new_match\r\n\r\n # Add match id to each team object\r\n team_1.matches.append(new_match.id)\r\n team_2.matches.append(new_match.id)\r\n\r\n return matches",
"def set_scores(apps, schema_editor):\n\n Game = apps.get_model(\"stats\", \"Game\")\n for game in Game.objects.all():\n score_allies = 0\n score_opponents = 0\n player_stats = game.playerstat_set.all()\n for stat in player_stats:\n if stat.is_opponent:\n score_opponents += stat.scored\n else:\n score_allies += stat.scored\n\n game.score_allies = score_allies\n game.score_opponents = score_opponents\n game.save()",
"def teams(self):\n return self._get_by_class(Team)"
] | [
"0.77148676",
"0.6926027",
"0.6908442",
"0.6832203",
"0.68074715",
"0.67282873",
"0.67100906",
"0.6675928",
"0.66030395",
"0.6575878",
"0.6529839",
"0.647783",
"0.63599753",
"0.63421893",
"0.6304855",
"0.63044083",
"0.62909013",
"0.6269161",
"0.62505054",
"0.62181664",
"0.6209051",
"0.6172294",
"0.616696",
"0.61646754",
"0.61646163",
"0.6146321",
"0.6134858",
"0.6125098",
"0.6122785",
"0.610999"
] | 0.7616093 | 1 |
suit and value should be integers | def __init__(self, value, suit) -> None:
self.value = value
self.suit = suit | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, value, suit):\n self.value = value # A,2,3,4,5,6,7,8,9,10,J,Q, or K\n self.suit = suit # hearts, diamonds, clubs, spades",
"def test_is_suit_integer(self):\n self.assertIsInstance(cardutils.Card(10,1).suit, int)",
"def suit(self):\r\n\t\tsuit = self.n // 13\r\n\t\treturn suit",
"def card_value (card):\r\n value = card[0]\r\n if value in ['Jack','Queen','King']:\r\n return 10\r\n if value in [2,3,4,5,6,7,8,9,10]:\r\n return value\r\n else:\r\n raise 'CardValueError'",
"def test_value_hard_hand(self):\n hand = self._hand\n cards = [BjCard('spades', '6'), BjCard('hearts', 'A'), BjCard('clubs', 'K')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.value, 17)",
"def __init__(self,suit,rank):\n self.suit = suit\n self.rank = rank\n self.value = values[rank]",
"def get_num_suit(self):\n if self.suit == \"Diamonds\":\n return 0\n if self.suit == \"Clubs\":\n return 1\n if self.suit == \"Hearts\":\n return 2\n if self.suit == \"Spades\":\n return 3\n return -1",
"def hand_value_check(self, hand):\r\n hand_value = 0\r\n ace = 0\r\n result = []\r\n a = 0\r\n for card in hand: # calculate value of a hand\r\n if card.value < 10:\r\n a = card.value\r\n elif card.value in range(10, 14):\r\n a = 10\r\n elif card.value == 14: # keep track of Aces that may be counted both as 11 and as 1\r\n a = 11\r\n ace += 1\r\n hand_value += a\r\n\r\n if ace > 0: # if hand had aces, return all possible hand values\r\n for i in range(0, ace + 1):\r\n result.append(hand_value)\r\n hand_value -= 10\r\n self.display_hand_val = result\r\n return result\r\n else:\r\n result.append(hand_value)\r\n self.display_hand_val = result\r\n return result",
"def get_suit(self):\r\n return self.suit",
"def getSuit(self):\r\n return self.suit",
"def get_value(self):\r\n value, aces = 0, 0\r\n for card in self.hand:\r\n value += VALUES[card.get_rank()]\r\n # Keep track of the aces in Hand\r\n if card.get_rank() == \"A\":\r\n aces += 1\r\n if aces >= 1 and value + 10 <= 21:\r\n value += 10\r\n return value",
"def hand_value_check(self, hand):\r\n hand_value = 0\r\n result = []\r\n a = 0\r\n for card in hand: # calculate value of a hand\r\n if card.value < 10:\r\n a = card.value\r\n elif card.value in range(10, 14):\r\n a = 10\r\n elif card.value == 14: # keep track of Aces that may be counted both as 11 and as 1\r\n a = 11\r\n hand_value += a\r\n\r\n result.append(hand_value)\r\n self.display_hand_val = result\r\n return result",
"def get_value(self):\n global VALUES\n hand_value = 0\n has_ace = False\n\n for card in self.hand:\n v = VALUES[card.get_rank()]\n hand_value += v\n if card.get_rank() is 'A':\n has_ace = True\n\n if not has_ace:\n return hand_value\n else:\n if hand_value + 10 <= 21:\n return hand_value + 10\n else:\n return hand_value",
"def test_value(self):\n hand = self._hand\n cards = [BjCard('clubs', '10'), BjCard('diamonds', 'A')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.value, 21)",
"def test_value_soft_hand(self):\n hand = self._hand\n cards = [BjCard('diamonds', '7'), BjCard('hearts', 'A')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.value, 18)",
"def hand_value(hand):\n val = 0 \n for card in hand:\n val += card.value\n\n return val",
"def setSuit(self, arg):\n self.suit = arg",
"def calculate_value(self, hand):\n global FACE_CARDS\n #could refactor the 2 hand possiblities into methods of a Dealer and Player Class\n if hand == \"player\":\n if self.player_hand[-1].value in FACE_CARDS:\n self.player_value += 10\n elif self.player_hand[-1].value == \"A\":\n self.player_value += 11\n self.player_ace_count += 1\n else:\n self.player_value += int(self.player_hand[-1].value)\n\n if self.player_value > 21:\n if self.player_ace_count > self.player_almost_bust:\n #To prevent a Bust, your Ace became a one\n self.player_value -= 10\n self.player_almost_bust += 1\n else:\n self.player_lose()\n elif self.player_value == 21:\n self.blackjack = True\n self.endgame()\n\n elif hand == \"dealer\":\n if len(self.dealer_hand) > 1:\n if self.dealer_hand[-1].value in FACE_CARDS:\n self.dealer_value += 10\n elif self.dealer_hand[-1].value == \"A\":\n self.dealer_value += 11\n self.dealer_ace_count += 1\n else:\n self.dealer_value += int(self.dealer_hand[-1].value)\n\n if self.dealer_value > 21:\n if self.dealer_ace_count > self.dealer_almost_bust:\n #To prevent a Bust, the Dealer's Ace became a one\n self.dealer_value -= 10\n self.dealer_almost_bust += 1\n else:\n self.player_win()\n elif self.dealer_value == 21:\n self.player_lose()",
"def aces_high(card):\n if isinstance(card, Value):\n if card == Value.Ace:\n return 14\n return card.value\n\n if card.joker:\n return 15\n if card.value == Value.Ace:\n return 14\n return card.value.value",
"def get_value(self):\n \n value = 0\n ace = False\n\n for card in self.hand:\n value += VALUES[card.get_rank()]\n \n if (card.get_rank() == 'A'):\n ace = True\n \n if not ace:\n return value\n else:\n if (value + 10) <= 21:\n return (value + 10)\n else:\n return value",
"def suit(self):\n return self._suit",
"def suit(self):\n return self._suit",
"def get_value(self):\n bj_rankings = {'Ace': 11, 'King': 10, 'Queen': 10, 'Jack': 10,\n 10: 10, 9: 9, 8: 8, 7: 7, 6: 6, 5: 5, 4: 4, 3: 3, 2: 2}\n value = 0\n for card in self.cards:\n value += bj_rankings[card.rank]\n\n if value > 21:\n bj_rankings['Ace'] = 1\n value = 0\n for card in self.cards:\n value += bj_rankings[card.rank]\n return value",
"def test_value_hard_hand_two_aces(self):\n hand = self._hand\n cards = [BjCard('spades', '6'), BjCard('hearts', 'A'), BjCard('clubs', 'K'), BjCard('diamonds', 'A')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.value, 18)",
"def blackjackValue(self):\n NUMBERRANKS = [\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\"]\n FACECARDS = [\"jack\", \"queen\", \"king\"]\n ACE = [\"ace\"]\n if self.rank in NUMBERRANKS:\n return int(self.rank)\n elif self.rank in FACECARDS:\n return 10\n elif self.rank in ACE:\n return 11",
"def __init__(self, face: str, value: int, suit: str):\n self.face = face\n self.value = value\n self.suit = suit",
"def getSuit(self):\n return self.suit",
"def get_card_value(self, card):\n if card >= 10:\n return 10\n if card == 1:\n return 11\n return card",
"def __update_values(self):\r\n\r\n\t\tv = [0]\r\n\t\thas_ace = False\r\n\r\n\t\t# two values for hands with aces\r\n\t\tfor card in self.cards:\r\n\t\t\tv[0] += card.value\r\n\t\t\tif card.rank == 'Ace':\r\n\t\t\t\thas_ace = True\r\n\r\n\t\t# hand is soft if below 12\r\n\t\tif has_ace:\r\n\t\t\tif v[0] < 12:\r\n\t\t\t\tv.append(v[0] + 10)\r\n\r\n\t\tself.values = v",
"def __init__(self, suit: str, rank: str) -> None:\n self.suit = suit\n self.rank = rank\n self.value = Card.values[rank]\n self.hidden = False"
] | [
"0.7262717",
"0.7030172",
"0.67238253",
"0.66163784",
"0.65954757",
"0.65391135",
"0.6420939",
"0.6411027",
"0.6410355",
"0.6391943",
"0.6384562",
"0.63782066",
"0.63565224",
"0.63530475",
"0.6339618",
"0.63249195",
"0.62757915",
"0.62429863",
"0.6201983",
"0.61999196",
"0.6197215",
"0.6197215",
"0.6195858",
"0.6195684",
"0.6168728",
"0.6134652",
"0.6134569",
"0.61243176",
"0.6113449",
"0.608421"
] | 0.7071617 | 1 |
Return comments tree by entity or root comment | async def get_comments_tree(request):
comment_id = request.match_info.get('comment_id')
if comment_id:
        # validation was in route (\d+)
comment_id = int(comment_id)
tree = CommentsTreeDAO.create_by_parent(comment_id)
else:
entity_type = request.match_info.get('entity_type')
if not entity_type:
return web.HTTPBadRequest(reason="Entity params error!")
        # validation was in route (\d+)
entity_id = int(request.match_info.get('entity_id'))
tree = CommentsTreeDAO.create_by_entity(entity_type, entity_id)
await tree.fetch(request['conn'])
return web.json_response(await tree.rows) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_by_entity(entity_type, entity_id, only_roots=False):\n\n return CommentsTreeDAO(entity_type=entity_type, entity_id=entity_id,\n only_roots=only_roots)",
"async def fetch(self, conn, page=None, fdt=None, tdt=None):\n\n sql = \"\"\"SELECT\n comm.id,\n to_json(created_dt) as created_dt,\n entity_type,\n entity_id,\n user_id,\n u.username,\n text,\n parent_id,\n nlevel(ltree_path) as level\n FROM comments_tbl comm, users_tbl u\n WHERE comm.user_id=u.id AND NOT is_removed AND \"\"\"\n where = \"\"\n params = []\n\n if self._root_id is not None:\n root = await CommentDAO.get_by_id(conn, self._root_id)\n where += \" ltree_path <@ %s\"\n params.append(root.path)\n\n if self._entity_type is not None and self._entity_id is not None:\n if where:\n where += \" AND\"\n where += \" entity_type=%s AND entity_id=%s\"\n params.extend([self._entity_type, self._entity_id])\n\n if self._user_id is not None:\n if where:\n where += \" AND\"\n where += \" user_id=%s\"\n params.append(self._user_id)\n\n if not where:\n raise Exception(\"Sql params error\")\n\n if fdt and tdt:\n where += \" AND created_dt between %s and %s\"\n params.extend([fdt, tdt])\n\n if self._only_roots:\n where += \" AND parent_id IS NULL\"\n\n if page and where:\n where += \" LIMIT %s OFFSET %s\"\n limit = config.comments['on_page']\n offset = (page - 1)*config.comments['on_page']\n params.extend([limit, offset])\n\n sql += where\n log.debug(\"SQL: {}\".format(sql))\n\n self._result = await conn.execute(sql, params)\n return self",
"def thread(comments):\r\n \r\n ret = {'root': []}\r\n for comment in comments:\r\n if not comment.parent_id:\r\n ret['root'].append(comment)\r\n else:\r\n if comment.parent_id not in ret:\r\n ret[comment.parent_id] = []\r\n ret[comment.parent_id].append(comment)\r\n return ret",
"def generate_discreet_comment_tree(tribe):\n\n p1 = generate_random_post(tribe, user=get_random_user())\n p2 = generate_random_post(tribe, user=get_random_user(), parent_comment=p1)\n p3 = generate_random_post(tribe, user=get_random_user(), parent_comment=p1)\n p4 = generate_random_post(tribe, user=get_random_user())\n p5 = generate_random_post(tribe, user=get_random_user(), parent_comment=p4)\n p6 = generate_random_post(tribe, user=get_random_user(), parent_comment=p5)\n\n posts = [p1, p2, p3, p4, p5, p6]\n for post in posts:\n post.save()\n\n return posts",
"def get_discus_for_comment(id_article, id_comment):\n discus_id_list = list() # result id list - for easier calculations\n discus_obj_list = list() # list with Comment obj\n\n all_comments_by_article_obj = Comment.objects.filter(article=id_article).order_by('create')\n adjacent_list = list(zip(\n list(\n all_comments_by_article_obj.values_list('reply_to_comment', flat=1).filter(reply_to_comment__isnull=False)),\n list(all_comments_by_article_obj.values_list('id', flat=1).filter(reply_to_comment__isnull=False))\n ))\n\n def dfs(comment_id):\n for i in adjacent_list:\n if (comment_id in i) and (i[1] not in discus_id_list):\n discus_id_list.append(i[1])\n\n discus = Comment.objects.get(id=i[1])\n discus_obj_list.append(discus)\n\n dfs(i[1])\n\n dfs(id_comment)\n\n if len(discus_id_list) == 0:\n return None\n else:\n # return discus_id_list\n return discus_obj_list",
"def get_queryset(self, *args, **kwargs):\n return CommentQuerySet(self.model, using=self._db).order_by(\n self.tree_id_attr,\n self.left_attr\n )",
"def get_post_comments_recur(comment, comments, parent_comment_id, parent_post_id):\n if 'data' in comment:\n comment_data = comment['data']\n\n new_comment = None\n\n # a new comment exists at this layer, add it to the total list of comments\n if 'body' in comment_data:\n new_comment = {\n \"score\": comment_data['score'],\n \"body\": comment_data['body'],\n \"subreddit\": comment_data['subreddit'],\n \"author\": comment_data['author'],\n \"parent_comment_id\": parent_comment_id,\n \"parent_post_id\": parent_post_id,\n \"created\": comment_data['created'],\n \"comment_id\": comment_data['id']\n }\n comments.append(new_comment)\n\n next_parent_comment_id = parent_comment_id if new_comment is None else new_comment['comment_id']\n\n # recurse on children\n if 'children' in comment_data:\n for child in comment_data['children']:\n comments = get_post_comments_recur(child, comments, next_parent_comment_id, parent_post_id)\n\n # recurse on replies\n if 'replies' in comment_data:\n comments = get_post_comments_recur(comment_data['replies'], comments, next_parent_comment_id, parent_post_id)\n\n return comments",
"def comment_nodes(cursor: Cursor, children: List[Cursor]) -> None:\n # The idea here is to look for comment tokens between nodes.\n tu = cursor.tu\n prev_child = None\n for child in children:\n # :func:`comment_node` will look to see if the node is in\n # UNDOCUMENTED_NODES, but do it here anyway to save the effort of\n # getting tokens, no performance metrics were checked, but the general\n # hunch is there will be a lot fewer UNDOCUMENTED_NODES than not.\n if child.kind not in UNDOCUMENTED_NODES:\n prev_child = child\n continue\n\n # This may not be 100% accurate but move the end to the previous\n # line. This solves problems like macro definitions not including the\n # preprocessor `#define` tokens.\n #\n # <-- previous line\n # #define SOME_MACRO 23\n # ^ ^ (Note `end` is exclusive)\n # | |\n # +-- extent --+\n #\n location = child.extent.start\n end = cindex.SourceLocation.from_position(\n tu, location.file, location.line - 1, 1\n )\n\n start = prev_child.extent.end if prev_child else cursor.extent.start\n extent = cindex.SourceRange.from_locations(start, end)\n tokens = list(cindex.TokenGroup.get_tokens(tu, extent=extent))\n\n if tokens:\n comment_node(child, tokens[-1])\n comment_node(prev_child, tokens[0], trailing=True)\n\n prev_child = child\n\n first_child = children[0] if children else None\n cursor.raw_comment = get_file_comment(cursor, first_child)",
"def modify_comment_tree(self, comment_tree):\n return comment_tree",
"def generate_graph(comments):\n for comment in comments:\n topic['all_comments'].append(comment)\n parent = topic['graph'].setdefault(comment['parentId'], [])\n parent.append(comment['id'])\n generate_graph(comment['children'])",
"def parse_comment(self, node):\n\n data = []\n\n if node is not None:\n comment_id_pattern = re.compile('comment-(\\d+)')\n for comment_node in node.find_all('div', class_='comment'):\n item = {}\n item['is_deletable'] = False\n item['is_editable'] = False\n \n comment_id_result = comment_id_pattern.search(comment_node.get('id'))\n if comment_id_result:\n item['id'] = int(comment_id_result.group(1))\n \n comment_body_node = comment_node.find('div', class_='comment-body')\n if comment_body_node is not None:\n item['content'] = ''\n for p in comment_body_node.find_all(recursive=False):\n if 'class' in p.attrs and 'author' in p['class']:\n item['author'] = p.get_text()\n item['profile_url'] = self.get_link(p.get('href'))\n author_id = self._parse_user_id_from_url(item['profile_url'])\n if self.userId == author_id:\n item['is_deletable'] = True\n item['is_editable'] = True\n elif 'class' in p.attrs and 'age' in p['class']:\n item['date'] = p.abbr['title']\n item['date_ago'] = timeago.format(self._parse_datetime(item['date']), datetime.now(TIMEZONE))\n elif 'class' in p.attrs and 'edit' in p['class']:\n continue\n elif p.name == 'form':\n continue\n else:\n item['content'] += str(p)\n\n data.append(item)\n\n return data",
"def children(self):\n return Comment.objects.filter(parent=self)",
"def do_get_threaded_comment_tree(parser, token):\r\n error_string = \"%r tag must be of format {%% get_threaded_comment_tree for OBJECT [TREE_ROOT] as CONTEXT_VARIABLE %%}\" % token.contents.split()[0]\r\n try:\r\n split = token.split_contents()\r\n except ValueError:\r\n raise template.TemplateSyntaxError(error_string)\r\n if len(split) == 5:\r\n return CommentTreeNode(split[2], split[4], split[3])\r\n elif len(split) == 6:\r\n return CommentTreeNode(split[2], split[5], split[3])\r\n else:\r\n raise template.TemplateSyntaxError(error_string)",
"def post_tree(user, root):\n\n # Get all posts that belong to post root.\n query = Post.objects.valid_posts(u=user, root=root).exclude(pk=root.id)\n\n # Filter spam/deleted comments or answers.\n if user.is_anonymous or not user.profile.is_moderator:\n query = query.exclude(Q(status=Post.DELETED) | Q(spam=Post.SPAM))\n\n query = query.select_related(\"lastedit_user__profile\", \"author__profile\", \"root__author__profile\")\n\n # Apply the sort order to all posts in thread.\n thread = query.order_by(\"type\", \"-accept_count\", \"-vote_count\", \"creation_date\")\n\n # Gather votes by the current user.\n votes = get_votes(user=user, root=root)\n\n # Shortcuts to each storage.\n bookmarks, upvotes = votes[Vote.BOOKMARK], votes[Vote.UP]\n\n # Build comments tree.\n comment_tree = dict()\n\n def decorate(post):\n # Mutates the elements! Not worth creating copies.\n if post.is_comment:\n comment_tree.setdefault(post.parent_id, []).append(post)\n post.has_bookmark = int(post.id in bookmarks)\n post.has_upvote = int(post.id in upvotes)\n if user.is_authenticated:\n post.can_accept = not post.is_toplevel and (user == post.root.author or user.profile.is_moderator)\n post.can_moderate = user.profile.is_moderator\n post.is_editable = (user == post.author or user.profile.is_moderator)\n else:\n post.can_accept = False\n post.is_editable = False\n post.can_moderate = False\n\n return post\n\n # Decorate the objects for easier access\n thread = list(map(decorate, thread))\n\n # Decorate the root post\n root = decorate(root)\n\n # Select the answers from the thread.\n answers = [p for p in thread if p.type == Post.ANSWER]\n\n return root, comment_tree, answers, thread",
"def create_by_parent(parent_id):\n return CommentsTreeDAO(root_id=parent_id)",
"def get_comments_by_parent_genus_type(self, comment_genus_type):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.get_resources_by_parent_genus_type\n # STILL NEED TO IMPLEMENT!!!\n return objects.CommentList([])",
"def get_comment_order(self):\n\n with g.stats.get_timer('comment_tree.get.1') as comment_tree_timer:\n comment_tree = CommentTree.by_link(self.link, comment_tree_timer)\n sort_name = self.sort.col\n sorter = get_comment_scores(\n self.link, sort_name, comment_tree.cids, comment_tree_timer)\n comment_tree_timer.intermediate('get_scores')\n\n if isinstance(self.sort, operators.shuffled):\n # randomize the scores of top level comments\n top_level_ids = comment_tree.tree.get(None, [])\n top_level_scores = [\n sorter[comment_id] for comment_id in top_level_ids]\n shuffle(top_level_scores)\n for i, comment_id in enumerate(top_level_ids):\n sorter[comment_id] = top_level_scores[i]\n\n self.timer.intermediate(\"load_storage\")\n\n comment_tree = self.modify_comment_tree(comment_tree)\n self.timer.intermediate(\"modify_comment_tree\")\n\n initial_candidates, offset_depth = self.get_initial_candidates(comment_tree)\n\n comment_tuples = self.get_initial_comment_list(comment_tree)\n if comment_tuples:\n # some comments have bypassed the sorting/inserting process, remove\n # them from `initial_candidates` so they won't be inserted again\n comment_tuple_ids = {\n comment_tuple.comment_id for comment_tuple in comment_tuples}\n initial_candidates = [\n comment_id for comment_id in initial_candidates\n if comment_id not in comment_tuple_ids\n ]\n\n candidates = []\n self.update_candidates(candidates, sorter, initial_candidates)\n self.timer.intermediate(\"pick_candidates\")\n\n # choose which comments to show\n while candidates and len(comment_tuples) < self.max_comments:\n sort_val, comment_id = heapq.heappop(candidates)\n if comment_id not in comment_tree.cids:\n continue\n\n comment_depth = comment_tree.depth[comment_id] - offset_depth\n if comment_depth >= self.max_depth:\n continue\n\n child_ids = comment_tree.tree.get(comment_id, [])\n\n comment_tuples.append(CommentTuple(\n comment_id=comment_id,\n depth=comment_depth,\n parent_id=comment_tree.parents[comment_id],\n num_children=comment_tree.num_children[comment_id],\n child_ids=child_ids,\n ))\n\n child_depth = comment_depth + 1\n if child_depth < self.max_depth:\n self.update_candidates(candidates, sorter, child_ids)\n\n self.timer.intermediate(\"pick_comments\")\n\n # add all not-selected top level comments to the comment_tuples list\n # so we can make MoreChildren for them later\n top_level_not_visible = {\n comment_id for sort_val, comment_id in candidates\n if comment_tree.depth.get(comment_id, 0) - offset_depth == 0\n }\n\n if top_level_not_visible:\n num_children_not_visible = sum(\n 1 + comment_tree.num_children[comment_id]\n for comment_id in top_level_not_visible\n )\n comment_tuples.append(MissingChildrenTuple(\n num_children=num_children_not_visible,\n child_ids=top_level_not_visible,\n ))\n\n self.timer.intermediate(\"handle_morechildren\")\n return comment_tuples",
"def comments(self):\n return self.get_queryset().filter(content_type__model='comment').order_by('-comments__createdAt')",
"def load_comments(request):\n # TODO: Add the ability to return comment tree in JSON format.\n # First we get the root of the comment tree being requested\n try:\n tree_root, parent_object = _get_or_create_tree_root(request)\n except InvalidCommentException as e:\n return JsonResponse({\n 'ok': False,\n 'error_message': str(e),\n })\n\n # Check if the user doesn't pass the appropriate permission check (on the parent_object)...\n if not user_has_permission(request, parent_object, 'can_view_comments'):\n return JsonResponse({\n 'ok': False,\n 'error_message': \"You do not have permission to view comments for this object.\",\n })\n\n # Once we have our desired nodes, we tack on all of the select/prefetch related stuff\n nodes = tree_root.get_family().select_related('deleted_user_info', 'created_by', 'parent', 'content_type')\\\n .prefetch_related(Prefetch('versions', queryset=CommentVersion.objects.order_by('-date_posted')\\\n .select_related('posting_user', 'deleted_user_info')))\n\n # The 'X_KWARGS' header is populated by settings.kwarg in comments.js\n kwargs = json.loads(request.META.get('HTTP_X_KWARGS', {}))\n kwargs.update({\n 'nodes': nodes,\n 'parent_object': parent_object,\n 'max_depth': tree_root.max_depth\n })\n\n comments_template = get_attr_val(request, parent_object, 'comments_template', 'comments/comments.html', **kwargs)\n\n # In the parent_object, sites can define a function called 'filter_nodes' if they wish to apply any additional filtering to the nodes queryset before it's rendered to the template.\n # Default value is the nodes tree with the deleted comments filtered out.\n nodes = get_attr_val(request, parent_object, \"filter_nodes\", default=nodes.filter(deleted=False), **kwargs)\n kwargs.update({\"nodes\": nodes, 'request': request})\n\n # Checks/assigns permissions to each node (so the template doesn't have to)\n _process_node_permissions(**kwargs)\n\n return JsonResponse({\n 'ok': True,\n 'html_content': loader.render_to_string(comments_template, context=kwargs, request=request),\n 'number_of_comments': tree_root.get_descendant_count()\n })",
"def fetch_top_level_comments(api: NytApi, article_url: str, pagination_size: int) -> List[Dict]:\n\n comments = []\n while True:\n response = api.community.get_comments(article_url, offset=len(comments), limit=pagination_size)\n if response['status'] != 'OK':\n # some multimedia articles dont allow comments and instead throw an error here\n return []\n\n results = response['results']\n new_comments = results['comments']\n comments.extend(new_comments)\n\n if len(new_comments) < pagination_size or len(comments) >= results['totalParentCommentsFound']:\n return comments",
"def get_path_to_comment(cls, comment, context, comment_tree):\n\n if comment._id not in comment_tree.cids:\n # the comment isn't in the tree\n raise InconsistentCommentTreeError\n\n comment_id = comment._id\n path = []\n while comment_id and len(path) <= context:\n path.append(comment_id)\n try:\n comment_id = comment_tree.parents[comment_id]\n except KeyError:\n # the comment's parent is missing from the tree. this might\n # just mean that the child was added to the tree first and\n # the tree will be correct when the parent is added.\n raise InconsistentCommentTreeError\n\n # reverse the list so the first element is the most root level comment\n path.reverse()\n return path",
"def _get_comment_order(self):\n\n comment_tuples = CommentOrdererBase.get_comment_order(self)\n if not comment_tuples:\n return comment_tuples\n elif isinstance(comment_tuples[-1], MissingChildrenTuple):\n missing_children_tuple = comment_tuples.pop()\n else:\n missing_children_tuple = None\n\n special_responder_ids = self.link.responder_ids\n\n # unfortunately we need to look up all the Comments for QA\n comment_ids = {ct.comment_id for ct in comment_tuples}\n comments_by_id = Comment._byID(comment_ids, data=True)\n\n # figure out which comments will be kept (all others are discarded)\n kept_comment_ids = set()\n for comment_tuple in comment_tuples:\n if comment_tuple.depth == 0:\n kept_comment_ids.add(comment_tuple.comment_id)\n continue\n\n comment = comments_by_id[comment_tuple.comment_id]\n parent = comments_by_id[comment.parent_id] if comment.parent_id else None\n\n if comment.author_id in special_responder_ids:\n kept_comment_ids.add(comment_tuple.comment_id)\n continue\n\n if parent and parent.author_id in special_responder_ids:\n kept_comment_ids.add(comment_tuple.comment_id)\n continue\n\n if hasattr(comment, \"distinguished\") and comment.distinguished != \"no\":\n kept_comment_ids.add(comment_tuple.comment_id)\n continue\n\n # add all ancestors to kept_comment_ids\n for comment_id in sorted(kept_comment_ids):\n # sort the comments so we start with the most root level comments\n comment = comments_by_id[comment_id]\n parent_id = comment.parent_id\n\n counter = 0\n while (parent_id and\n parent_id not in kept_comment_ids and\n counter < g.max_comment_parent_walk):\n kept_comment_ids.add(parent_id)\n counter += 1\n\n comment = comments_by_id[parent_id]\n parent_id = comment.parent_id\n\n # remove all comment tuples that aren't in kept_comment_ids\n comment_tuples = [comment_tuple for comment_tuple in comment_tuples\n if comment_tuple.comment_id in kept_comment_ids\n ]\n\n if missing_children_tuple:\n comment_tuples.append(missing_children_tuple)\n\n return comment_tuples",
"def get(self, request, *args, **kwargs):\n with connection.cursor() as cursor:\n params = (kwargs['object_id'], kwargs['content_type_id'],\n ContentType.objects.get_for_model(models.Comment).id)\n cursor.execute(SQL_GET_CHILDREN, params)\n return Response(dictfetchall(cursor))",
"def get(pid, sid, aid, cid):\n helpers.abort_if_invalid_parameters(pid, sid)\n helpers.abort_if_unknown_comment(cid, aid)\n project = Project.query.get(pid)\n\n if not project.is_public:\n user = User.query.filter_by(email=get_jwt_identity()).first()\n helpers.abort_if_not_a_member_and_private(user, project)\n children = CommentsModel.query.filter_by(parent_id=cid).all()\n return custom_response(200, data=UserAnnotationCommentSchema(many=True).dump(children))",
"def comment(self, comment_id):\r\n return RepoCommitsComment(self.parent, comment_id)",
"def get_comments_from_submission_id(submission_id):\n flat_comments = []\n tree_comments = []\n\n submission = (REDDIT.submission(id=submission_id))\n print(submission.num_comments)\n print(submission.shortlink)\n\n # sort comments by best and get the flattened list\n submission.comment_sort = 'confidence'\n\n # tree comments traversal\n submission.comments.replace_more(limit=1)\n for comm in submission.comments.list():\n tree_comments.append(comm)\n\n flat_comments = list(submission.comments)\n\n return flat_comments, tree_comments",
"def comments(self):\r\n return RepoCommitsComments(self.parent)",
"def _get_comment_map(self):\r\n def _visit(obj):\r\n res = []\r\n for child in obj.get('children', []):\r\n res.append((child['id'], child))\r\n if 'children' in child:\r\n res += _visit(child)\r\n return res\r\n return dict(_visit(self.thread))",
"def dfs(comment, fun):\n # comment has no replies\n if not comment.replies:\n return\n else:\n for r in comment.replies:\n # do something with a comment here\n fun(r)\n # recurr\n Comment.dfs(r, fun)",
"def test_print_comments():\n flat_comments, tree_comments = get_comments_from_submission_id('jrjn70')\n print(len(flat_comments))\n print(len(tree_comments))\n\n print('flat comments')\n for c in flat_comments[0:5]:\n comment_instance = REDDIT.comment(c)\n print(comment_instance.body)\n\n print()\n print('tree comments')\n for c in tree_comments[0:5]:\n comment_instance = REDDIT.comment(c)\n print(comment_instance.body)"
] | [
"0.65842324",
"0.63664144",
"0.63229674",
"0.6065913",
"0.6050549",
"0.6045573",
"0.60114443",
"0.5975179",
"0.5946544",
"0.58939505",
"0.587878",
"0.58299065",
"0.58249295",
"0.5780401",
"0.57469136",
"0.5738545",
"0.57361287",
"0.57340264",
"0.5731925",
"0.5722351",
"0.5712423",
"0.57042634",
"0.56964725",
"0.5696197",
"0.56665254",
"0.5665912",
"0.56359184",
"0.5617154",
"0.5591647",
"0.55121446"
] | 0.7140785 | 0 |
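For reference, a minimal self-contained sketch (not part of the dataset) of the parent-id to child-ids comment-tree pattern shared by several of the negative snippets above (e.g. generate_graph and post_tree); the 'id', 'parentId', and 'children' field names are assumptions made only for this example.

def build_comment_graph(comments):
    """Flatten nested comments into a parent-id -> list-of-child-ids mapping."""
    graph = {}
    flat = []

    def visit(comment):
        flat.append(comment)
        graph.setdefault(comment.get('parentId'), []).append(comment['id'])
        for child in comment.get('children', []):
            visit(child)

    for comment in comments:
        visit(comment)
    return graph, flat


if __name__ == '__main__':
    sample = [{'id': 1, 'parentId': None,
               'children': [{'id': 2, 'parentId': 1, 'children': []}]}]
    graph, flat = build_comment_graph(sample)
    print(graph)      # {None: [1], 1: [2]}
    print(len(flat))  # 2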
Checks to see if each reduced word is in the dictionary, then checks if it is a homophone of the original word. | def word_check(word):
word1 = word[1:]
if word1 not in word_dict: return False
if not homophones (word, word1): return False
word2 = word[0] + word[2:]
if word2 not in word_dict: return False
if not homophones(word, word2): return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def homophone_words(word_one, word_two, pron_dict):\n if word_one not in pron_dict or word_two not in pron_dict:\n return False\n return pron_dict[word_one] == pron_dict[word_two]",
"def homophones():\n pron = pronounce.read_dictionary('c06d')\n words = mkwrddct('words.txt')\n\n for word in words:\n phone1 = word[1:]\n phone2 = word[0] + word[2:]\n if phone1 in pron and phone2 in pron and word in pron:\n if pron[word] == pron[phone1] and pron[word] == pron[phone2]:\n print word, phone1, phone2",
"def isWord(word, dictionary):\n return word in dictionary",
"def homophones(a, b):\n if a not in phonetic or b not in phonetic:\n return False\n\n return phonetic[a] == phonetic[b]",
"def is_hindi(word):\r\n\twordlist = []\r\n\twith open(\"HINDI_DICT.txt\", \"r\", encoding = \"utf-8\") as f:\r\n\t\tfor line in f:\r\n\t\t\tline = re.sub(r'[^A-Za-z.;]','',line)\r\n\t\t\tline = line.lower()\r\n\t\t\tlist1 = line.split(\";\")\r\n\t\t\tfor element in list1:\r\n\t\t\t\tif element != '':\r\n\t\t\t\t\twordlist.append(element)\r\n\tif word in list(wordlist):\r\n\t\treturn 1\r\n\treturn 0",
"def check_in_dictionary(text):\n # check if text is printable\n if not text.isprintable():\n return False\n\n # if there are all complete words in the text\n if text[-1] == ' ':\n # check if all words exist in the dictionary\n if not words_in_dictionary(text.split()):\n return False\n\n # if the last word is incomplete\n else:\n # check if all words but the last exists in the dictionary\n text = text.split()\n if not words_in_dictionary(text[:-1]):\n return False\n\n # checks if there is any word in the dictionary which starts with the\n # last word in the plaintext\n word = text[-1].lower()\n raw_word = word.replace(\"'\", '').replace('.', '')\n return any(a for a in DICTIONARY_LOWER if a.startswith(word)) or \\\n any(a for a in DICTIONARY_LOWER if a.startswith(raw_word))\n\n return True",
"def search(self, word):\n for i in xrange(len(word)):\n w = word[:i] + '*' + word[i+1:]\n if w in self.dict and (len(self.dict[w]) > 1 or word[i] not in self.dict[w]): return True \n return False",
"def words_in_dictionary(word_list):\n for word in word_list:\n word = word.lower()\n raw_word = word.replace(\"'\", '').replace('.', '')\n if word not in DICTIONARY_LOWER and raw_word not in DICTIONARY_LOWER:\n return False\n return True",
"def check_word(self, word):\n word = word.lower().strip()\n return not word or word in self.dictionary",
"def word_dict_contains (self,\r\n word):\r\n\r\n\r\n\r\n if self.using_database:\r\n aprint('WORDDICT CONTAINS')\r\n\r\n value_tuple = (notebookname, word,)\r\n db_cursor.execute(\"SELECT rowid\"\r\n +\" FROM word_to_indexes\"\r\n +\" WHERE notebook=?\"\r\n +\" AND word=?;\",\r\n value_tuple)\r\n try:\r\n return db_cursor.fetchone()[0] # MIGHT BE PROBLEMATIC\r\n except:\r\n return False\r\n\r\n return str(word) in self.word_dict",
"def isValidWord(word, hand, wordList):\n # TO DO ... <-- Remove this comment when you code this function\n if word not in wordList:\n return False\n dic={}\n for k in hand:\n dic[k]=hand[k] \n for w in word:\n a=dic.get(w,0)\n if a > 0:\n dic[w]=a-1\n else:\n return False\n return True",
"def include_word(word, chardict):\n if (all(char in chardict.keys() for char in word)) & (len(word)<=25):\n # Some word2vec entries are all capitals and generally are acronyms.\n # This is unlikely to be learnable\n if not word.isupper():\n return True\n\n return False",
"def checkWord(word):\r\n check = word in cachedWordList\r\n if check:\r\n print(word + \" spelt correctly\")\r\n else:\r\n print(word + \" not found in dictionary\")\r\n return check",
"def search(self, word: 'str') -> 'bool':\n \n def dfs(word,dictword):\n if not word: \n if '#' in dictword:\n return True\n else:\n return False\n for k in range(len(word)):\n if word[k]!='.':\n if word[k] not in dictword:\n return False\n else:\n return dfs(word[k+1:],dictword[word[k]])\n else:\n for ss in 'qwertyuiopasdfghjklzxcvbnm':\n if ss in dictword and dfs(word[k+1:],dictword[ss]):\n return True\n return False\n return dfs(word,self.dictword)",
"def search(self, word):\r\n t = self.trie\r\n for w in word: \r\n if w not in t: \r\n return False\r\n t = t[w]\r\n if '#' in t:\r\n return True\r\n return False",
"def compare(theInput,dictionary):\n n=len(theInput)\n ret=0\n for word in dictionary:\n if theInput==word: return 2\n if theInput==word[:n]: ret=1\n return ret",
"def inHistogram(s,d):\n #if the character is in the dictionary, it will print True,\n #ifnot returns False \n return s in d",
"def search(self, word):\n current = self.root\n for i in word:\n if current.hash_map.get(i) is None:\n return False\n current = current.hash_map.get(i)\n if current.num != 0:\n return True\n return False",
"def search(self, word: str) -> bool:\n m = len(word)\n\n for dict_word in self.dict[m]:\n i = 0\n while i < m:\n if (word[i] == dict_word[i]) or (word[i] == '.'):\n i += 1\n else:\n break\n\n if i == m:\n return True\n\n return False",
"def isUnique(self, word):\n if len(word) <= 1:\n n = word\n else:\n n = word[0] + str(len(word) - 2) + word[-1] #Get the abbrviation.\n if n not in self.abbrdict or (self.abbrdict[n] == 1 and word in self.origdict): #If it is not in abbrdict or the abbrevation count is 1 and the word has appeared in dictionary, return true.\n return True\n else: #Otherwise, return false.\n return False",
"def check_for_greeting(sentence):\n for word in sentence.words:\n if word.lower() in greetings:\n return True",
"def rhymes(self,a,b):\r\n \r\n a=a.lower()\r\n b=b.lower()\r\n if(a in self._words): ##check if A is in the dict\r\n checkA=1\r\n soundA=self._pronun[a]\r\n lenA=len(soundA)\r\n #print(soundA)\r\n else :\r\n return False\r\n if(b in self._words): ##check if B is in dict\r\n checkB=1\r\n soundB=self._pronun[b]\r\n lenB=len(soundB)\r\n #print(soundB)\r\n else:\r\n return False\r\n \r\n if((checkA==1) and (checkB==1)): ##if both in dict then move ahead\r\n #print(lenA,lenB)\r\n \r\n for countA in range(lenA):\r\n if soundA[countA][0][0] not in ['A','E','I','O','U']:\r\n soundA[countA]=soundA[countA][1:]\r\n\r\n for countA in range(lenA):\r\n soundA[countA]=''.join(soundA[countA])\r\n \r\n # print(soundA)\r\n \r\n\r\n for countB in range(lenB):\r\n if soundB[countB][0][0] not in ['A','E','I','O','U']:\r\n soundB[countB]=soundB[countB][1:]\r\n\r\n for countB in range(lenB):\r\n soundB[countB]=''.join(soundB[countB])\r\n\r\n #print(soundB)\r\n \r\n else:\r\n return False\r\n\r\n rhyme_count=0\r\n \r\n for countA in range(lenA):\r\n for countB in range(lenB):\r\n if((soundA[countA].endswith(soundB[countB]))==True):\r\n #print('substring matched')\r\n rhyme_count=rhyme_count+1\r\n\r\n for countB in range(lenB):\r\n for countA in range(lenA):\r\n if((soundB[countB].endswith(soundA[countA]))==True):\r\n #print('substring matched')\r\n rhyme_count=rhyme_count+1\r\n \r\n if(rhyme_count>0):\r\n #print('True') \r\n return True\r\n else:\r\n # print('False')\r\n return False",
"def check_word(words, word):\r\n if word in words:\r\n return True\r\n else:\r\n return False",
"def is_valid_word(word, hand, word_list):\n h = dict(hand)\n if word.lower() in word_list:\n for letter in word:\n if h.get(letter, 0) == 0:\n return False\n else:\n h[letter] -= 1\n else:\n return False\n return True",
"def isValidWord(word: str, hand: Dict[str, int], wordList: List[str]) -> bool:\n cp_hand = hand.copy()\n\n if word not in wordList:\n return False\n\n for char in word:\n if cp_hand.get(char, 0) < 1:\n return False\n else:\n cp_hand[char] = cp_hand.get(char,0) - 1\n\n return True\n # one line:\n # return word in wordList and all(word.count(c) <= hand.get(c, 0) \n # for c in word) # Kiwitrader",
"def _fe_check_phishing_similarity_words(self, sample):\n result = OrderedDict()\n\n for key in self._similarity_words:\n result[key + \"_lev_1\"] = 0\n\n for word in sample['fqdn_words']:\n if distance(word, key) == 1:\n result[key + \"_lev_1\"] = 1\n\n return result",
"def check_words(dictionary_, start_word, stop_word):\n if dictionary_.is_real_word(start_word) is False:\n print(\"Word {} not found in the dictionary\".format(start_word))\n return False\n if dictionary_.is_real_word(stop_word) is False:\n print(\"Word {} not found in the dictionary\".format(stop_word))\n return False\n return True",
"def has_word(self, word):\n return word in self.word_set",
"def search(self, word):\n curr = self.trie\n for i, ch in enumerate(word):\n curr = curr.get(ch, {})\n if curr:\n continue\n else:\n break\n \n if i==len(word)-1 and '\\0' in curr:\n ret = True\n else:\n ret = False\n\n return ret",
"def isValidWord(word, hand, wordList):\n # TO DO ... <-- Remove this comment when you code this function\n #for line in fil:\n # print line\n if word not in w:\n w.append(word)\n #print w\n else:\n #print \"already have a word inside\"\n return False\n for z in word:\n print z\n if z in hand:\n #if p != word:\n #print hand[c]\n if hand[z] > 0:\n hand[z] -= 1\n \n else:\n return False\n else:\n return False\n if word in wordList:\n print \"here is true\"\n return True\n else:\n return False"
] | [
"0.7909334",
"0.7649232",
"0.7379768",
"0.70193833",
"0.66508675",
"0.6501066",
"0.64329946",
"0.64270467",
"0.63939303",
"0.63705605",
"0.6364078",
"0.63594246",
"0.6337399",
"0.63142025",
"0.625433",
"0.62256026",
"0.6210163",
"0.6180701",
"0.6136245",
"0.6133826",
"0.6120047",
"0.6111174",
"0.60638636",
"0.6046244",
"0.6027015",
"0.6021058",
"0.6019472",
"0.60037243",
"0.5987341",
"0.59856623"
] | 0.80829525 | 0 |
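For illustration, a self-contained sketch (not part of the dataset) of how the word_check entry above could be exercised once its implicit globals (word_dict and a homophones predicate backed by a pronunciation dictionary) are made explicit; the tiny word list and pronunciations below are invented for the example.

def make_word_check(word_dict, pron_dict):
    """Build a word_check function with its dictionary dependencies passed in."""
    def homophones(a, b):
        # Two words are homophones when both have a known pronunciation and it matches.
        return a in pron_dict and b in pron_dict and pron_dict[a] == pron_dict[b]

    def word_check(word):
        # Drop the first letter, then the second; each reduced word must be a real
        # word and must sound identical to the original.
        for reduced in (word[1:], word[0] + word[2:]):
            if reduced not in word_dict or not homophones(word, reduced):
                return False
        return True

    return word_check


if __name__ == '__main__':
    words = {'scent', 'cent', 'sent'}
    pron = {'scent': 'S EH N T', 'cent': 'S EH N T', 'sent': 'S EH N T'}
    check = make_word_check(words, pron)
    print(check('scent'))  # True: 'cent' and 'sent' are words that sound like 'scent'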
Generate the (x, y) positions of the cars from a trace. | def posns_from_trace(trace):
posns = []
for i in range((len(trace.variables)-1)//2):
var_x = trace.variables[2*i]
var_y = trace.variables[2*i+1]
car_i = int(var_x.name.split('_')[2])
xy = (var_x.value.item(), var_y.value.item())
if len(posns) <= car_i:
posns.append(xy) # if it's first, append it
else:
posns[car_i] = xy # else overwrite
return posns | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def positions(self, tileID, numSamples):",
"def BeamPosition():\n \n XPOS, YPOS = [], []\n\n x=0\n for j in range(0,6,1):\n x += 0.1\n y=0\n for k in range(0,6,1):\n y += 0.2\n XPOS.append(x)\n YPOS.append(y)\n\n return XPOS, YPOS",
"def generate_positions(self):\n raise NotImplementedError(\"Should implement generate_positions()!\")",
"def generatePos(self):\n self.pos = np.zeros((self.num_points, 2), dtype='int32')\n self.pos[:, 1] = np.repeat(list(reversed(np.arange(1, self.x*2, 2))), self.y)\n self.pos[:, 0] = np.tile(np.arange(1, self.x*2, 2), self.y)",
"def get_positions(specs):\r\n xy = []\r\n for i, spec in enumerate(specs):\r\n slit = spec.split(\"n3311\", 1)[1].replace(\".fits\", \"\")\r\n # slit = spec.split(\".\")[0].split(\"_\", 1)[1][5:]\r\n index = canvas.slits.ids.index(slit)\r\n xy.append([canvas.slits.x[index], canvas.slits.y[index]])\r\n return np.array(xy)",
"def make_pos(self, posvalues):\n self._positions = []\n self._directions = []\n pos_cur = posvalues[0]\n line_cur = np.insert(pos_cur, 0, 0)\n self._joint_first = line_cur\n\n current_length = 0\n idx=0\n\n for values in posvalues[0:]:\n # print 'values=', values\n pos_next = values\n current_length += arc_length(pos_cur, pos_next)\n line_next = np.insert(pos_next, 0, current_length)\n # print('line_cur=',line_cur)\n # line_next.insert(0, current_length)\n unit_dir, grads=calc_dir(line_cur,line_next, self._eps/3)\n if unit_dir!=None:\n self._positions.append(line_cur)\n # print line_cur[0:4]\n vec=unit_dir.tolist()\n # vec.insert(0,idx)\n self._directions.append(vec)\n line_cur=line_next\n pos_cur=pos_next\n idx +=1\n # add the last point and zero direction at the end\n # line_cur[0]=line_cur[0] + self._eps\n zero_dir = np.zeros(7)\n self._positions.append(line_cur)\n self._joint_last = line_cur\n\n # print 'last'\n # print line_cur[0:4]\n vec=zero_dir.tolist()\n vec[0]=line_cur[0]\n # vec.insert(0,idx)\n self._directions.append(vec)\n\n # with open('dir1.csv', 'wb') as csvfile:\n # writer = csv.writer(csvfile, delimiter=',',\n # quotechar='|', quoting=csv.QUOTE_MINIMAL)\n # # writer.writerow('path_pos, x, y, z, rot, rot, rot, rot')\n # [writer.writerow(r) for r in self._directions]\n # print 'end of refine_pos'",
"def getPosition(self):\n\t\txxx1 = self.stokes()\n\t\txxx2 = self.thp()\n\t\txxx3 = self.tthp()\n\t\treturn [xxx1, xxx2, xxx3]",
"def getTargetPositions(rg):\n targetPositions = OrderedDict()\n for r in rg.robotDict.values():\n x, y, z = r.metFiberPos\n targetPositions[r.id] = [x, y]\n return targetPositions",
"def list_posns(lot, x, y):\n return [position(t, x, y) for t in lot]",
"def process_traces(st,positions):\n spti = {}\n isis = {}\n rav = {}\n\n nspti = {}\n nisis = {}\n nrav = {}\n\n for i,sti in enumerate(st.keys()):\n print('analysing trace nr %i'%i)\n cst = st[sti]\n cp = positions[sti]\n cisi = np.diff(cst)\n if len(cisi) > 0:\n spti[sti], isis[sti], positions[sti] = delete_artifacts(cst,cisi,cp)\n nspti[sti], nisis[sti] = fill_gaps(spti[sti][:-1],np.diff(spti[sti]))\n\n print('smooting traces')\n\n return smooth_traces(nspti,nisis,5), positions",
"def _some_variables(use_posInd=False):\n\n parent = (\n np.array(\n [\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 1,\n 7,\n 8,\n 9,\n 10,\n 1,\n 12,\n 13,\n 14,\n 15,\n 13,\n 17,\n 18,\n 19,\n 20,\n 21,\n 20,\n 23,\n 13,\n 25,\n 26,\n 27,\n 28,\n 29,\n 28,\n 31,\n ]\n )\n - 1\n )\n\n offset = np.array(\n [\n 0.000000,\n 0.000000,\n 0.000000,\n -132.948591,\n 0.000000,\n 0.000000,\n 0.000000,\n -442.894612,\n 0.000000,\n 0.000000,\n -454.206447,\n 0.000000,\n 0.000000,\n 0.000000,\n 162.767078,\n 0.000000,\n 0.000000,\n 74.999437,\n 132.948826,\n 0.000000,\n 0.000000,\n 0.000000,\n -442.894413,\n 0.000000,\n 0.000000,\n -454.206590,\n 0.000000,\n 0.000000,\n 0.000000,\n 162.767426,\n 0.000000,\n 0.000000,\n 74.999948,\n 0.000000,\n 0.100000,\n 0.000000,\n 0.000000,\n 233.383263,\n 0.000000,\n 0.000000,\n 257.077681,\n 0.000000,\n 0.000000,\n 121.134938,\n 0.000000,\n 0.000000,\n 115.002227,\n 0.000000,\n 0.000000,\n 257.077681,\n 0.000000,\n 0.000000,\n 151.034226,\n 0.000000,\n 0.000000,\n 278.882773,\n 0.000000,\n 0.000000,\n 251.733451,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 99.999627,\n 0.000000,\n 100.000188,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 257.077681,\n 0.000000,\n 0.000000,\n 151.031437,\n 0.000000,\n 0.000000,\n 278.892924,\n 0.000000,\n 0.000000,\n 251.728680,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 99.999888,\n 0.000000,\n 137.499922,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n ]\n )\n\n offset = offset.reshape(-1, 3)\n\n rotInd = [\n [5, 6, 4],\n [8, 9, 7],\n [11, 12, 10],\n [14, 15, 13],\n [17, 18, 16],\n [],\n [20, 21, 19],\n [23, 24, 22],\n [26, 27, 25],\n [29, 30, 28],\n [],\n [32, 33, 31],\n [35, 36, 34],\n [38, 39, 37],\n [41, 42, 40],\n [],\n [44, 45, 43],\n [47, 48, 46],\n [50, 51, 49],\n [53, 54, 52],\n [56, 57, 55],\n [],\n [59, 60, 58],\n [],\n [62, 63, 61],\n [65, 66, 64],\n [68, 69, 67],\n [71, 72, 70],\n [74, 75, 73],\n [],\n [77, 78, 76],\n [],\n ]\n\n # definitions are originating from matlab file --> bring them to zero based indexing\n rotInd = [[e - 1 for e in s if len(s) > 0] for s in rotInd]\n posInd = [0, 1, 2] if use_posInd else None\n\n expmapInd = np.split(np.arange(4, 100) - 1, 32)\n\n return parent, offset, rotInd, expmapInd, posInd",
"def get_position(self, t0):\n my_pos_x=np.random.uniform(-20, 20)\n my_pos_y=np.random.uniform(-20, 20)\n r=np.array([my_pos_x, my_pos_y])\n x_y=np.zeros(shape=(self.no_planets-1, 2))\n tol=1e-5\n diff=np.zeros(self.no_planets-1)\n for k in range(self.no_planets-1):\n r1=np.linalg.norm(r)\n r2=np.linalg.norm(r-self.positionFunction(t0)[:, k])\n r3=np.linalg.norm(r-self.positionFunction(t0)[:, k+1])\n x1=0\n y1=0\n x2=self.positionFunction(t0)[0,k]\n y2=self.positionFunction(t0)[1,k]\n x3=self.positionFunction(t0)[0,k+1]\n y3=self.positionFunction(t0)[1, k+1]\n x,y,difference=self.triangulate_analytic(x1,y1,r1,x2,y2,r2,x3,y3,r3)\n x_y[k, 0]=x\n x_y[k, 1]=y\n diff[k]=difference\n if (diff > tol).any():\n print diff.max()\n print \"Oh no, one failed :(\"\n sys.exit(1)\n print \"My pos x:\", my_pos_x\n print \"My pos y:\", my_pos_y\n #return x1, y1, r1, x2, y2, r2, x3, y3, r3",
"def positions(self):\n return self.preorder()",
"def positions(self):\n method = 'get_xdata' if self.direction == 'horizontal' else 'get_ydata'\n return [getattr(line, method)()[0] for line in self.artists]",
"def calc_positions(self) :\n\t\tx, y = self.x0, self.y0\n\n\t\twhile self.is_visible(x, y) :\n\t\t\tx = 0.5 * self.gx * self.t**2 + self.vx0 * self.t + self.x0\n\t\t\ty = 0.5 * self.gy * self.t**2 + self.vy0 * self.t + self.y0\n\t\t\t\n\t\t\tself.t += self.dt\n\t\t\tself.pos_x.append(x)\n\t\t\tself.pos_y.append(y)",
"def make_position(data) -> Position:\n return (data[\"x\"], data[\"y\"])",
"def trace(self, coord01: np.ndarray) -> np.ndarray:\n rect = self.clip_rect()\n return (rect.position + coord01 * rect.size).astype(np.int)",
"def coordinates(self):",
"def position_line(self, prc=50.0):\n rtc = self._get_fibonnaci_level(prc)[0]\n x_pos = [self.roi.pos()[0], rtc]\n y_pos = [self.roi.pos()[0] + self.roi.size()[0], rtc]\n return x_pos, y_pos",
"def get_position(self, position):",
"def getTelescopeCoords(self):\n return self.header['ANT_X'],self.header['ANT_Y'],self.header['ANT_Z']",
"def _make_pos(pos):\n return pos.chromosome, pos.strand, pos.min_position, pos.min_position+20",
"def _sort_index(self):\n\n allAltPos = np.array(sorted(list(set(list(self.data['altitude'])))))[::-1]\n allAziPos = np.array(sorted(list(set(list(self.data['azimuth'])))))\n\n indON = [[None for azi in allAziPos] for alt in allAltPos]; indOFF = [[None for azi in allAziPos] for alt in allAltPos]\n\n for i, traceItem in enumerate(self.data):\n alt = traceItem['altitude'];azi = traceItem['azimuth'];sign = traceItem['sign']\n for j, altPos in enumerate(allAltPos):\n for k, aziPos in enumerate(allAziPos):\n if alt==altPos and azi==aziPos:\n if sign==1:\n if indON[j][k] is not None: raise LookupError('Duplication of trace items found at location:'+str([alt, azi])+'; sign: 1!')\n else: indON[j][k]=i\n\n if sign==-1:\n if indOFF[j][k] is not None: raise LookupError('Duplication of trace items found at location:'+str([alt, azi])+'; sign:-1!')\n else: indOFF[j][k]=i\n\n indON = np.array([np.array(x) for x in indON]); indOFF = np.array([np.array(x) for x in indOFF])\n\n return indON,indOFF,allAltPos,allAziPos",
"def get_positions_by_slits(slits):\r\n xy = []\r\n for i, slit in enumerate(slits):\r\n index = canvas.slits.ids.index(slit)\r\n xy.append([canvas.slits.x[index], canvas.slits.y[index]])\r\n return np.array(xy)",
"def positions(self):\n x_curves = np.array([0, 0.0563, 0.1958, 0.2925, 0.5000, 0.5625, 0.9375,\n 1.0000]) * self.length_car\n x_wheels = np.array([0.1958, 0.8133]) * self.length_car\n x_spoiler = 0.8688 * self.length_car\n return x_curves, x_wheels, x_spoiler",
"def time_position(self):\n rt_most_pixel = None\n lf_most_pixel = None\n time_position = []\n min_time_len = None\n for i in range (len(np.unique(self.pd.objid))):\n trajec = self.dataset.trajec(self.dataset.keys[i])\n times = trajec.time_epoch_secs + trajec.time_epoch_nsecs / 1e9\n time_pos = np.vstack([times, trajec.position_x])\n time_position.append(time_pos)\n if min_time_len == None:\n min_time_len = len(times)\n elif min_time_len > len(times):\n min_time_len = len(times)\n pixels = np.unique(trajec.position_x)\n if rt_most_pixel ==None:\n rt_most_pixel = pixels[-1]\n elif rt_most_pixel < pixels[-1]:\n rt_most_pixel = pixels[-1]\n if lf_most_pixel ==None:\n lf_most_pixel = pixels[0]\n elif lf_most_pixel > pixels[0]:\n lf_most_pixel = pixels[0]\n print min_time_len\n print rt_most_pixel\n print lf_most_pixel\n print rt_most_pixel - lf_most_pixel\n return time_position, rt_most_pixel, lf_most_pixel",
"def log_plane_positions(self):\n cmd = '{}logPlanePositions'.format(self.console)\n self.write_command(cmd)",
"def beam_positions(closepack=False):\n \n x_pos, y_pos = [], []\n\n x=0\n for j in range(0,6,1):\n x += 0.1\n y=0\n for k in range(0,6,2):\n y += 0.2\n x_pos.append(x+(0.05 if closepack else 0))\n y_pos.append(y)\n y += 0.2\n x_pos.append(x)\n y_pos.append(y)\n\n return x_pos, y_pos",
"def get_positions(wire):\n x = 0\n y = 0\n positions = [(0, 0)]\n\n for instruction in wire:\n direction = instruction[0]\n dist = int(instruction[1:])\n if direction == \"R\":\n for pos in range(1, dist+1):\n positions.append((x + pos, y))\n x += dist\n elif direction == \"L\":\n for pos in range(1, dist+1):\n positions.append((x - pos, y))\n x -= dist\n elif direction == \"U\":\n for pos in range(1, dist + 1):\n positions.append((x, y + pos))\n y += dist\n elif direction == \"D\":\n for pos in range(1, dist + 1):\n positions.append((x, y - pos))\n y -= dist\n else:\n raise ValueError(\"Direction not recognised\")\n\n return positions",
"def getPosicion(self):\r\n\t\treturn [self._x, self._y]"
] | [
"0.6406588",
"0.62758404",
"0.62750536",
"0.61463076",
"0.611264",
"0.6014426",
"0.6004055",
"0.5968917",
"0.5884226",
"0.5875474",
"0.5816131",
"0.5784888",
"0.5737535",
"0.5731324",
"0.5729159",
"0.5626728",
"0.56128687",
"0.5607636",
"0.5580684",
"0.5576681",
"0.5570371",
"0.5566832",
"0.55593264",
"0.5555513",
"0.5533306",
"0.5530989",
"0.5525859",
"0.5524877",
"0.5515567",
"0.5511613"
] | 0.706652 | 0 |
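For illustration, a runnable sketch (not part of the dataset) of the posns_from_trace entry above, using plain stand-in classes instead of a real inference trace; the car_x_<i>/car_y_<i> naming scheme and the .value.item() accessor are assumptions inferred from that snippet.

class _Scalar:
    """Stand-in for a tensor-like value exposing .item()."""
    def __init__(self, v):
        self._v = v
    def item(self):
        return self._v


class _Var:
    """Stand-in for a trace variable with a name and a value."""
    def __init__(self, name, v):
        self.name = name
        self.value = _Scalar(v)


class _Trace:
    def __init__(self, variables):
        self.variables = variables


def posns_from_trace(trace):
    """Collect one (x, y) pair per car from interleaved car_x_<i> / car_y_<i> variables."""
    posns = []
    for i in range((len(trace.variables) - 1) // 2):
        var_x = trace.variables[2 * i]
        var_y = trace.variables[2 * i + 1]
        car_i = int(var_x.name.split('_')[2])
        xy = (var_x.value.item(), var_y.value.item())
        if len(posns) <= car_i:
            posns.append(xy)   # first time this car is seen
        else:
            posns[car_i] = xy  # otherwise overwrite its position
    return posns


if __name__ == '__main__':
    trace = _Trace([
        _Var('car_x_0', 1.0), _Var('car_y_0', 2.0),
        _Var('car_x_1', 3.0), _Var('car_y_1', 4.0),
        _Var('extra', 0.0),  # trailing variable skipped by the (len - 1) // 2 loop
    ])
    print(posns_from_trace(trace))  # [(1.0, 2.0), (3.0, 4.0)]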
Parts should call draw on their child parts. A part should determine whether a change has been made and, if so, make the change and call update. If a part has pasted outside its region, it should return True. Parts should not make changes to the display until draw has been called! This is because the order in which parts are drawn matters now that all parts share a single canvas. For example, the border should not be drawn before the panel's background is drawn, or else the panel's background may bleed over the panel into the borders. force can be set to True in order to force all parts to redraw. | def draw(self, force=False):
self.display.draw(force) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw(self, force = False):\n\t\tpass",
"def _onPaint(self, evt):\n if not self._isRealized:\n self.realize()\n if self._drawn < 2:\n self.draw(repaint = False)\n self._drawn += 1\n self.gui_repaint(drawDC=wx.PaintDC(self))",
"def draw(self, force=False):\n for child in self.children.values():\n child.draw(force)",
"def draw(self, surface, force=False):\n if self.redraw or force:\n surface.blit(self.image, self.loc)\n self.redraw = False",
"def draw(self):\n if self.node:\n if self.async:\n if self.cancel_draw:\n self.after_cancel(self.cancel_draw)\n self.cancel_draw = self.after(3, self._draw)\n else: self._draw()",
"def draw (self):\n screen = self.screen\n dirty = False\n for z, displays in self.layers.iteritems():\n for display in displays:\n drew = display.draw(screen)\n # if made changes to the surface\n if drew:\n # set any displays that overlap this one dirty\n for d in display.overlapped:\n d.dirty = True\n dirty |= drew\n return dirty",
"def draw(self):\n if self.master != None :\n fill = Cell.FILLED_COLOR_BG\n outline = Cell.FILLED_COLOR_BORDER\n\n if not self.fill:\n fill = Cell.EMPTY_COLOR_BG\n outline = Cell.EMPTY_COLOR_BORDER\n walls[self.ord][self.abs] = 0\n else:\n walls[self.ord][self.abs] = 1\n\n\n xmin = self.abs * self.size\n xmax = xmin + self.size\n ymin = self.ord * self.size\n ymax = ymin + self.size\n self.master.create_rectangle(xmin, ymin, xmax, ymax, fill = fill, outline = outline)",
"def draw(self, canvas) -> bool:\n return False",
"def update(self, force = False):\n\t\tfor c in self.components:\n\t\t\tc.update(force)\n\t\tself.draw(force)",
"def drawChild(self,x,y,z,thing):\n self.z = z\n \n \n \n if not thing.visable:\n return \n \n \n self.color = Vec4(*thing.color)\n \n realX = x+float(thing._x)\n realY = y+float(thing._y)\n \n if thing.style:\n style = gui.theme.define(thing.style)\n if style:\n style.draw(\n self,\n (realX,realY),\n (float(thing._width),float(thing._height)))\n \n if thing.clips:\n # set clip stuff\n self.pushClip(realX,realY,realX+thing._width,realY+thing._height)\n \n if thing.icon:\n rect = self.atlas.getRect(thing.icon)\n if rect: \n self.color = thing.color\n u,v,us,vs = rect\n self.rectStreatch((realX,realY,us,vs),(u,v,us,vs))\n \n if thing.text:\n # draw text stuff\n if thing.editsText:\n self.drawEditText(\n gui.theme.defineFont(thing.font),\n thing.text,\n realX,\n realY,\n thing.selection,\n thing.caret)\n else:\n self.drawText(\n gui.theme.defineFont(thing.font),\n thing.text,\n realX,\n realY)\n \n \n if thing.children:\n for child in thing.children:\n z += 1\n self.drawChild(realX,realY,z,child)\n \n if thing.clips:\n self.popClip()",
"def on_draw_event(self, widget, ctx):\n # the _need_redraw flag doesnt work. it sometimes prevents\n # the rendering and leaving the canvas blank\n #if self._need_redraw:\n self._renderer.set_context(ctx)\n allocation = self.get_allocation()\n x, y, w, h = allocation.x, allocation.y, allocation.width, allocation.height\n self._render_figure(w, h)\n #self._need_redraw = False\n\n return False # finish event propagation?",
"def draw(self):\n if self.master != None :\n outline = Cell.FILLED_COLOR_BORDER if self.fill else Cell.EMPTY_COLOR_BORDER\n\n xmin = self.abs * self.size\n xmax = xmin + self.size\n ymin = self.ord * self.size\n ymax = ymin + self.size\n\n self.master.create_rectangle(xmin, ymin, xmax, ymax, fill = self.fill, outline = outline)",
"def on_draw(self):\n # draw everything",
"def update(self, force=False):\n self.axes.figure.canvas.draw()\n if self.colorbar is not None:\n if force is True:\n self.colorbar.update_bruteforce(self.pixels)\n else:\n self.colorbar.update_normal(self.pixels)\n self.colorbar.draw_all()",
"def draw(self):\n x = self.displacement.x + self.physics_canvas.origin_x\n y = self.displacement.y + self.physics_canvas.origin_y\n self.canvas_id = self.physics_canvas.canvas.create_rectangle(x-10,y+10,x+10,y-10, fill='black') # e.g.",
"def draw(self):\n if self.master != None :\n fill = self.fill\n #fill = Cell.FILLED_COLOR_BG\n outline = Cell.EMPTY_COLOR_BORDER\n\n #if not self.fill:\n # fill = Cell.EMPTY_COLOR_BG\n # outline = Cell.EMPTY_COLOR_BORDER\n\n xmin = self.abs * self.size\n xmax = xmin + self.size\n ymin = self.ord * self.size\n ymax = ymin + self.size\n\n self.master.create_rectangle(xmin, ymin, xmax, ymax, fill = fill, outline = outline)",
"def check(self, context):\n self.set_prompts_from_properties()\n self.product.obj_x.location.x = self.width\n self.product.obj_y.location.y = -self.depth\n props_closet.update_render_materials(self, context)\n # self.update_product_size()\n return True",
"def check(self, context):\n self.set_prompts_from_properties()\n self.product.obj_x.location.x = self.width\n self.product.obj_y.location.y = -self.depth\n props_closet.update_render_materials(self, context)\n # self.update_product_size()\n return True",
"def draw(self):\n\n for item in self.vis:\n item.undraw()\n self.render()\n for item in self.vis:\n item.draw(self.win)\n self.drawn = True",
"def should_redraw_board(self):\n return True",
"def _prepare_draw(self, view=None):\n return True",
"def draw(self, thing):\n thing.draw(self, Point([2,2]), flip=1)\n\n # configure the scroll region\n bbox = Canvas.bbox(self.canvas, ALL)\n self.canvas.configure(scrollregion=bbox)",
"def drawChanges(self):\n self.draw(wait=False)\n draw(self.values,color='yellow',bbox=None,clear=False,shrink=self.shrink)",
"def draw(self, ctx):\n self.set_size(self.width, self.available_height) \n #Drawing cell lines\n for i in range(0, (max(self.available_width,int(self.width)) / self.cell_width) + 1):\n ctx.move_to(i * self.cell_width, 0)\n ctx.line_to(i * self.cell_width, self.available_height)\n ctx.set_line_width(1)\n red = float(self.get_style().fg[gtk.STATE_INSENSITIVE].red) / 65535\n green = float(self.get_style().fg[gtk.STATE_INSENSITIVE].green) / 65535\n blue = float(self.get_style().fg[gtk.STATE_INSENSITIVE].blue) / 65535\n ctx.set_source_rgba(red, green, blue, 0.3)\n ctx.stroke()\n greatest = self.calculate_greatest() \n # Drawing scale lines\n step = greatest / 5\n ctx.save()\n ctx.set_dash([5],5)\n for i in range(int(step), int(greatest),5):\n ctx.move_to(0, self.available_height - (self.available_height - 20) * i / greatest)\n ctx.line_to(max(self.available_width,int(self.width)), self.available_height - (self.available_height - 20) * i / greatest)\n ctx.set_source_rgba(red,green,blue,0.3)\n ctx.stroke()\n\n ctx.restore()\n # Drawing the diagram\n loadingCopy = copy.deepcopy(self.loading)\n colorIndex = 0\n loadingKeys = loadingCopy.keys()\n loadingKeys.sort()\n for key in loadingKeys:\n while loadingCopy[key] != []:\n x1, y1 = loadingCopy[key].pop(0)\n if loadingCopy[key] != []:\n x2, y2 = loadingCopy[key][0]\n else:\n x2 = self.duration\n ctx.line_to (x1 * self.cell_width, self.available_height - (self.available_height - 20) * y1 / greatest)\n ctx.line_to (x2 * self.cell_width, self.available_height - (self.available_height - 20) * y1 / greatest)\n \n ctx.set_line_width(2)\n ctx.set_source_rgba(self.colors[colorIndex][0], self.colors[colorIndex][1], self.colors[colorIndex][2],0.5)\n ctx.stroke()\n colorIndex = (colorIndex + 1) % 11",
"def simple_canvas(self):\n self.canvas = Canvas()\n\n self.box1 = Box()\n self.canvas.add(self.box1)\n self.box1.matrix.translate(100, 50)\n self.box1.width = 40 \n self.box1.height = 40 \n self.box1.request_update()\n\n self.box2 = Box()\n self.canvas.add(self.box2)\n self.box2.matrix.translate(100, 150)\n self.box2.width = 50 \n self.box2.height = 50 \n self.box2.request_update()\n\n self.line = Line()\n self.head = self.line.handles()[0]\n self.tail = self.line.handles()[-1]\n self.tail.pos = 100, 100\n self.canvas.add(self.line)\n\n self.canvas.update_now()\n self.view = GtkView()\n self.view.canvas = self.canvas\n from gi.repository import Gtk\n win = Gtk.Window()\n win.add(self.view)\n self.view.show()\n self.view.update()\n win.show()\n\n self.tool = ConnectHandleTool(self.view)",
"def validate(self):\n self.parent.copyCurrentWinState(self.pltw)\n if self.incr:\n self.pltw.blklst[self.blkno][self.ypos] = self.data[2]\n else:\n self.pltw.blklst[self.blkno][self.ypos] = self.data[2][::-1]\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.pltw.updatePlot()\n self.parent.updateUI()\n self.hide()",
"def draw (self):\n screen = self.screen\n dirty = False\n for display in self.displays:\n dirty |= display.draw(screen)\n return dirty",
"def change(self):\r\n\r\n # If checkboxes are available, check status and set boat speed reference line visibility accordingly.\r\n if self.cb:\r\n if self.cb_bt.checkState() == QtCore.Qt.Checked:\r\n for item in self.bt:\r\n item.set_visible(True)\r\n else:\r\n for item in self.bt:\r\n item.set_visible(False)\r\n # GGA\r\n if self.cb_gga.checkState() == QtCore.Qt.Checked:\r\n for item in self.gga:\r\n item.set_visible(True)\r\n # self.gga[0].set_visible(True)\r\n elif self.gga is not None:\r\n for item in self.gga:\r\n item.set_visible(False)\r\n # self.gga[0].set_visible(False)\r\n # VTG\r\n if self.cb_vtg.checkState() == QtCore.Qt.Checked:\r\n for item in self.vtg:\r\n item.set_visible(True)\r\n # self.vtg[0].set_visible(True)\r\n elif self.vtg is not None:\r\n for item in self.vtg:\r\n item.set_visible(False)\r\n # self.vtg[0].set_visible(False)\r\n\r\n # Draw canvas\r\n self.canvas.draw()",
"def _redraw(self, render_as_done: \"bool\" = False) -> \"None\":\n if not self.drawn:\n cast(\"Application\", super())._redraw(render_as_done=True)\n self.drawn = True",
"def redraw(self):\n raise NotImplementedError()"
] | [
"0.6380005",
"0.593526",
"0.5855134",
"0.572638",
"0.56879467",
"0.564566",
"0.56153196",
"0.5614835",
"0.5576688",
"0.55423486",
"0.5540616",
"0.55378747",
"0.55355537",
"0.5513865",
"0.55125195",
"0.54864395",
"0.5464399",
"0.5464399",
"0.54607123",
"0.5451927",
"0.5438394",
"0.5435861",
"0.53909993",
"0.53885704",
"0.53713703",
"0.53533316",
"0.53238815",
"0.53032726",
"0.53024924",
"0.5291124"
] | 0.5939307 | 1 |
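For illustration, a minimal sketch (an assumption, not the dataset's actual Part class) of the draw contract described in the entry above: a part repaints only when it is dirty or when force is set, delegates to its children, and reports whether anything was painted outside its own region.

class SketchPart:
    def __init__(self, children=None):
        self.children = children or []
        self.dirty = True

    def draw(self, force=False):
        painted_outside = False
        if self.dirty or force:
            self._paint()      # draw own background/border before the children
            self.dirty = False
            force = True       # children must repaint over the fresh background
        for child in self.children:
            painted_outside |= child.draw(force)
        return painted_outside

    def _paint(self):
        pass  # a real part would blit onto the shared canvas here


if __name__ == '__main__':
    root = SketchPart(children=[SketchPart(), SketchPart()])
    print(root.draw())            # False: nothing reported outside its region
    print(root.draw(force=True))  # False again, but every part repainted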
Returns True if the coord is in the Part or any of its children. It may be a better idea to call the get_part_containing function instead, which returns the lowest-level Part that contains the coord (none of its children contain the coord, but the Part does). | def contains(self, coord):
# print(coord, self.position, self.size)
return (0 <= coord[0] - self.position[0] < self.size[0] and
0 <= coord[1] - self.position[1] < self.size[1]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_part_containing(self, coord):\n # print('in', self)\n for k, child in self.children.items():\n # print('try', k, child)\n if child.ignore:\n # print('ignore', k, child)\n continue\n if child.contains(coord):\n # print('contained', k, child)\n return child.get_part_containing(coord)\n # Could not find any children containing the coord, so we must be at the\n # lowest level already\n return self",
"def in_node(self, coord):\n for axis in range(3):\n if coord[axis] < self.mins[axis] or coord[axis] > self.maxs[axis]:\n return False\n\n return True",
"def active_piece_contains(self, coords):\n return coords in self.active_piece",
"def is_state_a_child_by_coord(x, y, width, height, parent: State) -> bool:\n if x+1 >= parent.x and y+1 >= parent.y and x + width - 1 <= parent.x + parent.width:\n if y + height - 1 <= parent.y + parent.height:\n return True\n return False",
"def is_inside(self, x: int, y: int) -> bool:\n pass",
"def contains(self, pt):\n x,y = pt.as_tuple()\n return (self.left <= x <= self.right and\n self.top <= y <= self.bottom)",
"def __contains__(self, pos):\n if pos in self._coordinates:\n return True\n return False",
"def particle_is_inside(self, particle):\n return self.in_box_bounds(particle.position)",
"def isIn(self, coor, rec):\n x, y = coor[0], coor[1]\n top, bottom, left, right = rec[1][1], rec[0][1], rec[0][0], rec[1][0]\n # print(top, bottom, left, right)\n if left <= x <= right and bottom <= y <= top:\n return True\n else:\n return False",
"def is_intersect(self, coord: Union[Coordinate, frozenset[Coordinate]]) -> bool:\n if isinstance(coord, Coordinate):\n coord = frozenset((Coordinate,))\n return len(self._coords.intersection(coord)) > 0",
"def is_on_intersection(intersection, coord):\n return intersection.is_on_intersection(coord)",
"def is_inside(self, p):\n s, t = self.get_barycentric_coord(p)\n if 0 <= s <= 1 and 0 <= t <= 1 and s + t <= 1:\n return True\n else:\n return False",
"def __contains__(self, pid):\n return self.contains_child(pid) or self.contains_parent(pid)",
"def contains(self, loc):\n if loc.isLeft(self.topLeft): return False\n if loc.isRight(self.botRight): return False\n if loc.isAbove(self.topLeft): return False\n if loc.isBelow(self.botRight): return False\n return True",
"def is_island(self):\n return bool(not self.children.exists() and not self.parents.exists())",
"def isPointInside(self, p):\n x,y = p[0], p[1]\n A = self.left <= x <= self.right\n B = self.bottom <= y <= self.top\n return (A and B)",
"def isPointInside(self, p):\n x,y = p[0], p[1]\n A = self.left <= x <= self.right\n B = self.bottom <= y <= self.top\n return (A and B)",
"def is_inside(inner_path, outer_path):\r\n if not hasattr(inner_path, 'bounding_box'):\r\n inner_path.bounding_box = CutPlanner.bounding_box(inner_path)\r\n if not hasattr(outer_path, 'bounding_box'):\r\n outer_path.bounding_box = CutPlanner.bounding_box(outer_path)\r\n if outer_path.bounding_box[0] > inner_path.bounding_box[0]:\r\n # outer minx > inner minx (is not contained)\r\n return False\r\n if outer_path.bounding_box[1] > inner_path.bounding_box[1]:\r\n # outer miny > inner miny (is not contained)\r\n return False\r\n if outer_path.bounding_box[2] < inner_path.bounding_box[2]:\r\n # outer maxx < inner maxx (is not contained)\r\n return False\r\n if outer_path.bounding_box[3] < inner_path.bounding_box[3]:\r\n # outer maxy < inner maxy (is not contained)\r\n return False\r\n if outer_path.bounding_box == inner_path.bounding_box:\r\n if outer_path == inner_path: # This is the same object.\r\n return False\r\n if not hasattr(outer_path, 'vm'):\r\n outer_path = Polygon([outer_path.point(i / 100.0, error=1e4) for i in range(101)])\r\n vm = VectorMontonizer()\r\n vm.add_cluster(outer_path)\r\n outer_path.vm = vm\r\n for i in range(101):\r\n p = inner_path.point(i / 100.0, error=1e4)\r\n if not outer_path.vm.is_point_inside(p.x, p.y):\r\n return False\r\n return True",
"def is_inside(self, coordinates: tuple) -> bool:\n if len(coordinates) != 2:\n raise IndexError(\"Coordinates consist of x and y\")\n x, y = coordinates\n if (self.MIN_X <= x <= self.MAX_X) and (self.MIN_Y <= y <= self.MAX_Y):\n return True\n else:\n return False",
"def is_within(\r\n self,\r\n y: Tuple[int, int],\r\n x: Tuple[int, int],\r\n expansion: int = 0,\r\n ) -> Tuple[bool, bool, Dict[str, bool]]:\r\n if expansion != 0:\r\n # expand tile boundaries\r\n y = (min(0, y[0] - expansion), y[1] + expansion)\r\n x = (min(0, x[0] - expansion), x[1] + expansion)\r\n\r\n if self.anatomical_structure is None:\r\n # do not proceed without anatomical structure\r\n return True, True, {\"any\": True}\r\n points = list(product(x, y))\r\n paths = {}\r\n for i, region in enumerate(self.anatomical_structure):\r\n region_name = region[\"properties\"][\"classification\"][\"name\"]\r\n for j, coords in enumerate(region[\"geometry\"][\"coordinates\"]):\r\n coords = np.array(coords, dtype=np.int32).squeeze()\r\n paths[f\"{region_name}_{i}_{j}\"] = mpath.Path(coords)\r\n # if any corner is within the structure, consider the whole tile within\r\n within_region = {\r\n region: np.any(path.contains_points(points))\r\n for region, path in paths.items()\r\n }\r\n within_any = np.any(list(within_region.values()))\r\n within_cortex = np.any(\r\n [\r\n \"cortex\" in region.lower() and within\r\n for region, within in within_region.items()\r\n ]\r\n )\r\n return within_any, within_cortex, within_region",
"def contains_pt(self, pt):\n x, y = pt\n if not self.x - self.radius < x < self.x + self.radius:\n return False\n if not self.y - self.radius < y < self.y + self.radius:\n return False\n return True",
"def is_inside(pos):\r\n\t\trow, col = pos\r\n\t\treturn 0 <= row and row < num_rows and \\\r\n\t\t\t0 <= col and col < num_cols",
"def in_geofence(self, coordinates):\n\t\tcoords_transformed = ogr.Geometry(ogr.wkbPoint)\n\t\tcoords_transformed.AddPoint(*coordinates)\n\t\treturn self.polygon.Contains(coords_transformed)",
"def _is_in_grid(self, atom_coordinate):\n return c_is_in_grid(atom_coordinate, self._origin_crd, self._uper_most_corner_crd)",
"def inside(self, x, on_boundary):\n return bool((near(x[0], xmin) or near(x[1], ymin)) and \\\n (not ((near(x[0], xmin) and near(x[1], ymax)) \\\n or (near(x[0], xmax) and near(x[1], ymin)))) \\\n and on_boundary)",
"def is_piece(self, piece_coords):\n for piece in self.game_pieces:\n if piece_coords[0] == piece.x and piece_coords[1] == piece.y:\n return True\n return False",
"def contains_parent(self, pid):\n return pid in self._parent_ids",
"def check_inside(self, pos):\n x,y = pos\n return x >= self.posx and x <= self.posx + self.sizex and y >= self.posy and y <= self.posy + self.sizey",
"def contains ( self, pos ):\n \n poly = Polygon(array(self.edges).reshape(-1,2)[:,0],array(self.edges).reshape(-1,2)[:,1])\n dists = poly.is_inside(pos[0,:],pos[1,:]) \n if self.include_border:\n inds = dists >= -self.abs_tol\n else:\n inds = dists > 0\n \n \n # if none inside, take nearest\n if ~inds.any() and self.default_nearest:\n dr2 = array(self.edges).reshape(-1,2).mean(0)\n inds[argmin(dr2)] = True\n \n return inds",
"def contains ( self, pos ):\n # make sure xmin is minimum etc\n xmin = min(self.x_min,self.x_max)\n xmax = max(self.x_min,self.x_max)\n ymin = min(self.y_min,self.y_max)\n ymax = max(self.y_min,self.y_max)\n \n abs_tol = self.abs_tol\n # get pos indices inside rectangle (* == and)\n if self.include_border:\n inds = (pos[0, :] - xmin > -abs_tol) * \\\n (pos[0, :] - xmax < abs_tol) * \\\n (pos[1, :] - ymin > -abs_tol) * \\\n (pos[1, :] - ymax < abs_tol)\n else:\n inds = (pos[0, :] - xmin > abs_tol) * \\\n (pos[0, :] - xmax < -abs_tol) * \\\n (pos[1, :] - ymin > abs_tol) * \\\n (pos[1, :] - ymax < -abs_tol)\n \n \n # if none inside, take nearest\n if ~inds.any() and self.default_nearest:\n x = (xmin + xmax) / 2.0\n y = (ymin + ymax) / 2.0\n dr2 = (pos[0, :] - x)**2 + (pos[1, :] - y)**2\n inds[argmin(dr2)] = True\n \n return inds.astype(bool)"
] | [
"0.75447255",
"0.6645123",
"0.66114485",
"0.6500257",
"0.6442654",
"0.64163774",
"0.6345653",
"0.63166255",
"0.61932963",
"0.6176176",
"0.6176057",
"0.61623186",
"0.61566186",
"0.6145891",
"0.6117253",
"0.60773057",
"0.60773057",
"0.6069949",
"0.6057143",
"0.6003461",
"0.59902805",
"0.5985761",
"0.59245324",
"0.59219533",
"0.59198684",
"0.5913187",
"0.590567",
"0.5896447",
"0.5867926",
"0.5851909"
] | 0.7134822 | 1 |
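For illustration, a runnable sketch (not part of the dataset) of the contains() hit test above: a part occupies an axis-aligned rectangle given by position and size, and a coord hits it when both offsets fall inside [0, size). The RectPart stand-in is an assumption for the example only.

class RectPart:
    def __init__(self, position, size):
        self.position = position
        self.size = size

    def contains(self, coord):
        # Offset of the coord from the part's top-left corner must be within the size.
        return (0 <= coord[0] - self.position[0] < self.size[0] and
                0 <= coord[1] - self.position[1] < self.size[1])


if __name__ == '__main__':
    panel = RectPart(position=(10, 10), size=(80, 24))
    print(panel.contains((10, 10)))  # True: the top-left corner is inside
    print(panel.contains((90, 10)))  # False: the right edge is exclusive
    print(panel.contains((50, 33)))  # True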
Returns the lowest Part that contains the coord (a part that contains the coord while none of its children do). Assumes that self already contains the coord! Please check this if you are not sure! | def get_part_containing(self, coord):
# print('in', self)
for k, child in self.children.items():
# print('try', k, child)
if child.ignore:
# print('ignore', k, child)
continue
if child.contains(coord):
# print('contained', k, child)
return child.get_part_containing(coord)
# Could not find any children containing the coord, so we must be at the
# lowest level already
return self | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def findMin(self):\n curr = self\n while curr.hasLeftChild():\n curr = curr.leftChild\n return curr",
"def get_parent_by_coord(x, y, w, h, states: [State]) -> State:\n parents = [state for state in states if is_state_a_child_by_coord(x, y, w, h, state)]\n if not parents:\n return None\n parents.sort(key = lambda st: st.x, reverse=True)\n return parents[0]",
"def find_min(self):\n return self.root and self.root.find_min()",
"def find_min(self):\n \n return self.root and self.root.find_min()",
"def find_min(self):\n\n if self.left:\n return self.left.find_min()\n\n return self.data",
"def find_min(self):\n current = self\n while current.left is not None:\n current = current.left\n return current",
"def find_smallest(self):\n return self._find_smallest(self.root)",
"def best_cell(self, coord):\n if coord[0] == self.pos[0] and coord[1] == self.pos[1]:\n return self.pos\n\n # Get all available cells\n free_cells = self.get_moves()\n smal_dist = float(\"Inf\")\n\n for cell in free_cells:\n d_x = abs(coord[0] - cell[0])\n d_y = abs(coord[1] - cell[1])\n dist = (d_x**2 + d_y**2)**0.5\n if dist < smal_dist:\n smal_dist = dist\n new_cell = cell\n\n return new_cell",
"def getMinNode(self):\n currentNode = self.openList[0]\n for node in self.openList:\n if node.g + node.h < currentNode.g + currentNode.h:\n currentNode = node\n return currentNode",
"def min(self):\n return self._min_coords",
"def _update_min(self):\n tmp = self\n while tmp.left is not None:\n tmp = tmp.left\n return tmp.parent.key",
"def minchild(self, pos):\n minpos = minkey = None\n for c in self.children(pos):\n if minkey == None or self.heap[c].key < minkey:\n minkey, minpos = self.heap[c].key, c\n return minpos",
"def find_min(self):\n return min(self.nodes, key=int)",
"def _find_min(self):\n if self.is_empty(): # is_empty inherited from base class\n raise Empty('Priority queue is empty')\n small = self._data.first()\n walk = self._data.after(small)\n while walk is not None:\n if walk.element() < small.element():\n small = walk\n walk = self._data.after(walk)\n return small",
"def get_element(mouse): # pylint: disable=inconsistent-return-statements\n point = wtl.Point(mouse.x - 5, mouse.y - 5)\n\n with data_lock:\n if not current_view:\n return\n\n smallest_element, smallest_area = None, 999999\n for e in current_view.snapshot.elements:\n if point in e.bounds and e.bounds.area < smallest_area:\n smallest_area, smallest_element = e.bounds.area, e\n\n return smallest_element",
"def extract_min(self):\r\n if self.is_empty():\r\n return None\r\n min_elem = self.heap_array[0]\r\n aux_elem = self.heap_array.pop()\r\n\r\n if self.is_empty() == False:\r\n self.heap_array[0] = aux_elem\r\n\r\n current_index = 0\r\n left_child_index = (2 * current_index) + 1\r\n current_value = self.heap_array[current_index]\r\n\r\n while left_child_index < len(self.heap_array): # loop that will repeat until no violation of the minheap properties exist\r\n current_min = current_value\r\n\r\n for i in range(2): # this loop is in place so that both children are compared and the smaller of the two is chosen \r\n if (left_child_index + i) > len(self.heap_array)-1: # condition to avoid out of bounds\r\n continue\r\n else:\r\n if int(self.heap_array[left_child_index + i]) < int(current_min): # if child is smaller than parent\r\n current_min = self.heap_array[left_child_index + i ] # set current minimum value\r\n current_min_index = left_child_index + i # and cureent minimim index( index where current minimum value is found )\r\n if current_min == current_value: # if no property is broken (in this case, the parent is actually less than its' children)\r\n break\r\n else: # if propert is broken\r\n self.heap_array[current_index], self.heap_array[current_min_index] = self.heap_array[current_min_index], self.heap_array[current_index] # swap the elements \r\n current_index = current_min_index\r\n left_child_index = int((2 * current_index) + 1)\r\n return min_elem",
"def min_child(self, index):\n if self.empty():\n return None\n if self._has_left(index):\n left = self._left(index)\n small_child = left\n if self._has_right(index):\n right = self._right(index)\n if self._data[right] < self._data[left]:\n small_child = right\n if self._data[right] == self._data[left]:\n small_child = right\n return small_child\n return None",
"def min(self):\n return self.root.leftmost",
"def _findMinNode(self, s):\n\n minNode = None\n minVal = self.inf\n for vertex in s:\n if self.dist[vertex] < minVal:\n minVal = self.dist[vertex]\n minNode = vertex\n return minNode",
"def findLowerNeedles(self, pt):\r\n nodes = slicer.util.getNodes('manual-seg_*')\r\n candidates = []\r\n validNeedles = self.findNeedles()\r\n for node in nodes.values():\r\n name = node.GetName()\r\n nb = int(name.split('_')[1]) # get needle number\r\n if nb in validNeedles:\r\n hp = self.getNeedleHighestPoint(nb)\r\n if hp[2] < pt[2]:\r\n theta = self.angle(self.getNeedleOrientation(nb),-self.getOrientationVect(pt, hp))\r\n candidates.append([name, min(theta,abs(theta-np.pi))])\r\n\r\n return candidates",
"def min(self):\n return self._min(self.root)",
"def min(self):\n no = self.root\n if no:\n no = self.__search_node_min_dir(no)\n if no:\n return no.valor\n return None",
"def _get_min_child(self, parent_idx):\n if 2 * parent_idx + 2 > len(self._heap) - 1:\n return 2 * parent_idx + 1\n if self._heap[2 * parent_idx + 1] < self._heap[2 * parent_idx + 2]:\n return 2 * parent_idx + 1\n return 2 * parent_idx + 2",
"def get_min(self):\n if self.root is None: # BC1\n return float('+inf')\n\n current = self.root\n while current.left is not None: # Traverse like a linked-list\n current = current.left\n\n return current.key",
"def smallest (self):\n return self.pointers[0].smallest()",
"def get_min_position(self):\n raise NotImplementedError()",
"def locate_point(self, coord):\n lowest_lat = self.lower_left[0]\n leftmost_lng = self.lower_left[1]\n dist_lat = utils.haversine((coord[0], leftmost_lng), self.lower_left)*1000 # in meters\n dist_lng = utils.haversine((lowest_lat, coord[1]), self.lower_left)*1000 # in meters\n grid_coord = (floor(dist_lng/self.distance), floor(dist_lat/self.distance))\n if grid_coord in self.cells:\n return grid_coord\n return None",
"def prim_solve(self):\n\n\t\tmin_span_tree = Graph([self.graph.vertices[0]], [])\n\t\tdup_graph = self.graph.duplicate()\n\n\t\tfor i in range(len(self.graph.vertices) - 1):\n\t\t\tneighbour_edges = []\n\t\t\tfor cur in min_span_tree.vertices:\n\t\t\t\tneighbour_edges += dup_graph.get_neighbour_edges(cur)\n\n\t\t\tneighbour_edges.sort(key=lambda x: x[2])\n\t\t\tshortest_edge = neighbour_edges[0]\n\t\t\tnew_node = shortest_edge[0] if shortest_edge[1] in min_span_tree.vertices else shortest_edge[1]\n\n\t\t\tmin_span_tree.edges.append(shortest_edge)\n\t\t\tmin_span_tree.vertices.append(new_node)\n\t\t\tdup_graph.edges.remove(shortest_edge)\n\n\t\treturn min_span_tree",
"def findNearset(x,y,lon,lat):\n dist = np.sqrt( (lon - x)**2 + (lat - y)**2)\n\n return np.argwhere(dist==dist.min())[0][0]",
"def _previous(self, coord):\n candidates = [(coord[0] - 1, coord[1]), (coord[0] + 1, coord[1]), (coord[0], coord[1] - 1), (coord[0], coord[1] + 1)]\n for candidate in (x for x in candidates if 0 <= x[0] < self.dimension and 0 <= x[1] < self.dimension):\n if self.board[candidate[0]][candidate[1]].next == self.board[coord[0]][coord[1]]:\n return candidate"
] | [
"0.65753007",
"0.64252496",
"0.6329845",
"0.6284443",
"0.6221933",
"0.6202329",
"0.61960083",
"0.61666095",
"0.6122884",
"0.60579133",
"0.601533",
"0.59646225",
"0.59528434",
"0.5939711",
"0.58846384",
"0.5878549",
"0.5874045",
"0.5835615",
"0.58301",
"0.5770619",
"0.5761702",
"0.575791",
"0.5734797",
"0.5733435",
"0.57292926",
"0.57271516",
"0.5725218",
"0.57149065",
"0.56998765",
"0.56788903"
] | 0.7808834 | 0 |
splink score histogram diagnostic plot public API function Compute a histogram using the provided buckets and plot the result. | def splink_score_histogram(
df_e: DataFrame,
spark: SparkSession,
buckets=None,
score_colname=None,
symmetric=True,
):
rows = _calc_probability_density(
df_e,
spark=spark,
buckets=buckets,
score_colname=score_colname,
symmetric=symmetric,
)
return _create_probability_density_plot(rows) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def makeHistogram(values, numBins, xLabel, yLabel, title=None):",
"def plot_hitstogram_graph(data_values, title,\r\n number_of_keys,\r\n max_val,\r\n file_in):\r\n\r\n # bins = max(data_values)\r\n # pylab.hist(data_values, facecolor='blue')\r\n pylab.hist(data_values, facecolor='green', alpha=0.6)\r\n pylab.grid(True)\r\n pylab.title(title + \"_histogram\")\r\n pylab.xlabel('number in cluster')\r\n pylab.ylabel('Count')\r\n pylab.savefig(file_in + \"_\" + title + '_histogram.png')\r\n plt.close()\r\n pylab.close()\r\n os.chdir('..')",
"def plot_histogram(self,**kwargs):\n axes = []\n for i in range(self.score_length):\n fig = plt.figure()\n scores = np.array([s[i] for s in self.scores_list])\n probs,bins,patches = plt.hist(scores,label=\"Sample {}\".format(self.labels[i]), **kwargs)\n plt.vlines(self.xhat,fig.get_axes().get_ylim(),label='Mean',color='r')\n plt.legend()\n axes.append(fig.get_axes())\n return axes",
"def add_histogram(self, tag, values, global_step=None, bins='tensorflow'):\n values = make_np(values)\n self.vis.histogram(make_np(values), opts={'title': tag})",
"def plot_hist(datasets, bins, labels, alphas):\n assert len(labels) == len(datasets)\n assert len(alphas) == len(datasets)\n plt.figure(figsize=[9,6])\n for idx, data in enumerate(datasets):\n plt.hist(data, bins=bins[idx], density=True, label=labels[idx], alpha=alphas[idx])\n plt.xlabel(\"PHQ score\")\n plt.ylabel(\"Probability\")\n plt.legend()\n plt.savefig(\"saved_plots/hist_\"+\"_\".join(labels)+\".png\")\n plt.show()",
"def plot_hist(self):\n labels = [self.get_class_str(action, obj)\n for (action, obj, subj, rec, beg, end) in self.action_clips]\n visualize.plot_hist(labels, proportion=True)",
"def plot_histogram(self,ax=None,**kwargs):\n if not ax:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n probs,bins,patches = ax.hist(self.scores_list,normed=True,label=\"Sample\",**kwargs)\n ax.vlines(self.xhat,*ax.get_ylim(),label='Mean',color='r')\n ax.legend()\n return ax,probs,bins",
"def plot_histograms(p_hist, p_hbins, title, figure_path=None):\n\n base_fig_size = 7\n h_fig = base_fig_size\n w_fig = base_fig_size * 4\n\n fig = plt.figure(figsize=(w_fig, h_fig))\n fig.suptitle(title)\n iplot = 0\n\n p_Nx, p_Ny = np.amax(p_hbins, axis=1) + 1\n\n p_hist = np.reshape(p_hist, (4, p_Ny, p_Nx))\n\n iplot += 1\n p_plot_title = 'Spectral BEHistogram Amp (log10 of counts)'\n p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)\n p_im = p_plot.imshow(np.rot90(np.log10(p_hist[0])), interpolation='nearest')\n p_plot.axis('tight')\n fig.colorbar(p_im, fraction=0.1)\n\n iplot += 1\n p_plot_title = 'Spectral BEHistogram Phase (log10 of counts)'\n p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)\n p_im = p_plot.imshow(np.rot90(np.log10(p_hist[1])), interpolation='nearest')\n p_plot.axis('tight')\n fig.colorbar(p_im, fraction=0.1)\n\n iplot += 1\n p_plot_title = 'Spectral BEHistogram Real (log10 of counts)'\n p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)\n p_im = p_plot.imshow(np.rot90(np.log10(p_hist[2])), interpolation='nearest')\n p_plot.axis('tight')\n fig.colorbar(p_im, fraction=0.1)\n\n iplot += 1\n p_plot_title = 'Spectral BEHistogram Imag (log10 of counts)'\n p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)\n p_im = p_plot.imshow(np.rot90(np.log10(p_hist[3])), interpolation='nearest')\n p_plot.axis('tight')\n fig.colorbar(p_im, fraction=0.1)\n\n if figure_path:\n plt.savefig(figure_path, format='png')\n\n return fig",
"def makeHistogram(values, numBins, xLabel, yLabel, title=None):\n pylab.hist(values, bins = numBins)\n pylab.xlabel(xLabel)\n pylab.ylabel(yLabel)\n if not title == None:\n pylab.title(title)\n pylab.show()",
"def plot_random_schedules(scores):\n\n\tplt.hist(scores, bins = len(scores))\n\tplt.ylabel(\"Score\")\n\tplt.xlabel(\"Times\")\n\tplt.title(\"Histogram random schedules\")\n\tplt.show()",
"def makeHistogram(values, numBins, xLabel, yLabel, title=None):\r\n pylab.hist(values, bins = numBins)\r\n pylab.xlabel(xLabel)\r\n pylab.ylabel(yLabel)\r\n if title != None:\r\n pylab.title(title)\r\n pylab.show()",
"def get_gridpoint_histograms(self):\n\n ind_array = np.indices(self.results_array.shape)\n\n def results_array_histograms(x, y, z):\n if isinstance(self.results_array[x][y][z], tuple):\n num_zeros = self.tup_max_length - len(self.results_array[x][y][z])\n if num_zeros != 0:\n print('Num_zeros: ', num_zeros)\n hist_arr = np.array(self.results_array[x][y][z])\n # hist, bin_edges = np.histogram(hist_arr, bins=20)\n colour_dict = {\"acceptor\": \"r\", \"donor\": \"b\", \"apolar\": \"y\"}\n hist_name = self.prot_name + '_' + self.probe + '_{}_{}_{}'.format(x, y, z)\n\n plt.figure(1)\n plt.hist(hist_arr, bins=20, color=colour_dict[self.probe])\n plt.figtext(0.6, 0.8, ('Number of zero values:' + str(num_zeros)))\n plt.title('Score distribution at point x:{}, y:{}, z:{}'.format(x, y, z))\n plt.xlabel('Fragment hotspot score')\n plt.ylabel('Frequency')\n plt.savefig(join(self.out_dir, hist_name))\n plt.close()\n\n print('Generating Histograms')\n vresults_array_histograms = np.vectorize(results_array_histograms)\n vresults_array_histograms(ind_array[0], ind_array[1], ind_array[2])",
"def plotPValHistogram(lXs, lYs, out=\"out.png\", title=\"title\", xax=\"xax\", yax=\"yax\"):\n\n fig = plt.Figure(figsize=(20,20))\n fig.suptitle(title, fontsize=32)\n ax = fig.add_subplot(111)\n ax.hist(lXs,lYs)\n axis_font = {'size':'28'}\n ax.set_xlabel(xax, **axis_font)\n ax.set_ylabel(yax, **axis_font)\n ax.tick_params(labelsize=20)\n canvas = FigureCanvasAgg(fig)\n canvas.print_figure(out, dpi=80)",
"def _hist(xs, bins=100, range=None, stats=('entries', 'mean', 'rms'),\n xylabels = (), stats_xypos=(0.1, 0.7),\n *args, **kargs):\n if (range==None):\n range = (np.min(xs), np.max(xs))\n cc = hst.hist(xs, bins=bins, range=range, *args, **kargs);\n if (not stats):\n return cc\n ys, xedges = np.histogram(xs, bins, range=range)\n ns = len(xs)\n sel = np.logical_and(xs >= range[0], xs <= range[1])\n nos, mean, rms = len(xs[sel]), np.mean(xs[sel]), np.std(xs[sel])\n epsilon = (1.*nos)/(1.*ns)\n ss = ''\n if ('total entries') in stats:\n ss += 'total entries {0:d} \\n'.format(ns)\n if ('entries') in stats:\n ss += 'entries {0:d} \\n'.format(nos)\n if ('mean') in stats:\n ss += 'mean {0:.3f} \\n'.format(mean)\n if ('rms') in stats:\n ss += 'rms {0:.3f} \\n'.format(rms)\n xp, yp = _xypos(xedges, ys, xf=stats_xypos[0], yf=stats_xypos[1])\n ##plt.set_label(ss)\n # plt.gca().set_label(ss)\n # plt.legend()\n plt.text(xp, yp, ss)\n return cc",
"def makeHistogram(values, numBins, xLabel, yLabel, title=None):\r\n # TODO\r\n pylab.hist(values, bins = numBins)\r\n pylab.xlabel(xLabel)\r\n pylab.ylabel(yLabel)\r\n if title != None:\r\n pylab.title(title)\r\n pylab.show()",
"def plot_histogram(self) -> None:\n\n if self.data:\n plt.hist(self.data)\n plt.title(\"Histogram of data\")\n plt.xlabel(\"data\")\n plt.ylabel(\"count\")\n else:\n raise ValueError(\"Histogram cannot be generated as no\\\n data has been provided\")",
"def plot_histogram(img):\n rgb_hist = rgb_histogram(img)\n plt.figure()\n for color, hist in rgb_hist.items():\n plt.plot(hist, color=color)\n plt.xlim([0, 256])",
"def plot_histogram(self, years_statistics):\n\n plt.hist(years_statistics, normed=True)\n plt.ylabel('Histogram');\n plt.hist(years_statistics)\n plt.title(\"Statistics for years\")\n plt.xlabel(\"Value\")\n plt.ylabel(\"Frequency\")\n plt.show()",
"def plot_histogram(hs, bins, ax=None, labels=None, title=None, **bar_params):\r\n # identify how many histogram series:\r\n if len(hs) == len(bins) - 1:\r\n nhs = 1\r\n hs = [hs]\r\n else:\r\n nhs = len(hs)\r\n if labels == None:\r\n labels = ['' for i in range(nhs)]\r\n width = (bins[1]-bins[0])/nhs\r\n x = np.array(bins[0:-1])\r\n if ax==None:\r\n f, ax = plt.subplots()\r\n for i in range(nhs):\r\n ax.bar(x + width * (i+0.5), hs[i], width=width, label=labels[i], **bar_params)\r\n if labels[0] != '':\r\n plt.legend()\r\n if title!=None:\r\n plt.title(title)\r\n return ax",
"def draw_histogram(data, # type: thelper.typedefs.ArrayType\n bins=50, # type: Optional[int]\n xlabel=\"\", # type: Optional[thelper.typedefs.LabelType]\n ylabel=\"Proportion\", # type: Optional[thelper.typedefs.LabelType]\n show=False, # type: Optional[bool]\n block=False, # type: Optional[bool]\n ): # type: (...) -> thelper.typedefs.DrawingType\n fig, ax = plt.subplots()\n ax.hist(data, density=True, bins=bins)\n if len(ylabel) > 0:\n ax.set_ylabel(ylabel)\n if len(xlabel) > 0:\n ax.set_xlabel(xlabel)\n ax.set_xlim(xmin=0)\n if show:\n fig.show()\n if block:\n plt.show(block=block)\n return fig\n plt.pause(0.5)\n return fig, ax",
"def histogram(values, title, fig_size=(4,3), path=None):\n plt.clf()\n f, ax = plt.subplots(1, figsize=fig_size)\n ax.hist(values, bins=60)\n ax.set_title(title)\n f.tight_layout()\n if(path != None):\n f.savefig(path+'/hist_'+title+'.png')",
"def drawHist(data, xLabel, unit, binSize, title):\n mean = np.mean(data)\n median = np.median(data)\n mode = stats.mode(data)[0].astype(float)\n \n q1, q3 = np.percentile(data, [25, 75])\n iqr = q3 - q1\n sigma = np.std(data)\n \n \n bins = np.arange(min(data), max(data) + 1, binSize)\n plt.style.use('dark_background')\n fig, ax = plt.subplots(figsize=(12,7))\n plt.hist(data, bins=bins, histtype='bar') \n plt.title(title)\n plt.xlabel(xLabel + \" \" + unit)\n plt.ylabel('count')\n ymax = ax.get_ylim()[1]\n ax.vlines(mean, 0, ymax, color='red', label='mean')\n ax.vlines(mean-sigma, 0, ymax, color='red', linestyle='--', \n label='mean +/- std')\n ax.vlines(mean+sigma, 0, ymax, color='red', linestyle='--')\n plt.legend()\n plt.show()\n \n print(\"Einheit: \", unit)\n print(\"Minimum: \", round(data.min(),3))\n print(\"Maximum: \", round(data.max(),3))\n print(\"Mittelwert: \", round(mean,3))\n print(\"Median: \", round(median,3))\n print(\"Modus: \", round(mode[0],3))\n print(\"Standardabweichung: \", round(sigma, 3))\n print(\"1. Quartil: \", round(q1,3))\n print(\"3. Quartil: \", round(q3,3))\n print(\"Quartilsdifferenz: \", round(iqr,3))",
"def plot_histogram_assess(assess_input, figure_output):\n\n sns.set_style(\"white\")\n raw_auc = pd.read_table(assess_input, index_col=\"Motif\")\n raw_auc = raw_auc.drop_duplicates()\n # df = df.T.drop_duplicates().T\n raw_auc = raw_auc.sort(columns=\"MNCP\", axis=0, ascending=False)\n labels = raw_auc.index\n x = 10\n if len(labels) > 50:\n x = 15\n elif len(labels) < 10:\n x = 5\n f, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1, figsize=(x, 10), sharex=True)\n a = sns.barplot(x=labels, y=raw_auc[\"AUC\"],\n palette='colorblind', x_order=labels, ax=ax1)\n b = sns.barplot(x=labels, y=raw_auc[\"MNCP\"],\n palette=\"colorblind\", x_order=labels, ax=ax2)\n c = sns.barplot(x=labels, y=raw_auc[\"Spearman\"],\n palette=\"colorblind\", x_order=labels, ax=ax3)\n d = sns.barplot(x=labels, y=raw_auc[\"Pearson\"],\n palette=\"colorblind\", x_order=labels, ax=ax4)\n d.set_xticklabels(labels, rotation=90)\n\n sns.despine()\n f.savefig(figure_output + \".eps\", bbox_inches='tight')\n f.savefig(figure_output + \".png\", bbox_inches='tight')",
"def hog_histograms(*args, **kwargs): # real signature unknown\n pass",
"def histogram_discrete(name,\n data,\n bucket_min,\n bucket_max,\n step=None,\n description=None):\n summary_metadata = metadata.create_summary_metadata(\n display_name=None, description=description)\n summary_scope = (getattr(tf.summary.experimental, 'summary_scope', None)\n or tf.summary.summary_scope)\n with summary_scope(\n name, 'histogram_summary',\n values=[data, bucket_min, bucket_max, step]) as (tag, _):\n with tf.name_scope('buckets'):\n bucket_count = bucket_max - bucket_min + 1\n data = data - bucket_min\n one_hots = tf.one_hot(\n tf.reshape(data, shape=[-1]), depth=bucket_count)\n bucket_counts = tf.cast(\n tf.reduce_sum(input_tensor=one_hots, axis=0), tf.float64)\n edge = tf.cast(tf.range(bucket_count), tf.float64)\n # histogram can not draw when left_edge == right_edge\n left_edge = edge - 1e-12\n right_edge = edge + 1e-12\n tensor = tf.transpose(\n a=tf.stack([left_edge, right_edge, bucket_counts]))\n\n return tf.summary.write(\n tag=tag, tensor=tensor, step=step, metadata=summary_metadata)",
"def plot_hist(axis, data, title=None):\n axis.hist(data.ravel(), bins=256)\n axis.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))\n\n if title:\n axis.set_title(title)\n\n return None",
"def plot_llr_histograms(self, llrarrays, llrhistmax, binning, colors,\n labels, best_name, alt_name, critical_value,\n critical_label, critical_height, llrhist,\n critical_color='k', plot_scaling_factor=1.55,\n greater=True, cls=False):\n import matplotlib.pyplot as plt\n plt.rcParams['text.usetex'] = True\n\n for llrarray, label, color in zip(llrarrays, labels, colors):\n plt.hist(\n llrarray,\n bins=binning,\n color=color,\n histtype='step',\n lw=2,\n label=label\n )\n plt.xlabel(r'Log-Likelihood Ratio', size='18', labelpad=18)\n plt.ylabel(r'Number of Trials (per %.2f)'%(binning[1]-binning[0]),\n size='18')\n # Nicely scale the plot\n plt.ylim(0, plot_scaling_factor*llrhistmax)\n # Add labels to show which side means what...\n xlim = plt.gca().get_xlim()\n plt.text(\n xlim[0]-0.05*(xlim[1]-xlim[0]),\n -0.09*plot_scaling_factor*llrhistmax,\n r'\\begin{flushleft} $\\leftarrow$ Prefers %s\\end{flushleft}'%(\n self.tex_axis_label(alt_name)),\n color='k',\n size='large'\n )\n plt.text(\n xlim[1]+0.05*(xlim[1]-xlim[0]),\n -0.09*plot_scaling_factor*llrhistmax,\n r'\\begin{flushright} Prefers %s $\\rightarrow$ \\end{flushright}'%(\n self.tex_axis_label(best_name)),\n color='k',\n size='large',\n horizontalalignment='right'\n )\n # Add the critical value with the desired height and colour.\n if critical_value is not None:\n plt.axvline(\n critical_value,\n color=critical_color,\n ymax=critical_height,\n lw=2,\n label=critical_label\n )\n if llrhist is not None:\n if cls:\n for hist, color in zip(llrhist, colors):\n finehist = np.repeat(hist, 100)\n finebinning = np.linspace(binning[0],\n binning[-1],\n (len(binning)-1)*100+1)\n finebinwidth = finebinning[1]-finebinning[0]\n finebincens = np.linspace(\n finebinning[0]+finebinwidth/2.0,\n finebinning[-1]-finebinwidth/2.0,\n len(finebinning)-1\n )\n where = (finebincens > critical_value)\n plt.fill_between(\n finebincens,\n 0,\n finehist,\n where=where,\n color=color,\n hatch='x',\n edgecolor='k',\n lw=0,\n alpha=0.3\n )\n else:\n # Create an object so that a hatch can be drawn over the\n # region of interest to the p-value.\n finehist = np.repeat(llrhist, 100)\n finebinning = np.linspace(binning[0], binning[-1],\n (len(binning)-1)*100+1)\n finebinwidth = finebinning[1]-finebinning[0]\n finebincens = np.linspace(finebinning[0]+finebinwidth/2.0,\n finebinning[-1]-finebinwidth/2.0,\n len(finebinning)-1)\n # Draw the hatch. This is between the x-axis (0) and the\n # finehist object made above. The \"where\" tells is to only\n # draw above the critical value. To make it just the hatch,\n # color is set to none and hatch is set to X. Also, so that\n # it doesn't have a border we set linewidth to zero.\n if greater:\n where = (finebincens > critical_value)\n else:\n where = (finebincens < critical_value)\n plt.fill_between(\n finebincens,\n 0,\n finehist,\n where=where,\n color='k',\n hatch='X',\n edgecolor='k',\n lw=0,\n alpha=0.3\n )\n plt.subplots_adjust(left=0.10, right=0.90, top=0.9, bottom=0.15)",
"def PlotHist(self, label=None):\n ys, xs, patches = plt.hist(self.test_stats)\n plt.vlines(self.actual, 0, max(ys), linewidth=3, color='black')\n plt.xlabel('test statistic')\n plt.ylabel('count')\n plt.show()",
"def distribution_sentimentscore_histogram(cur, var, table, label):\n x = select(cur,var, table)\n print(\"Number of entries: \", len(x))\n print(\"Maximum: \", max(x))\n print(\"Minimum: \", min(x))\n \n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ax.set_xlabel(\"Sentiment Score\")\n ax.set_ylabel(\"Number of Loans\")\n fig.suptitle(label)\n ax.hist(x, bins = 15)\n plt.show()",
"def histogram_summary(self, tag, values, step, bins=1000):\n self.writer.add_histogram(tag, values, step, bins='auto')"
] | [
"0.65873337",
"0.65499824",
"0.6416419",
"0.6398828",
"0.6303546",
"0.6264724",
"0.6263202",
"0.6211154",
"0.6202789",
"0.61877424",
"0.6169316",
"0.6159507",
"0.61091715",
"0.6084146",
"0.60816425",
"0.6069394",
"0.6049768",
"0.603087",
"0.60236806",
"0.6020201",
"0.6001724",
"0.60007674",
"0.5970286",
"0.595861",
"0.5948098",
"0.5939127",
"0.5935669",
"0.5930126",
"0.5929814",
"0.59261614"
] | 0.6923532 | 0 |
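Annotation: a minimal usage sketch for the splink_score_histogram function shown in the record above; it is not part of the dataset row itself. It assumes a working PySpark installation, a scored-comparisons DataFrame df_e produced by a splink linkage run, and that the function is importable from splink's diagnostics module as in the older 1.x/2.x API (the import path and the parquet file name are assumptions, not taken from the record).

# Sketch only: exercises the signature shown in the record above with its
# defaults (buckets=None, score_colname=None, symmetric=True).
from pyspark.sql import SparkSession

# Assumed import path for older splink releases; adjust to the installed version.
from splink.diagnostics import splink_score_histogram

spark = SparkSession.builder.appName("splink_diagnostics").getOrCreate()

# df_e: scored pairwise comparisons from a splink run, read back here purely
# for illustration (the file name is a placeholder).
df_e = spark.read.parquet("scored_comparisons.parquet")

chart = splink_score_histogram(df_e, spark)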
Convert an ascii format PSD to XML. | def _convert_psd(self, ascii_format, ifo):
command = ["convert_psd_ascii2xml",
"--fname-psd-ascii", f"{ascii_format}",
"--conventional-postfix",
"--ifo", f"{ifo}"]
pipe = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = pipe.communicate()
self.logger.info(command, production = self.production)
if err:
self.production.status = "stuck"
if hasattr(self.production.event, "issue_object"):
raise PipelineException(f"An XML format PSD could not be created.\n{command}\n{out}\n\n{err}",
issue=self.production.event.issue_object,
production=self.production.name)
else:
raise PipelineException(f"An XML format PSD could not be created.\n{command}\n{out}\n\n{err}",
production=self.production.name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def exportXml ( w, xml ):\n assert str ( type ( xml ) ) == \"<type 'str'>\"\n rawText = xml\n pattern = re.compile (r'[^\\S ]+')\n text = re.sub ( pattern, \"\", rawText )\n reparsed = MD.parseString ( text )\n w.write ( reparsed.toprettyxml ( indent = \"\\t\", encoding = \"UTF-8\" ) )",
"def to_xml(self):\n # lines = super(FileCatNoEmpty, self).cat(filepath)\n structure = super(Point, self).to_xml()\n\n\n coords = GeometryTopologyData.__to_xml_vector__(self.coordinate, self.format)\n # description_str = ''\n # if self.description is not None:\n # description_str = '<Description>%s</Description>' % self.description\n\n return '<Point>%s<Coordinate>%s</Coordinate></Point>' % (structure, coords)",
"def toXML(self):\n return _libsbml.SpeciesGlyph_toXML(self)",
"def pprint_xml(et):\n \n return tostring(et, pretty_print=True)",
"def pprintXml(et):\n \n return tostring(et, pretty_print=True)",
"def toXML(self):\n return _libsbml.CompartmentGlyph_toXML(self)",
"def toXML(self):\n return _libsbml.GeneralGlyph_toXML(self)",
"def to_xml(self, enc='utf-8'):\n return b\"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE GIFTI SYSTEM \"http://www.nitrc.org/frs/download.php/115/gifti.dtd\">\n\"\"\" + xml.XmlSerializable.to_xml(self, enc)",
"def MakeTextXMLReady(text):\n dec_text = DecodeNonASCIIText(text)[0]\n items = []\n for char in dec_text:\n try:\n char = char.encode('ascii')\n except UnicodeEncodeError:\n # We have a non-ASCII character of type unicode. Convert it into an\n # XML-ready format.\n try:\n str(char)\n char.encode('utf-8')\n except UnicodeEncodeError:\n char = '%s;' % hex(ord(char)).replace('0x', '&#x')\n items.append(char)\n return ''.join(items)",
"def toXML(self):\n return _libsbml.TextGlyph_toXML(self)",
"def ConvertToXML (given_dict) :\r\n stream_thing = cStringIO.StringIO()\r\n WriteToXMLStream(given_dict, stream_thing, 'top')\r\n return stream_thing.getvalue()",
"def toXML(self):\n return _libsbml.ListOfSpeciesGlyphs_toXML(self)",
"def translateXml(node):\n\n if 'ip' in node.getAttribute('addrtype'):\n output.write(\"\\n\")\n ipaddr = node.getAttribute('addr')\n output.write(node.getAttribute('addr'))\n output.write(\",\")\n\n elif node.nodeName == \"port\":\n port.append(node.getAttribute(\"portid\"))\n output.write(node.getAttribute(\"portid\"))\n output.write(\",\")\n\n elif node.nodeName == \"state\":\n isopen = node.getAttribute('state')\n portstate.append(node.getAttribute('state'))\n output.write(node.getAttribute('state'))",
"def printFormattedXML(xmlString):\n\tp = subprocess.Popen(\"xmlstarlet fo\".split(), stdin=subprocess.PIPE)\n\tp.stdin.write(xmlString)\n\tp.stdin.close()\n\tp.wait()",
"def toXML(self, *args):\n return _libsbml.Point_toXML(self, *args)",
"def create_XML(directives, gui): \n # unpack the directives\n commands = directives.command_list\n Delay = directives.delay_time\n Ascii_delay = directives.ascii_time \n addr = directives.addr\n \n # Start XML\n aardvark = ET.Element('aardvark')\n \n # starup comment for historical reasons\n aardvark.append(ET.Comment('Configuration (Need pullups, ' + \n 'not sure why...)')) \n \n # Configuration Element\n config_attributes = {'i2c': str(int(pySCPI_aardvark.I2C)),\n 'spi': str(int(pySCPI_aardvark.SPI)),\n 'gpio': str(int(pySCPI_aardvark.GPIO)),\n 'pullups': str(int(pySCPI_aardvark.Pullups))}\n \n ET.SubElement(aardvark, 'configure', config_attributes)\n \n # Bitrate\n rate_attributes = {'khz': str(pySCPI_aardvark.Bitrate)}\n \n ET.SubElement(aardvark, 'i2c_bitrate', rate_attributes)\n \n # Start I2C\n ET.SubElement(aardvark, 'i2c_free_bus')\n \n # delay attributes\n delay_attributes = {'ms': str(Delay)} \n ascii_delay_attributes = {'ms': str(Ascii_delay)} \n \n # delay\n ET.SubElement(aardvark, 'sleep', delay_attributes) \n \n # iterate through commands\n for command in commands: \n \n if pySCPI_config.is_config(command):\n # add the configuration to the XML\n addr = update_XML(command, addr, aardvark)\n \n elif pySCPI_config.is_valid_raw(command):\n # it is a valid raw command so comment the command\n aardvark.append(ET.Comment(command))\n \n # split the command up\n raw_list = command[:-1].split(' ')\n raw_addr = '0x' + raw_list[1][2:-1]\n \n # determine the type of raw command it is\n if pySCPI_config.is_raw_write(command):\n write_attributes = {'addr': raw_addr,\n 'count': str(len(raw_list)-1),\n 'radix': str(pySCPI_aardvark.radix)}\n raw = ET.SubElement(aardvark, 'i2c_write',\n write_attributes)\n \n # add hexidecimal null terminated command as \n # text to the write element\n raw.text = ' '.join(\"{:02x}\".format(int(c, 16)) for \\\n c in raw_list[2:]) + ' 0a'\n \n else:\n read_attributes = {'addr': raw_addr,\n 'count': raw_list[2],\n 'radix': str(pySCPI_aardvark.radix)} \n \n ET.SubElement(aardvark, 'i2c_read', \n read_attributes) \n # end if\n \n # intermessage delay\n ET.SubElement(aardvark, 'sleep', delay_attributes) \n \n else:\n # this is a regular command so comment the SCPI command\n aardvark.append(ET.Comment(command))\n \n # define attributes for write element\n write_attributes = {'addr': addr,\n 'count': str(len(command)+1),\n 'radix': str(pySCPI_aardvark.radix)}\n \n # create write element if it is not a comment\n if not command.startswith('#'):\n scpi = ET.SubElement(aardvark,'i2c_write',write_attributes)\n \n # add hexidecimal null terminated command as \n # text to the write element\n scpi.text = ' '.join(\"{:02x}\".format(ord(c)) for \\\n c in command) + ' 0a' \n # end if\n \n \n \n if ('TEL?' 
in command) and not command.startswith('#'):\n # Read command was issued so a read needs to be performed\n \n if command.endswith('ascii'):\n # leave a longer delay for ascii commands\n ET.SubElement(aardvark, 'sleep', \n ascii_delay_attributes)\n \n else:\n # regular delay\n ET.SubElement(aardvark, 'sleep', delay_attributes)\n # end if\n \n # extract length from command\n command_len = pySCPI_formatting.read_length(command, gui) \n \n # define attributes for read element\n read_attributes = {'addr': addr,\n 'count': str(command_len),\n 'radix': str(pySCPI_aardvark.radix)} \n \n # create the read element\n ET.SubElement(aardvark, 'i2c_read', read_attributes) \n # end if\n \n # delay\n ET.SubElement(aardvark, 'sleep', delay_attributes) \n # end if\n\n #end for \n \n # beautify the xml\n file_string = beautify_xml(aardvark)\n \n # open window for saving the file\n file_opt = options = {}\n options['defaultextension'] = '.xml'\n options['filetypes'] = [('xml files', '.xml')]\n options['initialdir'] = os.getcwd() + '\\\\xml_files'\n options['initialfile'] = 'aardvark_script.xml'\n options['title'] = 'Save .xml file as:' \n \n # get the file name from the user\n filename = TKFD.asksaveasfilename(**file_opt)\n \n # see if the user selected a file or not\n if (filename != ''): \n # a file was selected so open file for writing\n \n if pySCPI_config.file_is_free(filename): \n myfile = open(filename, 'w+')\n \n # write file\n myfile.write(file_string)\n myfile.write('\\n')\n \n # close file\n myfile.close() \n \n print 'XML file \\''+ filename.split('/')[-1]+'\\' written'\n \n else:\n print '*** Requested XML file is open in another program ***'\n \n else: \n # no file was selected\n print '*** No XML file written ***'\n # end if\n \n return filename",
"def to_xml(self, resource):\n stream = StringIO.StringIO()\n stream.write(\"<?xml version='1.0' encoding='UTF-8'?>\")\n stream.write('<%s>' % \"GpsRO\")\n\n for item in resource.items():\n key, value = item\n if isinstance(value, str) or isinstance(value, unicode):\n stream.write('\\n<%s>%s</%s>' % (key, value, key))\n else:\n stream.write('\\n<%s>%d</%s>' % (key, value, key))\n\n stream.write('\\n</%s>' % \"GpsRO\")\n stream.seek(0)\n return stream.read()",
"def xml_out(db):\n stats = basic_stats(db)\n print('<?xml version=\"1.0\"?>')\n print('<idp-audit rps=\"%d\" logins=\"%d\" users=\"%d\">'\n % (stats['rps'], stats['logins'], stats['users']))\n for rp, i in list(db['rp'].items()):\n print(' <rp count=\"%d\">%s</rp>' % (i, rp))\n print(\"</idp-audit>\")",
"def handle_pi(self, text):\r\n if text[:3] == \"xml\":\r\n text = u\"xml version='1.0' encoding='%SOUP-ENCODING%'\"\r\n self._toStringSubclass(text, ProcessingInstruction)",
"def getMappedSymbolsXML(self, addrstring: unicode) -> unicode:\n ...",
"def toXML(self):\n return _libsbml.ListOfCompartmentGlyphs_toXML(self)",
"def get_xml(self):\n with io.StringIO() as string:\n string.write(ET.tostring(self.root, encoding=\"unicode\"))\n return string.getvalue()",
"def convert_to_layoutsymbol(cls, elem):\r\n if (len(elem) == 0):\r\n return None\r\n\r\n elem_content = io.StringIO(elem) # treat the string as if a file\r\n root = xml.etree.ElementTree.parse(elem_content).getroot()\r\n ## print(\"parse_from_mathml tree: \" + xml.etree.ElementTree.tostring(root,encoding=\"unicode\"))\r\n return LayoutSymbol.parse_from_mathml(root)",
"def stringify(self):\n buf = ''\n for i in self._sax:\n buf += self._alphabet[int(i)]\n return buf",
"def toXML(self):\n return _libsbml.SpeciesReferenceGlyph_toXML(self)",
"def to_xml(self):\n \n root = ET.Element(\"Document\")\n root.set('xmlns',\"urn:iso:std:iso:20022:tech:xsd:pacs.008.001.02\")\n root_fito = ET.SubElement(root, \"FIToFICstmrCdtTrf\")\n \n self.xml_header(root_fito)\n self.xml_transaction(root_fito)\n\n ET.ElementTree(root)\n \n return ET.tostring(root,encoding='utf-8',xml_declaration=True).decode('utf-8')",
"def test_convert_dosdp(self):\n schema = self._convert('dosdp_schema', 'yaml',\n name='dosdp',\n root_class_name='Pattern',\n data_files=['OMIM_disease_series_by_gene.yaml'],\n target_class='')\n #print(yaml_dumper.dumps(schema))\n axiom_type_options = schema.enums['axiom_type_options']\n self.assertIn('equivalentTo', axiom_type_options.permissible_values)\n self.assertIn('axiom_type', schema.slots)\n self.assertIn('PrintfClause', schema.classes)",
"def recipe12_3():\n from xml.sax.handler import ContentHandler\n import xml.sax\n\n class textHandler(ContentHandler):\n def characters(self,ch):\n sys.stdout.write(ch.encode(\"Latin-1\"))\n parser=xml.sax.make_parser()\n handler=textHandler()\n parser.setContentHandler(handler)\n parser.parse(\"sample.xml\")",
"def toString(doc):\n return doc.toxml()",
"def test_export_to_xml(self):\r\n module_system = DummySystem(load_error_modules=True)\r\n desc = VideoDescriptor(module_system, DictFieldData({}), ScopeIds(None, None, self.location, self.location))\r\n\r\n desc.youtube_id_0_75 = 'izygArpw-Qo'\r\n desc.youtube_id_1_0 = 'p2Q6BrNhdh8'\r\n desc.youtube_id_1_25 = '1EeWXzPdhSA'\r\n desc.youtube_id_1_5 = 'rABDYkeK0x8'\r\n desc.show_captions = False\r\n desc.start_time = datetime.timedelta(seconds=1.0)\r\n desc.end_time = datetime.timedelta(seconds=60)\r\n desc.track = 'http://www.example.com/track'\r\n desc.handout = 'http://www.example.com/handout'\r\n desc.download_track = True\r\n desc.html5_sources = ['http://www.example.com/source.mp4', 'http://www.example.com/source.ogg']\r\n desc.download_video = True\r\n desc.transcripts = {'ua': 'ukrainian_translation.srt', 'ge': 'german_translation.srt'}\r\n\r\n xml = desc.definition_to_xml(None) # We don't use the `resource_fs` parameter\r\n expected = etree.fromstring('''\\\r\n <video url_name=\"SampleProblem1\" start_time=\"0:00:01\" youtube=\"0.75:izygArpw-Qo,1.00:p2Q6BrNhdh8,1.25:1EeWXzPdhSA,1.50:rABDYkeK0x8\" show_captions=\"false\" end_time=\"0:01:00\" download_video=\"true\" download_track=\"true\">\r\n <source src=\"http://www.example.com/source.mp4\"/>\r\n <source src=\"http://www.example.com/source.ogg\"/>\r\n <track src=\"http://www.example.com/track\"/>\r\n <handout src=\"http://www.example.com/handout\"/>\r\n <transcript language=\"ge\" src=\"german_translation.srt\" />\r\n <transcript language=\"ua\" src=\"ukrainian_translation.srt\" />\r\n </video>\r\n ''')\r\n self.assertXmlEqual(expected, xml)"
] | [
"0.55738395",
"0.54602534",
"0.54489726",
"0.53523225",
"0.52660984",
"0.52540344",
"0.5241616",
"0.5230467",
"0.5184351",
"0.51782966",
"0.5153558",
"0.51349664",
"0.50608534",
"0.5040427",
"0.50386435",
"0.5021166",
"0.5018161",
"0.49920428",
"0.49768257",
"0.49715346",
"0.49603248",
"0.49345148",
"0.4925998",
"0.49139947",
"0.490516",
"0.48981646",
"0.485775",
"0.48508587",
"0.48462173",
"0.4844681"
] | 0.6901742 | 0 |
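Annotation: the document in the preceding record ultimately shells out to the convert_psd_ascii2xml command-line tool. The sketch below, which is not part of the dataset row, wraps the same call outside the pipeline class. The executable name and flag names are copied from the record itself; the tool being available on PATH, and the error handling via return code, are assumptions of this sketch rather than behaviour taken from the record.

# Sketch only: a standalone wrapper around the CLI call made in the record
# above. Flag names are taken verbatim from that record; the tool being on
# PATH is an assumption.
import subprocess


def convert_psd(ascii_psd_path: str, ifo: str) -> None:
    command = [
        "convert_psd_ascii2xml",
        "--fname-psd-ascii", ascii_psd_path,
        "--conventional-postfix",
        "--ifo", ifo,
    ]
    result = subprocess.run(command, capture_output=True, text=True)
    if result.returncode != 0:
        # Surface the tool's output if the conversion fails.
        raise RuntimeError(f"PSD conversion failed:\n{result.stdout}\n{result.stderr}")


# Example call (placeholder paths):
# convert_psd("H1-psd.txt", "H1")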
Construct a DAG file in order to submit a production to the condor scheduler using util_RIFT_pseudo_pipe.py | def build_dag(self, user=None):
cwd = os.getcwd()
#os.chdir(self.production.event.meta['working directory'])
#os.chdir(os.path.join(self.production.event.repository.directory,
# self.category))
if self.production.event.repository:
gps_file = self.production.get_timefile()
coinc_file = self.production.get_coincfile()
coinc_file = os.path.join(self.production.event.repository.directory, "C01_offline",
coinc_file)
ini = self.production.get_configuration().ini_loc
ini = os.path.join(self.production.event.repository.directory, "C01_offline", ini)
else:
gps_file = "gpstime.txt"
ini = os.path.join(self.production.event.meta['working directory'], f"{self.production.name}.ini")
coinc_file = os.path.join(cwd, "coinc.xml")
if self.production.get_meta("user"):
user = self.production.get_meta("user")
else:
user = config.get("condor", "user")
self.production.set_meta("user", user)
os.environ['LIGO_USER_NAME'] = f"{user}"
os.environ['LIGO_ACCOUNTING'] = f"{config.get('pipelines', 'accounting')}"
try:
calibration = config.get("general", "calibration")
except:
calibration = "C01"
approximant = self.production.meta['approximant']
#ini.save()
if self.production.rundir:
rundir = os.path.relpath(self.production.rundir, os.getcwd())
else:
rundir = os.path.join(os.path.expanduser("~"),
self.production.event.name,
self.production.name)
self.production.rundir = rundir
#lmax = self.production.meta['priors']['amp order']
if "lmax" in self.production.meta:
lmax = self.production.meta['lmax']
elif "HM" in self.production.meta['approximant']:
lmax = 4
else:
lmax = 2
if "cip jobs" in self.production.meta:
cip = self.production.meta['cip jobs']
else:
cip = 3
command = [os.path.join(config.get("pipelines", "environment"), "bin", "util_RIFT_pseudo_pipe.py"),
"--use-coinc", coinc_file,
"--l-max", f"{lmax}",
"--calibration", f"{calibration}",
"--add-extrinsic",
"--approx", f"{approximant}",
"--cip-explode-jobs", str(cip),
"--use-rundir", rundir,
"--ile-force-gpu",
"--use-ini", ini
]
print(" ".join(command))
# If a starting frequency is specified, add it
if "start-frequency" in self.production.meta:
command += ["--fmin-template", self.production.quality['start-frequency']]
self.logger.info(" ".join(command), production = self.production)
# Placeholder LI grid bootstrapping; conditional on it existing and location specification
if self.bootstrap:
if self.bootstrap == "manual":
if self.production.event.repository:
bootstrap_file = os.path.join(self.production.event.repository.directory, "C01_offline", f"{self.production.name}_bootstrap.xml.gz")
else:
bootstrap_file = "{self.production.name}_bootstrap.xml.gz"
else:
raise PipelineException(f"Unable to find the bootstrapping production for {self.production.name}.",
issue=self.production.event.issue_object,
production=self.production.name)
command += ["--manual-initial-grid", bootstrap_file]
self.logger.info(command, production = self.production)
os.chdir(self.production.event.meta['working directory'])
pipe = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = pipe.communicate()
if err:
self.production.status = "stuck"
if hasattr(self.production.event, "issue_object"):
self.logger.info(out, production = self.production)
self.logger.error(err, production = self.production)
raise PipelineException(f"DAG file could not be created.\n{command}\n{out}\n\n{err}",
issue=self.production.event.issue_object,
production=self.production.name)
else:
self.logger.info(out, production = self.production)
self.logger.error(err, production = self.production)
raise PipelineException(f"DAG file could not be created.\n{command}\n{out}\n\n{err}",
production=self.production.name)
else:
if self.production.event.repository:
os.chdir(self.production.rundir)
for psdfile in self.production.get_psds("xml"):
ifo = psdfile.split("/")[-1].split("_")[1].split(".")[0]
os.system(f"cp {psdfile} {ifo}-psd.xml.gz")
#os.system("cat *_local.cache > local.cache")
if hasattr(self.production.event, "issue_object"):
return PipelineLogger(message=out,
issue=self.production.event.issue_object,
production=self.production.name)
else:
return PipelineLogger(message=out,
production=self.production.name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create(\n metadata: ProjectMetadata, pipeline_name, env, target_path\n): # pylint: disable=too-many-locals\n loader = jinja2.FileSystemLoader(str(Path(__file__).parent))\n jinja_env = jinja2.Environment(autoescape=True, loader=loader, lstrip_blocks=True)\n jinja_env.filters[\"slugify\"] = slugify\n template = jinja_env.get_template(\"airflow_dag_template.j2\")\n\n project_path = metadata.project_path\n package_name = metadata.package_name\n dag_filename = f\"{package_name}_dag.py\"\n\n target_path = Path(target_path)\n target_path = target_path / dag_filename\n\n target_path.parent.mkdir(parents=True, exist_ok=True)\n with KedroSession.create(package_name, project_path, env=env) as session:\n context = session.load_context()\n pipeline = context.pipelines.get(pipeline_name)\n\n dependencies = defaultdict(list)\n for node, parent_nodes in pipeline.node_dependencies.items():\n for parent in parent_nodes:\n dependencies[parent].append(node)\n\n template.stream(\n dag_name=package_name,\n dependencies=dependencies,\n env=env,\n pipeline_name=pipeline_name,\n package_name=package_name,\n pipeline=pipeline,\n ).dump(str(target_path))\n\n secho(\"\")\n secho(\"An Airflow DAG has been generated in:\", fg=\"green\")\n secho(str(target_path))\n secho(\"This file should be copied to your Airflow DAG folder.\", fg=\"yellow\")\n secho(\n \"The Airflow configuration can be customized by editing this file.\", fg=\"green\"\n )\n secho(\"\")\n secho(\n \"This file also contains the path to the config directory, this directory will need to \"\n \"be available to Airflow and any workers.\",\n fg=\"yellow\",\n )\n secho(\"\")\n secho(\n \"Additionally all data sets must have an entry in the data catalog.\",\n fg=\"yellow\",\n )\n secho(\n \"And all local paths in both the data catalog and log config must be absolute paths.\",\n fg=\"yellow\",\n )\n secho(\"\")",
"def write_dag_file(dag_filename, condor_filename, status_filename, log_dir,\n copyToLocal, copyFromLocal, args):\n # to parse the MG5 specific parts\n mg5_parser = MG5ArgParser()\n mg5_args = mg5_parser.parse_args(args.args)\n\n log.info(\"DAG file: %s\" % dag_filename)\n with open(dag_filename, 'w') as dag_file:\n dag_file.write('# DAG for channel %s\\n' % args.channel)\n dag_file.write('# Outputting to %s\\n' % args.oDir)\n for job_ind in xrange(args.jobIdRange[0], args.jobIdRange[1] + 1):\n # add job to DAG\n job_name = '%d_%s' % (job_ind, args.channel)\n dag_file.write('JOB %s %s\\n' % (job_name, condor_filename))\n\n # args to pass to the script on the worker node\n job_opts = []\n\n # start with files to copyToLocal at the start of job running\n # ----------------------------------------------------------------\n if copyToLocal:\n for src, dest in copyToLocal.iteritems():\n job_opts.extend(['--copyToLocal', src, dest])\n\n mg5_args.iseed = job_ind # RNG seed using job index\n\n # Make sure output files are copied across afterwards\n # ----------------------------------------------------------------\n output_dir = os.path.join(args.channel, 'Events', 'run_01')\n name_stem = '%s_%dTeV_n%d_seed%d' % (args.channel, args.energy,\n mg5_args.nevents, mg5_args.iseed)\n\n lhe_zip = os.path.join(output_dir, 'events.lhe.gz')\n lhe_final_zip = '%s.lhe.gz' % name_stem\n\n hepmc_zip = os.path.join(output_dir, 'events_PYTHIA8_0.hepmc.gz')\n hepmc_final_zip = '%s.hepmc.gz' % name_stem\n\n job_opts.extend(['--copyFromLocal', lhe_zip, os.path.join(args.oDir, 'lhe', lhe_final_zip)])\n job_opts.extend(['--copyFromLocal', hepmc_zip, os.path.join(args.oDir, 'hepmc', hepmc_final_zip)])\n # Supplementary materials\n job_opts.extend(['--copyFromLocal', os.path.join(output_dir, 'RunMaterial.tar.gz'),\n os.path.join(args.oDir, 'other', 'RunMaterial_%d.tar.gz' % job_ind)])\n job_opts.extend(['--copyFromLocal', os.path.join(output_dir, 'summary.txt'),\n os.path.join(args.oDir, 'other', 'summary_%d.txt' % job_ind)])\n\n # add in any other files that should be copied from the worker at\n # the end of the job\n # ----------------------------------------------------------------\n if copyFromLocal:\n for src, dest in copyFromLocal.iteritems():\n job_opts.extend(['--copyFromLocal', src, dest])\n\n job_opts.append('--args')\n for k, v in mg5_args.__dict__.items():\n if k and v:\n job_opts.extend(['--' + str(k), str(v)])\n\n # make some replacements due to different destination variable name\n # screwing things up. Yuck!\n remap = {'--iseed': '--seed', '--pythia8_path': '--pythia8'}\n for k, v in remap.items():\n job_opts[job_opts.index(k)] = v\n job_opts.remove('--card')\n log.debug('job_opts: %s' % job_opts)\n\n # write job vars to file\n dag_file.write('VARS %s ' % job_name)\n log_name = os.path.splitext(os.path.basename(dag_filename))[0]\n dag_file.write('opts=\"%s\" logdir=\"%s\" logfile=\"%s\"\\n' % (' '.join(job_opts),\n log_dir,\n log_name))\n dag_file.write('NODE_STATUS_FILE %s 30\\n' % status_filename)",
"def write_dag_script(s):\n assert len(s.jobs) in (1,2),'ERROR: write_dag_script should be called from the final merge JobSet'\n s.dag = os.path.join( s.jobs[0].submitdir, 'global.dag')\n f = open(s.dag,'w')\n # condor submit scripts\n for dep in s.get_deps():\n print >>f,'Job %s %s'%(dep.jobname(),dep.condorfile)\n for job in s.jobs:\n print >>f,'Job %s %s'%(job.jobname(),job.condorfile)\n # retry instructions\n for dep in s.get_deps():\n print >>f,'Retry %s %s'%(dep.jobname(),NRETRY)\n for job in s.jobs:\n print >>f,'Retry %s %s'%(job.jobname(),NRETRY)\n a_parent = ' '.join( [ dep.jobname() for dep in s.get_deps() ] )\n for job in s.jobs:\n a_child = job.jobname()\n print >>f,'PARENT %s CHILD %s'%(a_parent,a_child)\n f.close()",
"def submit_dag(self):\n os.chdir(self.production.rundir)\n os.system(\"cat *_local.cache > local.cache\")\n\n for psdfile in self.production.get_psds(\"xml\"):\n ifo = psdfile.split(\"/\")[-1].split(\"_\")[1].split(\".\")[0]\n os.system(f\"cp {psdfile} {ifo}-psd.xml.gz\")\n\n\n self.before_submit()\n \n try:\n command = [\"condor_submit_dag\", \n \"-batch-name\", f\"rift/{self.production.event.name}/{self.production.name}\",\n os.path.join(self.production.rundir, \"marginalize_intrinsic_parameters_BasicIterationWorkflow.dag\")]\n dagman = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n self.logger.info(command, production = self.production)\n except FileNotFoundError as error:\n raise PipelineException(\"It looks like condor isn't installed on this system.\\n\"\n f\"\"\"I wanted to run {\" \".join(command)}.\"\"\")\n\n stdout, stderr = dagman.communicate()\n\n\n if \"submitted to cluster\" in str(stdout):\n cluster = re.search(\"submitted to cluster ([\\d]+)\", str(stdout)).groups()[0]\n self.production.status = \"running\"\n self.production.job_id = int(cluster)\n return cluster, PipelineLogger(stdout)\n else:\n raise PipelineException(f\"The DAG file could not be submitted.\\n\\n{stdout}\\n\\n{stderr}\",\n issue=self.production.event.issue_object,\n production=self.production.name)",
"def prepare_submit(self, mapping):\n self.dag_path = self.mk_path('%(mex_id)s.dag', mapping)\n self.create_file(self.dag_path,\n self.template['condor.dag_template'], mapping)\n\n self.conf_path = self.mk_path('%(mex_id)s.dag.config', mapping)\n self.create_file(self.conf_path,\n self.template['condor.dag_config_template'], mapping)\n\n self.submit_path = self.mk_path('%(mex_id)s.cmd', mapping)\n self.create_file(self.submit_path,\n self.template['condor.submit_template'], mapping)",
"def dag_builder(conf):\n dag = DAG(dag_id=conf['dag_id'], schedule_interval=conf['schedule_interval'], start_date=conf['start_date'], catchup=conf['catchup'], default_args=conf['default_args'])\n task_conf = conf.get('tasks', [])\n dep_conf = conf.get('dependencies', [])\n tasks = {}\n if task_conf:\n tasks = attach_tasks(dag, task_conf)\n if dep_conf:\n build_flow(dep_conf, tasks)\n return dag",
"def __create_dag(self):\n dag_man = htc.DAGMan(\n filename=os.path.join(self.__job_dir, 'diamond.dag'),\n status_file=os.path.join(self.__job_dir, 'diamond.status'),\n dot='diamond.dot'\n )\n\n # layer 1 - ntuples\n ntuple_jobs = self.__create_ntuple_layer()\n for job in ntuple_jobs:\n dag_man.add_job(job, retry=RETRY_COUNT)\n\n # # layer 2 - analysis\n # for mode in ANALYSIS_MODES:\n # analysis_jobs = self.__create_analysis_layer(ntuple_jobs, mode)\n # for job in analysis_jobs:\n # dag_man.add_job(job, requires=ntuple_jobs, retry=RETRY_COUNT)\n # # layer 2b\n # # for each analysis mode create 1 merged file\n # merge_jobs = self.__create_merge_layer(analysis_jobs, mode)\n # for job in merge_jobs:\n # dag_man.add_job(job, requires=analysis_jobs, retry=2)\n\n self.__dag = dag_man",
"def create_flow(self, conf, dpid, params):\n\t\tpass",
"def test_dag():\n\n def f(task_id):\n return f\"OP:{task_id}\"\n\n with DAG(dag_id=\"test_xcom_dag\", default_args=DEFAULT_ARGS) as dag:\n operators = [PythonOperator(python_callable=f, task_id=f\"test_op_{i}\") for i in range(4)]\n return dag, operators",
"def call(argv):\n known_args, beam_options = parse_args(argv)\n\n yaml_string = known_args.dag.decode('string_escape')\n dag = yaml.load(yaml_string)\n\n pipeline_options = PipelineOptions(beam_options)\n pipeline_options.view_as(SetupOptions).save_main_session = True\n\n p = beam.Pipeline(options=pipeline_options)\n pcoll = p | 'Create' >> beam.Create(['pipeline'])\n create_graph(dag, pcoll, known_args)\n p.run()",
"def __init__(\n self,\n *,\n dag_id: str,\n cloud_workspace: CloudWorkspace,\n publisher_id: str,\n format_specification: str,\n bq_dataset_id: str = \"onix\",\n bq_table_name: str = \"onix\",\n bq_dataset_description: str = \"Thoth ONIX Feed\",\n bq_table_description: str = None,\n api_dataset_id: str = \"onix\",\n host_name: str = \"https://export.thoth.pub\",\n schema_folder: str = default_schema_folder(),\n observatory_api_conn_id: str = AirflowConns.OBSERVATORY_API,\n catchup: bool = False,\n start_date: pendulum.DateTime = pendulum.datetime(2022, 12, 1),\n schedule: str = \"@weekly\",\n ):\n super().__init__(\n dag_id,\n start_date=start_date,\n schedule=schedule,\n airflow_conns=[observatory_api_conn_id],\n catchup=catchup,\n tags=[\"oaebu\"],\n )\n\n self.dag_id = dag_id\n self.cloud_workspace = cloud_workspace\n self.publisher_id = publisher_id\n self.bq_dataset_id = bq_dataset_id\n self.bq_table_name = bq_table_name\n self.bq_dataset_description = bq_dataset_description\n self.bq_table_description = bq_table_description\n self.api_dataset_id = api_dataset_id\n self.host_name = host_name\n self.format_specification = format_specification\n self.schema_folder = schema_folder\n self.observatory_api_conn_id = observatory_api_conn_id\n\n check_workflow_inputs(self)\n\n self.add_setup_task(self.check_dependencies)\n self.add_task(self.download)\n self.add_task(self.upload_downloaded)\n self.add_task(self.transform)\n self.add_task(self.upload_transformed)\n self.add_task(self.bq_load)\n self.add_task(self.add_new_dataset_releases)\n self.add_task(self.cleanup)",
"def prepare_pr_condor_job(self, pool_type, pool_address, number_of_jobs, subtask_index, data_files, rank='0', extraArgs=''):\n ############\n copasi_file = 'auto_copasi_%d.$(Process).cps' % subtask_index\n output_file = 'output_%d.$(Process).txt' % subtask_index\n \n \n \n if pool_type == 'ec2':\n binary_dir = '/usr/local/bin'\n transfer_executable = 'NO'\n else:\n binary_dir, binary = os.path.split(settings.COPASI_LOCAL_BINARY)\n transfer_executable = 'YES'\n \n input_files_string = ', '\n for data_file in data_files:\n input_files_string += (data_file + ', ')\n input_files_string = input_files_string.rstrip(', ')\n\n condor_job_string = Template(condor_spec.raw_condor_job_string).substitute(copasiFile=copasi_file, \n otherFiles=input_files_string,\n rank=rank,\n binary_dir = binary_dir,\n transfer_executable = transfer_executable,\n pool_type = pool_type,\n pool_address = pool_address,\n subtask=str(subtask_index),\n n = number_of_jobs,\n outputFile = output_file,\n extraArgs='',\n )\n \n condor_job_filename = 'auto_condor_%d.job'%subtask_index\n condor_job_full_filename = os.path.join(self.path, condor_job_filename)\n condor_file = open(condor_job_full_filename, 'w')\n condor_file.write(condor_job_string)\n condor_file.close()\n\n return condor_job_filename",
"def dataflow():\n print 'Building',TRAINER_NAME,'package.'\n subprocess.check_call(['python', 'setup.py', 'sdist', '--format=gztar'])\n subprocess.check_call(['gsutil', '-q', 'cp',\n os.path.join('dist', TRAINER_NAME),\n TRAINER_URI])\n opts = None\n if args.cloud:\n options = {\n 'staging_location': os.path.join(args.output_dir, 'tmp', 'staging'),\n 'temp_location': os.path.join(args.output_dir, 'tmp'),\n 'job_name': ('cloud-ml-sample-iris' + '-'\n + datetime.datetime.now().strftime('%Y%m%d%H%M%S')),\n 'project': args.project_id,\n # Dataflow needs a copy of the version of the cloud ml sdk that\n # is being used.\n 'extra_packages': [ml.sdk_location, TRAINER_URI],\n 'teardown_policy': 'TEARDOWN_ALWAYS',\n 'no_save_main_session': True\n }\n opts = beam.pipeline.PipelineOptions(flags=[], **options)\n else:\n # For local runs, the trainer must be installed as a module.\n subprocess.check_call(['pip', 'install', '--upgrade', '--force-reinstall',\n '--user', os.path.join('dist', TRAINER_NAME)])\n\n p = beam.Pipeline(get_pipeline_name(), options=opts)\n\n # Every function below writes its ouput to a file. The inputs to these\n # functions are also optional; if they are missing, the input values are read\n # from a file. Therefore if running this script multiple times, some steps can\n # be removed to prevent recomputing values.\n metadata, train_features, eval_features, predict_features = preprocess(p)\n\n trained_model, results = train(p, train_features, eval_features, metadata)\n\n evaluations = evaluate(p, trained_model, eval_features)\n\n confusion_matrix, precision_recall, logloss = (\n model_analysis(p, evaluations, metadata))\n\n if args.cloud:\n deployed = deploy_model(p, args.deploy_model_name,\n args.deploy_model_version, trained_model)\n # Use our deployed model to run a batch prediction.\n output_uri = os.path.join(args.output_dir, 'batch_prediction_results')\n deployed | \"Batch Predict\" >> ml.Predict([args.predict_data], output_uri,\n region='us-central1',\n data_format='TEXT')\n\n print 'Deploying %s version: %s' % (args.deploy_model_name,\n args.deploy_model_version)\n\n p.run()\n\n if args.cloud:\n print 'Deployed %s version: %s' % (args.deploy_model_name,\n args.deploy_model_version)",
"def dag():\n return DAG(\n dag_id='pytest',\n template_undefined=jinja2.StrictUndefined,\n default_args={'start_date': datetime.now()}\n )",
"def create_pipeline_flow(\n self, cmp_deriv_subject_directory, nipype_deriv_subject_directory\n ):\n acquisition_model = self.stages[\"Diffusion\"].config.diffusion_imaging_model\n recon_tool = self.stages[\"Diffusion\"].config.recon_processing_tool\n\n recon_model = \"DTI\"\n\n if acquisition_model == \"DSI\":\n recon_model = \"SHORE\"\n else:\n if recon_tool == \"Dipy\" and self.stages[\"Diffusion\"].config.dipy_recon_config.local_model:\n recon_model = \"CSD\"\n elif recon_tool == \"MRtrix\" and self.stages[\"Diffusion\"].config.mrtrix_recon_config.local_model:\n recon_model = \"CSD\"\n\n tracking_model = self.stages[\"Diffusion\"].config.diffusion_model\n\n if tracking_model == \"Deterministic\":\n tracking_model = \"DET\"\n elif tracking_model == \"Probabilistic\":\n tracking_model = \"PROB\"\n\n if self.parcellation_scheme == \"Lausanne2018\":\n bids_atlas_label = \"L2018\"\n elif self.parcellation_scheme == \"NativeFreesurfer\":\n bids_atlas_label = \"Desikan\"\n elif self.parcellation_scheme == \"Custom\":\n bids_atlas_label = self.custom_atlas_name\n if self.custom_atlas_res is not None and self.custom_atlas_res != \"\":\n bids_atlas_label += f'_res-{self.custom_atlas_res}'\n\n # Clear previous outputs\n self.clear_stages_outputs()\n\n # Create diffusion workflow with input and output Identityinterface nodes\n diffusion_flow = pe.Workflow(\n name=\"diffusion_pipeline\",\n base_dir=os.path.abspath(nipype_deriv_subject_directory),\n )\n\n diffusion_inputnode = pe.Node(\n interface=util.IdentityInterface(\n fields=[\n \"diffusion\",\n \"bvecs\",\n \"bvals\",\n \"T1\",\n \"aseg\",\n \"aparc_aseg\",\n \"brain\",\n \"T2\",\n \"brain_mask\",\n \"wm_mask_file\",\n \"roi_volumes\",\n \"roi_graphMLs\",\n \"subjects_dir\",\n \"subject_id\",\n \"parcellation_scheme\",\n ]\n ),\n name=\"inputnode\",\n )\n diffusion_inputnode.inputs.parcellation_scheme = self.parcellation_scheme\n diffusion_inputnode.inputs.atlas_info = self.atlas_info\n\n diffusion_outputnode = pe.Node(\n interface=util.IdentityInterface(fields=[\"connectivity_matrices\"]),\n name=\"outputnode\",\n )\n\n diffusion_flow.add_nodes([diffusion_inputnode, diffusion_outputnode])\n\n # Data import\n datasource = self.create_datagrabber_node(\n base_directory=cmp_deriv_subject_directory,\n bids_atlas_label=bids_atlas_label\n )\n\n # Data sinker for output\n sinker = self.create_datasinker_node(\n base_directory=cmp_deriv_subject_directory,\n bids_atlas_label=bids_atlas_label,\n recon_model=recon_model,\n tracking_model=tracking_model\n )\n\n # fmt:off\n diffusion_flow.connect(\n [\n (datasource, diffusion_inputnode, [(\"diffusion\", \"diffusion\"),\n (\"bvecs\", \"bvecs\"),\n (\"bvals\", \"bvals\"),\n (\"T1\", \"T1\"),\n (\"aseg\", \"aseg\"),\n (\"aparc_aseg\", \"aparc_aseg\"),\n (\"brain\", \"brain\"),\n (\"brain_mask\", \"brain_mask\"),\n (\"wm_mask_file\", \"wm_mask_file\")]),\n ]\n )\n # fmt:on\n\n merge_roi_volumes = pe.Node(interface=Merge(5), name=\"merge_roi_volumes\")\n merge_roi_graphmls = pe.Node(interface=Merge(5), name=\"merge_roi_graphmls\")\n\n def remove_non_existing_scales(roi_volumes):\n \"\"\"Returns a list which do not contained any empty element.\n\n Parameters\n ----------\n roi_volumes : list\n A list of output parcellations that might contain empty element\n in the case of the monoscale Desikan scheme for instance\n\n Returns\n -------\n out_roi_volumes : list\n The list with no empty element\n \"\"\"\n out_roi_volumes = []\n for vol in roi_volumes:\n if vol is not None:\n out_roi_volumes.append(vol)\n return 
out_roi_volumes\n\n # fmt:off\n diffusion_flow.connect(\n [\n (datasource, merge_roi_volumes, [(\"roi_volume_s1\", \"in1\"),\n (\"roi_volume_s2\", \"in2\"),\n (\"roi_volume_s3\", \"in3\"),\n (\"roi_volume_s4\", \"in4\"),\n (\"roi_volume_s5\", \"in5\")]),\n (datasource, merge_roi_graphmls, [(\"roi_graphml_s1\", \"in1\"),\n (\"roi_graphml_s2\", \"in2\"),\n (\"roi_graphml_s3\", \"in3\"),\n (\"roi_graphml_s4\", \"in4\"),\n (\"roi_graphml_s5\", \"in5\")]),\n (merge_roi_volumes, diffusion_inputnode, [((\"out\", remove_non_existing_scales), \"roi_volumes\")],),\n (merge_roi_graphmls, diffusion_inputnode, [((\"out\", remove_non_existing_scales), \"roi_graphMLs\")],),\n ]\n )\n # fmt:on\n\n if self.stages[\"Preprocessing\"].enabled:\n preproc_flow = self.create_stage_flow(\"Preprocessing\")\n # fmt:off\n diffusion_flow.connect(\n [\n (diffusion_inputnode, preproc_flow, [(\"diffusion\", \"inputnode.diffusion\"),\n (\"brain\", \"inputnode.brain\"),\n (\"aseg\", \"inputnode.aseg\"),\n (\"aparc_aseg\", \"inputnode.aparc_aseg\"),\n (\"brain_mask\", \"inputnode.brain_mask\"),\n (\"wm_mask_file\", \"inputnode.wm_mask_file\"),\n (\"roi_volumes\", \"inputnode.roi_volumes\"),\n (\"bvecs\", \"inputnode.bvecs\"),\n (\"bvals\", \"inputnode.bvals\"),\n (\"T1\", \"inputnode.T1\")]),\n ]\n )\n # fmt:on\n\n if self.stages[\"Registration\"].enabled:\n reg_flow = self.create_stage_flow(\"Registration\")\n # fmt:off\n diffusion_flow.connect(\n [\n # (diffusion_inputnode,reg_flow,[('T2','inputnode.T2')]),\n (preproc_flow, reg_flow, [(\"outputnode.T1\", \"inputnode.T1\"),\n (\"outputnode.act_5TT\", \"inputnode.act_5TT\"),\n (\"outputnode.gmwmi\", \"inputnode.gmwmi\"),\n (\"outputnode.bvecs_rot\", \"inputnode.bvecs\"),\n (\"outputnode.bvals\", \"inputnode.bvals\"),\n (\"outputnode.wm_mask_file\", \"inputnode.wm_mask\"),\n (\"outputnode.partial_volume_files\", \"inputnode.partial_volume_files\",),\n (\"outputnode.roi_volumes\", \"inputnode.roi_volumes\"),\n (\"outputnode.brain\", \"inputnode.brain\"),\n (\"outputnode.brain_mask\", \"inputnode.brain_mask\"),\n (\"outputnode.brain_mask_full\", \"inputnode.brain_mask_full\"),\n (\"outputnode.diffusion_preproc\", \"inputnode.target\"),\n (\"outputnode.dwi_brain_mask\", \"inputnode.target_mask\")]),\n (preproc_flow, sinker, [(\"outputnode.bvecs_rot\", \"dwi.@bvecs_rot\"),\n (\"outputnode.diffusion_preproc\", \"dwi.@diffusion_preproc\"),\n (\"outputnode.dwi_brain_mask\", \"dwi.@diffusion_brainmask\")]),\n ]\n )\n # fmt:on\n if self.stages[\"Registration\"].config.registration_mode == \"BBregister (FS)\":\n # fmt:off\n diffusion_flow.connect(\n [\n (diffusion_inputnode, reg_flow, [(\"subjects_dir\", \"inputnode.subjects_dir\"), (\"subject_id\", \"inputnode.subject_id\")]),\n ]\n )\n # fmt:on\n\n if self.stages[\"Diffusion\"].enabled:\n diff_flow = self.create_stage_flow(\"Diffusion\")\n # fmt:off\n diffusion_flow.connect(\n [\n (preproc_flow, diff_flow, [(\"outputnode.diffusion_preproc\", \"inputnode.diffusion\")]),\n (reg_flow, diff_flow, [(\"outputnode.wm_mask_registered_crop\", \"inputnode.wm_mask_registered\",),\n (\"outputnode.brain_mask_registered_crop\", \"inputnode.brain_mask_registered\",),\n (\"outputnode.partial_volumes_registered_crop\", \"inputnode.partial_volumes\",),\n (\"outputnode.roi_volumes_registered_crop\", \"inputnode.roi_volumes\",),\n (\"outputnode.act_5tt_registered_crop\", \"inputnode.act_5tt_registered\",),\n (\"outputnode.gmwmi_registered_crop\", \"inputnode.gmwmi_registered\",),\n (\"outputnode.grad\", \"inputnode.grad\"),\n (\"outputnode.bvals\", 
\"inputnode.bvals\"),\n (\"outputnode.bvecs\", \"inputnode.bvecs\")]),\n (reg_flow, sinker, [(\"outputnode.target_epicorrected\", \"dwi.@bdiffusion_reg_crop\",),\n (\"outputnode.grad\", \"dwi.@diffusion_grad\"),\n (\"outputnode.affine_transform\", \"xfm.@affine_transform\"),\n (\"outputnode.warp_field\", \"xfm.@warp_field\"),\n (\"outputnode.T1_registered_crop\", \"anat.@T1_reg_crop\"),\n (\"outputnode.act_5tt_registered_crop\", \"anat.@act_5tt_reg_crop\",),\n (\"outputnode.gmwmi_registered_crop\", \"anat.@gmwmi_reg_crop\"),\n (\"outputnode.brain_registered_crop\", \"anat.@brain_reg_crop\"),\n (\"outputnode.brain_mask_registered_crop\", \"anat.@brain_mask_reg_crop\",),\n (\"outputnode.wm_mask_registered_crop\", \"anat.@wm_mask_reg_crop\",),\n (\"outputnode.roi_volumes_registered_crop\", \"anat.@roivs_reg_crop\",),\n (\"outputnode.partial_volumes_registered_crop\", \"anat.@pves_reg_crop\",)],),\n ]\n )\n # fmt:on\n\n if self.stages[\"Connectome\"].enabled:\n self.stages[\"Connectome\"].config.probtrackx = False\n self.stages[\"Connectome\"].config.subject = self.global_conf.subject\n con_flow = self.create_stage_flow(\"Connectome\")\n # fmt:off\n diffusion_flow.connect(\n [\n (diffusion_inputnode, con_flow, [(\"parcellation_scheme\", \"inputnode.parcellation_scheme\"),\n (\"atlas_info\", \"inputnode.atlas_info\"),\n (\"roi_graphMLs\", \"inputnode.roi_graphMLs\")]),\n (diff_flow, con_flow, [(\"outputnode.track_file\", \"inputnode.track_file\"),\n (\"outputnode.FA\", \"inputnode.FA\"),\n (\"outputnode.ADC\", \"inputnode.ADC\"),\n (\"outputnode.AD\", \"inputnode.AD\"),\n (\"outputnode.RD\", \"inputnode.RD\"),\n (\"outputnode.roi_volumes\", \"inputnode.roi_volumes_registered\",),\n (\"outputnode.skewness\", \"inputnode.skewness\"),\n (\"outputnode.kurtosis\", \"inputnode.kurtosis\"),\n (\"outputnode.P0\", \"inputnode.P0\"),\n (\"outputnode.mapmri_maps\", \"inputnode.mapmri_maps\"),\n (\"outputnode.shore_maps\", \"inputnode.shore_maps\")]),\n (con_flow, diffusion_outputnode, [(\"outputnode.connectivity_matrices\", \"connectivity_matrices\")]),\n (diff_flow, sinker, [(\"outputnode.fod_file\", \"dwi.@fod_file\"),\n (\"outputnode.FA\", \"dwi.@FA\"),\n (\"outputnode.ADC\", \"dwi.@ADC\"),\n (\"outputnode.AD\", \"dwi.@AD\"),\n (\"outputnode.RD\", \"dwi.@RD\"),\n (\"outputnode.skewness\", \"dwi.@skewness\"),\n (\"outputnode.kurtosis\", \"dwi.@kurtosis\"),\n (\"outputnode.P0\", \"dwi.@P0\"),\n (\"outputnode.mapmri_maps\", \"dwi.@mapmri_maps\"),\n (\"outputnode.shore_maps\", \"dwi.@shore_maps\")]),\n (con_flow, sinker, [(\"outputnode.streamline_final_file\", \"dwi.@streamline_final_file\"),\n (\"outputnode.connectivity_matrices\", \"dwi.@connectivity_matrices\")]),\n ]\n )\n # fmt:on\n\n return diffusion_flow",
"def run_huawei_2g_parser(parent_dag_name, child_dag_name, start_date, schedule_interval):\n dag = DAG(\n '%s.%s' % (parent_dag_name, child_dag_name),\n schedule_interval=schedule_interval,\n start_date=start_date,\n )\n\n t23 = DummyOperator( task_id='branch_huawei_2g_parser', dag=dag)\n\n t29 = BashOperator(\n task_id='run_huawei_2g_xml_nbi_parser',\n bash_command='java -jar /mediation/bin/boda-huaweinbixmlparser.jar /mediation/data/cm/huawei/raw/nbi_gsm /mediation/data/cm/huawei/parsed/nbi_gsm /mediation/conf/cm/hua_cm_2g_nbi_parameters.cfg',\n dag=dag)\n\n t29_2 = BashOperator(\n task_id='run_huawei_2g_mml_parser',\n bash_command='java -jar /mediation/bin/boda-huaweimmlparser.jar /mediation/data/cm/huawei/raw/mml_gsm /mediation/data/cm/huawei/parsed/mml_gsm /mediation/conf/cm/hua_cm_2g_mml_parser.cfg',\n dag=dag)\n\n t29_3 = BashOperator(\n task_id='run_huawei_2g_xml_gexport_parser',\n bash_command='java -jar /mediation/bin/boda-huaweicmobjectparser.jar /mediation/data/cm/huawei/raw/gexport_gsm /mediation/data/cm/huawei/parsed/gexport_gsm /mediation/conf/cm/gexport_gsm_parser.cfg',\n dag=dag)\n\n t_join = DummyOperator(\n task_id='join_huawei_2g_parser',\n dag=dag,\n )\n\n dag.set_dependency('branch_huawei_2g_parser', 'run_huawei_2g_mml_parser')\n dag.set_dependency('branch_huawei_2g_parser', 'run_huawei_2g_xml_nbi_parser')\n dag.set_dependency('branch_huawei_2g_parser', 'run_huawei_2g_xml_gexport_parser')\n\n dag.set_dependency('run_huawei_2g_mml_parser', 'join_huawei_2g_parser')\n dag.set_dependency('run_huawei_2g_xml_nbi_parser', 'join_huawei_2g_parser')\n dag.set_dependency('run_huawei_2g_xml_gexport_parser', 'join_huawei_2g_parser')\n\n\n return dag",
"def __init__(\n self,\n dag_id: str = DAG_ID,\n start_date: pendulum.DateTime = pendulum.datetime(2018, 5, 14),\n schedule_interval: str = \"@weekly\",\n dataset_id: str = \"crossref\",\n dataset_description: str = \"The Crossref Events dataset: https://www.eventdata.crossref.org/guide/\",\n queue: str = \"remote_queue\",\n merge_partition_field: str = \"id\",\n schema_folder: str = default_schema_folder(),\n batch_load: bool = True,\n airflow_vars: List = None,\n mailto: str = \"[email protected]\",\n max_threads: int = min(32, os.cpu_count() + 4),\n max_processes: int = os.cpu_count(),\n ):\n\n if airflow_vars is None:\n airflow_vars = [\n AirflowVars.DATA_PATH,\n AirflowVars.PROJECT_ID,\n AirflowVars.DATA_LOCATION,\n AirflowVars.DOWNLOAD_BUCKET,\n AirflowVars.TRANSFORM_BUCKET,\n ]\n super().__init__(\n dag_id,\n start_date,\n schedule_interval,\n dataset_id,\n merge_partition_field,\n schema_folder,\n dataset_description=dataset_description,\n queue=queue,\n batch_load=batch_load,\n airflow_vars=airflow_vars,\n load_bigquery_table_kwargs={\"ignore_unknown_values\": True},\n )\n self.mailto = mailto\n self.max_threads = max_threads\n self.max_processes = max_processes\n\n self.add_setup_task(self.check_dependencies)\n self.add_task_chain(\n [self.download, self.upload_downloaded, self.transform, self.upload_transformed, self.bq_load_partition]\n )\n self.add_task_chain([self.bq_delete_old, self.bq_append_new, self.cleanup], trigger_rule=\"none_failed\")",
"def create_task():",
"def write_unix(workloads, input_file_parameters, command_line_parameters):\r\n\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n background_process_list = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to TORQUE with\r\n # sbatch command. Each file has one or more associated subshell files\r\n # containing contents for each thread.\r\n\r\n # Generate strings describing current workload and thread indexes for\r\n # output file names\r\n workload_index += 1\r\n workload_index_string = str(workload_index).zfill(workload_zfill_amount)\r\n file_main_name = '{0}_UNIX_WORKLOAD_1'.format(NAME)\r\n\r\n # Add information about current workflow to the main shell script\r\n background_process_list.append('echo \"Running workload part {0}\"'.format(\r\n workload_index))\r\n\r\n # When --fix_run mode is used the output and log files files already\r\n # exist. To prevent overwriting these files with new ones specific\r\n # prefix or appendix strings are added to the new output file names.\r\n appendix = '.sh'\r\n i = 0\r\n if command_line_parameters.fix_run:\r\n mode = 'FIX'\r\n elif command_line_parameters.compress_run == 'compress':\r\n mode = 'COMPRESS'\r\n elif command_line_parameters.compress_run == 'decompress':\r\n mode = 'DECOMPRESS'\r\n else:\r\n mode = None\r\n while mode is 'FIX' and os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n if mode in ('COMPRESS', 'DECOMPRESS'):\r\n appendix = '_{0}.sh'.format(mode)\r\n while os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n\r\n # Generate subshell files\r\n thread_index = 0\r\n thread_zfill_amount = len(str(len(workload)))\r\n for thread_contents in workload:\r\n # Iterate over output commands of each thread and write necessary\r\n # subshell files for each\r\n out_lines = []\r\n cmds_in_thread = len(thread_contents)\r\n for i in xrange(cmds_in_thread):\r\n # Check if any modules need loading or are they loaded by previous command\r\n skip_module_loading = False\r\n if i > 0:\r\n if thread_contents[i].load_module == thread_contents[i-1].load_module:\r\n skip_module_loading = True\r\n # Check if any modules need unloading or will they be used by following command\r\n skip_module_unloading = False\r\n if i < cmds_in_thread-1:\r\n if thread_contents[i].load_module == thread_contents[i+1].load_module:\r\n skip_module_unloading = True\r\n out_lines += generate_subshell_file_contents(thread_contents[i],\r\n skip_module_loading,\r\n skip_module_unloading)\r\n\r\n\r\n # Write subshell file\r\n thread_index_string = str(thread_index).zfill(thread_zfill_amount)\r\n fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n thread_index_string,\r\n appendix)\r\n try:\r\n out_fl = open(os.path.join(input_file_parameters.output_dir,\r\n fl_name), 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(\r\n input_file_parameters.output_dir,\r\n fl_name)))\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n # i.e. 
use UNIX source to run input shell script, redirect stdout\r\n # and stderr to an .out file.\r\n background_process_list.append('source {0} >> {0}.out 2>&1 &'.format(\r\n os.path.join(input_file_parameters.output_dir,\r\n fl_name)))\r\n thread_index += 1\r\n\r\n # Workflow steps are written to a single output file (instead of\r\n # separate files). \"wait\" command is inserted in between workflow parts\r\n # to synchronize workflows.\r\n background_process_list.append('wait\\n\\n')\r\n\r\n # Write the main shell script file\r\n resmng_config = list(input_file_parameters.resource_manager_params)\r\n resmng_config.append('\\n\\n')\r\n resmng_config.append('\\n'.join(background_process_list))\r\n\r\n out_fl_path = os.path.join(input_file_parameters.output_dir, file_main_name + appendix)\r\n try:\r\n out_fl = open(out_fl_path, 'w')\r\n except IOError as emsg:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}\\n with error message:\\n{1}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix),\r\n str(emsg)))\r\n out_fl.write('\\n'.join(resmng_config))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n return [out_fl_path]",
"def submit_dag(config, dag_file):\n with SUBMIT_LOCK:\n try:\n condor_dag_cmd = osp.join(get_condor_bin_dir(config),\n CONDOR_COMMAND['dag'])\n\n pipe = subprocess.Popen(args=(condor_dag_cmd, '-force', dag_file),\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n\n output = pipe.stdout.read()\n status = pipe.wait()\n return status, output\n except OSError, exc:\n return -1, str(exc)",
"def template_train_model(task_filename):\n task_filename_only = os.path.basename(task_filename)\n return {\n 'basename': 'generate_tasks',\n 'task_dep': ['generate_job_batch'],\n 'name': task_filename_only,\n #'file_dep': [task_filename], # does not work if mv\n 'targets': ['tests/'+task_filename_only],\n 'actions': [\n 'python pipeline_train_model.py '+task_filename,\n #'rm '+task_filename\n ],\n }",
"def generate_workflow(self) -> str:\n analysisTasks = self._parse_parameters()\n terminalTasks = self._identify_terminal_tasks(analysisTasks)\n\n ruleList = {k: SnakemakeRule(v, self._pythonPath)\n for k, v in analysisTasks.items()}\n\n workflowString = 'rule all: \\n\\tinput: ' + \\\n ','.join([ruleList[x].full_output()\n for x in terminalTasks]) + '\\n\\n'\n workflowString += '\\n'.join([x.as_string() for x in ruleList.values()])\n\n return self._dataSet.save_workflow(workflowString)",
"def prepare_rw_condor_job(self, pool_type, address, repeats, raw_mode_args, data_files, output_files, rank='0'):\n \n #Prepare a customized condor job string\n #Somewhat confusingly, the original string was called raw_condor_string\n #We'll call this one raw_mode_string_with_args\n \n #We want to substitute '$filename' to ${copasiFile}\n args_string = Template(raw_mode_args).substitute(filename = '${copasiFile}', new_filename='run_${copasiFile}')\n\n raw_mode_string_with_args = Template(condor_spec.raw_mode_string).safe_substitute(args=args_string)\n \n if pool_type == 'ec2':\n binary_dir = '/usr/local/bin'\n transfer_executable = 'NO'\n else:\n binary_dir, binary = os.path.split(settings.COPASI_LOCAL_BINARY)\n transfer_executable = 'YES'\n\n \n #Build up a string containing a comma-seperated list of data files\n input_files_string = ', '\n output_files_string = ' ,'\n for data_file in data_files:\n input_files_string += data_file + ', '\n #And the same for the output files\n for output_file in output_files:\n output_files_string += '$(Process)_' + output_file + ', '\n input_files_string = input_files_string.rstrip(', ')\n output_files_string = output_files_string.rstrip(', ')\n ############\n #Build the appropriate .job file for the raw task\n copasi_file = 'auto_copasi_1.$(Process).cps'\n \n condor_job_string = Template(raw_mode_string_with_args).substitute(pool_type=pool_type,\n pool_address=address,\n binary_dir=binary_dir,\n transfer_executable=transfer_executable,\n copasiFile=copasi_file,\n otherFiles=input_files_string,\n outputFile=output_files_string,\n n=repeats,\n extraArgs='',\n rank=rank,\n )\n \n condor_job_filename = 'auto_condor_1.job'\n condor_file = open(os.path.join(self.path, condor_job_filename), 'w')\n condor_file.write(condor_job_string)\n condor_file.close()\n\n\n return condor_job_filename",
"def build_metric_submit_file(self, metric):\n\n log_dir = self.rsv.get_metric_log_dir()\n environment = \"PATH=/usr/bin:/bin\\n\"\n condor_id = metric.get_unique_name()\n arguments = \"-v 3 -r -u %s %s %s\" % (metric.host, metric.name, metric.get_settings())\n timestamp = strftime(\"%Y-%m-%d %H:%M:%S %Z\")\n\n probe_interval = metric.get_probe_interval()\n if not probe_interval:\n cron = metric.get_cron_entry()\n if not cron:\n self.rsv.log(\"ERROR\", \"Invalid cron time for metric %s on host %s. Will not start.\" %\n (metric.name, metric.host))\n return \"\"\n\n submit = \"\"\n submit += \"######################################################################\\n\"\n submit += \"# Temporary submit file generated by rsv-control\\n\"\n submit += \"# Generated at %s \" % timestamp\n submit += \"######################################################################\\n\"\n submit += \"Environment = %s\\n\" % environment\n\n if probe_interval:\n submit += \"DeferralPrepTime = ifThenElse(%d - ScheddInterval + 31 > 0, %d - ScheddInterval + 31, 180) \\n\" % (probe_interval, probe_interval)\n submit += \"DeferralTime = (CurrentTime + %d + random(30))\\n\" % probe_interval\n submit += \"DeferralWindow = 99999999\\n\"\n submit += \"+OSGRSVProbeInterval = %d\\n\" % probe_interval\n else:\n submit += \"CronPrepTime = 180\\n\"\n submit += \"CronWindow = 99999999\\n\"\n submit += \"CronMonth = %s\\n\" % cron[\"Month\"]\n submit += \"CronDayOfWeek = %s\\n\" % cron[\"DayOfWeek\"]\n submit += \"CronDayOfMonth = %s\\n\" % cron[\"DayOfMonth\"]\n submit += \"CronHour = %s\\n\" % cron[\"Hour\"]\n submit += \"CronMinute = %s\\n\" % cron[\"Minute\"]\n submit += \"Executable = %s\\n\" % self.rsv.get_wrapper()\n submit += \"Error = %s/%s.err\\n\" % (log_dir, condor_id)\n submit += \"Output = %s/%s.out\\n\" % (log_dir, condor_id)\n submit += \"Log = %s/%s.log\\n\" % (log_dir, condor_id)\n submit += \"Arguments = %s\\n\" % arguments\n submit += \"Universe = local\\n\"\n submit += \"Notification = never\\n\"\n submit += \"OnExitRemove = false\\n\"\n submit += \"PeriodicRelease = HoldReasonCode =!= 1\\n\"\n submit += \"+OSGRSV = \\\"metrics\\\"\\n\"\n submit += \"+OSGRSVHost = \\\"%s\\\"\\n\" % metric.host\n submit += \"+OSGRSVMetric = \\\"%s\\\"\\n\" % metric.name\n submit += \"+OSGRSVUniqueName = \\\"%s\\\"\\n\" % condor_id\n submit += \"Queue\\n\"\n \n return submit",
"def submit(dagName, workDir):\n # All the files we need (the .dag file and the .job files) are in `workDir`\n # and have the names defined in the .dag file (which we are given as\n # `dagName`). So, first thing is to parse `dagName`.\n dag = DAG.new_from_classad(open(os.path.join(workDir, dagName)).read(), workDir)\n\n # Extract the dataset name. We assume dagName = dataset.dag\n dataset, ext = os.path.splitext(dagName)\n\n # Create the XGrid plist\n f = open(os.path.join(workDir, dataset + '.plist'), 'w')\n f.write(dag.to_xgrid_plist(dataset))\n f.close()\n\n print('XGrid batch job file written in work directory %s' % (workDir))\n return(0)",
"def test_dag():\n return airflow.DAG(\n \"testdag\",\n default_args={\"owner\": \"airflow\", \"start_date\": airflow.utils.dates.days_ago(0), 'provide_context': True},\n schedule_interval=\"@daily\",\n )",
"def parse_and_import_nokia_raml20(parent_dag_name, child_dag_name, start_date, schedule_interval):\n dag_id = '%s.%s' % (parent_dag_name, child_dag_name)\n\n dag = DAG(\n '%s.%s' % (parent_dag_name, child_dag_name),\n schedule_interval=schedule_interval,\n start_date=start_date,\n )\n\n task_clean_mo_names = BashOperator(\n task_id='clean_raml20_mo_names',\n bash_command=\"true\",\n#\t\tr\"\"\"\n#sed -i \"\n#/lowerMarginCio/ s//lowerMarginCIO/g;\n#/upperMarginCio/ s//upperMarginCIO/g;\n#\" /mediation/data/cm/nokia/raw/raml20/*.xml || true\n# \"\"\",\n dag=dag\n )\n\t\n parse_nokia_raml20_cm_files = BashOperator(\n task_id='parse_nokia_raml20_cm_files',\n bash_command='java -jar /mediation/bin/boda-nokiacmdataparser.jar -i /mediation/data/cm/nokia/raw/raml20 -o /mediation/data/cm/nokia/parsed/raml20 -c /mediation/conf/cm/nokia_raml20_parser.cfg',\n dag=dag)\n\n import_nokia_raml20_csv = BashOperator(\n task_id='import_nokia_raml20_parsed_csv',\n bash_command='python /mediation/bin/load_cm_data_into_db.py nokia_raml20 /mediation/data/cm/nokia/parsed/raml20',\n dag=dag)\n\n t_run_nokia_raml20_insert_queries = BashOperator(\n task_id='run_nokia_raml20_insert_queries',\n bash_command='python /mediation/bin/run_cm_load_insert_queries.py nokia_raml20',\n dag=dag)\n\n def clear_nokia_raml20_cm_tables():\n pass\n\n t50 = PythonOperator(\n task_id='clear_nokia_raml20_cm_tables',\n python_callable=clear_nokia_raml20_cm_tables,\n dag=dag)\n\n dag.set_dependency('clean_raml20_mo_names', 'parse_nokia_raml20_cm_files')\n dag.set_dependency('parse_nokia_raml20_cm_files', 'clear_nokia_raml20_cm_tables')\n dag.set_dependency('clear_nokia_raml20_cm_tables', 'import_nokia_raml20_parsed_csv')\n dag.set_dependency('import_nokia_raml20_parsed_csv', 'run_nokia_raml20_insert_queries')\n\n return dag",
"def test_complex_taskgroup_dag():\n\n def f(task_id):\n return f\"OP:{task_id}\"\n\n with DAG(dag_id=\"test_complex_dag\", default_args=DEFAULT_ARGS) as dag:\n with TaskGroup(\"group_1\") as group:\n group_emp1 = EmptyOperator(task_id=\"group_empty1\")\n group_emp2 = EmptyOperator(task_id=\"group_empty2\")\n group_emp3 = EmptyOperator(task_id=\"group_empty3\")\n emp_in1 = EmptyOperator(task_id=\"empty_in1\")\n emp_in2 = EmptyOperator(task_id=\"empty_in2\")\n emp_in3 = EmptyOperator(task_id=\"empty_in3\")\n emp_in4 = EmptyOperator(task_id=\"empty_in4\")\n emp_out1 = EmptyOperator(task_id=\"empty_out1\")\n emp_out2 = EmptyOperator(task_id=\"empty_out2\")\n emp_out3 = EmptyOperator(task_id=\"empty_out3\")\n emp_out4 = EmptyOperator(task_id=\"empty_out4\")\n op_in1 = PythonOperator(python_callable=f, task_id=\"op_in1\")\n op_out1 = PythonOperator(python_callable=f, task_id=\"op_out1\")\n\n return (\n dag,\n group,\n (\n group_emp1,\n group_emp2,\n group_emp3,\n emp_in1,\n emp_in2,\n emp_in3,\n emp_in4,\n emp_out1,\n emp_out2,\n emp_out3,\n emp_out4,\n op_in1,\n op_out1,\n ),\n )",
"def write_torque(workloads, input_file_parameters, command_line_parameters):\r\n validate_resource_manager_parameters(\r\n input_file_parameters.resource_manager_params,\r\n ['#PBS -k', '#PBS -N', '#PBS -d', '#PBS -e', '#PBS -t'])\r\n\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n workload_file_paths = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to TORQUE with\r\n # sbatch command. Each file has one or more associated subshell files\r\n # containing contents for each thread.\r\n\r\n # Generate strings describing current workload and thread indexes for\r\n # output file names\r\n workload_index += 1\r\n workload_index_string = str(workload_index).zfill(workload_zfill_amount)\r\n file_main_name = '{0}_TORQUE_WORKLOAD_{1}'.format(NAME,\r\n workload_index_string)\r\n\r\n # When --fix_run mode is used the output and log files files already\r\n # exist. To prevent overwriting these files with new ones specific\r\n # prefix or appendix strings are added to the new output file names.\r\n appendix = '.sh'\r\n i = 0\r\n if command_line_parameters.fix_run:\r\n mode = 'FIX'\r\n elif command_line_parameters.compress_run == 'compress':\r\n mode = 'COMPRESS'\r\n elif command_line_parameters.compress_run == 'decompress':\r\n mode = 'DECOMPRESS'\r\n else:\r\n mode = None\r\n while mode is not None and os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n # Generate subshell files\r\n thread_index = 0\r\n for thread_contents in workload:\r\n # Iterate over output commands of each thread and write necessary\r\n # subshell files for each\r\n out_lines = []\r\n cmds_in_thread = len(thread_contents)\r\n for i in xrange(cmds_in_thread):\r\n # Check if any modules need loading or are they loaded by previous command\r\n skip_module_loading = False\r\n if i > 0:\r\n if thread_contents[i].load_module == thread_contents[i-1].load_module:\r\n skip_module_loading = True\r\n # Check if any modules need unloading or will they be used by following command\r\n skip_module_unloading = False\r\n if i < cmds_in_thread-1:\r\n if thread_contents[i].load_module == thread_contents[i+1].load_module:\r\n skip_module_unloading = True\r\n out_lines += generate_subshell_file_contents(thread_contents[i],\r\n skip_module_loading,\r\n skip_module_unloading)\r\n\r\n\r\n # Write subshell file\r\n thread_index_string = str(thread_index)\r\n fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n thread_index_string,\r\n appendix)\r\n try:\r\n out_fl = open(os.path.join(input_file_parameters.output_dir,\r\n fl_name), 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(\r\n input_file_parameters.output_dir,\r\n fl_name)))\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n thread_index += 1\r\n\r\n # Create lines for TORQUE input file by generating job-name, output,\r\n # error and array parameters based on user input\r\n\r\n # IF YOU ADD NEW AUTOMATICALLY INFERRED PARAMETERS, REMEMBER TO VALIDATE\r\n # THEM AT THE BEGINNING OF THIS FUNCTION\r\n resmng_config = list(input_file_parameters.resource_manager_params)\r\n resmng_config.append('#PBS -k eo')\r\n resmng_config.append('#PBS -N {0}'.format(input_file_parameters.job_name))\r\n resmng_config.append('#PBS -d {0}'.format(input_file_parameters.output_dir))\r\n 
resmng_config.append('#PBS -e {0}'.format(input_file_parameters.output_dir))\r\n resmng_config.append('#PBS -t {0}-{1}'.format(0, len(workload)-1))\r\n\r\n resmng_config.append('\\n\\n')\r\n subshell_file_path = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n '\"${PBS_ARRAYID}\"',\r\n appendix)\r\n subshell_file_path = os.path.join(input_file_parameters.output_dir,\r\n subshell_file_path)\r\n resmng_config.append('source {0}'.format(subshell_file_path))\r\n\r\n out_fl_path = os.path.join(input_file_parameters.output_dir,file_main_name + appendix)\r\n workload_file_paths.append(out_fl_path)\r\n try:\r\n out_fl = open(out_fl_path, 'w')\r\n except IOError as emsg:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}\\n with error message:\\n{1}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix),\r\n str(emsg)))\r\n out_fl.write('\\n'.join(resmng_config))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n return workload_file_paths",
"def main():\n # get arguments from command line\n args = parse_arguments()\n\n # checks on the output file\n # if args.stats_only:\n # assert args.output, \"The output file was not provided\"\n if args.output and os.path.exists(args.output):\n warnings.warn(\"Overwriting task file \" + args.output, UserWarning)\n os.remove(args.output)\n\n # initialize the task\n task = Task(\n args.database, args.on,\n across=args.across,\n by=args.by,\n filters=args.filters,\n regressors=args.regressors,\n verbose=args.verbose)\n\n if args.stats_only:\n task.print_stats()\n else:\n if args.tempdir and not os.path.exists(args.tempdir):\n os.makedirs(args.tempdir)\n\n # generate triplets and unique pairs\n task.generate_triplets(\n output=args.output,\n threshold=args.threshold,\n tmpdir=args.tempdir,\n seed=args.seed)"
] | [
"0.6791086",
"0.66254175",
"0.6543903",
"0.6439742",
"0.62908417",
"0.6201106",
"0.5974608",
"0.5915672",
"0.58422476",
"0.57603854",
"0.56926197",
"0.5625026",
"0.5587469",
"0.556881",
"0.55680734",
"0.5542769",
"0.54487556",
"0.5446099",
"0.54422826",
"0.5420124",
"0.54078233",
"0.5398419",
"0.53784937",
"0.5375819",
"0.5354532",
"0.5331991",
"0.53298175",
"0.53220487",
"0.5305897",
"0.5303826"
] | 0.67467165 | 1 |
Submit a DAG file to the condor cluster (using the RIFT dag name). This is an override of the near-identical parent function submit_dag(). | def submit_dag(self):
os.chdir(self.production.rundir)
os.system("cat *_local.cache > local.cache")
for psdfile in self.production.get_psds("xml"):
ifo = psdfile.split("/")[-1].split("_")[1].split(".")[0]
os.system(f"cp {psdfile} {ifo}-psd.xml.gz")
self.before_submit()
try:
command = ["condor_submit_dag",
"-batch-name", f"rift/{self.production.event.name}/{self.production.name}",
os.path.join(self.production.rundir, "marginalize_intrinsic_parameters_BasicIterationWorkflow.dag")]
dagman = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
            self.logger.info(command, production=self.production)
except FileNotFoundError as error:
raise PipelineException("It looks like condor isn't installed on this system.\n"
f"""I wanted to run {" ".join(command)}.""")
stdout, stderr = dagman.communicate()
if "submitted to cluster" in str(stdout):
            cluster = re.search(r"submitted to cluster ([\d]+)", str(stdout)).groups()[0]
self.production.status = "running"
self.production.job_id = int(cluster)
return cluster, PipelineLogger(stdout)
else:
raise PipelineException(f"The DAG file could not be submitted.\n\n{stdout}\n\n{stderr}",
issue=self.production.event.issue_object,
production=self.production.name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def submit_dag(config, dag_file):\n with SUBMIT_LOCK:\n try:\n condor_dag_cmd = osp.join(get_condor_bin_dir(config),\n CONDOR_COMMAND['dag'])\n\n pipe = subprocess.Popen(args=(condor_dag_cmd, '-force', dag_file),\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n\n output = pipe.stdout.read()\n status = pipe.wait()\n return status, output\n except OSError, exc:\n return -1, str(exc)",
"def submit(dagName, workDir):\n # All the files we need (the .dag file and the .job files) are in `workDir`\n # and have the names defined in the .dag file (which we are given as\n # `dagName`). So, first thing is to parse `dagName`.\n dag = DAG.new_from_classad(open(os.path.join(workDir, dagName)).read(), workDir)\n\n # Extract the dataset name. We assume dagName = dataset.dag\n dataset, ext = os.path.splitext(dagName)\n\n # Create the XGrid plist\n f = open(os.path.join(workDir, dataset + '.plist'), 'w')\n f.write(dag.to_xgrid_plist(dataset))\n f.close()\n\n print('XGrid batch job file written in work directory %s' % (workDir))\n return(0)",
"def _submit(self, dag_name, work_dir, flavour='condor', extra_env=None,\n wait=False):\n if(extra_env is None):\n extra_env = {}\n\n if(flavour != 'condor'):\n wait = False\n\n # If we are asked to (by specifying extra_env) agument the user\n # environment.\n if(extra_env):\n os.environ.update(extra_env)\n\n plugin = getattr(plugins, flavour + '_plugin')\n\n if(wait):\n return(plugin.submit(dag_name, work_dir, wait=True))\n return(plugin.submit(dag_name, work_dir))",
"def write_dag_file(dag_filename, condor_filename, status_filename, log_dir,\n copyToLocal, copyFromLocal, args):\n # to parse the MG5 specific parts\n mg5_parser = MG5ArgParser()\n mg5_args = mg5_parser.parse_args(args.args)\n\n log.info(\"DAG file: %s\" % dag_filename)\n with open(dag_filename, 'w') as dag_file:\n dag_file.write('# DAG for channel %s\\n' % args.channel)\n dag_file.write('# Outputting to %s\\n' % args.oDir)\n for job_ind in xrange(args.jobIdRange[0], args.jobIdRange[1] + 1):\n # add job to DAG\n job_name = '%d_%s' % (job_ind, args.channel)\n dag_file.write('JOB %s %s\\n' % (job_name, condor_filename))\n\n # args to pass to the script on the worker node\n job_opts = []\n\n # start with files to copyToLocal at the start of job running\n # ----------------------------------------------------------------\n if copyToLocal:\n for src, dest in copyToLocal.iteritems():\n job_opts.extend(['--copyToLocal', src, dest])\n\n mg5_args.iseed = job_ind # RNG seed using job index\n\n # Make sure output files are copied across afterwards\n # ----------------------------------------------------------------\n output_dir = os.path.join(args.channel, 'Events', 'run_01')\n name_stem = '%s_%dTeV_n%d_seed%d' % (args.channel, args.energy,\n mg5_args.nevents, mg5_args.iseed)\n\n lhe_zip = os.path.join(output_dir, 'events.lhe.gz')\n lhe_final_zip = '%s.lhe.gz' % name_stem\n\n hepmc_zip = os.path.join(output_dir, 'events_PYTHIA8_0.hepmc.gz')\n hepmc_final_zip = '%s.hepmc.gz' % name_stem\n\n job_opts.extend(['--copyFromLocal', lhe_zip, os.path.join(args.oDir, 'lhe', lhe_final_zip)])\n job_opts.extend(['--copyFromLocal', hepmc_zip, os.path.join(args.oDir, 'hepmc', hepmc_final_zip)])\n # Supplementary materials\n job_opts.extend(['--copyFromLocal', os.path.join(output_dir, 'RunMaterial.tar.gz'),\n os.path.join(args.oDir, 'other', 'RunMaterial_%d.tar.gz' % job_ind)])\n job_opts.extend(['--copyFromLocal', os.path.join(output_dir, 'summary.txt'),\n os.path.join(args.oDir, 'other', 'summary_%d.txt' % job_ind)])\n\n # add in any other files that should be copied from the worker at\n # the end of the job\n # ----------------------------------------------------------------\n if copyFromLocal:\n for src, dest in copyFromLocal.iteritems():\n job_opts.extend(['--copyFromLocal', src, dest])\n\n job_opts.append('--args')\n for k, v in mg5_args.__dict__.items():\n if k and v:\n job_opts.extend(['--' + str(k), str(v)])\n\n # make some replacements due to different destination variable name\n # screwing things up. Yuck!\n remap = {'--iseed': '--seed', '--pythia8_path': '--pythia8'}\n for k, v in remap.items():\n job_opts[job_opts.index(k)] = v\n job_opts.remove('--card')\n log.debug('job_opts: %s' % job_opts)\n\n # write job vars to file\n dag_file.write('VARS %s ' % job_name)\n log_name = os.path.splitext(os.path.basename(dag_filename))[0]\n dag_file.write('opts=\"%s\" logdir=\"%s\" logfile=\"%s\"\\n' % (' '.join(job_opts),\n log_dir,\n log_name))\n dag_file.write('NODE_STATUS_FILE %s 30\\n' % status_filename)",
"def prepare_submit(self, mapping):\n self.dag_path = self.mk_path('%(mex_id)s.dag', mapping)\n self.create_file(self.dag_path,\n self.template['condor.dag_template'], mapping)\n\n self.conf_path = self.mk_path('%(mex_id)s.dag.config', mapping)\n self.create_file(self.conf_path,\n self.template['condor.dag_config_template'], mapping)\n\n self.submit_path = self.mk_path('%(mex_id)s.cmd', mapping)\n self.create_file(self.submit_path,\n self.template['condor.submit_template'], mapping)",
"def submit_task(self, op_data):\n\n task_path = op_data['file_path']\n t = Task()\n t.task_id = '0'\n t.task_status = Global.get_status_separating()\n parser = xml.sax.make_parser()\n parser.setFeature(xml.sax.handler.feature_namespaces, 0)\n parser.setContentHandler(t)\n parser.parse(task_path)\n self.__task_set[t.task_id] = t\n self.__task_queue.put(t)\n logging.info(\"submitted task %s\\n\" % t.task_name)",
"def write_dag_script(s):\n assert len(s.jobs) in (1,2),'ERROR: write_dag_script should be called from the final merge JobSet'\n s.dag = os.path.join( s.jobs[0].submitdir, 'global.dag')\n f = open(s.dag,'w')\n # condor submit scripts\n for dep in s.get_deps():\n print >>f,'Job %s %s'%(dep.jobname(),dep.condorfile)\n for job in s.jobs:\n print >>f,'Job %s %s'%(job.jobname(),job.condorfile)\n # retry instructions\n for dep in s.get_deps():\n print >>f,'Retry %s %s'%(dep.jobname(),NRETRY)\n for job in s.jobs:\n print >>f,'Retry %s %s'%(job.jobname(),NRETRY)\n a_parent = ' '.join( [ dep.jobname() for dep in s.get_deps() ] )\n for job in s.jobs:\n a_child = job.jobname()\n print >>f,'PARENT %s CHILD %s'%(a_parent,a_child)\n f.close()",
"def submit(self, **kwargs):\n pwd = curdir\n wd = dirname(self.logFile)\n chdir(wd)\n d = OrderedDict()\n #d['universe'] = 'vanilla'\n #d['executable'] = self.command\n\td['job-name'] = self.name\n\td['nodes'] = 1\n\td['partition'] = defaults.get('queue')\n\td['time'] = defaults.get(\"cputime\")\n\td['mem'] = defaults.get(\"memory\")\n d['output'] = op_join(wd,\"output.log\")\n d['error'] = op_join(wd,\"output.err\")\n csi_file = open(\"submit.sh\", \"w\")\n\tcsi_file.write(\"#!/bin/bash\\n\")\n data = [\"#SBATCH --%s=%s\\n\" % (k, v) for k, v in d.iteritems()]\n csi_file.write(\"\".join(data))\n\tcsi_file.write(\"export DAMPE_WORKFLOW_SERVER_URL=%s\\n\"%DAMPE_WORKFLOW_URL)\n csi_file.write(\"bash script\\n\")\n csi_file.close()\n output = self.__run__(\"sbatch submit.sh\")\n chdir(pwd)\n return self.__regexId__(output)",
"def terraform_run(**kwargs):\n\n # Push xcom with start date of this DAG run both for start and destroy\n ti: TaskInstance = kwargs[\"ti\"]\n if kwargs[\"dag_run\"].dag_id == TerraformTasks.DAG_ID_CREATE_VM:\n prev_start_time_vm = ti.xcom_pull(\n key=TerraformTasks.XCOM_START_TIME_VM,\n task_ids=TerraformTasks.TASK_ID_RUN,\n dag_id=TerraformTasks.DAG_ID_CREATE_VM,\n include_prior_dates=True,\n )\n ti.xcom_push(TerraformTasks.XCOM_PREV_START_TIME_VM, prev_start_time_vm)\n ti.xcom_push(TerraformTasks.XCOM_START_TIME_VM, ti.start_date)\n if kwargs[\"dag_run\"].dag_id == TerraformTasks.DAG_ID_DESTROY_VM:\n ti.xcom_push(TerraformTasks.XCOM_DESTROY_TIME_VM, ti.start_date)\n\n token = BaseHook.get_connection(AirflowConns.TERRAFORM).password\n terraform_api = TerraformApi(token)\n\n target_addrs = TerraformTasks.TERRAFORM_MODULE_WORKER_VM\n workspace_id = get_workspace_id()\n message = f'Triggered from airflow DAG \"{kwargs[\"dag_run\"].dag_id}\" at {ti.start_date}'\n\n run_id = terraform_api.create_run(workspace_id, target_addrs, message)\n logging.info(run_id)\n\n # Push run id\n ti.xcom_push(TerraformTasks.XCOM_TERRAFORM_RUN_ID, run_id)",
"def main():\n # Backup the tweets\n subprocess.call(['tar -czvf tweet.tar.gz /usr/local/airflow/data/', '-1'], shell=True)\n\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n print (join(path,'dags/daglibs/token.pickle'))\n if os.path.exists(join(path,'dags/daglibs/token.pickle')):\n with open(join(path,'dags/daglibs/token.pickle'), 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(join(path,\n 'dags/daglibs/credentials.json'), SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open(join(path,'dags/daglibs/token.pickle'), 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('drive', 'v3', credentials=creds)\n\n # Call the Drive v3 API\n file_metadata = {'name': 'tweet.tar.gz'}\n media = MediaFileUpload('/usr/local/airflow/tweet.tar.gz', mimetype='*/*')\n file = service.files().create(body=file_metadata,\n media_body=media,\n fields='id').execute()\n print (\"File ID: {}\".format(file.get('id')))\n if file.get('id'):\n return True\n return False",
"def create(\n metadata: ProjectMetadata, pipeline_name, env, target_path\n): # pylint: disable=too-many-locals\n loader = jinja2.FileSystemLoader(str(Path(__file__).parent))\n jinja_env = jinja2.Environment(autoescape=True, loader=loader, lstrip_blocks=True)\n jinja_env.filters[\"slugify\"] = slugify\n template = jinja_env.get_template(\"airflow_dag_template.j2\")\n\n project_path = metadata.project_path\n package_name = metadata.package_name\n dag_filename = f\"{package_name}_dag.py\"\n\n target_path = Path(target_path)\n target_path = target_path / dag_filename\n\n target_path.parent.mkdir(parents=True, exist_ok=True)\n with KedroSession.create(package_name, project_path, env=env) as session:\n context = session.load_context()\n pipeline = context.pipelines.get(pipeline_name)\n\n dependencies = defaultdict(list)\n for node, parent_nodes in pipeline.node_dependencies.items():\n for parent in parent_nodes:\n dependencies[parent].append(node)\n\n template.stream(\n dag_name=package_name,\n dependencies=dependencies,\n env=env,\n pipeline_name=pipeline_name,\n package_name=package_name,\n pipeline=pipeline,\n ).dump(str(target_path))\n\n secho(\"\")\n secho(\"An Airflow DAG has been generated in:\", fg=\"green\")\n secho(str(target_path))\n secho(\"This file should be copied to your Airflow DAG folder.\", fg=\"yellow\")\n secho(\n \"The Airflow configuration can be customized by editing this file.\", fg=\"green\"\n )\n secho(\"\")\n secho(\n \"This file also contains the path to the config directory, this directory will need to \"\n \"be available to Airflow and any workers.\",\n fg=\"yellow\",\n )\n secho(\"\")\n secho(\n \"Additionally all data sets must have an entry in the data catalog.\",\n fg=\"yellow\",\n )\n secho(\n \"And all local paths in both the data catalog and log config must be absolute paths.\",\n fg=\"yellow\",\n )\n secho(\"\")",
"def _submitInstance( self, imageName, workDir ):\n return S_OK()",
"def convert_to_airflow_op(self):\n return spark_submit_operator.SparkSubmitOperator(\n task_id=self.task_id,\n trigger_rule=self.trigger_rule,\n params=self.params,\n # Spark specific\n conn_id='spark_default',\n name=self.name,\n application=self.application,\n conf=self.conf,\n files=self.files,\n py_files=self.py_files,\n jars=self.jars,\n java_class=self.java_class,\n packages=self.packages,\n exclude_packages=self.exclude_packages,\n repositories=self.repositories,\n total_executor_cores=self.total_executor_cores,\n executor_cores=self.executor_cores,\n executor_memory=self.executor_memory,\n driver_memory=self.driver_memory,\n keytab=self.keytab,\n principal=self.principal,\n num_executors=self.num_executors,\n application_args=self.application_args,\n verbose=self.verbose,\n env_vars=self.env_vars,\n driver_classpath=self.driver_classpath\n )",
"def submit(self, target=None, name: str = None, args: Tuple = (), kwargs: Dict = None, *, daemon: bool = None):\n raise NotImplementedError",
"def _submit_to_queue(self, script_file):",
"def submit_run_request(\n asset_graph: ExternalAssetGraph,\n run_request: RunRequest,\n instance: DagsterInstance,\n workspace: BaseWorkspaceRequestContext,\n pipeline_and_execution_plan_cache: Dict[int, Tuple[ExternalJob, ExternalExecutionPlan]],\n) -> None:\n repo_handle = asset_graph.get_repository_handle(\n cast(Sequence[AssetKey], run_request.asset_selection)[0]\n )\n location_name = repo_handle.code_location_origin.location_name\n job_name = _get_implicit_job_name_for_assets(\n asset_graph, cast(Sequence[AssetKey], run_request.asset_selection)\n )\n if job_name is None:\n check.failed(\n \"Could not find an implicit asset job for the given assets:\"\n f\" {run_request.asset_selection}\"\n )\n\n if not run_request.asset_selection:\n check.failed(\"Expected RunRequest to have an asset selection\")\n\n pipeline_selector = JobSubsetSelector(\n location_name=location_name,\n repository_name=repo_handle.repository_name,\n job_name=job_name,\n asset_selection=run_request.asset_selection,\n op_selection=None,\n )\n\n selector_id = hash_collection(pipeline_selector)\n\n if selector_id not in pipeline_and_execution_plan_cache:\n code_location = workspace.get_code_location(repo_handle.code_location_origin.location_name)\n\n external_job = code_location.get_external_job(pipeline_selector)\n\n external_execution_plan = code_location.get_external_execution_plan(\n external_job,\n {},\n step_keys_to_execute=None,\n known_state=None,\n instance=instance,\n )\n pipeline_and_execution_plan_cache[selector_id] = (\n external_job,\n external_execution_plan,\n )\n\n external_job, external_execution_plan = pipeline_and_execution_plan_cache[selector_id]\n\n run = instance.create_run(\n job_snapshot=external_job.job_snapshot,\n execution_plan_snapshot=external_execution_plan.execution_plan_snapshot,\n parent_job_snapshot=external_job.parent_job_snapshot,\n job_name=external_job.name,\n run_id=None,\n resolved_op_selection=None,\n op_selection=None,\n run_config={},\n step_keys_to_execute=None,\n tags=run_request.tags,\n root_run_id=None,\n parent_run_id=None,\n status=DagsterRunStatus.NOT_STARTED,\n external_job_origin=external_job.get_external_origin(),\n job_code_origin=external_job.get_python_origin(),\n asset_selection=frozenset(run_request.asset_selection),\n )\n\n instance.submit_run(run.run_id, workspace)",
"def submit_job_emr(self, context):\n # Get snowflake connection details based on conn_id\n self.hook = SnowFlakeHook(conn_id=self.conn_id)\n self.conn = self.hook.get_conn()\n\n # Update the parameters for the spark job\n # to use the snowflake conn details\n import base64\n self.parameters.update({'account_name': self.conn.host,\n 'database': self.conn.schema,\n 'username': self.conn.login,\n 'password': base64.b64encode(self.conn.password),\n 'warehouse': self.conn.extra_dejson.get('warehouse', ''),\n 'role': self.conn.extra_dejson.get('role', '')})\n\n # Set spark job related configs if provided\n spark_configs = self.parameters.get('spark_configs', ' ')\n if self.packages:\n spark_packages=self.packages\n else:\n spark_packages = ' --packages net.snowflake:snowflake-jdbc:3.4.2,net.snowflake:spark-snowflake_2.11:2.2.8 '\n geniesnowflake_sparkjob = 's3://nike-emr-bin/' + self.env_type + '/common/scripts/geniesnowflake_sparkload.py '\n\n import json\n self.command_json = json.dumps(self.parameters)\n self.conn_id = self.emr_conn_id\n self.command = ' --master yarn --deploy-mode cluster ' + \\\n spark_configs + \\\n spark_packages + \\\n geniesnowflake_sparkjob + \\\n self.command_json\n super(GenieSnowflakeOperator, self).execute(context)\n self.conn_id = self.snow_conn_id",
"def execute(self, context: dict):\n conf = copy.deepcopy(context[\"dag_run\"].conf)\n logger.debug(f\"Got conf {conf}.\")\n execution_context = conf[\"execution_context\"]\n if \"Payload\" in execution_context:\n payload_context = Context.populate(execution_context)\n else:\n payload_context = Context(data_partition_id=execution_context[\"data-partition-id\"],\n app_key=execution_context.get(\"AppKey\", \"\"))\n workflow_name = conf[\"workflow_name\"]\n run_id = conf[\"run_id\"]\n status = self.status.value\n status_updater = UpdateStatus(\n workflow_name=workflow_name,\n workflow_url=Variable.get(\"core__service__workflow__host\"),\n workflow_id=\"\",\n run_id=run_id,\n status=status,\n token_refresher=AirflowTokenRefresher(),\n context=payload_context\n )\n status_updater.update_workflow_status()\n\n if self._show_skipped_ids:\n skipped_ids, saved_record_ids = self._create_skipped_report(context)\n context[\"ti\"].xcom_push(key=\"skipped_ids\", value=skipped_ids)\n context[\"ti\"].xcom_push(key=\"saved_record_ids\", value=saved_record_ids)\n\n if self.status is self.prev_ti_state.FAILED:\n raise PipelineFailedError(\"Dag failed\")",
"def _submit_special(self, config, job_id, job_params):\n (module, method) = job_params[\"method\"].split(\".\")\n self.logger.log(\"Submit %s as a %s:%s job\" % (job_id, module, method))\n\n self.sr.run(\n config,\n job_params,\n job_id,\n callback=self.callback_url,\n fin_q=[self.jr_queue],\n )",
"def test_dag():\n\n def f(task_id):\n return f\"OP:{task_id}\"\n\n with DAG(dag_id=\"test_xcom_dag\", default_args=DEFAULT_ARGS) as dag:\n operators = [PythonOperator(python_callable=f, task_id=f\"test_op_{i}\") for i in range(4)]\n return dag, operators",
"def arcSubmit(model_list, config,rootDir, verbose=False, resubmit=None, runCode=None):\r\n\tjobID = []\r\n\tfor model in model_list:\r\n\t\t# put some dummy data in the ouput file\r\n\t\tmodelSubmitName=model.submit()\r\n\t\tif verbose: print \"Submitting \",modelSubmitName\r\n\t\twith cd(model.dirPath):\r\n\t\t\tjID = subprocess.check_output(\"sbatch -J %s --export=ALL %s\" % (model.name(), modelSubmitName), shell=True) # submit the script (change devel after, and shouldn't have to ssh in)\r\n\t\tjobID.append(jID[20:-1])\r\n\t\t\r\n\tjobIDstr=':$'.join(jobID) # make single string appropriately formatted of job ids..\r\n\t# now re-run this entire script so that the next iteration in the algorithm.\r\n\t# can be run\r\n\tif resubmit is not None:\r\n\t\t# Submit the next job in the iteration. runOptimise is very quick so no need to submit to ARC again - just run on the front end.\r\n\t\t\r\n\t\tjobName='RE'+config.name()\r\n\t\t# TODO move to better python syntax for var printing. Think can use named vars in...\r\n\t\tcmd = [\"sbatch -p devel --export=ALL --time=10 --dependency=afterany:%s -J %s \"%(jobIDstr,jobName)]\r\n\t\tcmd.extend(resubmit) # add the arguments in including the programme to run..\r\n\t\t#cmd = resubmit\r\n\t\tcmd=' '.join(cmd) # convert to one string.\r\n\t\tcmd = cmd + \" &>progressResubmit.txt\"\r\n\t\tif verbose: print \"Next iteration cmd is \", cmd\r\n\t\tjid = subprocess.check_output(cmd, shell=True) # submit the script. Good to remove shell=True \r\n\t\t#subprocess.check_output(cmd, shell=True)\r\n\t\tif verbose: print \"Job ID for next iteration is %s\"%jid[20:-1]\r\n\r\n\treturn True",
"def submit(id, host):",
"def __init__(\n self,\n *,\n dag_id: str,\n cloud_workspace: CloudWorkspace,\n publisher_id: str,\n format_specification: str,\n bq_dataset_id: str = \"onix\",\n bq_table_name: str = \"onix\",\n bq_dataset_description: str = \"Thoth ONIX Feed\",\n bq_table_description: str = None,\n api_dataset_id: str = \"onix\",\n host_name: str = \"https://export.thoth.pub\",\n schema_folder: str = default_schema_folder(),\n observatory_api_conn_id: str = AirflowConns.OBSERVATORY_API,\n catchup: bool = False,\n start_date: pendulum.DateTime = pendulum.datetime(2022, 12, 1),\n schedule: str = \"@weekly\",\n ):\n super().__init__(\n dag_id,\n start_date=start_date,\n schedule=schedule,\n airflow_conns=[observatory_api_conn_id],\n catchup=catchup,\n tags=[\"oaebu\"],\n )\n\n self.dag_id = dag_id\n self.cloud_workspace = cloud_workspace\n self.publisher_id = publisher_id\n self.bq_dataset_id = bq_dataset_id\n self.bq_table_name = bq_table_name\n self.bq_dataset_description = bq_dataset_description\n self.bq_table_description = bq_table_description\n self.api_dataset_id = api_dataset_id\n self.host_name = host_name\n self.format_specification = format_specification\n self.schema_folder = schema_folder\n self.observatory_api_conn_id = observatory_api_conn_id\n\n check_workflow_inputs(self)\n\n self.add_setup_task(self.check_dependencies)\n self.add_task(self.download)\n self.add_task(self.upload_downloaded)\n self.add_task(self.transform)\n self.add_task(self.upload_transformed)\n self.add_task(self.bq_load)\n self.add_task(self.add_new_dataset_releases)\n self.add_task(self.cleanup)",
"def run(\n path,\n host,\n params={}\n ):\n\n logging.info(\"Running '%s' in '%s'...\", path, host)\n client = kfp.Client(f\"{host}\")\n try:\n result = client.create_run_from_pipeline_package(\n pipeline_file=path,\n arguments=params\n )\n logging.info(\"View run: %s/#/runs/details/%s\",\n host,\n result.run_id)\n except Exception as ex:\n logging.error(\"Failed to run '{%s}' with error:\\n{%s}\", path, ex)\n sys.exit(1)",
"def register_pickled_dag(dag,dag_folder_path=''):\n\n # set fileloc so that WebUi shows the pickle reader\n dag.fileloc = dag._full_filepath\n dag.sync_to_db()\n\n dag_name = ''.join(['auto_',dag.dag_id])\n \n if not dag_folder_path:\n dag_folder_path = settings.DAGS_FOLDER\n \n dag_pkl_name = os.path.join(dag_folder_path,''.join([dag_name,'.pkl']))\n dag_pyfile_name = os.path.join(dag_folder_path,''.join([dag_name,'.py']))\n \n print(dag_pkl_name)\n\n with open(dag_pkl_name,'wb') as f:\n pickle.dump(dag,f,pickle.HIGHEST_PROTOCOL)\n\n pyscript = \"\"\"\n import pickle\n from airflow.models import DAG\n \n with open('{}', 'rb') as f:\n tmp_object = pickle.load(f)\n \n if isinstance(tmp_object,DAG):\n tmp_object.fileloc = tmp_object._full_filepath\n globals()['{}'] = tmp_object\n del tmp_object\n \"\"\"\n pyscript = pyscript.format(dag_pkl_name,dag_name)\n dedented_pyscript = textwrap.dedent(pyscript).strip()\n\n with open(dag_pyfile_name,'w') as f:\n f.write(dedented_pyscript)",
"def test_dag():\n return airflow.DAG(\n \"testdag\",\n default_args={\"owner\": \"airflow\", \"start_date\": airflow.utils.dates.days_ago(0), 'provide_context': True},\n schedule_interval=\"@daily\",\n )",
"def submit_simulation(sim_dir, job_file):\n subprocess.run(['sbatch', job_file], cwd=sim_dir)\n pass",
"def submit(fragment,njobs,nevts,outdir=\"\",first=None,indices=None,logdir=\"\",tag=\"\",dry=False,slc6=False,verb=0):\n print(\">>> Submitting...\")\n indir = os.path.dirname(fragment) or '.'\n fullfrag = os.path.abspath(fragment)\n ensuredir(os.path.join(indir,logdir)) # log directory\n ensuredir(outdir) # ensure output directory exists before submitting\n #args = f\"{outdir} {fullfrag} maxevts={nevts} index=$(ProcId) seed=$$([$(ProcId)+1])\" # start from 0\n args = f\"{outdir} {fullfrag} maxevts={nevts} index=$$([$(ProcId)+1]) seed=$$([$(ProcId)+1])\" # start from 1\n if tag:\n args += f\" tag={tag}\"\n if indices:\n indices_ = [ ]\n for index in indices:\n if isinstance(index,str) and index.count(':')==1:\n start, end = index.split(':') # e.g. '1:4' = [1, 2, 3, 4]\n for i in range(int(start),int(end)+1):\n indices_.append(i)\n else:\n indices_.append(int(index))\n args = args.replace('$(ProcId)','$(i)')\n queue = f\"-queue i in {', '.join(str(i) for i in indices_)}\"\n #queue = f\"-a 'queue i from ( {', '.join(str(i) for i in indices_)} )'\"\n elif first:\n args = args.replace('$(ProcId)','$(i)')\n queue = f\"-queue i from seq {first} {first+njobs-1} \\|\"\n #queue = f\"-a 'queue from seq {first} {njobs}|'\"\n else:\n queue = f\"-queue {njobs}\"\n name = f\"{os.path.basename(fragment).replace('.py','')}\"\n log = os.path.join(logdir,f\"submit_fragment{tag}.$(ClusterId).$(ProcId).log\")\n subcmd = f\"condor_submit submit_fragment.sub -a 'initialdir={indir}' -a 'mylogfile={log}'\"\n subcmd += f\" -a 'arguments={args}'\" # -a 'should_transfer_files=no'\n subcmd += f\" -batch-name {name} {queue}\" #-queue '{queue}'\n if slc6:\n subcmd += f\" -a 'requirements = (OpSysAndVer =?= \\\"SLCern6\\\")'\"\n if verb>=4:\n subcmd += \" -verbose\"\n print(\">>> \"+subcmd)\n if not dry:\n os.system(subcmd)",
"def submit(self):\n if self._submit:\n raise Exception('The workfow execution has already been started.')\n\n out, err = subprocess.Popen('pegasus-run %s' % self.submit_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n shell=True, cwd=self.base_dir).communicate()\n if err:\n raise Exception(err)\n\n self._submit = True\n print('The pegasus workflow has started its execution.\\n'\n 'Please, use the status() method to follow the progress of the workflow execution.')",
"def submit_job_snowflake(self, sql_file_path):\n try:\n self.get_cursor()\n sql_file_path = str(sql_file_path).strip()\n self.snowflake_query_exec(self.cur, self.conn.schema, sql_file_path)\n except:\n self.cur.close()\n raise Exception(\"Snowflake step Failed, Job failed\")\n finally:\n self.cur.close()"
] | [
"0.7314224",
"0.709528",
"0.63085747",
"0.6191946",
"0.60718346",
"0.5973721",
"0.59433025",
"0.5939774",
"0.5474602",
"0.5430673",
"0.53907543",
"0.53773415",
"0.5344598",
"0.52862775",
"0.52801114",
"0.52279294",
"0.5198155",
"0.5184064",
"0.5162414",
"0.5139023",
"0.5124112",
"0.51012325",
"0.5069351",
"0.5051364",
"0.5048282",
"0.50337166",
"0.4966841",
"0.4960457",
"0.49560687",
"0.4940974"
] | 0.7345251 | 0 |
Collect all of the log files which have been produced by this production and return their contents as a dictionary. | def collect_logs(self):
logs = glob.glob(f"{self.production.rundir}/*.err") #+ glob.glob(f"{self.production.rundir}/*/logs/*")
logs += glob.glob(f"{self.production.rundir}/*.out")
messages = {}
for log in logs:
with open(log, "r") as log_f:
message = log_f.read()
messages[log.split("/")[-1]] = message
return messages | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getAllEntries(self):\n \n log_entries_dict = collections.defaultdict(list)\n for logfile in os.listdir(self.log_folder):\n log = os.path.join(self.log_folder, logfile)\n with open(log, 'rb') as l:\n logCSVreader = csv.reader(l, delimiter=\"|\")\n logCSVreader.next() # skip header\n try:\n for row in logCSVreader:\n zip_file = row[0]\n log_entries_dict[zip_file].append(row)\n except:\n pass\n return log_entries_dict",
"def _get_logs(self):\n contents = dict()\n contents[\"Scheduler\"] = self._parse_log_content(\n self.scheduler.client.get_scheduler_logs()\n )\n log_workers = self.scheduler.client.get_worker_logs()\n for i, (_, worker_content) in enumerate(log_workers.items()):\n contents[f\"Worker-{i}\"] = self._parse_log_content(worker_content)\n return contents",
"def read_logs(self):\n for system, filenames in SmokeTests.INPUT_FILES.items():\n input_file = filenames[\"logs\"]\n with open(input_file) as fin:\n self._logs[system] = fin.read()",
"def _get_daemon_logs_files(self):\n for fname in os.listdir('/tmp/'):\n fname = os.path.join('/tmp/', fname)\n if fname.lower().endswith('.log'):\n yield fname",
"def all_logs(self):\n return os.listdir(LOGS_BASE_PATH)",
"def get_all_logs(directory, artifacts):\n log_files = {}\n if artifacts:\n dirs = [f.filename for f in view_base.gcs_ls('%s/artifacts' % directory)\n if f.is_dir]\n else:\n dirs = [directory]\n for d in dirs:\n log_files[d] = []\n for f in view_base.gcs_ls(d):\n log_name = regex.log_re.search(f.filename)\n if log_name:\n log_files[d].append(f.filename)\n return log_files",
"def init_log_files(self): \n \n dir_path = self.init_logs_directory()\n log_files = self.join_path(dir_path, PATH_FOR_LOG_FILES)\n \n return log_files",
"def getLogs():",
"def getLogs():",
"def logs(self):\n return self._logs",
"def logs(self):\n return self._logs",
"def logs(self):\n if not self._logs:\n self.read_logs()\n return self._logs",
"def get_all(self):\n # read log\n d = {}\n log = self.get_logfile()\n if not os.path.isfile(log):\n return d\n f = open(log, \"r\")\n if f.mode == 'r':\n lines = f.readlines()\n for line in lines:\n dline = json.loads(line)\n d.update(dline)\n f.close()\n return d",
"def log(self):\n return {\n _.hash(): {\n \"audio_file\": _.audio_file.location,\n \"audio_file_hash\": _.audio_file.hash(),\n \"transcript_file\": _.transcript_file.location,\n \"transcript_file_hash\": _.transcript_file.hash(),\n }\n for _ in self.exemplars\n }",
"def get_logs(build_dir, log_files, pod_name, filters, objref_dict):\n all_logs = {}\n results = {}\n old_dict_len = len(objref_dict)\n\n all_logs = get_all_logs(build_dir, True)\n apiserver_filename = find_log_files(all_logs, \"kube-apiserver.log\")\n kubelet_filenames = find_log_files(all_logs, \"kubelet.log\")\n if not pod_name and not objref_dict:\n return get_logs_no_pod(apiserver_filename, kubelet_filenames, filters,\n objref_dict, all_logs)\n for kubelet_log in kubelet_filenames:\n if pod_name:\n parsed_dict, pod_in_file = parse_log_file(kubelet_log, pod_name, make_dict=True,\n objref_dict=objref_dict)\n objref_dict.update(parsed_dict)\n if len(objref_dict) > old_dict_len or not pod_name or pod_in_file or not objref_dict:\n if log_files == []:\n log_files = [kubelet_log]\n if apiserver_filename:\n log_files.extend(apiserver_filename)\n for log_file in log_files:\n parsed_file = parse_log_file(log_file, pod_name, filters,\n objref_dict=objref_dict)\n if parsed_file:\n results[log_file] = parsed_file\n break\n\n return all_logs, results, objref_dict, log_files",
"def log_entries(self) -> Generator[dict, None, None]:\n if self.log_stream:\n yield from logs.fetch_stream(self.log_stream)\n else:\n yield from []",
"def read_logs(self) -> Dict[str, Any]:\n return self.maps[0]",
"def logs(self):\n return self.logger.logs()",
"def fetchLogs(self):\n return [record.msg for record in self.handler.buffer]",
"def getLogs():\n # in flux, it may be possible to provide more structured information\n # like python Failure instances",
"def fileHandlers(self):\n fileHandlers = list()\n handlers = self.logger.handlers\n for handler in handlers:\n try:\n if handler._name.startswith(\"LogFile-\"):\n fileHandlers.append(handler)\n except:\n pass\n return fileHandlers",
"def list_logs():\n resource_route = \"/static/log/\"\n file_request_path = request.base_url[:request.base_url.rfind('/')] + resource_route\n path_to_current_file = os.path.dirname(os.path.abspath(__file__))\n logs_path = os.path.join(path_to_current_file, 'static', 'log')\n directory_list = os.listdir(logs_path)\n log_files = [f for f in directory_list if os.path.isfile(os.path.join(logs_path, f))]\n log_files.sort()\n if '.gitignore' in log_files:\n log_files.remove('.gitignore')\n full_log_paths = [file_request_path + f for f in log_files]\n response_code = 200\n return make_response(jsonify({'files': full_log_paths}), response_code)",
"def list_log_files():\n for filename in os.listdir(\"/home/malyhass/log-parser\"):\n if filename.startswith(\"access.log\"):\n yield filename",
"def extract_values(self):\n log_unmatched = []\n stdout_unmatched = []\n stderr_unmatched = []\n result = True\n\n regex_sources = []\n if self.logpath and self.cfg.log_regexps:\n regex_sources.append(\n (self.logpath, self.cfg.log_regexps, log_unmatched)\n )\n if self.outpath and self.cfg.stdout_regexps:\n regex_sources.append(\n (self.outpath, self.cfg.stdout_regexps, stdout_unmatched)\n )\n if self.errpath and self.cfg.stderr_regexps:\n regex_sources.append(\n (self.errpath, self.cfg.stderr_regexps, stderr_unmatched)\n )\n\n for outfile, regexps, unmatched in regex_sources:\n file_result, file_extracts, file_unmatched = match_regexps_in_file(\n logpath=outfile, log_extracts=regexps, return_unmatched=True\n )\n unmatched.extend(file_unmatched)\n for k, v in file_extracts.items():\n if isinstance(v, bytes):\n self.extracts[k] = v.decode(\"utf-8\")\n else:\n self.extracts[k] = v\n result = result and file_result\n\n if log_unmatched or stdout_unmatched or stderr_unmatched:\n\n err = (\n \"Timed out starting {}({}):\" \" unmatched log_regexps in {}.\"\n ).format(type(self).__name__, self.name, self.logpath)\n\n err += format_regexp_matches(\n name=\"log_regexps\",\n regexps=self.cfg.log_regexps,\n unmatched=log_unmatched,\n )\n\n err += format_regexp_matches(\n name=\"stdout_regexps\",\n regexps=self.cfg.stdout_regexps,\n unmatched=stdout_unmatched,\n )\n\n err += format_regexp_matches(\n name=\"stderr_regexps\",\n regexps=self.cfg.stderr_regexps,\n unmatched=stderr_unmatched,\n )\n\n if self.extracts:\n err += \"{newline}Matching groups:{newline}\".format(\n newline=os.linesep\n )\n err += os.linesep.join(\n [\n \"\\t{}: {}\".format(key, value)\n for key, value in self.extracts.items()\n ]\n )\n return FailedAction(error_msg=err)\n return result",
"def FindLogFiles(base_dir):\n logcat_filter = re.compile(r'^logcat_(\\S+)_(\\d+)$')\n # list of tuples (<device_id>, <seq num>, <full file path>)\n filtered_list = []\n for cur_file in os.listdir(base_dir):\n matcher = logcat_filter.match(cur_file)\n if matcher:\n filtered_list += [(matcher.group(1), int(matcher.group(2)),\n os.path.join(base_dir, cur_file))]\n filtered_list.sort()\n file_map = {}\n for device_id, _, cur_file in filtered_list:\n if device_id not in file_map:\n file_map[device_id] = []\n\n file_map[device_id] += [cur_file]\n return file_map",
"def upload_logs():\n return {\n 'page': 'upload_logs',\n 'raw_logs': '',\n }",
"def _get_logs(self):\n logstart = self.LOGSTART%(self.session.uuid, self.session.run_counter)\n logend = self.LOGEND%(self.session.uuid, self.session.run_counter)\n log = self.container.logs().decode('UTF-8')\n while log.find(logstart) == -1 or log.find(logend) == -1:\n log = self.container.logs().decode('UTF-8')\n cleaned_log = self._get_cleaned_logs(log, logstart, logend)\n self.session.run_counter = self.session.run_counter + 1\n self.session.save()\n return cleaned_log",
"def log_paths(self): # pylint:disable=function-redefined\n return self._log_paths",
"def get_server_logs(self):\n self.response.content\n binary_body = re.split('--==.*==', self.response.content)[2].split('\\r\\n')[5]\n\n f = StringIO.StringIO()\n f.write(bytearray(binary_body))\n\n memory_zip = ZipFile(f)\n zip_content = {name: memory_zip.read(name) for name in memory_zip.namelist()}\n oracc_log = zip_content['oracc.log']\n request_log = zip_content['request.log']\n\n # Check if server returns a lemmatised file\n autolem = None \n for key, value in zip_content.iteritems():\n if key.endswith(\"autolem.atf\"):\n autolem = value\n\n print zip_content.keys()\n print \"@\"*30\n print oracc_log\n print \"@\"*30\n print request_log\n print \"@\"*30\n if autolem:\n print autolem\n print \"@\"*30\n\n return oracc_log, request_log, autolem",
"def load_logs(self, mode):\n\t\ttry:\n\t\t\tif mode == \"c\":\n\t\t\t\twith open(LOGS_FULL_PATH, 'rb') as f:\n\t\t\t\t\treturn pickle.load(f)\n\t\t\telif mode == \"a\":\n\t\t\t\twith open(ARCHIVES_FULL_PATH, 'rb') as f:\n\t\t\t\t\treturn pickle.load(f)\n\t\texcept IOError:\n\t\t\tprint(\"Failed to open logs files\")\n\t\t\treturn {}\n\t\texcept EOFError:\n\t\t\treturn {}"
] | [
"0.7099511",
"0.685938",
"0.6689108",
"0.65805835",
"0.654927",
"0.65117294",
"0.6469987",
"0.6433183",
"0.6433183",
"0.63312954",
"0.63312954",
"0.6329445",
"0.626463",
"0.6235582",
"0.6214215",
"0.61539227",
"0.61492366",
"0.6038331",
"0.60342073",
"0.6025179",
"0.5985445",
"0.5954207",
"0.5908937",
"0.5900402",
"0.58887696",
"0.5869581",
"0.58526",
"0.58461016",
"0.58267736",
"0.58225346"
] | 0.8644327 | 0 |
Builds the sbatch file in order to combine the genomics.vcf samples contained in current_batch into a single one. | def build_GenotypeGVCFs_sbatch(working_dir, combined_gvcf_files, scratch=False, interval=None):
name_batch1 = os.path.basename([item for item in combined_gvcf_files if "batch1" in item][0])
interval_name = ""
#there must be at least one batch so look for it, not elegant but works
    if name_batch1.split("batch1")[1] != ".g.vcf.gz":
interval_name = name_batch1.split("batch1")[1].split(".")[0]
job_name = "GenotypeGVCFs{}".format(interval_name)
output_file = "{}_joincalled{}.g.vcf.gz".format(CONFIG["output_header"], interval_name)
#create the sbatch file to analyse the current batch of samples
sbatch_file = os.path.join(working_dir, "sbatch", "{}.sbatch".format(job_name))
with open(sbatch_file, "w") as GenotypeGVCFs:
slurm = slurm_header(CONFIG["uppmax_project"], job_name, working_dir)
GenotypeGVCFs.write(slurm)
GenotypeGVCFs.write("\n")
#rsync to scratch all samples
if scratch:
GenotypeGVCFs.write("mkdir -p $SNIC_TMP/{} \n".format(job_name)) # create tmp directory
GenotypeGVCFs.write("mkdir -p $SNIC_TMP/{}/VCF/ \n".format(job_name)) # create tmp directory
#now cycle over the samples, build the GATK command
combined_gvcf_string_input = ""
for combined_gvcf in combined_gvcf_files:
combined_gvcf_path_dir = combined_gvcf
if scratch:
GenotypeGVCFs.write("rsync -rptoDLv {}* $SNIC_TMP/{}/\n".format(combined_gvcf, job_name))
combined_gvcf_name = os.path.basename(combined_gvcf)
combined_gvcf_path_dir = "$SNIC_TMP/{}/{}".format(job_name, combined_gvcf_name)
combined_gvcf_string_input += "-V {} \\\n".format(combined_gvcf_path_dir)
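        #assemble the GenotypeGVCFs java call: walker options from CONFIG plus one -V argument per batch gVCF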
GATK_command= "java -Xmx250g -jar {} -T GenotypeGVCFs \\\n".format(CONFIG["GATK"])
for option in CONFIG["walkers"]["GenotypeGVCFs"]:
GATK_command += "{} \\\n".format(option)
GATK_command += "{} ".format(combined_gvcf_string_input)
if interval is not None:
GATK_command += "-L {} \\\n".format(interval)
if scratch:
GATK_command += "-o $SNIC_TMP/{}/VCF/{}\n".format(job_name, output_file)
#once this is done rsync back to lupus
GATK_command += "rsync $SNIC_TMP/{}/VCF/{}* {}/VCF/\n".format(job_name, output_file , working_dir)
else:
GATK_command += "-o {}/VCF/{}\n\n".format(working_dir, output_file)
GenotypeGVCFs.write(GATK_command)
    #return path to sbatch file
return sbatch_file | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GenotypeGVCFs():\n #creates sbatch files to merge batches of batch_size genomics vcf\n cwd = os.getcwd()\n sbatch_files = []\n if not os.path.isdir(os.path.join(cwd, \"01_CombineGVCFs\")):\n sys.exit(\"Directory 01_CombineGVCFs does not exits exists, something went wrong here.\")\n if os.path.isdir(os.path.join(cwd, \"02_GenotypeGVCFs\")):\n print \"WARNING: 02_GenotypeGVCFs already present, assuming this step has been completed with success.\"\n return sbatch_files\n else:\n #create the folder structure\n os.mkdir(os.path.join(cwd, \"02_GenotypeGVCFs\"))\n os.mkdir(os.path.join(cwd, \"02_GenotypeGVCFs\", \"sbatch\"))\n os.mkdir(os.path.join(cwd, \"02_GenotypeGVCFs\", \"std_err\"))\n os.mkdir(os.path.join(cwd, \"02_GenotypeGVCFs\", \"std_out\"))\n os.mkdir(os.path.join(cwd, \"02_GenotypeGVCFs\", \"VCF\"))\n #Build the sbatch files for the join calling step\n working_dir = os.path.join(cwd, \"02_GenotypeGVCFs\")\n #now retrive the VCF stored in 01_CombineGVCFs/VCF/\n combined_gvcfs_to_process = []\n if len(CONFIG[\"intervals_list\"]) == 0:\n #no intervals, I have one file for each batch\n combined_gvcf_files = []\n for current_batch in range(1, CONFIG[\"batch_number\"] +1):\n # for each batch create the vcf file that need to be created by combine step\n combined_gvcf_name = \"{}_batch{}.g.vcf.gz\".format(CONFIG[\"output_header\"], current_batch)\n combined_gvcf_full_path = os.path.join(cwd, \"01_CombineGVCFs\", \"VCF\", combined_gvcf_name)\n combined_gvcf_files.append(combined_gvcf_full_path)\n combined_gvcfs_to_process.append(combined_gvcf_files)\n else:\n for interval in CONFIG[\"intervals_list\"]:\n interval_name = os.path.basename(interval).split(\".\")[0]\n combined_gvcf_files = []\n for current_batch in range(1, CONFIG[\"batch_number\"] +1):\n # for each batch create the vcf file that need to be created by combine step\n combined_gvcf_name = \"{}_batch{}_{}.g.vcf.gz\".format(CONFIG[\"output_header\"], current_batch, interval_name)\n combined_gvcf_full_path = os.path.join(cwd, \"01_CombineGVCFs\", \"VCF\", combined_gvcf_name)\n combined_gvcf_files.append(combined_gvcf_full_path)\n #now ceate a list with interval file and all gvcf to be combines\n interval_plus_gvcfs = [interval ,combined_gvcf_files]\n combined_gvcfs_to_process.append(interval_plus_gvcfs)\n for interval_plus_gvcfs in combined_gvcfs_to_process:\n interval = interval_plus_gvcfs[0]\n combined_gvcf_files = interval_plus_gvcfs[1]\n sbatch_file = build_GenotypeGVCFs_sbatch(working_dir, combined_gvcf_files, CONFIG[\"scratch\"], interval)\n sbatch_files.append(sbatch_file)\n return sbatch_files",
"def build_batches(self):\n\n # get directories for all batches and logs\n batches_dir = join(self.path, 'batches')\n logs_dir = join(self.path, 'log')\n\n # create index file for batches\n index_path = join(batches_dir, 'index.txt')\n index = open(index_path, 'w')\n\n # write file containing simulation paths for each batch\n for i, simulation_path in self.simulation_paths.items():\n\n # determine batch ID\n batch_id = i // self.batch_size\n\n # process new batch\n if i % self.batch_size == 0:\n\n # open batch file and append to index\n batch_path = join(batches_dir, '{:d}.txt'.format(batch_id))\n index.write('{:s}\\n'.format(relpath(batch_path, self.path)))\n batch_file = open(batch_path, 'w')\n\n # create log directory for batch\n mkdir(join(logs_dir, '{:d}'.format(batch_id)))\n\n # write paths to batch file\n batch_file.write('{:s}\\n'.format(simulation_path))\n\n # close batch file\n if i % self.batch_size == (self.batch_size - 1):\n batch_file.close()\n chmod(batch_path, 0o755)\n\n index.close()\n\n chmod(index_path, 0o755)",
"def build(self):\n writer = None\n out_complete = ''\n\n reader = self.module_loader.get_reader()()\n reader.set_configs(self.configs)\n reader.verify_parameters()\n input_path = self.configs.get_resolved('parameters', 'input', 'path')\n output_path = self.configs.get_resolved('parameters', 'output', 'path')\n pathlib.Path(output_path).mkdir(parents=True, exist_ok=True)\n \n files = reader.fetch_input_files(input_path)\n\n for i, group in enumerate(files):\n\n obs = self.params('output_block_size') if self.configs.exists('parameters', 'output_block_size') else 1\n if obs is None:\n obs = len(files)\n\n first_of_batch = (i % obs == 0)\n\n if first_of_batch:\n output_name = reader.output_filename(group['id'])\n writer = self.module_loader.get_writer()(output_path, output_name)\n out_complete = writer.file_path()\n\n Logger.log('started_r_files', group['files'])\n\n with writer.appending(not first_of_batch) as dataset:\n Logger.log('writing_file', out_complete, '' if first_of_batch else '(appending)')\n \n self.read_attributes(dataset)\n self.read_variables(dataset)\n \n if reader.data_grouping:\n complete_path = tuple([path.join(input_path, f) for f in group['files']])\n else:\n complete_path = path.join(input_path, group['files'])\n reader.read_to(dataset, complete_path, self.configs, not first_of_batch)\n\n Logger.info('done')",
"def _fillBatches(self):\n\n batchRE = r\"\"\"\n B\n (?P<observebatch>\\d+?)\n (?P<startend>[SE])\n (?P<sequence>\\d+?)\n _SR\n (?:_(?P<extraInjections>\\d+?|\\w+?))?\n $\n \"\"\"\n batchRE = re.compile(batchRE, re.VERBOSE)\n # We canot infer batches unless we have runorder\n if 'Run Order' in self.sampleMetadata.keys():\n currentBatch = 0\n # Loop over samples in run order\n for index, row in self.sampleMetadata.sort_values(by='Run Order').iterrows():\n nameComponents = batchRE.search(row['Sample File Name'])\n if nameComponents:\n # Batch start\n if nameComponents.group('startend') == 'S':\n # New batch - increment batch no\n if nameComponents.group('sequence') == '1':\n currentBatch = currentBatch + 1\n\n # Don't include the dilution series or blanks\n if not ((row['AssayRole'] == AssayRole.LinearityReference) or (row['SampleType'] == SampleType.ProceduralBlank)):\n self.sampleMetadata.loc[index, 'Batch'] = currentBatch\n self.sampleMetadata.loc[index, 'Correction Batch'] = currentBatch\n\n else:\n warnings.warn('Unable to infer batches without run order, skipping.')\n return",
"def run(self):\n lineage_csv_gz = self.input_files_local[0][0]\n output_db = self.output_files_local()[0]\n log.write(f\"input: {lineage_csv_gz} output: {output_db}\")\n\n with IdSeqDictForUpdate(output_db, IdSeqDictValue.VALUE_TYPE_ARRAY) as lineage_dict:\n batch_list = {}\n with gzip.open(lineage_csv_gz, \"rt\") as gzf:\n for line in gzf:\n fields = line.rstrip().split(\",\")\n taxid = fields[0]\n species, genus, family = fields[-1:-4:-1]\n batch_list[taxid] = [species, genus, family]\n if len(batch_list) >= BATCH_INSERT_SIZE:\n lineage_dict.batch_inserts(batch_list.items())\n batch_list = {}\n lineage_dict.batch_inserts(batch_list.items())",
"def createScript_sbatch(self):\n tools_createScript_sbatch(\n sbatch_script_file_name = self.sbatchFile_addMEM,\n executable = self.executable_addMEM,\n command_line_parameters = self.cfgFiles_addMEM_modified,\n input_file_names = self.inputFiles,\n output_file_names = self.outputFiles,\n script_file_names = self.shFiles_addMEM_modified,\n log_file_names = self.logFiles_addMEM,\n keep_logs = False,\n working_dir = self.workingDir,\n max_num_jobs = 100000000, # it's really silly to limit the number of jobs; use an enormous number as the ,,fix''\n cvmfs_error_log = self.cvmfs_error_log,\n pool_id = self.pool_id,\n use_home = self.use_home,\n validate_outputs = self.check_output_files,\n max_num_submittedJobs = 2000,\n )",
"def gen_vars(input_fqs):\n k_sizes = range(*CONFIG['abyss_bloom']['k_mer_sizes'])\n sr = re.search(PATH_RE, input_fqs[0])\n sr2 = re.search(PATH_RE, input_fqs[1])\n # should be of conventional directory hierarchy\n try:\n assert sr.groups() == sr2.groups()\n except AssertionError:\n print '{0} != {1}'.format(sr.groups(), sr2.groups())\n raise\n\n bfs, bf_flags, fas, fa_flags = [], [], [], []\n for k_size in k_sizes:\n # for abyss_bloom\n # bn: basename\n bf_bn = '{0}_k{1}.bf.gz'.format(sr.group('celltype'), k_size)\n bf_flag_bn = '{0}.SUCCESS'.format(bf_bn)\n bf_dir = os.path.join(sr.group('prefix'), 'kon', sr.group('chr'), 'bf')\n bf = os.path.join(bf_dir, bf_bn)\n bf_flag = os.path.join(bf_dir, bf_flag_bn)\n bfs.append(bf)\n bf_flags.append(bf_flag)\n\n # for konnector\n fa_all_bn = '{0}_k{1}_allpaths.fa.gz'.format(sr.group('celltype'), k_size)\n fa_mer_bn = '{0}_k{1}_merged.fa.gz'.format(sr.group('celltype'), k_size)\n fa_flag_bn = '{0}_k{1}.SUCCESS'.format(sr.group('celltype'), k_size)\n fa_dir = os.path.join(sr.group('prefix'), 'kon', sr.group('chr'), 'fafq')\n fa_all = os.path.join(fa_dir, fa_all_bn)\n fa_mer = os.path.join(fa_dir, fa_mer_bn)\n fa_flag = os.path.join(fa_dir, fa_flag_bn)\n fas.extend([fa_all, fa_mer])\n fa_flags.append(fa_flag)\n\n return k_sizes, bfs, bf_flags, fas, fa_flags",
"def batch_process(in_batch, in_ref, merge_stats=True, dir_counts='', dir_stats='',\r\n in_counts=None, in_stats=None, save='all', out_folder='',\r\n out_prefix='', return_df=None):\r\n\r\n # import ref files and define variables/paths\r\n path = Path.cwd()\r\n df_ref = pd.read_csv(in_ref)\r\n if 'sgRNA_seq' not in df_ref.columns.tolist():\r\n raise Exception('in_ref is missing column: sgRNA_seq')\r\n df_batch = pd.read_csv(in_batch)\r\n list_reqcols = ['sample_id', 'fastq_file', 'condition']\r\n list_batchcols = df_batch.columns.tolist()\r\n if not all(col in list_batchcols for col in list_reqcols):\r\n list_miss = [col for col in list_reqcols if col not in list_batchcols]\r\n raise Exception('Error! in_batch is missing column(s): ' + str(list_miss))\r\n if 't0' not in df_batch['condition'].tolist():\r\n raise Exception('t0 condition not found in the in_batch file')\r\n # defaults to cwd if subdir == ''\r\n counts_path = path / dir_counts\r\n stats_path = path / dir_stats\r\n if in_counts is None:\r\n df_batch['counts_files'] = df_batch['sample_id'] + '_counts.csv'\r\n else:\r\n df_temp = pd.DataFrame(in_counts, columns=['sample_id', 'counts_files'])\r\n df_batch = df_batch.merge(df_temp, on='sample_id', how='left')\r\n if in_stats is None:\r\n df_batch['stats_files'] = df_batch['sample_id'] + '_stats.txt'\r\n else:\r\n df_temp = pd.DataFrame(in_stats, columns=['sample_id', 'stats_files'])\r\n df_batch = df_batch.merge(df_temp, on='sample_id', how='left')\r\n\r\n # import csv files and generate dfs for raw reads and log2 norm\r\n df_reads, df_log2 = df_ref.copy(), df_ref.copy()\r\n for row in df_batch.itertuples():\r\n file = counts_path / row.counts_files\r\n df_temp = pd.read_csv(file, names=['sgRNA_seq', row.sample_id])\r\n # merge on sgRNA_seq to aggregate columns\r\n df_reads = pd.merge(df_reads, df_temp, on='sgRNA_seq')\r\n # perform log2 normalization (brian/broad method)\r\n total_reads = df_reads[row.sample_id].sum()\r\n df_log2[row.sample_id] = df_reads[row.sample_id].apply(lambda x: np.log2((x * 1000000 / total_reads) + 1))\r\n\r\n # perform t0 normalization\r\n df_t0 = df_ref.copy()\r\n t0 = df_batch.loc[df_batch['condition'] == 't0']['sample_id']\r\n if t0.shape[0] != 1:\r\n raise Exception('Only a single t0 sample is allowed')\r\n t0 = t0[0]\r\n for row in df_batch.itertuples():\r\n df_t0[row.sample_id] = df_log2[row.sample_id].sub(df_log2[t0])\r\n df_t0.drop(columns=t0, inplace=True) # drop the t0 col\r\n\r\n # average replicates by condition\r\n list_conds = df_batch['condition'].unique().tolist()\r\n list_conds.remove('t0')\r\n df_conds = df_ref.copy()\r\n for cond in list_conds:\r\n reps = df_batch.loc[df_batch['condition'] == cond]['sample_id'].tolist()\r\n if len(reps) > 1:\r\n df_conds[cond] = df_t0[reps].mean(axis=1)\r\n elif len(reps) == 1:\r\n df_conds[cond] = df_t0[reps]\r\n else:\r\n raise Exception('Error! 
Invalid number of replicates')\r\n\r\n # merge statistics files\r\n if merge_stats:\r\n df_stats = pd.DataFrame(columns=['parameters'])\r\n for row in df_batch.itertuples():\r\n file = stats_path / row.stats_files\r\n df_temp = pd.read_csv(file, sep=': ', engine='python', names=['parameters', row.sample_id])\r\n df_stats = pd.merge(df_stats, df_temp, on='parameters', how='outer')\r\n\r\n # export files and return dataframes if necessary\r\n outpath = path / out_folder\r\n Path.mkdir(outpath, exist_ok=True)\r\n # dictionary to map kws to dfs and output file names\r\n dict_df = {'reads': (df_reads, out_prefix + 'reads.csv'),\r\n 'log2': (df_log2, out_prefix + 'log2.csv'),\r\n 't0': (df_t0, out_prefix + 't0_reps.csv'),\r\n 'conds': (df_conds, out_prefix + 't0_conds.csv')}\r\n if merge_stats:\r\n dict_df.update({'stats': (df_stats, out_prefix + 'stats.csv')})\r\n # determine which files to export\r\n if save == 'all':\r\n save = ['reads','log2','t0', 'conds', 'stats']\r\n if isinstance(save, list):\r\n for key in save:\r\n dict_df[key][0].to_csv(outpath / dict_df[key][1], index=False)\r\n elif save is None:\r\n pass\r\n else:\r\n warnings.warn('Invalid value for save. No files exported')\r\n # determine df to return\r\n print('Batch processing completed')\r\n if return_df in dict_df.keys():\r\n return dict_df[return_df][0]\r\n elif return_df is None:\r\n return\r\n else:\r\n print('Invalid value for return_df. No dataframe returned')\r\n return",
"def run_concat_vcfs(job, context, vcf_ids, tbi_ids):\n\n work_dir = job.fileStore.getLocalTempDir()\n\n vcf_names = ['chrom_{}.vcf.gz'.format(i) for i in range(len(vcf_ids))]\n out_name = 'genome.vcf.gz'\n\n for vcf_id, tbi_id, vcf_name in zip(vcf_ids, tbi_ids, vcf_names):\n job.fileStore.readGlobalFile(vcf_id, os.path.join(work_dir, vcf_name))\n job.fileStore.readGlobalFile(tbi_id, os.path.join(work_dir, vcf_name + '.tbi'))\n\n cmd = ['bcftools', 'concat'] + [vcf_name for vcf_name in vcf_names] + ['-O', 'z']\n \n with open(os.path.join(work_dir, out_name), 'wb') as out_file:\n context.runner.call(job, cmd, work_dir=work_dir, outfile = out_file)\n\n cmd = ['tabix', '-f', '-p', 'vcf', out_name]\n context.runner.call(job, cmd, work_dir=work_dir)\n\n out_vcf_id = context.write_intermediate_file(job, os.path.join(work_dir, out_name))\n out_tbi_id = context.write_intermediate_file(job, os.path.join(work_dir, out_name + '.tbi'))\n\n return out_vcf_id, out_tbi_id",
"def build_index(self):\n # format output and input\n ref_file = f'{self.genome_database}BSB_ref.fa'\n # collect external command\n index_command = [f'{self.bwa_path}', 'index', '-a', 'bwtsw', '-b', f'{self.block_size}', ref_file]\n # run external command\n subprocess.run(args=index_command)",
"def add_batch(batch_index, pCS, orphans, fasta_d, cpus, dun_use_partial):\n cur_file = \"batch{0}.fasta\".format(batch_index)\n seqids = set([r.id for r in SeqIO.parse(open(cur_file), 'fasta')])\n o = ar.run_minimap(cur_file, \"seed{0}.S.fasta\".format(batch_index), cpus=cpus)\n print >> sys.stderr, \"processing\", o\n pCS, remains = sp.process_align_to_pCS(o, seqids, pCS, MiniReader, dun_use_partial=dun_use_partial)\n print >> sys.stderr, \"pCS: {0}, tucked: {1}, orphans: {2}, remains: {3}\".format( \\\n len(pCS.S), sum(v == 'T' for v in pCS.seq_stat.itervalues()), len(orphans), len(remains))\n # write batch<i>.remains.fasta\n cur_file = \"batch{0}.remains.fasta\".format(batch_index)\n FileIO.write_seqids_to_fasta(remains, cur_file, fasta_d)\n o = ar.run_minimap(cur_file, \"seed{0}.orphans.fasta\".format(batch_index), cpus=cpus)\n print >> sys.stderr, \"processing\", o\n pCS, orphans, remains = sp.process_align_to_orphan(o, remains, orphans, pCS, MiniReader, dun_use_partial=dun_use_partial)\n print >> sys.stderr, \"pCS: {0}, tucked: {1}, orphans: {2}, remains: {3}\".format( \\\n len(pCS.S), sum(v == 'T' for v in pCS.seq_stat.itervalues()), len(orphans), len(remains))\n # write batch<i>.remains2.fasta and self align\n cur_file = \"batch{0}.remains2.fasta\".format(batch_index)\n FileIO.write_seqids_to_fasta(remains, cur_file, fasta_d)\n o = ar.run_minimap(cur_file, cur_file, cpus=cpus)\n print >> sys.stderr, \"processing\", o\n pCS, remains = sp.process_self_align_into_seed(o, remains, MiniReader, pCS, dun_use_partial=dun_use_partial)\n print >> sys.stderr, \"pCS: {0}, tucked: {1}, orphans: {2}, remains: {3}\".format( \\\n len(pCS.S), sum(v == 'T' for v in pCS.seq_stat.itervalues()), len(orphans), len(remains))\n # combine remains+orphans to new orphans\n orphans = orphans.union(remains)\n FileIO.write_preClusterSet_to_fasta(pCS, \"seed{0}.S.fasta\".format(batch_index+1), fasta_d)\n FileIO.write_seqids_to_fasta(orphans, \"seed{0}.orphans.fasta\".format(batch_index+1), fasta_d)\n\n return pCS, orphans",
"def build(self, ref_path, reffile_template):\n SMALL_INDEX_MAX_SIZE = 4 * 1024**3 - 200 # From bowtie2-build wrapper\n assert os.stat(ref_path).st_size <= SMALL_INDEX_MAX_SIZE\n self.check_logger()\n for line in self.yield_output(['--wrapper',\n 'micall-0',\n '--quiet',\n '-f',\n ref_path,\n reffile_template],\n stderr=subprocess.STDOUT):\n if line != 'Building a SMALL index\\n':\n self.logger.debug(line)",
"def prepareBatchFile(self,detector,exposureTime):\n \n inp = open(ini.Ini().getParTestFile(\"BEST\",\"best_batch_file_template\"), 'r')\n t = Template(inp.read())\n s = t.substitute(besthome=ini.Ini().getPar(\"BEST\",\"besthome\"),\n bestbin=ini.Ini().getPar(\"BEST\",\"best_bin\"),\n detector=detector,exposure_time=exposureTime,folder=self.runFolder)\n \n self.completePath = os.path.join(self.runFolder,ini.Ini().getPar(\"BEST\",\"best_batch_file\") )\n outp = open(self.completePath, 'w')\n outp.write(s)\n outp.close()\n # give execute permissions\n #os.chmod(self.completePath, 0755)\n self.make_exe(self.completePath) \n self.log.logger.debug(\"Batch best file created: \" + self.completePath)\n \n return self.completePath",
"def cuffmerge(job, config, name, samples, manifest):\n\n stats_root = \"{}_cuffmerge_stats\".format(config['run_id'])\n logfile = \"{}.cuffmerge.log\".format(config['run_id'])\n\n command = [\"{}\".format(config['cuffmerge']['bin']),\n \"-g {}\".format(config['transcript_reference']),\n \"-s {}\".format(config['reference']),\n \"-p {}\".format(config['cuffmerge']['num_cores']),\n \"{}\".format(manifest)]\n\n job.fileStore.logToMaster(\"Cuffmerge Command: {}\\n\".format(command))\n pipeline.run_and_log_command(\" \".join(command), logfile)\n\n pwd = os.getcwd()\n config['merged_transcript_reference'] = os.path.join(pwd, \"merged.gtf\")\n\n return stats_root",
"def batch(self, coeff_count=13, db=False):\n mfccs, _ = self.mfcc(coeff_count)\n if db:\n mfccs = utils.dbspec(mfccs)\n delta1, delta2 = self.delta_coeffs(mfccs)\n self._annotate(mfccs)\n\n mfccs_len = mfccs.shape[1]\n batch_x = np.concatenate((mfccs, delta1, delta2), axis=0).transpose()\n batch_y = np.array(self.annotated_samples)\n print(\"AudioClip--Generated Batch\")\n return (batch_x, batch_y)",
"def generate_batch(self):\n\n # sbatch = list()\n # tbatch = list()\n # for i in range(self.dict_paras['batch_size']):\n # sbatch.append(self.lst_triplet_train_map[self.data_index])\n # self.data_index = (self.data_index + 1) % self.triplet_train_size\n\n sbatch = random.sample(self.lst_triplet_train_map, self.dict_paras['batch_size'])\n tbatch = list()\n\n for ele in sbatch:\n corrupted1, corrupted2 = self.get_corrupted_triplet(ele)\n tbatch.append((ele, corrupted1))\n tbatch.append((ele, corrupted2))\n return tbatch",
"def run(self, dataset_size=4, n_jobs=-1, starting_block=0):\n data_files = sorted(self.input_directory.glob(\"**/*.txt\"))\n log.info(f\"Creating shape file based on {len(data_files)} samples.\")\n\n n_blocks = int(len(data_files) / dataset_size)\n data_file_blocks = split(data_files, n_blocks)\n dataset_blocks_ids = np.arange(len(data_file_blocks))\n\n if starting_block != 0:\n data_file_blocks = data_file_blocks[starting_block:]\n dataset_blocks_ids = dataset_blocks_ids[starting_block:]\n log.info(f\"Starting at a different block number: {starting_block}.\")\n n_blocks = int(len(data_file_blocks))\n\n log.info(f\"Going through {n_blocks} blocks in parallel.\")\n Parallel(n_jobs=n_jobs)(\n delayed(self.generate_single_block)(data_file_block, dataset_block_id)\n for (data_file_block, dataset_block_id) in tqdm(\n zip(data_file_blocks, dataset_blocks_ids)\n )\n )\n\n log.info(\"Combining the separate index files..\")\n index_floorplan = sorted(self.output_directory.glob(\"index_floorplans_*.csv\"))\n log.info(f\"Found {len(index_floorplan)} index block files.\")\n index_files = pd.concat([pd.read_csv(_file) for _file in index_floorplan])\n index_files = index_files.fillna(0)\n index_files.to_csv(self.output_directory / \"index_floorplans.csv\", index=False)",
"def _cmd_batch(args):\n logging.info(\"CNVkit %s\", __version__)\n # Validate/restrict options, beyond what argparse mutual exclusion can do\n bad_args_msg = \"\"\n if args.reference:\n bad_flags = [\n flag\n for is_used, flag in (\n (args.normal is not None, \"-n/--normal\"),\n (args.fasta, \"-f/--fasta\"),\n (args.targets, \"-t/--targets\"),\n (args.antitargets, \"-a/--antitargets\"),\n (args.access, \"-g/--access\"),\n (args.annotate, \"--annotate\"),\n (args.short_names, \"--short-names\"),\n (args.target_avg_size, \"--target-avg-size\"),\n (args.antitarget_avg_size, \"--antitarget-avg-size\"),\n (args.antitarget_min_size, \"--antitarget-min-size\"),\n )\n if is_used\n ]\n if bad_flags:\n bad_args_msg = (\n \"If -r/--reference is given, options to construct \"\n \"a new reference (%s) should not be used.\" % \", \".join(bad_flags)\n )\n elif args.normal is None:\n bad_args_msg = (\n \"Option -n/--normal must be given to build a new \"\n \"reference if -r/--reference is not used.\"\n )\n elif args.seq_method in (\"hybrid\", \"amplicon\") and not args.targets:\n bad_args_msg = (\n \"For the '%r' sequencing method, option -t/--targets \"\n \"(at least) must be given to build a new reference if \"\n \"-r/--reference is not used.\" % args.seq_method\n )\n if bad_args_msg:\n sys.exit(bad_args_msg + \"\\n(See: cnvkit.py batch -h)\")\n\n # Ensure sample IDs are unique to avoid overwriting outputs\n seen_sids = {}\n for fname in (args.bam_files or []) + (args.normal or []):\n sid = core.fbase(fname)\n if sid in seen_sids:\n sys.exit(f\"Duplicate sample ID {sid!r} (from {fname} and {seen_sids[sid]})\")\n seen_sids[sid] = fname\n\n if args.processes < 1:\n args.processes = multiprocessing.cpu_count()\n\n if not args.reference:\n # Build a copy number reference; update (anti)targets upon request\n args.reference, args.targets, args.antitargets = batch.batch_make_reference(\n args.normal,\n args.targets,\n args.antitargets,\n args.male_reference,\n args.diploid_parx_genome,\n args.fasta,\n args.annotate,\n args.short_names,\n args.target_avg_size,\n args.access,\n args.antitarget_avg_size,\n args.antitarget_min_size,\n args.output_reference,\n args.output_dir,\n args.processes,\n args.count_reads,\n args.seq_method,\n args.cluster,\n )\n elif args.targets is None and args.antitargets is None:\n # Extract (anti)target BEDs from the given, existing CN reference\n ref_arr = read_cna(args.reference)\n targets, antitargets = reference.reference2regions(ref_arr)\n ref_pfx = os.path.join(args.output_dir, core.fbase(args.reference))\n args.targets = ref_pfx + \".target-tmp.bed\"\n args.antitargets = ref_pfx + \".antitarget-tmp.bed\"\n tabio.write(targets, args.targets, \"bed4\")\n tabio.write(antitargets, args.antitargets, \"bed4\")\n\n if args.bam_files:\n if args.processes == 1:\n procs_per_bam = 1\n logging.info(\"Running %d samples in serial\", len(args.bam_files))\n else:\n procs_per_bam = max(1, args.processes // len(args.bam_files))\n logging.info(\n \"Running %d samples in %d processes (that's %d processes per bam)\",\n len(args.bam_files),\n args.processes,\n procs_per_bam,\n )\n\n with parallel.pick_pool(args.processes) as pool:\n for bam in args.bam_files:\n pool.submit(\n batch.batch_run_sample,\n bam,\n args.targets,\n args.antitargets,\n args.reference,\n args.output_dir,\n args.male_reference,\n args.diploid_parx_genome,\n args.scatter,\n args.diagram,\n args.rscript_path,\n args.count_reads,\n args.drop_low_coverage,\n args.seq_method,\n args.segment_method,\n procs_per_bam,\n 
args.cluster,\n args.fasta,\n args.diploid_parx_genome,\n )\n else:\n logging.info(\n \"No tumor/test samples (but %d normal/control samples) \"\n \"specified on the command line.\",\n len(args.normal),\n )",
"def skesa_assemble(self):\n with progressbar(self.metadata) as bar:\n for sample in bar:\n # Initialise the assembly command\n sample.commands.assemble = str()\n try:\n if sample.general.trimmedcorrectedfastqfiles:\n # If the sample is a pure isolate, assemble it. Otherwise, run the pre-metagenome pipeline\n try:\n status = sample.run.Description\n except AttributeError:\n status = 'unknown'\n if status == 'metagenome':\n self.merge(sample)\n else:\n # Set the output directory\n sample.general.assembly_output = os.path.join(sample.general.outputdirectory,\n 'assembly_output')\n make_path(sample.general.assembly_output)\n sample.general.assemblyfile = os.path.join(sample.general.assembly_output,\n '{name}_unfiltered.fasta'\n .format(name=sample.name))\n sample.general.bestassemblyfile = os.path.join(sample.general.assembly_output,\n '{name}.fasta'\n .format(name=sample.name))\n fastqfiles = sample.general.trimmedcorrectedfastqfiles\n\n # Set the the forward fastq files\n sample.general.assemblyfastq = fastqfiles\n forward = fastqfiles[0]\n gz = True if '.gz' in forward else False\n # If there are two fastq files\n if len(fastqfiles) == 2:\n # Set the reverse fastq name https://github.com/ncbi/SKESA/issues/7\n sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} ' \\\n '--use_paired_ends --vector_percent 1 ' \\\n '--contigs_out {contigs}'\\\n .format(fastqfiles=','.join(fastqfiles),\n threads=self.cpus,\n contigs=sample.general.assemblyfile)\n # Same as above, but use single read settings for the assembler\n else:\n sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} ' \\\n '--vector_percent 1 --contigs_out {contigs}'\\\n .format(fastqfiles=','.join(fastqfiles),\n threads=self.cpus,\n contigs=sample.general.assemblyfile)\n # Specify that the files are gzipped\n if gz:\n sample.commands.assemble += ' --gz'\n # If there are no fastq files, populate the metadata appropriately\n else:\n sample.general.assembly_output = 'NA'\n sample.general.assemblyfastq = 'NA'\n sample.general.bestassemblyfile = 'NA'\n except AttributeError:\n sample.general.assembly_output = 'NA'\n sample.general.assemblyfastq = 'NA'\n sample.general.trimmedcorrectedfastqfiles = 'NA'\n sample.general.bestassemblyfile = 'NA'\n if sample.commands.assemble and not os.path.isfile(sample.general.assemblyfile):\n # Run the assembly\n out, err = run_subprocess(sample.commands.assemble)\n write_to_logfile(sample.commands.assemble,\n sample.commands.assemble,\n self.logfile,\n sample.general.logout,\n sample.general.logerr,\n None,\n None)\n write_to_logfile(out,\n err,\n self.logfile,\n sample.general.logout,\n sample.general.logerr,\n None,\n None)",
"def prepare_subset_vcf_files_by_population():\n if not os.path.exists(VCF_BY_POPULATION_PATH):\n print(\"preparing subset vcf by population\")\n os.makedirs(VCF_BY_POPULATION_PATH)\n sleep(10)\n vcf_tools_runner = VCFToolsDockerRunner()\n samples = glob(f\"{SAMPLES_FOLDER}/*.csv\")\n\n with tqdm(total=len(samples)) as pbar:\n for sample in samples:\n sample = sample.replace('\\\\', '/')\n sample_name = get_filename_from_path(sample)\n sample_path = \"/\".join([IMAGE_SHARE_FOLDER_PATH] + sample.split('/')[1:])\n pbar.set_description(f\"Processing {sample_name}\")\n vcf_tools_runner(\n f\"vcf-subset -c {sample_path} \"\n f\"{IMAGE_SHARE_FOLDER_PATH}/{GENOTYPE_DATA_FOLDER}/{MERGED_VCF_FILE_NAME} | fill-an-ac > \"\n f\"{IMAGE_SHARE_FOLDER_PATH}/{VCF_BY_POPULATION_FOLDER}/{sample_name}.vcf\")\n pbar.update(1)\n else:\n print(f\"Subset VCF files by population already exist in: {VCF_BY_POPULATION_PATH}\")",
"def seqff(self):\r\n\r\n start = time.time()\r\n\r\n # load bininfo\r\n bininfo = load_bininfo(self.bininfodata_loc)\r\n\r\n # load input files\r\n if os.path.isdir(self.input_loc):\r\n input_list = [self.input_loc + x for x in os.listdir(self.input_loc)]\r\n\r\n elif os.path.isfile(self.input_loc):\r\n input_list = [self.input_loc]\r\n\r\n else:\r\n raise FileNotFoundError(\"error occurred : inputData is not a Directory or File\")\r\n\r\n for i, file in enumerate(input_list):\r\n filetype = file.split(\".\")[-1]\r\n # filetype : 'sam' or 'bam' or 'newtemp'\r\n if 'sam' in filetype:\r\n bincount = load_sam(file)\r\n\r\n elif 'newtemp' in filetype:\r\n bincount = load_counts(file)\r\n file = file.replace(\".newtemp\", \"\") # TEMP .newtemp -> .bam\r\n\r\n elif 'bam' in filetype:\r\n bincount = load_bam(file)\r\n\r\n else:\r\n continue\r\n\r\n #CREATE newtemp file in \"output_loc\"/newtemp/\r\n create_newtemp(bincount, file, self.newtemp_loc)\r\n\r\n newtemp = pd.DataFrame.from_dict(bincount, orient='index')\r\n newtemp.reset_index(level=0, inplace=True)\r\n newtemp.rename(columns={'index': 'binName', 0: 'counts'}, inplace=True)\r\n\r\n temp_bininfo = bininfo.copy(deep=True)\r\n temp_bininfo = temp_bininfo.merge(newtemp, on='binName',\r\n how='left') # missing value : NaN, not NA in pandas\r\n temp_bininfo['counts'] = temp_bininfo['counts'].fillna(0)\r\n\r\n temp_bininfo.sort_values(by='binorder', inplace=True)\r\n temp_bininfo.reset_index(drop=True)\r\n\r\n ####DATA PROCESSING #######################\r\n autosomebinsonly = []\r\n for index in range(61927):\r\n boolean = (temp_bininfo['FRS'][index] != 'NA') and \\\r\n (float(temp_bininfo['GC'][index]) > 0.316) and \\\r\n (temp_bininfo['CHR'][index] != 'chrX') and \\\r\n (temp_bininfo['CHR'][index] != 'chrY')\r\n autosomebinsonly.append(boolean)\r\n autosomebinsonly = pd.Series(autosomebinsonly)\r\n\r\n alluseablebins = []\r\n for index in range(61927):\r\n boolean = (temp_bininfo['FRS'][index] != \"NA\") and (float(temp_bininfo['GC'][index]) > 0.316)\r\n alluseablebins.append(boolean)\r\n alluseablebins = pd.Series(alluseablebins)\r\n\r\n #CREATE alluseablebins file in \"output_loc\"/alluseablebins\r\n #create_alluseablebins(alluseablebins, file, self.alluseablebins_loc)\r\n\r\n sum_counts = pd.Series(temp_bininfo['counts'])\r\n sum_counts = sum_counts[autosomebinsonly].sum(skipna=True)\r\n\r\n autoscaledtemp = pd.Series(temp_bininfo['counts'].loc[(autosomebinsonly)],\r\n copy=True) / sum_counts # NA-related code removed\r\n allscaledtemp = pd.Series(temp_bininfo['counts'].loc[(alluseablebins)], copy=True) / sum_counts\r\n\r\n gc_index = {}\r\n cnt = 0\r\n for index, isauto in enumerate(autosomebinsonly):\r\n if isauto:\r\n if temp_bininfo['GC'].iat[index] in gc_index:\r\n gc_index[temp_bininfo['GC'].iat[index]].append(float(autoscaledtemp.iat[cnt]))\r\n cnt += 1\r\n\r\n else:\r\n gc_index[temp_bininfo['GC'].iat[index]] = [float(autoscaledtemp.iat[cnt])]\r\n cnt += 1\r\n\r\n key_list = []\r\n val_list = []\r\n for key, val in gc_index.items():\r\n key_list.append(key)\r\n val_list.append(np.median(val))\r\n\r\n loess_var = loess(key_list, val_list) # default span : 0.75\r\n loess_var.fit()\r\n # y = loess.loess_prediction(newData, loessVar)\r\n # temp_loessPredict.loess_debugging(loessVar)\r\n\r\n ###prediction###\r\n loess_x = [float(gc) for index, gc in enumerate(temp_bininfo['GC']) if (alluseablebins[index])]\r\n # print(temp_bininfo['GC'])\r\n loess_fitted = loess_var.predict(loess_x)\r\n loess_fitted = list(loess_fitted.values)\r\n # 
print(loess_fitted)\r\n\r\n median_autoscaledtemp = np.median(autoscaledtemp)\r\n median_autoscaledtemp = float(median_autoscaledtemp) # for fixed constant\r\n\r\n normalizedbincount = [(x + (median_autoscaledtemp - loess_fitted[index])) for index, x in\r\n enumerate(allscaledtemp)]\r\n\r\n #CREATE normalizedbincount in \"output_loc\"/normalizedbincount\r\n create_normalizedbincount(normalizedbincount, file, self.normalizedbincount_loc)\r\n\r\n bincounts = pd.Series(data=np.repeat(a=0.0, repeats=61927), index=temp_bininfo['binName'], dtype=np.float64)\r\n\r\n sum_normalizedbincount = sum([val for val in normalizedbincount if not math.isnan(val)])\r\n sum_normalizedbincount = float(sum_normalizedbincount) # deep copy temporarily\r\n\r\n cnt = 0\r\n for index, x in enumerate(alluseablebins):\r\n if x == True:\r\n data = (normalizedbincount[cnt] / sum_normalizedbincount) * len(normalizedbincount)\r\n bincounts.iat[index] = data\r\n cnt += 1\r\n\r\n #CREATE bincounts in \"output_loc\"/bincounts\r\n create_bincounts(bincounts, file, self.bincounts_loc)\r\n\r\n wrsc = self.prediction(bincounts, self.B, self.mu, self.parameter_1, self.parameter_2)\r\n enet = np.dot(bincounts, (self.elnetbeta)) + (self.elnetintercept)\r\n ff = (wrsc+enet) / 2\r\n\r\n result_lines = list()\r\n result_lines.append(\"SeqFF\\tEnet\\tWRSC\")\r\n result_lines.append(\"{}\\t{}\\t{}\".format(ff, enet, wrsc))\r\n\r\n #CREATE results of seqff (seqff paper result covered) in \"output_loc\"/results\r\n create_results(result_lines, file, self.results_loc)\r\n\r\n end = time.time()\r\n elapsed = end - start\r\n h = int(elapsed) // 3600\r\n m = (int(elapsed) - (h * 3600)) // 60\r\n s = (int(elapsed) % 60)\r\n print(\"elapsed time: %d hr %d min %d sec\" % (h, m, s))\r\n print(\"elapsed :\", elapsed)\r\n print(\"progress : {} / {}\".format(i + 1, self.progress))",
"def __iter__(self):\n\n batch_sp = []\n batch_noise = []\n batch_mix = []\n batch_count = 0\n\n while True:\n\n # Randomizing wav lists\n random.shuffle(self._lst_spk_files)\n random.shuffle(self._lst_noise_files)\n\n for spk_file, noise_file in zip(self._lst_spk_files, self._lst_noise_files):\n\n # Read wav files\n sig_spk = self.__read_wav_file(spk_file)\n sig_noise = self.__read_wav_file(noise_file)\n\n # Align signal\n min_length = min(sig_spk.shape[0], sig_noise.shape[0])\n\n if min_length < self._fftsize:\n raise Exception(\"ERROR: Too short signals in dataset\")\n\n sig_spk = sig_spk[:min_length]\n sig_noise = sig_noise[:min_length]\n\n # Generate need SNR\n need_snr = random.uniform(self._min_snr, self._max_snr)\n\n # Calc scaled signals\n sig_spk, sig_noise = self.__mix_with_snr(sig_spk, sig_noise, need_snr)\n\n # Calc STFT\n stft_spk = stft(sig_spk, fftsize=self._fftsize, overlap=self._overlap)\n stft_noise = stft(sig_noise, fftsize=self._fftsize, overlap=self._overlap)\n stft_mix = stft_spk + stft_noise\n\n # Skip small segments\n frames, bin = stft_mix.shape\n if frames <= self._context_size:\n continue\n\n # Collect batch\n i = 0\n while i + self._context_size < frames:\n\n batch_sp.append(stft_spk[i:i + self._context_size, :])\n batch_noise.append(stft_noise[i:i + self._context_size, :])\n batch_mix.append(stft_mix[i:i + self._context_size, :])\n\n i += self._context_size // 2\n batch_count += 1\n\n if batch_count == self._batch_size:\n sp = np.array(batch_sp).reshape((self._batch_size,\n self._context_size, -1))\n noise = np.array(batch_noise).reshape((self._batch_size,\n self._context_size, -1))\n mix = np.array(batch_mix).reshape((self._batch_size,\n self._context_size, -1))\n yield sp, noise, mix\n\n batch_sp = []\n batch_noise = []\n batch_mix = []\n batch_count = 0",
"def addVCFSubsetJobs(self, workflow=None, inputData=None, db_vervet=None, sampleIDFile=None, transferOutput=True,\\\n\t\t\t\t\t\trefFastaFList=None, GenomeAnalysisTKJar=None,\\\n\t\t\t\t\t\tmaxContigID=None, outputDirPrefix=\"\"):\n\t\tif workflow is None:\n\t\t\tworkflow = self\n\t\tif GenomeAnalysisTKJar is None:\n\t\t\tGenomeAnalysisTKJar = workflow.GenomeAnalysisTKJar\n\t\tif refFastaFList is None:\n\t\t\trefFastaFList = self.refFastaFList\n\t\t\n\t\tsys.stderr.write(\"Adding vcf-subset jobs for %s vcf files ... \"%(len(inputData.jobDataLs)))\n\t\tno_of_jobs= 0\n\t\t\n\t\t\n\t\ttopOutputDir = \"%sVCFSubset\"%(outputDirPrefix)\n\t\ttopOutputDirJob = self.addMkDirJob(outputDir=topOutputDir)\n\t\tno_of_jobs += 1\n\t\t\n\t\treturnData = PassingData()\n\t\treturnData.jobDataLs = []\n\t\tfor jobData in inputData.jobDataLs:\n\t\t\tinputF = jobData.vcfFile\n\t\t\tchr = self.getChrFromFname(inputF.name)\n\t\t\tif maxContigID:\n\t\t\t\tcontig_id = self.getContigIDFromFname(inputF.name)\n\t\t\t\ttry:\n\t\t\t\t\tcontig_id = int(contig_id)\n\t\t\t\t\tif contig_id>maxContigID:\t#skip the small contigs\n\t\t\t\t\t\tcontinue\n\t\t\t\texcept:\n\t\t\t\t\tsys.stderr.write('Except type: %s\\n'%repr(sys.exc_info()))\n\t\t\t\t\timport traceback\n\t\t\t\t\ttraceback.print_exc()\n\t\t\tinputFBaseName = os.path.basename(inputF.name)\n\t\t\tcommonPrefix = inputFBaseName.split('.')[0]\n\t\t\toutputVCF = File(os.path.join(topOutputDir, '%s.subset.vcf'%(commonPrefix)))\n\t\t\tvcfSubsetJob = self.addVCFSubsetJob(workflow, executable=workflow.vcfSubset, vcfSubsetPath=workflow.vcfSubsetPath, \\\n\t\t\t\t\t\tsampleIDFile=sampleIDFile,\\\n\t\t\t\t\t\tinputVCF=inputF, outputF=outputVCF, \\\n\t\t\t\t\t\tparentJobLs=[topOutputDirJob]+jobData.jobLs, transferOutput=False, job_max_memory=200,\\\n\t\t\t\t\t\textraArguments=None, extraDependentInputLs=None)\n\t\t\t\n\t\t\t#2012.10.5\n\t\t\t#selectVariants would generate AC, AF so that TrioCaller could read it.\n\t\t\t#samtools uses 'AC1' instead of AC, 'AF1' instead of AF.\n\t\t\tVCF4OutputF = File(os.path.join(topOutputDir, '%s.niceformat.vcf'%commonPrefix))\n\t\t\tvcfConvertJob = self.addSelectVariantsJob(workflow, SelectVariantsJava=workflow.SelectVariantsJava, \\\n\t\t\t\t\tinputF=vcfSubsetJob.output, outputF=VCF4OutputF, \\\n\t\t\t\t\trefFastaFList=refFastaFList, parentJobLs=[vcfSubsetJob], \\\n\t\t\t\t\textraDependentInputLs=[], transferOutput=False, \\\n\t\t\t\t\textraArguments=None, job_max_memory=2000, interval=chr)\n\t\t\t\n\t\t\tVCFGzipOutputF = File(\"%s.gz\"%VCF4OutputF.name)\n\t\t\tVCFGzipOutput_tbi_F = File(\"%s.gz.tbi\"%VCF4OutputF.name)\n\t\t\tbgzip_tabix_VCF_job = self.addBGZIP_tabix_Job(workflow, bgzip_tabix=workflow.bgzip_tabix, \\\n\t\t\t\t\tparentJobLs=[vcfConvertJob], inputF=vcfConvertJob.output, outputF=VCFGzipOutputF, \\\n\t\t\t\t\ttransferOutput=transferOutput)\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\treturnData.jobDataLs.append(PassingData(jobLs=[bgzip_tabix_VCF_job], vcfFile=VCFGzipOutputF, \\\n\t\t\t\t\t\t\t\t\ttbi_F=VCFGzipOutput_tbi_F, \\\n\t\t\t\t\t\t\t\t\tfileLs=[VCFGzipOutputF, VCFGzipOutput_tbi_F]))\n\t\t\t\n\t\tsys.stderr.write(\"%s jobs.\\n\"%(self.no_of_jobs))\n\t\treturn returnData",
"def generate_flatbuffer_binaries():\n for element in FLATBUFFERS_CONVERSION_DATA:\n schema = element.schema\n output_path = element.output_path\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n for json in element.input_files:\n target = processed_json_path(json)\n if needs_rebuild(json, target) or needs_rebuild(schema, target):\n convert_json_to_flatbuffer_binary(\n json, schema, output_path)",
"def filesToBlender(context, prefix, max_blocks=200):\n # Get reference matrix\n refMatrix = None\n if context.scene.maps_models_importer_is_ref_matrix_valid:\n values = context.scene.maps_models_importer_ref_matrix\n refMatrix = Matrix((values[0:4], values[4:8], values[8:12], values[12:16]))\n\n drawcallId = 0\n while max_blocks <= 0 or drawcallId < max_blocks:\n if not os.path.isfile(\"{}{:05d}-indices.bin\".format(prefix, drawcallId)):\n break\n\n try:\n indices, positions, uvs, img, constants = loadData(prefix, drawcallId)\n except FileNotFoundError as err:\n print(\"Skipping ({})\".format(err))\n continue\n\n uvOffsetScale, matrix, refMatrix = extractUniforms(constants, refMatrix)\n\n # Make triangles from triangle strip index buffer\n n = len(indices)\n tris = [ [ indices[i+j] for j in [[0,1,2],[0,2,1]][i%2] ] for i in range(n - 3)]\n tris = [ t for t in tris if t[0] != t[1] and t[0] != t[2] and t[1] != t[2] ]\n verts = [ [ p[0], p[1], p[2] ] for p in positions ]\n\n [ou, ov, su, sv] = uvOffsetScale\n uvs = [ [ (floor(u * 65535.0 + 0.5) + ou) * su, (floor(v * 65535.0 + 0.5) + ov) * sv ] for u, v in uvs ]\n \n if len(indices) == 0:\n continue\n\n mesh_name = \"BuildingMesh-{:05d}\".format(drawcallId)\n obj = addMesh(context, mesh_name, verts, tris, uvs)\n obj.matrix_world = matrix\n\n mat_name = \"BuildingMat-{:05d}\".format(drawcallId)\n addImageMaterial(mat_name, obj, img)\n\n drawcallId += 1\n\n # Save reference matrix\n if refMatrix:\n values = sum([list(v) for v in refMatrix], [])\n context.scene.maps_models_importer_ref_matrix = values\n context.scene.maps_models_importer_is_ref_matrix_valid = True",
"def generate_batch(self) -> Tuple[np.ndarray, np.ndarray, List[str]]:\n batch = []\n labels = []\n filelist = []\n for i in range(self.batch_size):\n filename = self.filelist[self.data_index]\n filelist.append(self.filelist[self.data_index].split(\"\\\\\")[-1].split(\"/\")[-1])\n greyimg, colorimg = read_img(filename)\n batch.append(greyimg)\n labels.append(colorimg)\n self.data_index = (self.data_index + 1) % self.size\n batch = np.asarray(batch) / 255\n labels = np.asarray(labels) / 255\n return batch, labels, filelist",
"def make_batch(self, batch_size):\n filenames = self.get_filenames()\n\n if os.path.isdir(filenames):\n num_records = len(os.listdir(filenames))\n print(\"Loading from directory. \" + str(num_records) + \" tfRecords found.\")\n files = tf.data.Dataset.list_files(filenames + \"/\" + \"*.tfrecord\").shuffle(num_records)\n dataset = files.apply(\n tf.contrib.data.parallel_interleave(\n lambda x: tf.data.TFRecordDataset(x, num_parallel_reads=256, buffer_size=8*1024*1024),\n cycle_length=32, sloppy=True)\n )\n else:\n print(\"Loading from single tfRecord...\")\n dataset = tf.data.TFRecordDataset(filenames + \".tfrecord\").repeat()\n \n dataset = dataset.map(self.parser, num_parallel_calls=128)\n \n if self.subset == 'train':\n min_queue_examples = int(\n Cifar10DataSet.num_examples_per_epoch(self.subset) * 0.4)\n # Ensure that the capacity is sufficiently large to provide good random\n # shuffling.\n dataset = dataset.shuffle(buffer_size=min_queue_examples + 3 * batch_size)\n \n dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))\n dataset = dataset.prefetch(10)\n \n iterator = dataset.make_one_shot_iterator()\n seq_batch, input_batch, map_batch, transformation_batch = iterator.get_next()\n\n return seq_batch, input_batch, map_batch, transformation_batch",
"def write_batch_file(self, dot_input, dot_aln, hyphy_batch_file=\"\", hyphy_result_file=\"\"):\r\n gene_id = os.path.basename(dot_input).split(os.path.extsep)[0]\r\n path_main = os.path.splitext(dot_input)[0]\r\n\r\n if \"\" == hyphy_batch_file:\r\n hyphy_batch_file = path_main + \".bf\"\r\n\r\n if \"\" == hyphy_result_file:\r\n hyphy_result_file = path_main + \".result\"\r\n\r\n if \"\" == self.batch_content:\r\n raise BFError\r\n\r\n # replace begins here\r\n batch_content, num_hits = re.subn(self.f_input, dot_input, self.batch_content)\r\n self._error_no_hit(num_hits)\r\n\r\n # partition is optional\r\n if (0, 0) == self.partition:\r\n if self.f_partition in batch_content:\r\n batch_content, num_hits = re.subn(self.f_partition, \"\", batch_content)\r\n self._error_no_hit(num_hits)\r\n\r\n else:\r\n batch_content, num_hits = re.subn(self.f_partition, \"%d-%d\" % self.partition, batch_content)\r\n self._error_no_hit(num_hits)\r\n\r\n batch_content, num_hits = re.subn(self.f_mdl, self.mdl_file, batch_content)\r\n self._error_no_hit(num_hits)\r\n\r\n # only support 1 matrix now :2014-5-26\r\n batch_content, num_hits = re.subn(self.f_matrix_name, self.matrix_name[0], batch_content)\r\n self._error_no_hit(num_hits)\r\n\r\n if self.use_given_tree:\r\n tree_newick_string = self.tree_definition_external\r\n else:\r\n genes_share_aln = pHdata.aln_reader(dot_aln)\r\n tree_newick_string = self.build_tree(genes_share_aln)\r\n\r\n batch_content, num_hits = re.subn(self.f_tree, tree_newick_string, batch_content)\r\n self._error_no_hit(num_hits)\r\n\r\n batch_content, num_hits = re.subn(self.f_output, hyphy_result_file, batch_content)\r\n self._error_no_hit(num_hits)\r\n\r\n self.check_whether_incomplete(batch_content)\r\n\r\n with open(name=hyphy_batch_file, mode=\"w\") as bf_writer:\r\n bf_writer.write(batch_content)",
"def save_batch(self):\n self._batch_counter += 1\n write_to_disk(\n self._batch_cases,\n os.path.join(\n self.crop.location,\n \"batches\",\n BTCH_NM.format(self._batch_counter),\n ),\n )\n self._batch_cases = []\n self._counter = 0",
"async def addToFingerPrint(samples, sampleset=, allsampleset=\"all\", workspace=WORKSPACE, sid=, vcf_list=None, \nvcf_list_dir=, working_dir, crosscheck_batch_size, recreate_batch, bamcolname,\ntaiga_dataset, taiga_filename):\n bams = samples[bamcolname]\n bams[sid] = bams.index\n print('adding '+str(len(bams))+' new samples to the fingerprint')\n wm = dm.WorkspaceManager(workspace).disable_hound()\n \n # Create batch files listing all vcfs in fingerprints dir and upload to bucket\n # (NEW VERSION ONLY) will only needed if need to recreate batches\n if recreate_batch:\n if not vcf_list:\n vcf_list = gcp.lsFiles([vcf_list_dir])\n vcf_list = wm.get_samples()[\"fingerprint_vcf\"].tolist()\n batches = []\n for i, l in enumerate(range(0, len(vcf_list), crosscheck_batch_size)):\n f = open(working_dir + \"vcf_batch_\"+str(i), 'w')\n f.write(\"\\n\".join(vcf_list[l:l + crosscheck_batch_size]))\n f.close()\n batches.append(working_dir+\"vcf_batch_\"+str(i))\n gcp.cpFiles(batches, vcf_list_dir)\n\n # Upload sample sheet\n samples_df = pd.DataFrame()\n samples_df[[\"bam_filepath\", \"bai_filepath\", \"sample_id\",\n \"participant_id\"]] = bams[bamcolname + [sid, sid]].values\n samples_df = samples_df.set_index('sample_id')\n wm.upload_samples(samples_df, add_participant_samples=True)\n wm.update_sample_set(sampleset, samples_df.index)\n\n # Submit jobs \n submission_id = wm.create_submission(\"fingerprint_bam_with_liftover\", sampleset, \n 'sample_set', expression='this.samples')\n await terra.waitForSubmission(workspace, submission_id)\n\n #1.2 Crosscheck Fingerprint VCFs\n #Here we use Dalmation to run the crosscheck_vcfs workflow on Terra. \n # This workflow calls Picard CrosscheckFingerprints to compare the new \n # fingerprint vcfs to batches of existing fingerprint vcfs in fingerprints_dir\n # Create list with new vcfs and upload to bucket\n f = open(working_dir + sampleset, 'w')\n f.write(('\\n').join(wm.get_samples().loc[samples_df.index, 'fingerprints'].tolist()))\n f.close()\n gcp.cpFiles(working_dir + sampleset, vcf_list_dir)\n os.system('rm '+working_dir + sampleset)\n\n # Upload sample sheet\n if recreate_batch:\n sample_group_df = pd.DataFrame(data={\"entity:sample_group_id\" : batches, \"vcf_group\" : [vcf_list_dir + x for x in batches]}).set_index('entity:sample_group_id')\n else:\n sample_group_df = pd.DataFrame(data={\"entity:sample_group_id\" : [sampleset], \"vcf_group\" : [vcf_list_dir+sampleset]}).set_index('entity:sample_group_id')\n \n print(wm.get_entities('sample_group').index.tolist())\n wm.upload_entities(\"sample_group\", sample_group_df)\n try:\n wm.update_entity_set(\"sample_group\", set_id=allsampleset,\n entity_ids=wm.get_entities('sample_group').index)\n except:\n print(\"still can't update entitis, please upload directly from the file in ../temp.tsv\")\n #in case it does not work\n sample_group_df.to_csv(\"../temp.tsv\", sep='\\t')\n\n # Submit jobs\n conf = wm.get_config(\"crosscheck_vcfs\")\n conf['inputs']['crosscheck.run_crosscheck.vcf_second_input_file'] = '\"'+vcf_list_dir+sampleset+'\"'\n wm.update_config(conf)\n submission_id = wm.create_submission(\"crosscheck_vcfs\", allsampleset, \n 'sample_set',expression='this.samples')\n await terra.waitForSubmission(workspace, submission_id)\n\n #1.3 Update LOD matrix\n #Here we update the fingerprint LOD matrix on taiga with the new fingerprints\n # Generate matrix with LOD score for new fingerprint vcfs\n new_lod_list = []\n samples_df = wm.get_entities(\"sample_group\")['cross_checks_out'].tolist()\n for batch in 
samples_df:\n # could be pd concat\n df = pd.read_csv(batch, sep='\\t', comment='#')\n lod_mat = df.pivot(index=\"LEFT_SAMPLE\",\n columns=\"RIGHT_SAMPLE\", values=\"LOD_SCORE\")\n new_lod_list.append(lod_mat)\n new_lod_mat = pd.concat(new_lod_list)\n new_lod_mat.index.name = None\n new_lod_mat = new_lod_mat.T\n\n # Update LOD matrix ( have to update (A+a)*(B+b) = (AB)+(aB)+(Ab)+(ab))\n prev_lod_mat = tc.get(name=taiga_dataset,file=taiga_filename)\n new_ids = set(new_lod_mat.index)\n old_ids = set(prev_lod_mat.index) - set(new_ids)\n updated_lod_mat = pd.concat((prev_lod_mat.loc[old_ids,old_ids],\n new_lod_mat.loc[new_ids,old_ids]), axis=0)\n updated_lod_mat = pd.concat((updated_lod_mat.loc[new_ids.union(old_ids), old_ids], \n new_lod_mat.transpose().loc[new_ids.union(old_ids, new_ids)]), axis=1)\n updated_lod_mat.to_csv(working_dir+taiga_filename+'.csv')\n \n # Upload updated LOD matrix to Tiaga\n tc.update_dataset(dataset_permaname=taiga_dataset,\n changes_description=\"New bam fingerprints added for \"+sampleset,\n upload_files=[\n {\n \"path\": working_dir+taiga_filename+'.csv',\n \"name\": taiga_filename,\n \"format\": \"NumericMatrixCSV\",\n \"encoding\": \"utf-8\"\n }\n ],\n add_all_existing_files=True)\n\n # finding issues with the dataset\n v = updated_lod_mat.loc[new_ids]\n ref = tracker.getTracker()\n ref = ref.append(samples)\n should = {}\n print(\"\\n\\nsamples that should match but don't:\")\n for u in set(fbams.arxspan_id):\n res = v.loc[fbams[fbams.arxspan_id == u].index,\n ref[ref.arxspan_id == u].index.tolist()]\n for i, j in [(res.index[x], res.columns[y]) for x, y in np.argwhere(res.values < 100)]:\n print('__________________________')\n print(res.loc[i, j])\n print(i, ':', tuple(ref.loc[i, ['arxspan_id', 'version', 'datatype', 'participant_id']].values), j, ':', tuple(\n ref.loc[j, ['arxspan_id', 'version', 'datatype', 'participant_id', 'blacklist']]))\n \n print(\"\\n\\nsamples that shouldn't match but do\")\n previ = ''\n shouldnt = {}\n for i, j in [(v.index[x], v.columns[y]) for x, y in np.argwhere(v.values > 500)]:\n if i == j:\n continue\n if ref.loc[i]['participant_id'] == ref.loc[j]['participant_id']:\n continue\n if i != previ:\n if previ != '':\n shouldnt.update({'_'.join(ref.loc[previ, ['arxspan_id', 'version', 'datatype',\n 'participant_id', \n 'stripped_cell_line_name']].astype(str).values.tolist()): n})\n n = [tuple(ref.loc[j, ['arxspan_id', 'version', 'datatype',\n 'participant_id', 'stripped_cell_line_name']].values)]\n else:\n n.append(tuple(ref.loc[j, ['arxspan_id', 'version', 'datatype',\n 'participant_id', 'stripped_cell_line_name']].values))\n previ = i\n return updated_lod_mat, should, shouldnt"
] | [
"0.672763",
"0.58610225",
"0.5566432",
"0.5434747",
"0.5414289",
"0.5365546",
"0.53649545",
"0.5345907",
"0.5329101",
"0.52768123",
"0.52546614",
"0.5217361",
"0.5212177",
"0.5211676",
"0.5201498",
"0.5170869",
"0.5153864",
"0.5102112",
"0.5095486",
"0.509299",
"0.5088659",
"0.5079078",
"0.5077062",
"0.50588095",
"0.5046648",
"0.5032905",
"0.5029239",
"0.5021231",
"0.4982987",
"0.49702692"
] | 0.7443647 | 0 |
Runs GenotypeGVCFs on all combined files produced previously (assumes folder structure) | def GenotypeGVCFs():
#creates sbatch files to merge batches of batch_size genomics vcf
cwd = os.getcwd()
sbatch_files = []
if not os.path.isdir(os.path.join(cwd, "01_CombineGVCFs")):
        sys.exit("Directory 01_CombineGVCFs does not exist, something went wrong here.")
if os.path.isdir(os.path.join(cwd, "02_GenotypeGVCFs")):
print "WARNING: 02_GenotypeGVCFs already present, assuming this step has been completed with success."
return sbatch_files
else:
#create the folder structure
os.mkdir(os.path.join(cwd, "02_GenotypeGVCFs"))
os.mkdir(os.path.join(cwd, "02_GenotypeGVCFs", "sbatch"))
os.mkdir(os.path.join(cwd, "02_GenotypeGVCFs", "std_err"))
os.mkdir(os.path.join(cwd, "02_GenotypeGVCFs", "std_out"))
os.mkdir(os.path.join(cwd, "02_GenotypeGVCFs", "VCF"))
#Build the sbatch files for the join calling step
working_dir = os.path.join(cwd, "02_GenotypeGVCFs")
    #now retrieve the VCFs stored in 01_CombineGVCFs/VCF/
combined_gvcfs_to_process = []
if len(CONFIG["intervals_list"]) == 0:
#no intervals, I have one file for each batch
combined_gvcf_files = []
for current_batch in range(1, CONFIG["batch_number"] +1):
            # for each batch create the vcf file that needs to be created by the combine step
combined_gvcf_name = "{}_batch{}.g.vcf.gz".format(CONFIG["output_header"], current_batch)
combined_gvcf_full_path = os.path.join(cwd, "01_CombineGVCFs", "VCF", combined_gvcf_name)
combined_gvcf_files.append(combined_gvcf_full_path)
combined_gvcfs_to_process.append(combined_gvcf_files)
else:
for interval in CONFIG["intervals_list"]:
interval_name = os.path.basename(interval).split(".")[0]
combined_gvcf_files = []
for current_batch in range(1, CONFIG["batch_number"] +1):
                # for each batch create the vcf file that needs to be created by the combine step
combined_gvcf_name = "{}_batch{}_{}.g.vcf.gz".format(CONFIG["output_header"], current_batch, interval_name)
combined_gvcf_full_path = os.path.join(cwd, "01_CombineGVCFs", "VCF", combined_gvcf_name)
combined_gvcf_files.append(combined_gvcf_full_path)
                #now create a list with the interval file and all gvcfs to be combined
interval_plus_gvcfs = [interval ,combined_gvcf_files]
combined_gvcfs_to_process.append(interval_plus_gvcfs)
for interval_plus_gvcfs in combined_gvcfs_to_process:
interval = interval_plus_gvcfs[0]
combined_gvcf_files = interval_plus_gvcfs[1]
sbatch_file = build_GenotypeGVCFs_sbatch(working_dir, combined_gvcf_files, CONFIG["scratch"], interval)
sbatch_files.append(sbatch_file)
return sbatch_files | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def genotype_gvcfs(gatk, xmx, cores,\n inputs, output,\n reference, bed_file=None):\n commands = []\n command = GENOTYPEGVCFS_TEMPLATE.format(xmx, gatk, reference, output)\n command = command + ' --variant ' + ' --variant '.join(inputs)\n if bed_file is not None:\n command = command + \" -L \" + bed_file\n commands.append(command)\n output = os.path.join(os.path.dirname(output), 'all_sites.vcf')\n command = GENOTYPEGVCFS_TEMPLATE.format(xmx, gatk, reference, output)\n command = command + ' --variant ' + ' --variant '.join(inputs)\n command = command + ' --includeNonVariantSites'\n if bed_file is not None:\n command = command + \" -L \" + bed_file\n commands.append(command)\n queue_jobs(commands, \"genotypeGVCFs\", cores)",
"def build_GenotypeGVCFs_sbatch(working_dir, combined_gvcf_files, scratch=False, interval=None):\n \n name_batch1 = os.path.basename([item for item in combined_gvcf_files if \"batch1\" in item][0])\n interval_name = \"\"\n #there must be at least one batch so look for it, not elegant but works\n if name_batch1.split(\"batch1\") != \".g.vcf.gz\":\n interval_name = name_batch1.split(\"batch1\")[1].split(\".\")[0]\n job_name = \"GenotypeGVCFs{}\".format(interval_name)\n output_file = \"{}_joincalled{}.g.vcf.gz\".format(CONFIG[\"output_header\"], interval_name)\n #create the sbatch file to analyse the current batch of samples\n sbatch_file = os.path.join(working_dir, \"sbatch\", \"{}.sbatch\".format(job_name))\n with open(sbatch_file, \"w\") as GenotypeGVCFs:\n slurm = slurm_header(CONFIG[\"uppmax_project\"], job_name, working_dir)\n GenotypeGVCFs.write(slurm)\n GenotypeGVCFs.write(\"\\n\")\n #rsync to scratch all samples\n if scratch:\n GenotypeGVCFs.write(\"mkdir -p $SNIC_TMP/{} \\n\".format(job_name)) # create tmp directory\n GenotypeGVCFs.write(\"mkdir -p $SNIC_TMP/{}/VCF/ \\n\".format(job_name)) # create tmp directory\n #now cycle over the samples, build the GATK command\n combined_gvcf_string_input = \"\"\n for combined_gvcf in combined_gvcf_files:\n combined_gvcf_path_dir = combined_gvcf\n if scratch:\n GenotypeGVCFs.write(\"rsync -rptoDLv {}* $SNIC_TMP/{}/\\n\".format(combined_gvcf, job_name))\n combined_gvcf_name = os.path.basename(combined_gvcf)\n combined_gvcf_path_dir = \"$SNIC_TMP/{}/{}\".format(job_name, combined_gvcf_name)\n combined_gvcf_string_input += \"-V {} \\\\\\n\".format(combined_gvcf_path_dir)\n\n GATK_command= \"java -Xmx250g -jar {} -T GenotypeGVCFs \\\\\\n\".format(CONFIG[\"GATK\"])\n for option in CONFIG[\"walkers\"][\"GenotypeGVCFs\"]:\n GATK_command += \"{} \\\\\\n\".format(option)\n GATK_command += \"{} \".format(combined_gvcf_string_input)\n if interval is not None:\n GATK_command += \"-L {} \\\\\\n\".format(interval)\n\n if scratch:\n GATK_command += \"-o $SNIC_TMP/{}/VCF/{}\\n\".format(job_name, output_file)\n #once this is done rsync back to lupus\n GATK_command += \"rsync $SNIC_TMP/{}/VCF/{}* {}/VCF/\\n\".format(job_name, output_file , working_dir)\n else:\n GATK_command += \"-o {}/VCF/{}\\n\\n\".format(working_dir, output_file)\n GenotypeGVCFs.write(GATK_command)\n #return path to sbach file\n return sbatch_file",
"def main(arguments):\n folder_list = glob.glob(f\"{arguments.f}/*/\")\n for d in folder_list:\n if \"GALEN\" in d: continue\n # get vcf file\n try:\n vcf = glob.glob(f\"{d}/*.vcf\")[0]\n except:\n raise FileNotFoundError(f\"{d} - vcf file not found\")\n \n print(f\"Processing {vcf}...\")\n f = load(vcf)\n final = find_gene(f, arguments.c)\n output_file = vcf.replace(\".vcf\", \"_analyzed.csv\")\n final.to_csv(output_file, index=False)\n print(\"Done!\")",
"def merge_gvcfs(gatk, xmx, cores, gvcfs, reference):\n commands = []\n outputs = []\n no_groups = (len(gvcfs)/SPLIT_SIZE) + 1\n for i in range(0, no_groups):\n output = str(i) + '.g.vcf'\n outputs.append(output)\n command = MERGE_GVCFS_TEMPLATE.format(xmx, gatk, reference, output)\n command = command + '--variant ' + ' --variant '.join(gvcfs[i:(i*SPLIT_SIZE + SPLIT_SIZE)])\n commands.append(command)\n queue_jobs(commands, \"mergeGVCFs\", cores)\n return outputs",
"def combine_gvcf(self, reference, gvcf_list, output, input_is_sorted=False, extension_list=[\"g.vcf\",],\n tmp_dir=\"./tmp_combine_gvcf/\", max_files_per_merging=50, iteration=0, threads=None,\n remove_intermediate_files=False):\n\n filtered_gvcf_list = []\n for filename in gvcf_list:\n for extension in extension_list:\n if extension == filename[-len(extension):]:\n filtered_gvcf_list.append(filename)\n break\n \n if len(filtered_gvcf_list) <= max_files_per_merging:\n options = self.parse_options(reference, filtered_gvcf_list, output, input_is_sorted, extension_list=extension_list)\n self.execute(options, runtype=\"cp\")\n if remove_intermediate_files:\n shutil.rmtree(tmp_dir, ignore_errors=True)\n\n else:\n self.safe_mkdir(tmp_dir)\n iteration_dir = \"%s/iteration_%i/\" % (tmp_dir, iteration)\n self.safe_mkdir(iteration_dir)\n\n number_of_files = len(filtered_gvcf_list)\n\n bins = np.arange(0, number_of_files, max_files_per_merging)\n #print(bins)\n if bins[-1] != number_of_files:\n if number_of_files - bins[-1] < 2:\n bins[-1] = number_of_files\n else:\n bins = np.append(bins, number_of_files)\n\n output_file_list = []\n options_list = []\n\n merged_files = 0\n for i in range(0, len(bins)-1):\n output_file = \"%s/%i.g.vcf\" % (iteration_dir, i)\n output_file_list.append(output_file)\n #print(bins[i], bins[i+1])\n\n merged_files += bins[i+1] - bins[i]\n options_list.append(self.parse_options(reference,\n filtered_gvcf_list[bins[i]:bins[i+1]],\n output_file,\n input_is_sorted, extension_list=extension_list))\n print(\"%i/%i files will be merged\" % (merged_files, number_of_files))\n\n self.parallel_execute(options_list, threads=threads, runtype=\"cp\")\n\n self.combine_gvcf(reference, output_file_list, output, input_is_sorted=input_is_sorted,\n extension_list=extension_list,\n tmp_dir=tmp_dir,\n max_files_per_merging=max_files_per_merging, iteration=iteration+1)",
"def main():\n parser = argparse.ArgumentParser(description='MergeGVCFs and genotype them using the GATK')\n parser.add_argument('-g', '--gatk', dest='gatk', help=\"Location of the GATK\", required=True)\n parser.add_argument('-x', '--xmx', dest='xmx', help=\"Memory to use with JAVA\", required=True)\n parser.add_argument('-c', '--cores', dest='cores', help=\"Number of cores to use\")\n parser.add_argument('-o', '--output', dest='output', \n help='Final output from the haplotype caller')\n parser.add_argument('-r', '--reference', dest='reference', \n help='Reference FASTA file')\n parser.add_argument('-b','--bed', dest='bed_file',\n help=\"Bed file for limiting the GATK\")\n parser.add_argument('-p', '--ploidy', dest='ploidy', \n help=\"Sample ploidy\", default=2)\n parser.add_argument('-d', '--out_directory', dest='directory', help='Output director')\n parser.add_argument('bams', nargs=\"*\", help='gVCF variant call files output from the GATK')\n args = parser.parse_args()\n args.cores = int(args.cores)\n args.xmx = args.xmx.strip('\"')\n print args.bams\n genovcfs = haplotype_caller(gatk=args.gatk, xmx=args.xmx, cores=args.cores,\n bams=args.bams, reference=args.reference,\n out_directory=args.directory, ploidy=args.ploidy, bed_file=args.bed_file)\n outputs = merge_gvcfs(gatk=args.gatk, xmx=args.xmx, cores=args.cores,\n gvcfs=genovcfs, reference=args.reference)\n genotype_gvcfs(gatk=args.gatk, xmx=args.xmx, cores=args.cores,\n inputs=outputs, output=args.output, reference=args.reference,bed_file=args.bed_file)\n #haplotype_single(gatk=args.gatk, xmx=args.xmx, cores=args.cores,\n # inputs=args.gvcfs, reference=args.reference)",
"def main():\n\n # this will analyze all files in the input_files directory\n for folder in [x for x in os.listdir(os.path.join(os.getcwd(), 'test_directory')) if os.path.isdir(os.path.join(os.getcwd(), 'test_directory', x))]:\n try:\n print(f'Creating GED_Repo for files in {folder}')\n g = GED_Repo([os.path.join(os.getcwd(), 'test_directory', folder, f) for f in os.listdir(os.path.join(os.getcwd(), 'test_directory', folder)) if f.endswith('.ged')])\n g.check_data()\n g.print_data()\n g.print_individuals()\n g.print_families()\n except ValueError as v:\n print(v)\n except FileNotFoundError as f:\n print(f)",
"def main():\n\n file_list = []\n # this will analyze all files in the input_files directory\n for folder in [x for x in os.listdir(os.path.join(os.getcwd(), 'test_directory')) if os.path.isdir(os.path.join(os.getcwd(), 'test_directory', x))]:\n try:\n # print(f'Reading files in {folder}')\n file_list = file_list + [os.path.join(os.getcwd(), 'test_directory', folder, f) for f in os.listdir(os.path.join(os.getcwd(), 'test_directory', folder)) if f.endswith('.ged')]\n except ValueError as v:\n print(v)\n except FileNotFoundError as f:\n print(f)\n\n try:\n print(f'Analyzing final cumulative file data.')\n # print(file_list)\n g = GED_Repo(file_list)\n g.check_data()\n g.print_data()\n g.print_individuals()\n g.print_families()\n except ValueError as v:\n print(v)\n except FileNotFoundError as f:\n print(f)",
"def run(self):\n\n # If the specified outdir doesn't exist, make it.\n if os.path.exists(self.outdir) == False:\n os.mkdir(self.outdir)\n\n # Get occurrence data.\n self.get_gbif_occs()",
"def combineAllGraphFiles(chroms, final_out):\n outfile = open(final_out,'w');\n outfile.close();\n \n for chrom in chroms:\n graph_file = chrom + \".graph\";\n try:\n if os.system('%s %s >> %s' %\n (cat, graph_file, final_out)): raise\n except: sys.stderr.write(\"cat failed at %s\\n\" % chrom)",
"def run_concat_vcfs(job, context, vcf_ids, tbi_ids):\n\n work_dir = job.fileStore.getLocalTempDir()\n\n vcf_names = ['chrom_{}.vcf.gz'.format(i) for i in range(len(vcf_ids))]\n out_name = 'genome.vcf.gz'\n\n for vcf_id, tbi_id, vcf_name in zip(vcf_ids, tbi_ids, vcf_names):\n job.fileStore.readGlobalFile(vcf_id, os.path.join(work_dir, vcf_name))\n job.fileStore.readGlobalFile(tbi_id, os.path.join(work_dir, vcf_name + '.tbi'))\n\n cmd = ['bcftools', 'concat'] + [vcf_name for vcf_name in vcf_names] + ['-O', 'z']\n \n with open(os.path.join(work_dir, out_name), 'wb') as out_file:\n context.runner.call(job, cmd, work_dir=work_dir, outfile = out_file)\n\n cmd = ['tabix', '-f', '-p', 'vcf', out_name]\n context.runner.call(job, cmd, work_dir=work_dir)\n\n out_vcf_id = context.write_intermediate_file(job, os.path.join(work_dir, out_name))\n out_tbi_id = context.write_intermediate_file(job, os.path.join(work_dir, out_name + '.tbi'))\n\n return out_vcf_id, out_tbi_id",
"def FeaturesGen(ChopChopresults, outputDir, sgRNA_type):\n \n #make output Directory if it does not already exist\n if not os.path.isdir(outputDir):\n os.makedirs(outputDir)\n \n #list the directory contents \n for i,j,k in os.walk(ChopChopresults): #use walk to go through and find all directories\n \n if j == []: #no subdirectories\n saveDF = pd.DataFrame() #initiate dataframe\n for target in k: #loop through to find the sgRNA sequences\n if target.endswith('.offtargets'):\n with open(os.path.join(i,target), 'r+') as f:\n guide = f.readlines()\n #add them to a dataframe\n temp = pd.Series()\n temp['guideNo'] = target.split('.')[0] + sgRNA_type\n temp['guideSeq'] = guide.pop(0).rstrip()\n \n saveDF = saveDF.append(temp.to_frame().transpose())\n saveDF['type'] = 'sgRNA'\n \n if sgRNA_type == 'General' or sgRNA_type == None:\n saveDF['fwd'] = 'pink'\n saveDF['rev'] = 'green'\n elif sgRNA_type == 'GG':\n saveDF['fwd'] = 'yellow'\n saveDF['rev'] = 'plum'\n elif sgRNA_type == 'GA':\n saveDF['fwd'] = 'cyan'\n saveDF['rev'] = 'cornflower blue'\n \n \n #save to txt file with tab delimiter\n saveDF.to_csv(os.path.join(outputDir, os.path.basename(i) + '_features.txt'),\\\n index = False, header = False, sep = '\\t')\n \n del saveDF",
"def main():\n\n\n\n skulls_folder = os.listdir(RAW_IMAGE_DIRECTORY)\n\n # fetch and sort the .mnc and .tag files\n mnc_files = [f for f in skulls_folder if 'mnc' in f]\n tag_files = [f for f in skulls_folder if 'tag' in f]\n mnc_names = [i.split('.mnc')[0] for i in mnc_files]\n \n mnc_files.sort()\n tag_files.sort()\n mnc_names.sort()\n\n # Process and package ndarrays as tuples inside npy file\n package_to_npy(RAW_IMAGE_DIRECTORY, mnc_files, tag_files, mnc_names)\n \n print('\\n' * 5)\n\n # Push the npy files to GCP Cloud Storage\n upload_to_gcp(PROCESSED_IMAGE_DIRECTORY, GCP_PROJECT_NAME, GCP_BUCKET_NAME)",
"def prepare_subset_vcf_files_by_population():\n if not os.path.exists(VCF_BY_POPULATION_PATH):\n print(\"preparing subset vcf by population\")\n os.makedirs(VCF_BY_POPULATION_PATH)\n sleep(10)\n vcf_tools_runner = VCFToolsDockerRunner()\n samples = glob(f\"{SAMPLES_FOLDER}/*.csv\")\n\n with tqdm(total=len(samples)) as pbar:\n for sample in samples:\n sample = sample.replace('\\\\', '/')\n sample_name = get_filename_from_path(sample)\n sample_path = \"/\".join([IMAGE_SHARE_FOLDER_PATH] + sample.split('/')[1:])\n pbar.set_description(f\"Processing {sample_name}\")\n vcf_tools_runner(\n f\"vcf-subset -c {sample_path} \"\n f\"{IMAGE_SHARE_FOLDER_PATH}/{GENOTYPE_DATA_FOLDER}/{MERGED_VCF_FILE_NAME} | fill-an-ac > \"\n f\"{IMAGE_SHARE_FOLDER_PATH}/{VCF_BY_POPULATION_FOLDER}/{sample_name}.vcf\")\n pbar.update(1)\n else:\n print(f\"Subset VCF files by population already exist in: {VCF_BY_POPULATION_PATH}\")",
"def main():\n file_one_path, file_two_path, output_path =\\\n get_command_line_arguments(\n ['/home/ehler002/project/groups/go/Data/Cluster_Data/Dataset.txt',\n '/home/ehler002/project/groups/go/Data/Cluster_Data/translated_genes.fpkm_table',\n '/home/ehler002/project/groups/go/Data/Cluster_Data/Full_fpkm_Table.txt'])\n pattern = 'CRO_T'\n for file_path in [file_one_path, file_two_path]:\n assert os.path.exists(file_path), 'File %s does not exist.' % file_path\n start_time = datetime.datetime.now()\n print('Started concatenation at %s' % start_time)\n file_contents, headers = get_file_contents(file_two_path)\n file_contents = sort_file_contents(file_contents)\n file_contents = remove_pattern(file_contents, pattern)\n concatenate_files(file_one_path, file_contents, headers, output_path)\n print('Finished concatenation in %s' % (datetime.datetime.now() - start_time))",
"def process_cgc(path, return_dataframe=False, fusions=False):\n # read in data\n df = pd.read_table(path)\n\n # keep small somatic variants\n if not fusions:\n s = df['Mutation Types']\n is_small = s.str.contains('Mis|F|N|S').fillna(False)\n is_somatic = ~df['Tumour Types(Somatic)'].isnull()\n df = df[is_small & is_somatic].copy()\n\n # label oncogenes / TSG\n df['Is Oncogene (CGC)'] = 'No'\n df.loc[df['Role in Cancer'].fillna('').str.contains('oncogene'), 'Is Oncogene'] = 'Yes'\n df['Is Tumor Suppressor Gene (CGC)'] = 'No'\n df.loc[df['Role in Cancer'].fillna('').str.contains('TSG'), 'Is Tumor Suppressor Gene'] = 'Yes'\n df['Is Driver Gene (CGC)'] = 'Yes'\n\n # rename columns\n df = df.rename(columns={'Entrez GeneId': 'Entrez Gene ID', 'Gene Symbol': 'Hugo Symbol'})\n\n # get gene names\n if not return_dataframe:\n cgc_genes = df['Gene Symbol'].tolist()\n else:\n cgc_genes = df\n\n return cgc_genes\n else:\n # return fusion gene information\n has_fus_partner = ~df['Translocation Partner'].isnull()\n output_list = []\n for ix, row in df[has_fus_partner].iterrows():\n g1 = row[\"Gene Symbol\"]\n for g2 in row['Translocation Partner'].split(', '):\n output_list.append([g1, g2])\n output_df = pd.DataFrame(output_list, columns=[\"Gene1\", \"Gene2\"])\n output_df['GENE_ID'] = output_df['Gene1'] + '--' + output_df['Gene2']\n\n if not return_dataframe:\n cgc_genes = list(set(output_df[\"Gene1\"].unique()) | set(output_df[\"Gene2\"]))\n else:\n cgc_genes = output_df\n\n return cgc_genes",
"def main():\n\n args = parseArgs()\n\n path = args.path\n is_open_gl = args.g\n\n success, failure = genFiles(path, is_open_gl)\n\n print(\"Success: \", \", \".join(success))\n print(\"Failure: \", \", \".join(failure))\n\n ratio = len(success) / (len(success) + len(failure))\n\n print(\"%% success = %.2f\" % (100 * ratio))",
"def main():\n\n #Getthefiles\n all_plasmid_path = []\n path_to_all_info = '/Users/gustavotamasco/mdrkrp/project_MDR_KRP/all_vir_files'\n dirpath=os.getcwd()\n os.chdir(path_to_all_info)\n files = list_files_simple(path_to_all_info)\n\n bad_files = [\"Hemo_536_vfdb_genome.tsv\", \"MI_119_vfdb.tsv\", \"Hemo_536_vfdb.tsv\",\n \"MI_119_vfdb_genome.tsv\",\n \"URO_775_vfdb_genome.tsv\", \"Hemo_825_vfdb.tsv\", \"URO_775_vfdb.tsv\",\n \"Hemo_825_vfdb_genome.tsv\",\n \"MI_329_vfdb.tsv\", \"MI_569_vfdb_genome.tsv\", \"MI_329_vfdb_genome.tsv\",\n \"MI_569_vfdb.tsv\",\n \"Hemo_989_vfdb_genome.tsv\", \"MI_78_vfdb.tsv\", \"Hemo_989_vfdb.tsv\",\n \"MI_78_vfdb_genome.tsv\"]\n\n final_files = list([x for x in files if x not in bad_files])\n print(len(final_files))\n\n\n '''Building metadata'''\n #All genes to each genome\n metadata = {}\n for file in final_files:\n with open(file) as vir_info:\n parse_genes_v2(file, vir_info, metadata)\n\n\n #All genes that occured\n all_genes = sorted(set(get_all_genes(metadata)))\n print(all_genes)\n\n #All vir classess\n\n\n '''Build dataframe for the classes plot'''\n df_info = {}\n df_major_classes = build_class_df(df_info, all_genes, metadata)\n df = pd.DataFrame.from_dict(df_major_classes, orient='index', columns=['entA', 'entB', 'entE', 'entS', 'fepA', 'fepB', 'fepC', 'fepD', 'fepG', 'fimA', 'fimE', 'fyuA', 'irp1', 'irp2', 'mgtB', 'mgtC', 'ompA', 'xcpA/pilD', 'xcpR', 'yagV/ecpE', 'yagW/ecpD', 'yagX/ecpC', 'yagY/ecpB', 'yagZ/ecpA', 'ybtA', 'ybtE', 'ybtP', 'ybtQ', 'ybtS', 'ybtT', 'ybtU', 'ybtX', 'ykgK/ecpR'])\n #df = df.transpose()\n #df.to_csv('arg_genes.csv', sep='\\t', encoding='utf-8')\n #sns.set(font_scale=0.65)\n #Need both\n #not_full = sns.clustermap(df, label='small', cmap=\"vlag\", standard_scale=1, linewidths=0)\n full_plot = sns.clustermap(df, label='small', cmap=\"vlag\", linewidths=0)\n #plt.title('Antibiotic resistance genes across 34 organism', fontsize=15)\n #sns.set(font_scale=1)\n plt.show()\n full_plot.savefig(\"final_genome_plasmid_vir.pdf\", bbox_inches='tight')\n #not_full.savefig(\"final_genome_plasmid_vir_scalled.pdf\", bbox_inches='tight')",
"def GatherVcfs(\n b: hb.Batch,\n input_vcfs: List,\n disk_size: int,\n output_vcf_path: str = None,\n) -> Job:\n j = b.new_job('VQSR: FinalGatherVcf')\n j.image(utils.GATK_IMAGE)\n j.memory(f'16G')\n j.storage(f'{disk_size}G')\n j.declare_resource_group(\n output_vcf={'vcf.gz': f'{NAME}_gathered.vcf.gz', 'vcf.gz.tbi': f'{NAME}_gathered.vcf.gz.tbi'}\n )\n\n input_cmdl = ' '.join([f'--input {v}' for v in input_vcfs])\n j.command(\n f\"\"\"set -euo pipefail\n # --ignore-safety-checks makes a big performance difference so we include it in \n # our invocation. This argument disables expensive checks that the file headers \n # contain the same set of genotyped samples and that files are in order \n # by position of first record.\n gatk --java-options -Xms6g \\\\\n GatherVcfsCloud \\\\\n --gather-type BLOCK \\\\\n {input_cmdl} \\\\\n --output {j.output_vcf['vcf.gz']}\n tabix {j.output_vcf['vcf.gz']}\"\"\"\n )\n if output_vcf_path:\n b.write_output(j.output_vcf, f'{output_vcf_path}{NAME}_gathered{LABEL}')\n return j",
"def generate_all(files, alignement_h5f, input_h5f,\n nframes=7, vad=None):\n def try_remove(fname):\n try:\n os.remove(fname)\n except:\n pass\n try:\n directory = os.path.dirname(os.path.abspath(input_h5f))\n\n # create temporary files:\n _, fb_h5f = tempfile.mkstemp(dir=directory)\n _, fb_mvn_h5f = tempfile.mkstemp(dir=directory)\n os.remove(fb_h5f)\n os.remove(fb_mvn_h5f)\n\n # generate mfccs:\n h5features_compute(files, alignement_h5f, featfunc=do_mfccs)\n\n # generate stacked mvn fbanks:\n h5features_compute(files, fb_h5f, featfunc=do_fbank)\n mean_variance_normalisation(fb_h5f, fb_mvn_h5f, vad=vad)\n h5features_feats2stackedfeats(fb_mvn_h5f, input_h5f, nframes=nframes)\n finally:\n try_remove(fb_h5f)\n try_remove(fb_mvn_h5f)",
"def main():\n processSetOfCerFiles(sys.argv[1:])",
"def main():\n onlyfiles = [f for f in listdir(RAWDATA_PATH) if isfile(join(RAWDATA_PATH, f))]\n for file in onlyfiles:\n create_RCSB_fastas(file)",
"def main():\n nbin = 60\n nbinM = 100\n \n maxmag = -20\n minmag = -29\n mag = np.linspace(minmag,maxmag,nbinM) \n minz = 0.1\n maxz = 5\n z = np.linspace(minz,maxz,nbin)\n \n minv = 10**1.6\n maxv = 10**2.6\n vel = np.linspace(minv,maxv,nbin)\n \n # writing the script for galfic once and for all\n gl.write_script()\n\n result = [[] for i in range(5)]\n \n for zl in z:\n zrange = z[np.where(z > zl)]\n for zs in zrange:\n for v in vel:\n gl.write_initfile(v,zl,zs)\n os.system('./script_gl > /dev/null 2>&1')\n sigma = gl.analyse_output(mag,zs,zl,v)\n for i in range(nbinM):\n result[0].append(zs)\n result[1].append(zl)\n result[2].append(v)\n result[3].append(mag[i])\n result[4].append(sigma[i])\n\n np.savez('crosssection.npz',x=result)",
"def generate_files(self):\n\t\tapply_stemmer, xml_file, query_file, expected_file = self.read_config_file()\n\t\tself.generate_query_file(query_file, xml_file, apply_stemmer)\n\t\tself.generate_expected_file(expected_file, xml_file)\n\t\tlogging.info('FINALIZADO: MÓDULO PROCESSADOR DE CONSULTAS')",
"def cleanup_sub(vg_dir):\n\tnew_dir = vg_dir+'_clean'\n\tos.mkdir(new_dir)\n\tphot_vg_files = filter(lambda x: '.txt' in x, os.listdir(vg_dir))\n\t# phot_vg_phottot_files = filter(lambda x: 'phottot' in x, phot_vg_files)\n\tfor f in phot_vg_files:\n\t\tdf = pd.read_table(vg_dir+'/'+f,\n\t\t\tnames = ['id','ra','dec','flux','unc','x','y','flux_uncor'],\n\t\t\tdelim_whitespace=True)\n\t\tstarnums, dithers = zip(*[i.split('_')[1:4:2] for i in df.id])\n\t\tdf['id'] = [int(i) for i in starnums]\n\t\tdf['dither'] = [int(i) for i in dithers]\n\t\tsorted_df = df.sort(['id','dither'])\n\t\t# new: remove the aperture correction applied by varoujan to the uncertainties\n\t\tch = f.split('-')[2]\n\t\tif ch == 'ch1':\n\t\t\tsorted_df['unc'] /= 1.205\n\t\telif ch == 'ch2':\n\t\t\tsorted_df['unc'] /= 1.221\n\t\telse:\n\t\t\traise(TypeError(\"unexpected channel\"))\n\t\tfnew = '_'.join(f.split('-')[::2])+'_raw.csv'\n\t\tsorted_df.to_csv(new_dir+'/'+fnew, index=False, float_format='%.8f')\n\t\t# also calculate mean RA/Dec, flux, and quadrature sum uncertainty\n\t\tgrouped = sorted_df.groupby('id')\n\t\tagg = grouped[['ra','dec','flux']].aggregate(np.median)\n\t\tquadsum = grouped['unc'].aggregate(lambda x: np.sqrt(np.sum(x**2)))\n\t\tagg['unc'] = quadsum\n\t\tfnew = '_'.join(f.split('-')[::2])+'_agg.csv'\n\t\tagg.to_csv(new_dir+'/'+fnew, index=True, float_format='%.8f')",
"def __load_cogs(self):\n for cog in self.__cogs.get():\n logging.info('loading %s', cog)\n self.load_extension(cog)",
"def features_from_folder(label_folder, audio_folder, output_folder):\n print('Listing label files from folder.')\n #scan labels folder\n labels_list = os.listdir(label_folder)\n label_files = []\n for filename in labels_list:\n #get its extension\n file_extension = filename.split('.')[-1]\n if file_extension != 'txt':\n continue\n #save to without its extension\n label_files.append(filename[:-4])\n\n print('Listing audio files from folder.')\n #scan audio folder\n audios_list = os.listdir(audio_folder)\n audio_files = []\n for filename in audios_list:\n #get its extension\n file_extension = filename.split('.')[-1]\n if file_extension != 'wav':\n continue\n #save to without its extension\n audio_files.append(filename[:-4])\n\n print('Removing files without matches')\n #use only the files with matching audio/label\n files_to_process = []\n for label_file in label_files:\n if label_file in audio_files:\n files_to_process.append(label_file)\n\n print('Processing each file...')\n i = 1\n class_count = {}\n total_f = len(files_to_process)\n #for each file\n for processing in files_to_process:\n print('File', str(i) + '/' + str(total_f))\n i += 1\n\n #\n label_file = os.path.join(label_folder, processing + \".txt\")\n audio_file = os.path.join(audio_folder, processing + \".wav\")\n\n #get the segments from the corresponding label file\n segments = get_segments(label_file)\n\n #\n total_s = len(segments)\n j = 1\n #for each segment\n for segment in segments:\n print('\\tSegment', str(j) + '/' + str(total_s), segment['class'])\n j += 1\n\n if class_count.get(segment['class']) is None:\n class_count[segment['class']] = 1\n else:\n class_count[segment['class']] += 1\n output_filename = segment['class']\n output_filename += '-' + format(class_count[segment['class']], '04d')\n output_filename = os.path.join(output_folder, output_filename)\n\n #get its features\n segment_features = features_from_label(audio_file, segment)\n\n #save it to a file\n fe.write_as_bin(output_filename, segment_features)",
"def generate_megafile():\n\n print(\"\\nFetching testing dataset…\")\n testing = get_testing()\n\n print(\"\\nFetching ECDC dataset…\")\n ecdc = get_ecdc()\n\n location_mismatch = set(testing.location).difference(set(ecdc.location))\n for loc in location_mismatch:\n print(f\"<!> Location '{loc}' has testing data but is absent from ECDC data\")\n\n print(\"\\nFetching OxCGRT dataset…\")\n cgrt = get_cgrt()\n\n all_covid = (\n ecdc\n .merge(testing, on=[\"date\", \"location\"], how=\"outer\")\n .merge(cgrt, on=[\"date\", \"location\"], how=\"left\")\n .sort_values([\"location\", \"date\"])\n )\n\n # Add ISO codes\n print(\"Adding ISO codes…\")\n iso_codes = pd.read_csv(os.path.join(INPUT_DIR, \"iso/iso3166_1_alpha_3_codes.csv\"))\n\n missing_iso = set(all_covid.location).difference(set(iso_codes.location))\n if len(missing_iso) > 0:\n print(missing_iso)\n raise Exception(\"Missing ISO code for some locations\")\n\n all_covid = iso_codes.merge(all_covid, on=\"location\")\n\n # Add continents\n print(\"Adding continents…\")\n continents = pd.read_csv(\n os.path.join(INPUT_DIR, \"owid/continents.csv\"),\n names=[\"_1\", \"iso_code\", \"_2\", \"continent\"],\n usecols=[\"iso_code\", \"continent\"],\n header=0\n )\n\n all_covid = continents.merge(all_covid, on=\"iso_code\", how=\"right\")\n\n # Add macro variables\n # - the key is the name of the variable of interest\n # - the value is the path to the corresponding file\n macro_variables = {\n \"population\": \"un/population_2020.csv\",\n \"population_density\": \"wb/population_density.csv\",\n \"median_age\": \"un/median_age.csv\",\n \"aged_65_older\": \"wb/aged_65_older.csv\",\n \"aged_70_older\": \"un/aged_70_older.csv\",\n \"gdp_per_capita\": \"wb/gdp_per_capita.csv\",\n \"extreme_poverty\": \"wb/extreme_poverty.csv\",\n \"cardiovasc_death_rate\": \"gbd/cardiovasc_death_rate.csv\",\n \"diabetes_prevalence\": \"wb/diabetes_prevalence.csv\",\n \"female_smokers\": \"wb/female_smokers.csv\",\n \"male_smokers\": \"wb/male_smokers.csv\",\n \"handwashing_facilities\": \"un/handwashing_facilities.csv\",\n \"hospital_beds_per_thousand\": \"owid/hospital_beds.csv\",\n \"life_expectancy\": \"owid/life_expectancy.csv\",\n \"human_development_index\": \"un/human_development_index.csv\",\n }\n all_covid = add_macro_variables(all_covid, macro_variables)\n\n print(\"Writing to CSV…\")\n all_covid.to_csv(os.path.join(DATA_DIR, \"owid-covid-data.csv\"), index=False)\n\n print(\"Writing to XLSX…\")\n all_covid.to_excel(os.path.join(DATA_DIR, \"owid-covid-data.xlsx\"), index=False)\n\n print(\"Writing to JSON…\")\n df_to_json(all_covid, os.path.join(DATA_DIR, \"owid-covid-data.json\"), macro_variables.keys())\n\n # Store the last updated time\n timestamp_filename = os.path.join(DATA_DIR, \"owid-covid-data-last-updated-timestamp.txt\")\n with open(timestamp_filename, \"w\") as timestamp_file:\n timestamp_file.write(datetime.utcnow().replace(microsecond=0).isoformat())\n\n print(\"All done!\")",
"def gci(path):\n parents = os.listdir(path)\n for parent in parents:\n if parent == \"forgifs\" or parent == \"hilariousgifs\":\n pass\n else:\n child = os.path.join(path,parent)\n #print(child)\n if os.path.isdir(child):\n gci(child)\n else:\n filepath.append(child)\n #print(child)",
"def main():\n args = get_args()\n out_dir = args.outdir\n pct_gc = args.pct_gc\n\n if not os.path.isdir(out_dir):\n os.makedirs(out_dir)\n\n if not 0 < pct_gc <= 100:\n die('--pct_gc \"{}\" must be between 0 and 100'.format(pct_gc))\n\n num_seqs = 0\n for i, file in enumerate(args.fasta, start=1):\n if not os.path.isfile(file):\n warn('\"{}\" is not a file'.format(file))\n continue\n\n print('{:3}: {}'.format(i, os.path.basename(file)))\n\n base, ext = os.path.splitext(os.path.basename(file))\n high_file = os.path.join(out_dir, ''.join([base, '_high', ext]))\n low_file = os.path.join(out_dir, ''.join([base, '_low', ext]))\n\n high_fh = open(high_file, 'wt')\n low_fh = open(low_file, 'wt')\n\n for rec in SeqIO.parse(file, 'fasta'):\n num_seqs += 1\n bases = Counter(rec.seq.upper())\n gc = bases.get('G', 0) + bases.get('C', 0)\n pct = int((gc / len(rec.seq)) * 100)\n SeqIO.write(rec, low_fh if pct < pct_gc else high_fh, 'fasta')\n\n print('Done, wrote {} sequence{} to out dir \"{}\"'.format(\n num_seqs, '' if num_seqs == 1 else 's', out_dir))"
] | [
"0.6848684",
"0.6842689",
"0.66581684",
"0.65295035",
"0.6394109",
"0.6241394",
"0.62080246",
"0.6191771",
"0.6087738",
"0.60208416",
"0.5983195",
"0.59366655",
"0.5929118",
"0.5920719",
"0.5885988",
"0.58807164",
"0.5872283",
"0.5867078",
"0.5824099",
"0.5782577",
"0.57666737",
"0.5722888",
"0.56962204",
"0.56616205",
"0.56566554",
"0.5621083",
"0.56126213",
"0.5597145",
"0.5577475",
"0.5570402"
] | 0.77105707 | 0 |
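
A minimal usage sketch for the GenotypeGVCFs record above, assuming the generated sbatch scripts are handed to SLURM with the standard sbatch command; submit_sbatch_files is a hypothetical helper and is not part of the original pipeline code.

import subprocess

def submit_sbatch_files(sbatch_files):
    """Submit each generated sbatch script to SLURM and return the job ids."""
    job_ids = []
    for sbatch_file in sbatch_files:
        # sbatch prints "Submitted batch job <id>" on success
        result = subprocess.run(["sbatch", sbatch_file],
                                capture_output=True, text=True, check=True)
        job_ids.append(result.stdout.strip().split()[-1])
    return job_ids

# e.g. job_ids = submit_sbatch_files(GenotypeGVCFs())

Collecting the job ids this way makes it possible to poll the queue or wire up --dependency flags for downstream joint-calling steps.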
Returns a duplicate of the profile instance. | def duplicate(self):
duplicate = Profile()
for i in self.__dict__:
if type(getattr(self, i)) is dict:
setattr(duplicate, i, getattr(self, i).copy())
else:
setattr(duplicate, i, getattr(self, i))
return duplicate | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def copy(self):\n return Population(self)",
"def copy(self):\n return self.__class__(dict(self))",
"def copy(self):\n return self.__class__(self)",
"def copy(self):\n return self.__class__(self)",
"def strip_copy(self):\n return strip_profiles_copy(self)",
"def get_full_profile(self) -> Profile:\n return Profile(**{**self.profile, **self.contact})",
"def copy(self):\n out = type(self).__new__(self.__class__)\n out.__dict__.update(self.__dict__)\n # make sure the copy has its own unique random number generator\n seed_seq = self.rng._bit_generator._seed_seq.spawn(1)[0]\n out.__dict__['rng'] = get_generator(seed_seq)\n return out",
"def copy(self):\n new = object.__new__(type(self))\n new.avatar_hash = self.avatar_hash\n new.avatar_type = self.avatar_type\n new.boosts_since = self.boosts_since\n new.flags = self.flags\n new.joined_at = self.joined_at\n new.nick = self.nick\n new.pending = self.pending\n role_ids = self.role_ids\n if (role_ids is not None):\n role_ids = (*role_ids,)\n new.role_ids = role_ids\n new.timed_out_until = self.timed_out_until\n return new",
"def clone(self):\n return self.__class__(self, self.spectrum, wallet=self.wallet)",
"def copy(self):\n cls = self.__class__\n result = cls.__new__(cls)\n result.__dict__.update(self.__dict__)\n return result",
"def clone(self):\n return self.copy()",
"def clone(self):\n return self",
"def copy(self):\n new = self\n return new",
"def copy(self):\n return self.__class__(**vars(self))",
"def copy(self):\n return object.__new__(type(self))",
"def copy(self):\r\n return copy.copy(self)",
"def copy (self):\n return self.__class__(self.name, self[:])",
"def copy(self):\n return copy.copy(self)",
"def copy(self):\n return copy.copy(self)",
"def copy(self):\n return copy.copy(self)",
"def copy(self):\n return self.__copy__()",
"def copy(self):\n return self.__copy__()",
"def copy(self):\n return self.__copy__()",
"def copy(self):\n return self.__copy__()",
"def copy(self):\n\t\ttemp = self.__class__()\n\t\ttemp.copy_from(self)\n\t\treturn temp",
"def copy(self):\n return copy(self)",
"def copy(self):\n return copy(self)",
"def __copy__(self):\n return self.copy()",
"def copy(self):\n return self.mutate().simple_copy()",
"def copy(self):\n return self.__class__(self.value, self.is_cloud)"
] | [
"0.65038234",
"0.6457992",
"0.6320341",
"0.6320341",
"0.6286995",
"0.62803096",
"0.6278374",
"0.62589824",
"0.6257265",
"0.62398297",
"0.62353045",
"0.6209959",
"0.6207495",
"0.61983556",
"0.61954135",
"0.61781174",
"0.61590487",
"0.6154005",
"0.6154005",
"0.6154005",
"0.61380666",
"0.61380666",
"0.61380666",
"0.61380666",
"0.613654",
"0.6103244",
"0.6103244",
"0.6096617",
"0.6070034",
"0.6063737"
] | 0.81918967 | 0 |
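
A small self-contained sketch of the copy pattern used by duplicate() above: every attribute is carried over, and dict-valued attributes get their own shallow copy so the clone can be modified without touching the original. The Settings class below is hypothetical and exists only to demonstrate the behaviour.

class Settings(object):
    def __init__(self):
        self.Name = "Default"
        self.Months = {"January": "01"}

    def duplicate(self):
        clone = Settings()
        for attr in self.__dict__:
            value = getattr(self, attr)
            # copy dicts so the clone does not share mutable state with the original
            setattr(clone, attr, value.copy() if isinstance(value, dict) else value)
        return clone

original = Settings()
clone = original.duplicate()
clone.Months["February"] = "02"
assert "February" not in original.Months  # the original dict is untouched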
To save this profile instance to an xml file using a XmlWriter. xwriter -> should be a XmlWriter instance. | def save_to_xml(self, xwriter):
xwriter.WriteStartElement("Profile")
xwriter.WriteAttributeString("Name", self.Name)
xwriter.WriteStartAttribute("Version")
xwriter.WriteValue(self.Version)
xwriter.WriteEndAttribute()
for var_name in self.__dict__:
var_type = type(getattr(self, var_name))
if var_type is str and var_name != "Name":
self.write_string_to_xml(var_name, xwriter)
elif var_type is bool:
self.write_bool_to_xml(var_name, xwriter)
elif var_type is dict:
self.write_dict_to_xml(var_name, xwriter)
elif var_type is list and var_name != "ExcludeRules":
self.write_list_to_xml(var_name, xwriter)
xwriter.WriteStartElement("ExcludeRules")
xwriter.WriteAttributeString("Operator", self.ExcludeOperator)
xwriter.WriteAttributeString("ExcludeMode", self.ExcludeMode)
for rule in self.ExcludeRules:
if rule:
rule.save_xml(xwriter)
xwriter.WriteEndElement()
xwriter.WriteEndElement() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_profile(file_path, profile):\r\n try:\r\n xSettings = XmlWriterSettings()\r\n xSettings.Indent = True\r\n with XmlWriter.Create(file_path, xSettings) as writer:\r\n profile.save_to_xml(writer)\r\n except Exception, ex:\r\n MessageBox.Show(\"An error occured writing the settings file. The error was:\\n\\n\" + ex.message, \"Error saving settings file\", MessageBoxButtons.OK, MessageBoxIcon.Error)",
"def save_profiles(file_path, profiles, lastused=\"\"):\r\n try:\r\n xSettings = XmlWriterSettings()\r\n xSettings.Indent = True\r\n with XmlWriter.Create(file_path, xSettings) as writer:\r\n writer.WriteStartElement(\"Profiles\")\r\n if lastused:\r\n writer.WriteAttributeString(\"LastUsed\", \",\".join(lastused))\r\n for profile in profiles:\r\n profiles[profile].save_to_xml(writer)\r\n writer.WriteEndElement()\r\n except Exception, ex:\r\n MessageBox.Show(\"An error occured writing the settings file. The error was:\\n\\n\" + ex.message, \"Error saving settings file\", MessageBoxButtons.OK, MessageBoxIcon.Error)",
"def saveSessionToXML(self, filename):\r\n xmlStr = self.createXMLStr()\r\n \r\n #Write to the file\r\n #xml.dom.ext.PrettyPrint(doc, open(filename, 'w'))\r\n xmlFile = open(filename, 'w')\r\n xmlFile.write(xmlStr)\r\n xmlFile.close()",
"def saving_file(xml):\r\n\r\n xml_string = etree.tostring(xml)\r\n parsed = minidom.parseString(xml_string)\r\n with open(self.app_path + \"\\\\temp_\\\\\" + file_name + \".xml\", \"w\") as file:\r\n file.write(parsed.toprettyxml(indent=\" \"))",
"def write_xml(self, xmlfile):\n system.xml.write_file(xmlfile, self.status, 'status')",
"def xmlWrite(self, xmlWriter, font, value, name, attrs):\n raise NotImplementedError(self)",
"def saveToXml(self) -> org.jdom.Element:\n ...",
"def save_profile(self, dir):\n filename = \"profile.pkl\"\n with open(osp.join(dir, filename), \"wb\") as f:\n pickle.dump(self, f)",
"def save(self, filename=None):\n f = filename if filename else self.path\n etree.register_namespace('', TEI)\n etree.register_namespace('mith', MITH)\n self.doc.write(f, xml_declaration=True, encoding='utf-8', method='xml')",
"def to_xml_file(self, xml_file_path):\n s = self.to_xml()\n with open(xml_file_path, \"w+b\") as f:\n f.write(s)",
"def save_profile(self):\n self.save()",
"def xmlwrite(self, doc, filename):\n pathname = os.path.join(self.session.session_dir, filename)\n f = open(pathname, \"w\")\n doc.writexml(writer=f, indent=\"\", addindent=\" \", newl=\"\\n\", encoding=\"UTF-8\")\n f.close()",
"def save(self, save_path=None):\n if self._xml is None:\n raise IOError(\"There's nothing to save\")\n\n path = self._path_to_xml if save_path is None else save_path\n\n with open(path, 'w') as f:\n rough_string = Et.tostring(self._xml, 'utf-8')\n par = etree.XMLParser(remove_blank_text=True)\n elem = etree.XML(rough_string, parser=par)\n parsed = minidom.parseString(etree.tostring(elem))\n f.write(parsed.toprettyxml(indent=\" \"))",
"def save(self, pretty=True):\n self.endInstance()\n if pretty:\n _indent(self.root, whitespace=self._whiteSpace)\n tree = ET.ElementTree(self.root)\n tree.write(self.path, encoding=\"utf-8\", method='xml', xml_declaration=True)\n if self.logger:\n self.logger.info(\"Writing %s\", self.path)",
"def save_xml(self, filename):\n if \".xml\" not in filename:\n filename = filename + \".xml\"\n\n shutil.copyfile(self.env.model_file, filename)",
"def Save_xml(self, accounts):\n try:\n\n self.extension = \".xml\"\n\n colors.info(\"Saving as XML in {}{}\".format(self.file, self.extension))\n\n Main = ET.Element(\"SpotCheck\")\n\n SpotifyFree = ET.SubElement(Main, 'SpotifyFree')\n SpotifyPremium = ET.SubElement(Main, 'SpotifyPremium')\n PremiumFamily = ET.SubElement(Main, 'PremiumFamily')\n AdminPremiumFamily = ET.SubElement(Main, 'AdminPremiumFamily')\n BadAccounts = ET.SubElement(Main, 'BadAccounts')\n\n for account in accounts:\n if account.get(\"account_login\") == \"error\":\n temp = ET.SubElement(BadAccounts, \"account\")\n temp.set(\"Username\", account[\"Username\"])\n temp.set(\"Password\", account[\"Password\"])\n else:\n if account.get(\"AccountType\") == \"Spotify Free\":\n temp = ET.SubElement(SpotifyFree, \"account\")\n temp.set(\"Username\", account[\"Username\"])\n temp.set(\"Password\", account[\"Password\"])\n temp.set(\"Country\", account[\"Country\"])\n elif account.get(\"AccountType\") == \"Spotify Premium\":\n temp = ET.SubElement(SpotifyPremium, \"account\")\n temp.set(\"Username\", account[\"Username\"])\n temp.set(\"Password\", account[\"Password\"])\n temp.set(\"Country\", account[\"Country\"])\n elif account.get(\"AccountType\") == \"Premium Family\":\n if account.get(\"Admin\"):\n temp = ET.SubElement(AdminPremiumFamily, \"account\")\n temp.set(\"Username\", account[\"Username\"])\n temp.set(\"Password\", account[\"Password\"])\n temp.set(\"Country\", account[\"Country\"])\n else:\n temp = ET.SubElement(PremiumFamily, \"account\")\n temp.set(\"Username\", account[\"Username\"])\n temp.set(\"Password\", account[\"Password\"])\n temp.set(\"Country\", account[\"Country\"])\n XML = ET.tostring(Main)\n with open(self.file + self.extension, \"w\") as output_:\n output_.write(XML)\n colors.correct(\"Done! All saved successfully\")\n except Exception as e:\n colors.error(str(e))\n _exit(1)",
"def saveXML(self, filename):\n root = ET.Element('root')\n pklot = ET.SubElement(root, \"ParkingLot\")\n\n lotname = ET.SubElement(pklot, \"LotName\", name=str(self.name))\n idCounter = ET.SubElement(pklot, \"NextAvailableID\", counter=str(self.spotIDCounter))\n for spot in self.parkingSpots:\n ET.SubElement(pklot, 'Spot', id=str(spot.id), location=' '.join(str(x) for x in spot.location))\n\n tree = ET.ElementTree(root)\n tree.write(filename)",
"def save_as(self, fname, base = None, indent = '', topns = True, namespaces = {}):\n with codecs.open(fname, \"w\", encoding=\"utf-8\") as outf:\n self.serialize_xml(outf.write, base=base, indent=indent, topns=topns, namespaces=namespaces)",
"def save(self, filename):\n o = open(filename, 'w')\n o.write(self.write())\n o.close()",
"def export_to_file(self, filename):\n if len(filename.split(\".\")) == 1:\n filename += \".xml\"\n xmlstring = self._dommodel.toprettyxml(\" \", \"\\n\")\n with open(filename, \"w\") as f:\n f.write(xmlstring)",
"def write_to_xml(filename, xmlelement):\n xmlpretty = prettify(xmlelement)\n with open(filename, 'w') as f:\n f.write(xmlpretty)",
"def saveState(self, file):\n state = self.context.getState(getPositions=True, getVelocities=True, getParameters=True, getIntegratorParameters=True)\n xml = mm.XmlSerializer.serialize(state)\n if isinstance(file, str):\n with open(file, 'w') as f:\n f.write(xml)\n else:\n file.write(xml)",
"def write_xosc(self, generated_xml):\n reparsed_xml = minidom.parseString(generated_xml).toprettyxml(indent=\" \")\n xosc_file = open(self._filepath, \"w\")\n xosc_file.write(reparsed_xml)\n xosc_file.close()\n\n msg = QMessageBox()\n if self._warning_message:\n msg.setIcon(QMessageBox.Warning)\n text = f\"Exported OpenSCENARIO file {self._filepath} has warnings!\\n\\n\"\n text += \"\\n\".join(self._warning_message)\n else:\n msg.setIcon(QMessageBox.Information)\n text = f\"Successfully exported OpenSCENARIO file to {self._filepath}\"\n msg.setText(text)\n msg.setWindowTitle(\"OpenSCENARIO Export\")\n msg.setStandardButtons(QMessageBox.Ok)\n msg.exec()",
"def _create_xml_report(self, test, xml_obj):\n xml_report_path = os.path.join(test.work_dir,\n self.XML_REPORT_PATH)\n with open(xml_report_path, 'w') as xml_report:\n xml_report.write(etree.tostring(xml_obj, pretty_print=True))",
"def save(self, fname):\n pass",
"def write(self, file_or_filename):\n etMap = revert(self)\n xmlTree = ET.ElementTree(etMap)\n xmlTree.write(file_or_filename)",
"def Save_Current_Profile(self):\r\n #name = tkFileDialog.asksaveasfilename()\r\n #if( name == \"\" ):\r\n # return\r\n #self.system.Save_Current_Profile(name)\r\n self.system.Save_Current_Profile()",
"def saveFile(self, filename):\n ret = libxml2mod.xmlSaveFile(filename, self._o)\n return ret",
"def save(self, filename):\n with open(filename, \"w\") as fp:\n dump(self, fp)",
"def write(self):\n temp_string = minidom.parseString(ET.tostring(self.root)).toprettyxml(encoding=\"UTF-8\")\n with open(self.xml_file, 'w') as f:\n f.write(temp_string)\n # f = open(self.xml_file, \"w\")\n # f.write(temp_string)\n # f.close()"
] | [
"0.75539035",
"0.66137195",
"0.6487311",
"0.64123094",
"0.61329263",
"0.6101124",
"0.60908806",
"0.60279024",
"0.60243684",
"0.59840643",
"0.59765226",
"0.58507067",
"0.585065",
"0.58390087",
"0.58205575",
"0.58169997",
"0.5809403",
"0.58001035",
"0.56534475",
"0.5646809",
"0.5583415",
"0.55652213",
"0.5553969",
"0.5545845",
"0.5525345",
"0.5517458",
"0.5493895",
"0.54818946",
"0.548075",
"0.5452474"
] | 0.7909768 | 0 |
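
The save_to_xml record above writes through the .NET System.Xml XmlWriter available to IronPython scripts; purely as an illustration, the same <Profile Name=... Version=...> layout can be sketched with the standard-library xml.etree.ElementTree. The setting names used here are made up for the example, and the sketch is not the script's actual API.

import xml.etree.ElementTree as ET

def profile_to_element(name, version, settings):
    """Build a <Profile Name=... Version=...> element from plain Python values."""
    profile = ET.Element("Profile", {"Name": name, "Version": str(version)})
    for key, value in settings.items():
        if isinstance(value, bool):
            ET.SubElement(profile, key).text = str(value)
        elif isinstance(value, str) and value:
            ET.SubElement(profile, key).text = value
    return profile

root = profile_to_element("Default", 1.0, {"FolderTemplate": "{series}", "UseFolders": True})
print(ET.tostring(root, encoding="unicode"))
# <Profile Name="Default" Version="1.0"><FolderTemplate>{series}</FolderTemplate><UseFolders>True</UseFolders></Profile>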
Writes a dictionary to an xml file in the form of <Item Name="key" Value="value" /> <Item Name="key" Value="value" /> etc. attribute_name -> The name of the dictionary attribute to write. xmlwriter -> The xml writer to write with. write_empty -> A bool of whether to write empty values to the xml file. Default is don't write them. | def write_dict_to_xml(self, attribute_name, xmlwriter, write_empty=False):
if attribute_name in ("IllegalCharacters", "Months"):
write_empty = True
dictionary = getattr(self, attribute_name)
xmlwriter.WriteStartElement(attribute_name)
for key in dictionary:
if dictionary[key] or write_empty:
xmlwriter.WriteStartElement("Item")
xmlwriter.WriteStartAttribute("Name")
xmlwriter.WriteValue(key)
xmlwriter.WriteEndAttribute()
xmlwriter.WriteStartAttribute("Value")
xmlwriter.WriteValue(dictionary[key])
xmlwriter.WriteEndAttribute()
xmlwriter.WriteEndElement()
xmlwriter.WriteEndElement() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeDictToXMLFile(outfile, target, dict):\n targetStr = \"\\t\\t<Target>%s</Target>\\n\" % (escape(target),)\n for key in dict.keys():\n outfile.write('\\t<AVU>\\n')\n outfile.write(targetStr)\n outfile.write(\"\\t\\t<Attribute>%s</Attribute>\\n\" % (escape(key),) )\n outfile.write(\"\\t\\t<Value>%s</Value>\\n\" % (escape(dict[key]),) )\n outfile.write('\\t\\t<Unit />\\n')\n outfile.write('\\t</AVU>\\n')",
"def write_to_xml(dictData, metadata, xmlfile):\n\tfout = codecs.open(xmlfile, 'w', 'utf-8')\n\tfout.write('<?xml version = \"1.0\" encoding = \"UTF-8\" standalone = \"no\" ?>\\n')\n\tfout.write('<?xml-stylesheet type=\"text/xsl\" href=\"maketable.xsl\"?>\\n')\n\tfout.write('<root>\\n')\n\tfout.write('<meta>\\n')\n\tfor key, value in metadata.items():\n\t\tfout.write('<' + key + '>' + value + '</' + key + '>\\n')\n\tfout.write('</meta>\\n')\n\tfout.write('<content>\\n')\n\tfor (hw, meanings, verse, verseNumDetails, pageNumDetails) in dictData:\n\t\txmlline = ''\n\t\txmlline += '<word><headword>' + hw + '</headword><meanings>'\n\t\tfor meaning in meanings:\n\t\t\txmlline += '<m>' + meaning + '</m>'\n\t\txmlline += '</meanings>'\n\t\txmlline += '<verse>'\n\t\tlines = verse.split('<BR>')\n\t\tfor line in lines:\n\t\t\txmlline += '<line>' + line + '</line>'\n\t\txmlline += '</verse>'\n\t\txmlline += '<verseNumber>' + verseNumDetails + '</verseNumber>'\n\t\txmlline += '<pageNumber>' + pageNumDetails + '</pageNumber></word>'\n\t\t# Write in babylon format. <BR><BR> is to separate verses.\n\t\tfout.write(xmlline + '\\n')\n\t\txmlline = ''\n\tfout.write('</content>\\n</root>')\n\tfout.close()\n\n\t# Give some summary to the user\n\tprint('XML file generated. Success!')\n\tprint('{} metadata lines and {} content lines written to XML file.'.format(len(metadata), len(dictData)))",
"def write_dictionary(args, dictio):\n if not args.dictfile.endswith(\".file\"):\n args.dictfile += \".file\"\n with open(args.dictfile, \"wb\") as f:\n dump(dictio, f, protocol=HIGHEST_PROTOCOL)",
"def write(self, file_or_filename):\n etMap = revert(self)\n xmlTree = ET.ElementTree(etMap)\n xmlTree.write(file_or_filename)",
"def file_write(filename, dic):\n d = dic \n f = open(filename, 'w') \n f.write(str(d))\n f.close()",
"def write_dict(outputfilename, dictionary):\r\n # May want to modify this code to pickle the key and value and alter the read dictionary to do the same.\r\n try:\r\n with open(outputfilename, 'w', newline='', encoding='utf-8') as outfile:\r\n for key, value in dictionary.items():\r\n outfile.write('%s,%s\\n' % (key, value))\r\n except:\r\n input(\"File still open! Please close and press enter to continue\")\r\n with open(outputfilename, 'w', newline='', encoding='utf-8') as outfile:\r\n for key, value in dictionary.items():\r\n outfile.write('%s,%s\\n' % (key, value))",
"def write_dictionary(dictionary, path):\n with open(path, 'w+') as file:\n file.truncate() # Erase contents of config file\n for key in dictionary:\n file.write(f'{key}={dictionary[key]}\\n')",
"def SaveDictFile(file,dict_):\n with open(file,'w') as f:\n for id in dict_:\n f.write(dict_[id] + \"=\" + str(id)+\"\\n\")",
"def _write_dict_to_mdin(self, f, dictionary):\n\n for key, val in dictionary.items():\n if val is not None:\n f.write(\" {:15s} {:s},\\n\".format(key+\" =\", str(val)))\n f.write(\" /\\n\")",
"def write_string_to_xml(self, attribute_name, xmlwriter, write_empty=True):\r\n string = getattr(self, attribute_name)\r\n if string or write_empty:\r\n xmlwriter.WriteElementString(attribute_name, string)",
"def GenerateXML(dictionary, fileName=\"labelling.xml\") : \n root = gfg.Element(\"annotation\") \n #the big section is called Annotation\n for key in dictionary:\n #for every polygon list in inside object witho subelement name and attributes and the type \"polygon\"\n objectElement = gfg.Element(\"object\") \n root.append(objectElement) \n subElement1 = gfg.SubElement(objectElement, \"name:\".strip(\":\"))\n subElement1.text = str(dictionary[key][\"name\"])\n subElement2 = gfg.SubElement(objectElement, \"attributes\".strip(\":\"))\n subElement2.text = str(dictionary[key][\"attributes\"])\n subElement3 = gfg.SubElement(objectElement, \"polygon\")\n \n for i in range(0, len(dictionary[key])-2):\n #for every vertex of the polygon list it's rounded x, y on xml\n SubInsidePolygon = gfg.SubElement(subElement3, \"pt\")\n sub_x = gfg.SubElement(SubInsidePolygon, \"x\")\n sub_y = gfg.SubElement(SubInsidePolygon, \"y\")\n sub_x.text = str(int(round(dictionary[key][\"x_y_\" + str(i)][0])))\n sub_y.text = str(int(round(dictionary[key][\"x_y_\" + str(i)][1])))\n tree = gfg.ElementTree(root) \n #create the xml tree\n with open (fileName, \"wb\") as files : \n tree.write(files) \n #if xml does not exist create one otherwise rewrite to it",
"def _write_event_xml(infodict, event_dir):\n event_info = infodict['input']['event_information']\n\n otime = datetime.strptime(event_info['origin_time'], '%Y-%m-%dT%H:%M:%SZ')\n eqdict = {'id': event_info['event_id'],\n 'lat': '%.4f' % event_info['latitude'],\n 'lon': '%.4f' % event_info['longitude'],\n 'depth': '%.1f' % event_info['depth'],\n 'mag': '%.1f' % event_info['magnitude'],\n 'year': '%i' % otime.year,\n 'month': '%i' % otime.month,\n 'day': '%i' % otime.day,\n 'hour': '%i' % otime.hour,\n 'minute': '%i' % otime.minute,\n 'second': '%i' % otime.second,\n 'timezone': 'GMT',\n 'locstring': event_info['event_description']}\n eq = Element('earthquake', eqdict)\n xmlfile = os.path.join(event_dir, 'event.xml')\n tree = ElementTree(eq)\n tree.write(xmlfile)",
"def meta2xml(meta, filename):\n\n # this is stupid, just use dict2xml\n xml = dict2xml(meta)\n with open(filename, 'w+') as output:\n output.write(xml)",
"def write_list_to_xml(self, attribute_name, xmlwriter, write_empty=False):\r\n attribute_list = getattr(self, attribute_name)\r\n xmlwriter.WriteStartElement(attribute_name)\r\n for item in attribute_list:\r\n if item or write_empty:\r\n xmlwriter.WriteElementString(\"Item\", item)\r\n xmlwriter.WriteEndElement()",
"def write_config_file(input_dict, file_name, entry_char='>',\n attribution_char='=', usekeys=None):\n fp = open(file_name, \"w\")\n fp.write(write_config_string(input_dict, entry_char, attribution_char,\n usekeys))\n fp.close()",
"def xml_obj(dict):\n string = \"\"\n for key in dict.keys():\n string += ' <{}>{}</{}>\\n'.format(key, dict[key], key)\n return string",
"def write_completed_dictionary_to_file(the_dict):\n\ttry:\n\t\toutputLocation = open('usable_dictionary.json','w')\n\t\toutputString = str(the_dict)\n\t\toutputLocation.write(outputString)\n\t\toutputLocation.close()\n\texcept IOError:\n\t\tprint (\"could not open file\")",
"def ConvertToXML (given_dict) :\r\n stream_thing = cStringIO.StringIO()\r\n WriteToXMLStream(given_dict, stream_thing, 'top')\r\n return stream_thing.getvalue()",
"def _output_dict(self):\n lang = self.ddnGuiLanguage.get()\n\n fileout = os.path.normpath('{}/{}-{}.xml'.\\\n format(self.MapCreator, self.Source, self.ddnCurProject.get()))\n linesout = ['<?xml version=\"1.0\" encoding=\"UTF-8\"?>', \\\n '<DictionarySet xmlns:mc=\"urn:fmosoft-map-creator\" xmlns=\"urn:fmosoft-map-creator\" Version=\"1\">', \\\n ' <Dictionary SourceLanguage=\"{}\" SourceLanguageIsPredefined=\"true\" TargetLanguage=\"{}\" TargetLanguageIsPredefined=\"false\">'.\\\n format(self.Source, self.ddnCurProject.get()), \\\n ]\n for child in self.tree.get_children('approved'):\n vv = self.tree.item(child)['values']\n linesout.append(' <Translation Source=\"{}\" Target=\"{}\"/>'.format(vv[0], vv[1]))\n linesout.append(' </Dictionary>')\n linesout.append('</DictionarySet>')\n linesout.append('')\n\n if os.path.exists(fileout):\n os.remove(fileout)\n\n if fileout:\n output = codecs.open(fileout, mode='w', encoding='utf-8')\n output.write('\\n'.join(linesout))\n output.close()\n pass",
"def write_bool_to_xml(self, attribute_name, xmlwriter):\r\n xmlwriter.WriteStartElement(attribute_name)\r\n xmlwriter.WriteValue(getattr(self, attribute_name))\r\n xmlwriter.WriteEndElement()",
"def write_dictionary():\n for dictionary in dictionaries:\n for values in dictionary.values():\n with open(sys.argv[1] + \"-1\", \"ab\") as dest_file:\n dest_file.write(values)",
"def save_to_file():\n dict_from_file.update(temp_dict)\n plik=open('data.txt', 'w')\n for key in dict_from_file.keys():\n plik.write(key)\n plik.write(\" \")\n plik.write(str(dict_from_file[key][0]))\n plik.write(' ')\n plik.write(dict_from_file[key][1].replace(' ','_'))\n plik.write(' ')\n plik.write(str(dict_from_file[key][2]))\n plik.write('\\n')",
"def print_xml_config(config_dictionary,**kwargs):\n \n #Check if we have passed a filename\n #If not, pass a default filename\n if 'config_file' in kwargs:\n config_file = kwargs['config_file']\n else:\n config_file = 'ebtel_config.xml'\n \n #Open the file\n f = open(config_file,'w')\n \n #Print necessary header info\n f.write('<?xml version=\"1.0\" ?>\\n')\n f.write('<input>\\n')\n\n #Loop through dictionary and print to xml file\n for key in config_dictionary:\n #Print tab delimiter, brackets and keyword\n f.write('\\t<')\n f.write(key)\n f.write('>')\n #Check if entry is a list\n #If so print it as a list\n if isinstance(config_dictionary[key],list) or type(config_dictionary[key]).__name__ == 'ndarray':\n #Make temporary list\n temp = config_dictionary[key]\n #Skip to new line\n f.write('\\n')\n #Begin loop over list\n for i in range(len(config_dictionary[key])):\n f.write('\\t\\t<')\n f.write(key+str(i))\n f.write('>')\n f.write(str(temp[i]))\n f.write('</')\n f.write(key+str(i))\n f.write('>\\n')\n #Print additional tab to preserve alignment\n f.write('\\t')\n else:\n #Print value\n f.write(str(config_dictionary[key]))\n #Close the brackets and print newline\n f.write('</')\n f.write(key)\n f.write('>\\n')\n \n #Close the main node of the file\n f.write('</input>')\n \n #Close the file\n f.close()",
"def xmlwrite(self, doc, filename):\n pathname = os.path.join(self.session.session_dir, filename)\n f = open(pathname, \"w\")\n doc.writexml(writer=f, indent=\"\", addindent=\" \", newl=\"\\n\", encoding=\"UTF-8\")\n f.close()",
"def save_to_xml(self, xwriter):\r\n\r\n xwriter.WriteStartElement(\"Profile\")\r\n xwriter.WriteAttributeString(\"Name\", self.Name)\r\n xwriter.WriteStartAttribute(\"Version\")\r\n xwriter.WriteValue(self.Version)\r\n xwriter.WriteEndAttribute()\r\n\r\n for var_name in self.__dict__:\r\n var_type = type(getattr(self, var_name))\r\n\r\n if var_type is str and var_name != \"Name\":\r\n self.write_string_to_xml(var_name, xwriter)\r\n\r\n elif var_type is bool:\r\n self.write_bool_to_xml(var_name, xwriter)\r\n\r\n elif var_type is dict:\r\n self.write_dict_to_xml(var_name, xwriter)\r\n\r\n elif var_type is list and var_name != \"ExcludeRules\":\r\n self.write_list_to_xml(var_name, xwriter)\r\n\r\n xwriter.WriteStartElement(\"ExcludeRules\")\r\n xwriter.WriteAttributeString(\"Operator\", self.ExcludeOperator)\r\n xwriter.WriteAttributeString(\"ExcludeMode\", self.ExcludeMode)\r\n for rule in self.ExcludeRules:\r\n if rule:\r\n rule.save_xml(xwriter)\r\n xwriter.WriteEndElement()\r\n \r\n xwriter.WriteEndElement()",
"def WriteFile(self, filename) :\n\n # open file for writing:\n f = open(filename, 'w')\n\n ## loop over key/value pairs:\n #for k,v in self.iteritems():\n # # add line; at least the specified number of characters \n # # is used for the key:\n # f.write( '%-20s:%s\\n' % (k,v) )\n ##endfor\n\n # write processed input:\n f.writelines(self.outfile)\n \n # close file:\n f.close()",
"def GdictWrite(filename=\"turtle_docstringdict\"):\n docsdict = {}\n\n for methodname in gScreenFunc:\n key = \"_Screen.\"+methodname\n docsdict[key] = eval(key).__doc__\n for methodname in gMoveFunc:\n key = \"Myturtle.\"+methodname\n docsdict[key] = eval(key).__doc__\n\n with open(\"%s.py\" % filename,\"w\") as f:\n keys = sorted(x for x in docsdict\n if x.split('.')[1] not in _alias_list)\n f.write('docsdict = {\\n\\n')\n for key in keys[:-1]:\n f.write('%s :\\n' % repr(key))\n f.write(' \"\"\"%s\\n\"\"\",\\n\\n' % docsdict[key])\n key = keys[-1]\n f.write('%s :\\n' % repr(key))\n f.write(' \"\"\"%s\\n\"\"\"\\n\\n' % docsdict[key])\n f.write(\"}\\n\")\n f.close()",
"def write(self, fp, **kwds):\n json.dump(self._dict, fp)",
"def xmlWrite(self, xmlWriter, font, value, name, attrs):\n raise NotImplementedError(self)",
"def save(self):\n if not os.path.exists(self.dictionary_save_path) and self.dictionary_save_path != \"\":\n os.makedirs(self.dictionary_save_path)\n with open(self.dictionary_save_path + \"dictionary\", 'w') as f:\n f.write(\"{}\\n\".format(len(self.dictionary)))\n\n for word in self.dictionary:\n f.write(\"{}\\t{}\\n\".format(self.dictionary[word]['id'], word))"
] | [
"0.6729531",
"0.65415585",
"0.6262941",
"0.6016264",
"0.5983849",
"0.5939113",
"0.58700234",
"0.58539236",
"0.5813194",
"0.5684836",
"0.56377923",
"0.56335074",
"0.5623472",
"0.56098765",
"0.55822074",
"0.5524061",
"0.5433924",
"0.5403624",
"0.5400973",
"0.53912425",
"0.5356394",
"0.5355461",
"0.53117216",
"0.5308995",
"0.5303472",
"0.52977353",
"0.5287409",
"0.525638",
"0.52556443",
"0.5217364"
] | 0.80239266 | 0 |
Writes a list to an xml file in the form of value value etc. attribute_name>The name of the list attribute to write. xmlwriter>The xml writer to write with. write_empty>A bool of whether to write empty values to the xml file. Default is don't write them. | def write_list_to_xml(self, attribute_name, xmlwriter, write_empty=False):
    attribute_list = getattr(self, attribute_name)
    xmlwriter.WriteStartElement(attribute_name)
    for item in attribute_list:
        if item or write_empty:
            xmlwriter.WriteElementString("Item", item)
    xmlwriter.WriteEndElement() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write(lst):\n # TODO",
"def write(self, data, filename):\n id_ = 1\n weightlist_el = Element('weight-list')\n for dataset in data:\n weight_el = SubElement(weightlist_el, 'weight')\n id_el = SubElement(weight_el, 'id')\n id_el.text = str(id_)\n date_el = SubElement(weight_el, 'date')\n date_el.text = str(dataset.date) + 'T12:00:00'\n value_el = SubElement(weight_el, 'value')\n value_el.text = str(dataset.weight)\n comment_el = SubElement(weight_el, 'comment')\n comment_el.text = dataset.note\n id_ += 1\n st_tree = ElementTree(weightlist_el)\n st_tree.write(filename, encoding='UTF-8')",
"def write_list(outputfilename, list):\r\n try:\r\n with open(outputfilename, 'w', newline='', encoding='utf-8') as outfile:\r\n itemwriter = csv.writer(outfile, delimiter=\",\")\r\n for item in list:\r\n itemwriter.writerow(item)\r\n except:\r\n input(\"File still open! Please close and press enter to continue\")\r\n with open(outputfilename, 'w', newline='', encoding='utf-8') as outfile:\r\n itemwriter = csv.writer(outfile, delimiter=\",\")\r\n for item in list:\r\n itemwriter.writerow(item)",
"def write_list_to_file(myList, filename):\r\n\r\n with open(filename, \"w\") as outfile:\r\n for entries in myList:\r\n outfile.write(entries)\r\n\t\t\t# add a return after each line\r\n outfile.write(\"\\n\")",
"def write_list(args, file_list):\n if not args.listfile.endswith(\".txt\"):\n args.listfile += \".txt\"\n outputfile = open(args.listfile, 'w')\n for name in file_list:\n outputfile.write(name)\n outputfile.write(\"\\n\")\n outputfile.close()",
"def writexml(file):\n OUTFILE=open(file,\"w\")\n doc = xml.dom.minidom.Document()\n\n # Create the <dec_reg_list> base element\n decl_reg_list = doc.createElement(\"decl_reg_list\")\n doc.appendChild(decl_reg_list)\n\n regname_old=\"\"\n rows.pop(0)\n for row in rows:\n (regdesc,regname,offset,default,regtype,expose_reg,depth,incsz,bitdesc,bitname,loc,bittype)= row\n if regname != regname_old:\n # Create the register element\n register = doc.createElement(\"register\")\n register.setAttribute(\"name\", regname)\n register.setAttribute(\"offset\", offset)\n if default != \"\" : register.setAttribute(\"default\", default)\n register.setAttribute(\"type\", regtype)\n if expose_reg == \"1\": register.setAttribute(\"usr\", expose_reg)\n if depth != \"\": register.setAttribute(\"size\", depth)\n if incsz != \"\": register.setAttribute(\"incsz\", incsz)\n text = doc.createTextNode(regdesc)\n register.appendChild(text)\n decl_reg_list.appendChild(register)\n \n # Create the field element\n if bitname != \"\":\n field = doc.createElement(\"field\")\n field.setAttribute(\"name\", bitname)\n if loc !=\"\": field.setAttribute(\"loc\", addcolon(loc))\n if bittype != \"\": field.setAttribute(\"type\", bittype)\n if bitdesc != \"\":\n text = doc.createTextNode(bitdesc)\n field.appendChild(text)\n register.appendChild(field)\n regname_old = regname\n\n\n # Print our newly created XML\n #print doc.toprettyxml(indent=\" \")\n #OUTFILE.write(doc.saveXML(decl_reg_list))\n OUTFILE.write(doc.toprettyxml(indent=\" \"))\n OUTFILE.close()",
"def write_dict_to_xml(self, attribute_name, xmlwriter, write_empty=False):\r\n if attribute_name in (\"IllegalCharacters\", \"Months\"):\r\n write_empty = True\r\n dictionary = getattr(self, attribute_name)\r\n xmlwriter.WriteStartElement(attribute_name)\r\n for key in dictionary:\r\n if dictionary[key] or write_empty:\r\n xmlwriter.WriteStartElement(\"Item\")\r\n xmlwriter.WriteStartAttribute(\"Name\")\r\n xmlwriter.WriteValue(key)\r\n xmlwriter.WriteEndAttribute()\r\n xmlwriter.WriteStartAttribute(\"Value\")\r\n xmlwriter.WriteValue(dictionary[key])\r\n xmlwriter.WriteEndAttribute()\r\n xmlwriter.WriteEndElement()\r\n xmlwriter.WriteEndElement()",
"def save_list_to_file(the_list, filepath):\n with open(filepath, 'w') as file_handler:\n for item in the_list:\n file_handler.write(\"{}\\n\".format(item))",
"def export_list_to_xacro(list, filename):\n global robot, OUTPUT\n doc = Document()\n root = doc.createElement('robot')\n doc.appendChild(root)\n root.setAttribute(\"xmlns:xacro\", \"http://www.ros.org/wiki/xacro\")\n print ('exporting ' + os.path.basename(filename))\n for string in list:\n for link in robot.links:\n if robot.links[link].name.find(string) != -1:\n root.appendChild(robot.links[link].to_xml(doc))\n for joint in robot.joints:\n if robot.joints[joint].child == robot.links[link].name:\n root.appendChild(robot.joints[joint].to_xml(doc))\n write_comments_in_xacro(doc, filename)",
"def write_list_to_file(file_name: str, list_name: List[str]):\n # Write to a file, overwriting the old contents\n file = open(file_name, 'w')\n\n # Loop through the list, append a newline character to each line\n for item in list_name:\n file.writelines(item + '\\n')\n\n # Close the file\n file.close()",
"def write(file_path, tag_list, attr_name='kMDItemUserTags'):\n tag_data = ['<string>{}</string>'.format(tag) for tag in tag_list]\n tag_data.insert(0, ('<!DOCTYPE plist PUBLIC'\n '\"-//Apple//DTD PLIST 1.0//EN\"'\n '\"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">'\n '<plist version=\"1.0\"><array>'))\n tag_data.append('</array></plist>')\n tag_text = ''.join(tag_data)\n\n xattr = \"com.apple.metadata:{}\".format(attr_name)\n # Other attributes you might want to try:\n # ['kMDItemOMUserTags', 'kOMUserTags',\n # 'kMDItemkeywords', 'kMDItemFinderComment']\n cmd = ['xattr',\n '-w',\n xattr,\n tag_text.encode(\"utf8\"),\n file_path]\n return utils.run_process(cmd)",
"def write_list(l, fname):\n thefile = open(fname, \"w\")\n for line in l:\n thefile.write(\"%s\\n\" % line)\n thefile.close()",
"def write_list(self, register, data):\n raise NotImplementedError",
"def write_into_file(name, liste):\n file = open(name, \"w\")\n for item in liste:\n file.write(item)\n file.write('\\n')\n file.close()",
"def write_list(self):\n with open(self.path, 'w') as file:\n for i in map(self.addziros, range(1, int(str(1) + self.number_length * '0') + 1)):\n file.write(i + '\\n')\n file.close()",
"def createXMLFile(list, stock_symbol, market):\n \n stock = ET.Element(\"stock\")\n \n stock.set(\"source\", 'yahoo finance')\n exchange = ET.SubElement(stock, \"exchange\")\n exchange.set(\"market\", market)\n \n for s in list: \n \n if s.array[0] == 'Date' or list[0].array[0] != 'Date':\n continue\n dividend_date = ET.SubElement(exchange, \"dividend_date\")\n dividend_date.set(\"date\", s.array[0])\n \n price = ET.SubElement(dividend_date, \"price\")\n price.text = s.array[1]\n \n \n indent(stock)\n tree = ET.ElementTree(stock)\n \n tree.write(\"dividend_history.xml\", xml_declaration=True, encoding='utf-8', method=\"xml\")\n print 'xml created for ' + stock_symbol",
"def __writeToFile(self, filePath, lst): \n \n if not self.outDir is None: \n filePath = os.path.join(self.outDir, filePath) \n \n open(filePath,'a').writelines(lst)",
"def write_list_to_file(input_list, output_folder, delimiter=\" \", header=None):\n with open(output_folder, 'w') as doc_out:\n if header:\n doc_out.write(delimiter.join(header) + \"\\n\")\n for element in input_list:\n doc_out.write(delimiter.join([str(i) for i in element]) + \"\\n\")",
"def write_to_xml(filename, xmlelement):\n xmlpretty = prettify(xmlelement)\n with open(filename, 'w') as f:\n f.write(xmlpretty)",
"def write_list_to_file(program, list_to_write):\n with open(program.split('.')[0] + \".output.json\", 'a+') as output_file:\n output_file.write(json.dumps(list_to_write, indent=3, sort_keys=False))",
"def write(self):\r\n for prop in self.prpnames:\r\n elem = SubElement(self._root, prop)\r\n data = self.__getattribute__(prop)\r\n if self.prpnames[prop]['type'] == \"text\":\r\n elem.text = data\r\n elif self.prpnames[prop]['type'] == 'list':\r\n for x in data:\r\n SubElement(elem, 'regel').text = x\r\n elif self.prpnames[prop]['type'] == 'attr':\r\n elem.set(self.prpnames[prop]['naam'], data)\r\n tree = ElementTree(self._root)\r\n tree.write(self._fn)\r\n if not self.exists:\r\n self.exists = True",
"def toXML( self, indent = '', **kwargs ) :\n\n return( '\\n'.join( self.toXMLList( **kwargs ) ) )",
"def write_package_list(_, package_list, *args):\n logger.debug(\"Attempting to write package list\")\n try:\n with open(Base.get_package_list_filepath(), \"w\") as file:\n yaml.dump(package_list, file, sort_keys=True)\n except:\n logger.exception(\"Could not write package list\")\n exit(1)\n logger.debug(\"Packages written to file\")",
"def writexml(self, writer, indent=\"\", add_indent=\"\", new_line=\"\"):\n writer.write(indent + \"<\" + self.tagName)\n attrs = self._get_attributes()\n\n for a_name in attrs.keys():\n writer.write(\" %s=\\\"\" % a_name)\n self.write_data(writer, attrs[a_name].value)\n writer.write(\"\\\"\")\n if self.childNodes:\n writer.write(\">\")\n if len(self.childNodes) == 1 and self.childNodes[0].nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):\n self.childNodes[0].writexml(writer, '', '', '')\n else:\n writer.write(new_line)\n for node in self.childNodes:\n node.writexml(writer, indent + add_indent, add_indent, new_line)\n writer.write(indent)\n writer.write(\"</%s>%s\" % (self.tagName, new_line))\n else:\n writer.write(\"/>%s\" % new_line)",
"def write_2D_list(self, list_name, statistics):\n filename = os.getcwd() + list_name + \".csv\"\n print(filename)\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n with open(filename, mode='w+', newline='',encoding='utf8') as list_file:\n list_writer = csv.writer(list_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n for item in statistics:\n list_writer.writerow(item)",
"def save_to_xml(self, xwriter):\r\n\r\n xwriter.WriteStartElement(\"Profile\")\r\n xwriter.WriteAttributeString(\"Name\", self.Name)\r\n xwriter.WriteStartAttribute(\"Version\")\r\n xwriter.WriteValue(self.Version)\r\n xwriter.WriteEndAttribute()\r\n\r\n for var_name in self.__dict__:\r\n var_type = type(getattr(self, var_name))\r\n\r\n if var_type is str and var_name != \"Name\":\r\n self.write_string_to_xml(var_name, xwriter)\r\n\r\n elif var_type is bool:\r\n self.write_bool_to_xml(var_name, xwriter)\r\n\r\n elif var_type is dict:\r\n self.write_dict_to_xml(var_name, xwriter)\r\n\r\n elif var_type is list and var_name != \"ExcludeRules\":\r\n self.write_list_to_xml(var_name, xwriter)\r\n\r\n xwriter.WriteStartElement(\"ExcludeRules\")\r\n xwriter.WriteAttributeString(\"Operator\", self.ExcludeOperator)\r\n xwriter.WriteAttributeString(\"ExcludeMode\", self.ExcludeMode)\r\n for rule in self.ExcludeRules:\r\n if rule:\r\n rule.save_xml(xwriter)\r\n xwriter.WriteEndElement()\r\n \r\n xwriter.WriteEndElement()",
"def write_list_to_file(ls, save_path):\n # Open in appendation mode given that this function may be called multiple\n # times on the same file (positive and negative sentiment are in separate\n # directories).\n out_file = open(save_path, \"w+\")\n for example in ls:\n out_file.write(example)\n out_file.write('\\n')",
"def writeXml(self):\n curdir = os.getcwd()\n os.chdir(self.Imagedir)\n allImageLists = [self.sciImlist, self.ctxImlist, self.wgtImlist, self.rmsImlist]\n \n for imlist in allImageLists:\n for im in imlist:\n file = xmlUtil.markupImage(im,dataset=self.obsName)\n \n # Don't write these images as output of this module, which\n # really doesn't have any.\n \n #if file not in self.outputList.keys():\n # self.outputList[file] = [im]\n \n os.chdir(curdir)\n return",
"def write_string_to_xml(self, attribute_name, xmlwriter, write_empty=True):\r\n string = getattr(self, attribute_name)\r\n if string or write_empty:\r\n xmlwriter.WriteElementString(attribute_name, string)",
"def write_bool_to_xml(self, attribute_name, xmlwriter):\r\n xmlwriter.WriteStartElement(attribute_name)\r\n xmlwriter.WriteValue(getattr(self, attribute_name))\r\n xmlwriter.WriteEndElement()"
] | [
"0.6497926",
"0.6048247",
"0.60110986",
"0.5987103",
"0.59648246",
"0.5957363",
"0.5939886",
"0.57804435",
"0.5734696",
"0.57158196",
"0.5674186",
"0.5656518",
"0.56491786",
"0.55900544",
"0.55637085",
"0.55263245",
"0.546913",
"0.54400545",
"0.54337585",
"0.54084957",
"0.53953457",
"0.5391533",
"0.5352481",
"0.5325219",
"0.53213125",
"0.5320877",
"0.5298301",
"0.52947754",
"0.5280493",
"0.5278833"
] | 0.8491795 | 0 |
Writes a string to an xml file in the form of string attribute_name>The name of the string attribute to write. xmlwriter>The xml writer to write with. write_empty>A bool of whether to write empty strings to the xml file. Default is write empty strings. | def write_string_to_xml(self, attribute_name, xmlwriter, write_empty=True):
    string = getattr(self, attribute_name)
    if string or write_empty:
        xmlwriter.WriteElementString(attribute_name, string) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_write_string():\n buf = make_buffer()\n writer = XmlWriter(buf)\n writer.write_element('value', 'myvalue')\n writer.flush()\n assert_equals(decode_buffer(buf), '<value>myvalue</value>')",
"def write_dict_to_xml(self, attribute_name, xmlwriter, write_empty=False):\r\n if attribute_name in (\"IllegalCharacters\", \"Months\"):\r\n write_empty = True\r\n dictionary = getattr(self, attribute_name)\r\n xmlwriter.WriteStartElement(attribute_name)\r\n for key in dictionary:\r\n if dictionary[key] or write_empty:\r\n xmlwriter.WriteStartElement(\"Item\")\r\n xmlwriter.WriteStartAttribute(\"Name\")\r\n xmlwriter.WriteValue(key)\r\n xmlwriter.WriteEndAttribute()\r\n xmlwriter.WriteStartAttribute(\"Value\")\r\n xmlwriter.WriteValue(dictionary[key])\r\n xmlwriter.WriteEndAttribute()\r\n xmlwriter.WriteEndElement()\r\n xmlwriter.WriteEndElement()",
"def write(self, str: str, /) -> None:",
"def write_output_file(self, xml_text, xml_file):\n xml_fo = open(xml_file, 'w')\n xml_fo.write(xml_text+'</xml>')\n xml_fo.close()\n return",
"def write(self):\n temp_string = minidom.parseString(ET.tostring(self.root)).toprettyxml(encoding=\"UTF-8\")\n with open(self.xml_file, 'w') as f:\n f.write(temp_string)\n # f = open(self.xml_file, \"w\")\n # f.write(temp_string)\n # f.close()",
"def write_to_xml(filename, xmlelement):\n xmlpretty = prettify(xmlelement)\n with open(filename, 'w') as f:\n f.write(xmlpretty)",
"def xmlWrite(self, xmlWriter, font, value, name, attrs):\n raise NotImplementedError(self)",
"def write_xml(self, filepath=None, escapeNewline=True, indent=False):\n\n if not filepath:\n filepath = self.mFilePath\n\n if indent:\n self.indent(self.tree.getroot())\n\n output = StringIO()\n\n self.tree.write(output, encoding=\"UTF-8\")\n\n outFile = open(filepath, \"w\")\n if escapeNewline:\n # we need to make sure newline 
 is written correctly\n print >> outFile, re.sub(\"###newline_escape###\", \"
\", output.getvalue())\n else:\n print >> outFile, output.getvalue()\n\n outFile.close",
"def writeString(self, str):\n ret = libxml2mod.xmlOutputBufferWriteString(self._o, str)\n return ret",
"def writeFile(self, filename):\n s = ET.tostring(self._root)\n\n #Remove all formatting\n s = s.replace('\\n','')\n s = s.replace('\\t','')\n s = s.replace('\\r','')\n\n f = open(filename, 'w')\n f.write(minidom.parseString(s).toprettyxml())\n f.close()",
"def writetree(self, string):\n self.treebuf.write(string)\n if self.animal != None:\n self.animal.writetree(string)",
"def exportXml ( w, xml ):\n assert str ( type ( xml ) ) == \"<type 'str'>\"\n rawText = xml\n pattern = re.compile (r'[^\\S ]+')\n text = re.sub ( pattern, \"\", rawText )\n reparsed = MD.parseString ( text )\n w.write ( reparsed.toprettyxml ( indent = \"\\t\", encoding = \"UTF-8\" ) )",
"def write_file(name_file, string):\n with open(name_file, 'w') as file:\n file.write(string)",
"def test_write_defaults(self):\n xml = (\n u'<?xml version=\"1.0\" encoding=\"utf-8\"?>'\n u'<DocRoot>'\n u'<Elem1>默认جذ</Elem1>'\n u'<Elem2/>'\n u'</DocRoot>'\n )\n io_string = six.StringIO()\n self.builder.write_doc(io_string)\n if six.PY2:\n self.assertEqual(xml.encode('utf-8'), io_string.getvalue())\n else:\n self.assertEqual(xml, io_string.getvalue())",
"def write_list_to_xml(self, attribute_name, xmlwriter, write_empty=False):\r\n attribute_list = getattr(self, attribute_name)\r\n xmlwriter.WriteStartElement(attribute_name)\r\n for item in attribute_list:\r\n if item or write_empty:\r\n xmlwriter.WriteElementString(\"Item\", item)\r\n xmlwriter.WriteEndElement()",
"def write_bool_to_xml(self, attribute_name, xmlwriter):\r\n xmlwriter.WriteStartElement(attribute_name)\r\n xmlwriter.WriteValue(getattr(self, attribute_name))\r\n xmlwriter.WriteEndElement()",
"def writexml(self, writer, indent=\"\", add_indent=\"\", new_line=\"\"):\n writer.write(indent + \"<\" + self.tagName)\n attrs = self._get_attributes()\n\n for a_name in attrs.keys():\n writer.write(\" %s=\\\"\" % a_name)\n self.write_data(writer, attrs[a_name].value)\n writer.write(\"\\\"\")\n if self.childNodes:\n writer.write(\">\")\n if len(self.childNodes) == 1 and self.childNodes[0].nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):\n self.childNodes[0].writexml(writer, '', '', '')\n else:\n writer.write(new_line)\n for node in self.childNodes:\n node.writexml(writer, indent + add_indent, add_indent, new_line)\n writer.write(indent)\n writer.write(\"</%s>%s\" % (self.tagName, new_line))\n else:\n writer.write(\"/>%s\" % new_line)",
"def write(self, string):\n self.__file.write(string)",
"def writeFile(string_to_write: str, outfile: str):\n with open(outfile, \"w\") as f:\n f.write(string_to_write)",
"def xmlwrite(self, doc, filename):\n pathname = os.path.join(self.session.session_dir, filename)\n f = open(pathname, \"w\")\n doc.writexml(writer=f, indent=\"\", addindent=\" \", newl=\"\\n\", encoding=\"UTF-8\")\n f.close()",
"def _write_to_file(self, string):\n with open(self.p.base_dir + '/' + self.p.filename, 'w') as f:\n f.write(string)",
"def write_file(filename, string):\n import sys\n #ugly fix, hopefully we can find a better one\n if sys.version_info[0] >= 3:\n with open(filename, 'w', encoding=\"utf-8\") as f:\n f.write(string)\n else:\n with open(filename, 'w') as f:\n f.write(string.encode(\"utf-8\"))",
"def write(writer: BitStreamWriter, value: str) -> None:\n\n writer.writeString(value)",
"def WriteStringToFile(string, filepath):\n with open(filepath, 'w') as file_handle:\n file_handle.write(string)",
"def XMLWrite(one, two, three, four, five, six, seven, eight):\n filePath = \"/mnt/RAM/kanban.xml\"\n xmlFile = open(filePath, 'w')\n\n xmlFile.write('<kanbanShelf>\\n')\n xmlFile.write(' <one>%s</one>\\n' % one)\n xmlFile.write(' <two>%s</two>\\n' % two)\n xmlFile.write(' <three>%s</three>\\n' % three)\n xmlFile.write(' <four>%s</four>\\n' % four)\n xmlFile.write(' <five>%s</five>\\n' % five)\n xmlFile.write(' <six>%s</six>\\n' % six)\n xmlFile.write(' <seven>%s</seven>\\n' % seven)\n xmlFile.write(' <eight>%s</eight>\\n' % eight)\n xmlFile.write('</kanbanShelf>')",
"def XMLWriter(\n fd,\n encoding=\"utf-8\",\n pretty=True,\n compactempty=True,\n indentation=_DEFAULT_INDENTATION\n):\n return _document(fd, encoding, pretty, compactempty, indentation)",
"def write(self, filename):\n with open(filename, \"w\") as f:\n f.write(self.get_string())",
"def write(self, filename):\n with open(filename, \"w\") as f:\n f.write(self.get_string())",
"def save(string, file):\n\n save_file = open(file, 'w')\n save_file.write(string)\n save_file.close()",
"def save(self, save_path=None):\n if self._xml is None:\n raise IOError(\"There's nothing to save\")\n\n path = self._path_to_xml if save_path is None else save_path\n\n with open(path, 'w') as f:\n rough_string = Et.tostring(self._xml, 'utf-8')\n par = etree.XMLParser(remove_blank_text=True)\n elem = etree.XML(rough_string, parser=par)\n parsed = minidom.parseString(etree.tostring(elem))\n f.write(parsed.toprettyxml(indent=\" \"))"
] | [
"0.6247003",
"0.58558315",
"0.5849161",
"0.57865465",
"0.5765893",
"0.5722448",
"0.5617337",
"0.5580295",
"0.55794567",
"0.554683",
"0.55136317",
"0.55108947",
"0.54859346",
"0.54719436",
"0.5470503",
"0.54250485",
"0.53961504",
"0.5380822",
"0.5374993",
"0.53526664",
"0.5339592",
"0.5307603",
"0.529733",
"0.52938664",
"0.5293728",
"0.523522",
"0.52173513",
"0.52173513",
"0.5216224",
"0.5183375"
] | 0.8222901 | 0 |
Writes a boolean to an xml file in the form of true/false attribute_name>The name of the attribute to write. xmlwriter>The xml writer to write with. | def write_bool_to_xml(self, attribute_name, xmlwriter):
    xmlwriter.WriteStartElement(attribute_name)
    xmlwriter.WriteValue(getattr(self, attribute_name))
    xmlwriter.WriteEndElement() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeAttribute(self, *args):\n if type(args[1]) == type(True): return _libsbml.XMLOutputStream_writeAttributeBool(self, *args)\n\n\n return _libsbml.XMLOutputStream_writeAttribute(self, *args)",
"def writeAttributeBool(self, *args):\n return _libsbml.XMLOutputStream_writeAttributeBool(self, *args)",
"def write(writer: BitStreamWriter, value: bool) -> None:\n\n writer.writeBool(value)",
"def write_boolean(self, boolean: bool) -> None:\n self.write(bytearray([bool(boolean)]))",
"def _writeBool(self, val):\n self.__writeValue(self.boolFormat, val)",
"def writeBoolean(self, value: bool):\n self.writeByte(1 if value else 0)",
"def write_bool(self, b: bool) -> None:\n self.buffer += struct.pack(\"<?\", b)",
"def bool_attr(attr):\n if attr.lower() == \"true\":\n val = True\n elif attr.lower() == \"false\":\n val = False\n else:\n raise EzXMLError(\"Must be \"\\\n \"'true' or 'false'. Not %s\" % (attr))\n return val",
"def htmlIsBooleanAttr(name):\n ret = libxml2mod.htmlIsBooleanAttr(name)\n return ret",
"def set(self, attr, value=True):\n if type(value) == bool:\n self.__dict__['_'+attr] = value\n print attr, \"set to\", value\n else:\n print 'Value must be a bool, either \"True\" or \"False\" (no quotes)!'",
"def set_boolean(x):\n\n if x:\n return \"True\"\n else:\n return \"False\"",
"def set_boolean(dict, name, elem):\n node = elem.find(name)\n if node is not None:\n if node.text.lower() == 'yes':\n dict[name] = True\n elif node.text.lower() == 'no':\n dict[name] = False",
"def write_xml(self, xmlfile):\n system.xml.write_file(xmlfile, self.status, 'status')",
"def saveState(self):\n e = xml.Element(self.type)\n e.attrib['lastUpdate'] = str(clock.now())\n e.attrib['name'] = self.name\n #e.attrib['status'] = ('true' if self.status else 'false')\n return e",
"def write_string_to_xml(self, attribute_name, xmlwriter, write_empty=True):\r\n string = getattr(self, attribute_name)\r\n if string or write_empty:\r\n xmlwriter.WriteElementString(attribute_name, string)",
"def update_bool(file_path):\n with open(\n file_path, 'r'\n ) as the_result_file_from_spark_for_read_and_abbr_not_allowed_by_pylint:\n content = the_result_file_from_spark_for_read_and_abbr_not_allowed_by_pylint.read(\n )\n update = content.replace('true', 'True').replace('false', 'False')\n with open(\n file_path,\n 'w') as the_result_file_from_spark_for_write_and_abbr_not_allowed:\n the_result_file_from_spark_for_write_and_abbr_not_allowed.write(update)",
"def set_bool_attribute(self, id: str, b: Optional[bool]):\n self.set_attribute(id, None if not b else ConstInt(1))",
"def xmlWrite(self, xmlWriter, font, value, name, attrs):\n raise NotImplementedError(self)",
"def boolean_function(bool_variable):\n\tif bool_variable:\n\t\treturn \"The boolean variable is True\"\n\telse:\n\t\treturn \"The boolean variable is False\"",
"def write_flag(path): \r\n f = open(path, \"r+\")\r\n line = f.readlines()\r\n line[1] = 'flag = 1'\r\n s=''.join(line) \r\n f.seek(0)\r\n f.write(s)\r\n f.close()",
"def get_xml_bool_attribute(elem, attribute, default=None):\n value = elem.get(attribute, default)\n if value is None:\n raise XMLSchemaKeyError(attribute)\n elif value in ('true', '1') or value is True:\n return True\n elif value in ('false', '0') or value is False:\n return False\n else:\n raise XMLSchemaTypeError(\"an XML boolean value is required for attribute %r\" % attribute)",
"def save_to_xml(self, xwriter):\r\n\r\n xwriter.WriteStartElement(\"Profile\")\r\n xwriter.WriteAttributeString(\"Name\", self.Name)\r\n xwriter.WriteStartAttribute(\"Version\")\r\n xwriter.WriteValue(self.Version)\r\n xwriter.WriteEndAttribute()\r\n\r\n for var_name in self.__dict__:\r\n var_type = type(getattr(self, var_name))\r\n\r\n if var_type is str and var_name != \"Name\":\r\n self.write_string_to_xml(var_name, xwriter)\r\n\r\n elif var_type is bool:\r\n self.write_bool_to_xml(var_name, xwriter)\r\n\r\n elif var_type is dict:\r\n self.write_dict_to_xml(var_name, xwriter)\r\n\r\n elif var_type is list and var_name != \"ExcludeRules\":\r\n self.write_list_to_xml(var_name, xwriter)\r\n\r\n xwriter.WriteStartElement(\"ExcludeRules\")\r\n xwriter.WriteAttributeString(\"Operator\", self.ExcludeOperator)\r\n xwriter.WriteAttributeString(\"ExcludeMode\", self.ExcludeMode)\r\n for rule in self.ExcludeRules:\r\n if rule:\r\n rule.save_xml(xwriter)\r\n xwriter.WriteEndElement()\r\n \r\n xwriter.WriteEndElement()",
"def validate_boolean_attribute(tag, attribute_name, attribute_value):\n if not attribute_value:\n return\n\n if not isinstance(attribute_value, bool):\n raise AttributeError('<{tag}>: {attribute_name} attribute should be a '\n 'boolean value.'\n .format(tag=tag, attribute_name=attribute_name))",
"def bool_to_on_off(boolean: bool):\n if boolean:\n return \"on\"\n return \"off\"",
"def xmlwrite(self, doc, filename):\n pathname = os.path.join(self.session.session_dir, filename)\n f = open(pathname, \"w\")\n doc.writexml(writer=f, indent=\"\", addindent=\" \", newl=\"\\n\", encoding=\"UTF-8\")\n f.close()",
"def get_boolean_attribute_value(attrs, attr_name):\n return 1 if attrs.get(attr_name, 0) in [\"True\", \"1\"] else 0",
"def convertToString(boolean: bool) -> str:\n ...",
"def boolean(self, state, label=None):\n self.savepos()\n label = self._colorize(label, fg = \"base0\")\n\n msg = (self._colorize(\"☑\", fg = \"green\") if state else self._colorize(\"☒\", fg = \"red\")) + \" \" + label\n\n self.out.write(msg)\n self.restorepos()",
"def XMLWrite(self, one, two, three, four, five, six, seven, eight):\n filePath = \"/mnt/RAM/kanban.xml\"\n xmlFile = open(filePath, 'w')\n \n xmlFile.write('<kanban>\\n')\n xmlFile.write(' <n1>%s</n1>\\n' % one)\n xmlFile.write(' <n2>%s</n2>\\n' % two)\n xmlFile.write(' <n3>%s</n3>\\n' % three)\n xmlFile.write(' <n4>%s</n4>\\n' % four)\n xmlFile.write(' <n5>%s</n5>\\n' % five)\n xmlFile.write(' <n6>%s</n6>\\n' % six)\n xmlFile.write(' <n7>%s</n7>\\n' % seven)\n xmlFile.write(' <n8>%s</n8>\\n' % eight)\n xmlFile.write('</kanban>')",
"def XMLWrite(one, two, three, four, five, six, seven, eight):\n filePath = \"/mnt/RAM/kanban.xml\"\n xmlFile = open(filePath, 'w')\n\n xmlFile.write('<kanbanShelf>\\n')\n xmlFile.write(' <one>%s</one>\\n' % one)\n xmlFile.write(' <two>%s</two>\\n' % two)\n xmlFile.write(' <three>%s</three>\\n' % three)\n xmlFile.write(' <four>%s</four>\\n' % four)\n xmlFile.write(' <five>%s</five>\\n' % five)\n xmlFile.write(' <six>%s</six>\\n' % six)\n xmlFile.write(' <seven>%s</seven>\\n' % seven)\n xmlFile.write(' <eight>%s</eight>\\n' % eight)\n xmlFile.write('</kanbanShelf>')"
] | [
"0.74465925",
"0.73603874",
"0.7117586",
"0.67680955",
"0.66733044",
"0.63848156",
"0.62347335",
"0.59897983",
"0.591663",
"0.5848001",
"0.57468945",
"0.564458",
"0.5640521",
"0.5636943",
"0.561803",
"0.5609525",
"0.5577795",
"0.557284",
"0.55520767",
"0.5486341",
"0.54855955",
"0.5472363",
"0.5398986",
"0.5393929",
"0.53881365",
"0.53400695",
"0.53359395",
"0.5328683",
"0.53096366",
"0.530688"
] | 0.8478797 | 0 |
Load profiles from a xml file. If no profiles are found it creates a blank profile. file_path>The absolute path to the profile file Returns a dict of the found profiles and a list of the lastused profile(s) | def load_profiles(file_path):
    profiles, lastused = load_profiles_from_file(file_path)
    if len(profiles) == 0:
        #Just in case
        profiles["Default"] = Profile()
        profiles["Default"].Name = "Default"
        #Some default templates
        profiles["Default"].FileTemplate = "{<series>}{ Vol.<volume>}{ #<number2>}{ (of <count2>)}{ ({<month>, }<year>)}"
        profiles["Default"].FolderTemplate = "{<publisher>}\{<imprint>}\{<series>}{ (<startyear>{ <format>})}"
    if not lastused:
        lastused = [profiles.keys()[0]]
    return profiles, lastused | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_profiles_from_file(file_path):\r\n profiles = {}\r\n\r\n lastused = \"\"\r\n\r\n if File.Exists(file_path):\r\n try:\r\n with StreamReader(file_path) as xmlfile:\r\n xmldoc = XmlDocument()\r\n xmldoc.Load(xmlfile)\r\n\r\n if xmldoc.DocumentElement.Name == \"Profiles\":\r\n nodes = xmldoc.SelectNodes(\"Profiles/Profile\")\r\n #Individual exported profiles are saved with the document element as Profile\r\n elif xmldoc.DocumentElement.Name == \"Profile\":\r\n nodes = xmldoc.SelectNodes(\"Profile\")\r\n\r\n #Changed from 1.7 to 2.0 to use Profiles/Profile instead of Settings/Setting\r\n elif xmldoc.DocumentElement.Name == \"Settings\":\r\n nodes = xmldoc.SelectNodes(\"Settings/Setting\")\r\n elif xmldoc.DocumentElement.Name == \"Setting\":\r\n nodes = xmldoc.SelectNodes(\"Setting\")\r\n\r\n #No valid root elements\r\n else:\r\n MessageBox.Show(file_path + \" is not a valid Library Organizer profile file.\", \"Not a valid profile file\", MessageBoxButtons.OK, MessageBoxIcon.Error)\r\n return profiles, lastused\r\n\r\n if nodes.Count > 0:\r\n for node in nodes: \r\n profile = Profile()\r\n profile.Name = node.Attributes[\"Name\"].Value\r\n result = profile.load_from_xml(node)\r\n\r\n #Error loading the profile\r\n if result == False:\r\n MessageBox.Show(\"An error occured loading the profile \" + profile.Name + \". That profile has been skipped.\")\r\n\r\n else:\r\n profiles[profile.Name] = profile\r\n\r\n\r\n #Load the last used profile\r\n rootnode = xmldoc.DocumentElement\r\n if rootnode.HasAttribute(\"LastUsed\"):\r\n lastused = rootnode.Attributes[\"LastUsed\"].Value.split(\",\")\r\n\r\n except Exception, ex:\r\n MessageBox.Show(\"Something seems to have gone wrong loading the xml file.\\n\\nThe error was:\\n\" + str(ex), \"Error loading file\", MessageBoxButtons.OK, MessageBoxIcon.Error)\r\n\r\n return profiles, lastused",
"def import_profiles(file_path):\r\n profiles, lastused = load_profiles_from_file(file_path)\r\n\r\n return profiles",
"def load_profiles(profiles_file: TextIO) -> Tuple[Dict[str, List[str]],\n Dict[str, List[str]]]:\n individuals = [[]]\n sublist = 0\n content = profiles_file.readlines()\n for i in content:\n if i != '\\n':\n individuals[sublist].append(i)\n else:\n sublist += 1\n individuals.append([])\n return sort_profile(individuals)",
"def loadProfiles():\n with open(userProfilesDir, \"r\") as infile:\n profiles = json.loads(\"\\n\".join(infile.readlines()))\n infile.close()\n return profiles",
"def load(path):\n\n parser = ConfigParser()\n parser.read(str(path))\n\n def _get(section, option):\n try:\n return parser.get(section, option)\n except (NoSectionError, NoOptionError):\n return None\n\n profiles = {}\n\n for section in parser.sections():\n profiles[section] = Profile(\n domain=_get(section, \"domain\"),\n protocol=_get(section, \"protocol\"),\n client_id=_get(section, \"client_id\"),\n client_secret=_get(section, \"client_secret\"),\n )\n\n return profiles",
"def save_profiles(file_path, profiles, lastused=\"\"):\r\n try:\r\n xSettings = XmlWriterSettings()\r\n xSettings.Indent = True\r\n with XmlWriter.Create(file_path, xSettings) as writer:\r\n writer.WriteStartElement(\"Profiles\")\r\n if lastused:\r\n writer.WriteAttributeString(\"LastUsed\", \",\".join(lastused))\r\n for profile in profiles:\r\n profiles[profile].save_to_xml(writer)\r\n writer.WriteEndElement()\r\n except Exception, ex:\r\n MessageBox.Show(\"An error occured writing the settings file. The error was:\\n\\n\" + ex.message, \"Error saving settings file\", MessageBoxButtons.OK, MessageBoxIcon.Error)",
"def get_profile(path=\"~\"):\n global profiles\n profile = profiles.get(path,None)\n if not profile:\n profile = InitFileConfig(os.path.join(path,\".myradioprofile\"), {} )\n profiles[path] = profile\n return profile",
"def _read_profiles(profile_directory):\n # Initialize key variables\n profiles = defaultdict(\n lambda: defaultdict(lambda: defaultdict()))\n\n # Read the yaml files in the profiles directory\n files = os.listdir(profile_directory)\n filenames = ['{}{}{}'.format(\n profile_directory, os.sep, nextfile) for nextfile in files]\n\n for _filename in sorted(filenames):\n # Get rid of excess os.sep separators\n pathitems = _filename.split(os.sep)\n filename = os.sep.join(pathitems)\n\n # Skip obvious\n if os.path.isfile(filename) is False:\n continue\n if filename.lower().endswith('.yaml') is False:\n continue\n\n with open(filename, 'r') as stream:\n try:\n _profiles = yaml.load(stream)['data']\n except yaml.YAMLError as exc:\n print(exc)\n\n # Create dictionary\n for item in _profiles:\n firstname = item['firstname']\n lastname = item['lastname']\n height = item['height']\n weight = item['weight']\n birthdate = item['birthdate']\n profiles[lastname][firstname][birthdate] = {\n 'height': height, 'weight': weight}\n\n return profiles",
"def populate_profiles_from_directory(self):\n\n self.profiles = []\n\n # Go over all the files and create a profile object\n _profile_files = listdir(self.save_dir)\n\n for profile_filename in _profile_files:\n # Only check for .yaml files\n if path.splitext(profile_filename)[1] == '.yaml':\n # Get the data and create a new profile\n _file_data = self._load_data_from_file(path.join(self.save_dir, profile_filename))\n _profile = Profile(path.splitext(profile_filename)[0])\n _profile.player_data = _file_data\n self.profiles.append(_profile)",
"def load_profile(path, profile):\n profiles = load(path)\n try:\n return profiles[profile]\n except KeyError:\n return Profile(None, None, None, None)",
"def _recurse_load_profile(self, text, profile_path):\n try:\n inherited_profile = Profile()\n cwd = os.path.dirname(os.path.abspath(profile_path)) if profile_path else None\n profile_parser = _ProfileParser(text)\n # Iterate the includes and call recursive to get the profile and variables\n # from parent profiles\n for include in profile_parser.includes:\n # Recursion !!\n profile = self._load_profile(include, cwd)\n inherited_profile.compose_profile(profile)\n\n # Current profile before update with parents (but parent variables already applied)\n inherited_profile = _ProfileValueParser.get_profile(profile_parser.profile_text,\n inherited_profile)\n return inherited_profile\n except ConanException:\n raise\n except Exception as exc:\n raise ConanException(\"Error parsing the profile text file: %s\" % str(exc))",
"def _sloppy_parse_profiles (self, contents):\n profile_start = contents.find('profiles\":')\n profile_list_start = contents.find('profilesList')\n if int(profile_start) > -1 and int(profile_list_start) > -1:\n try:\n try:\n return json.loads('{\"a\":{\"' + contents[profile_start:profile_list_start-2].decode('string_escape') + '}}').get('a').get('profiles')\n except ValueError, e:\n return None\n except TypeError, e:\n return None\n return None",
"def get_profiles(self):\n # print(self.uir) #checkpoint\n if os.path.isdir(self.uir+\"/profiles\"):\n profiles=os.listdir(self.uir+\"/profiles\")\n # print(profiles) #checkpoint\n for profile in profiles:\n wsadmin=self.uir+\"/profiles/\"+profile+\"/bin/wsadmin.bat\"\n if os.path.isfile(wsadmin): #check for wsadmin.bat.\n self.profiles.append(self.uir+\"/profiles/\"+profile)\n\n else: print(self.uir+' Instance does not have \"profile\" folder in '+self.uir)\n return",
"def get_profiles(profile_file_directory):\r\n\t\r\n\tprofile_file_path = profile_file_directory+ \"/profiles.txt\"\r\n\tlist_of_all_allele_numbers_tuple = []\r\n\tdatabase = None\r\n\tlocusList = []\r\n\t\r\n\tfor l in open(profile_file_path):\r\n\t if database is None:\r\n\t\tdatabase = {}\r\n\t\tlocusList = l.split()[1:]\r\n\t\tcontinue\r\n\t t = l.split()\r\n\t st = t[0]\r\n\t v = ' '.join([s for s in t[1:]])\r\n\t if v in database:\r\n\t\tprint >> sys.stderr, 'sequence type ' + str(st) + ' is a duplicate of ' + str(database[v])\r\n\t database[v] = st\r\n\t covert_string_to_tuple_list_of_allele_numbers = tuple(int(x) for x in re.findall(\"[0-9]+\", v)) \r\n\t list_of_all_allele_numbers_tuple.append(covert_string_to_tuple_list_of_allele_numbers)\r\n\t\t\r\n\treturn (database, locusList, list_of_all_allele_numbers_tuple)",
"def profile(filename: str) -> 'Iterator[None]':\n profiler = Profile()\n profiler.enable()\n\n yield\n\n profiler.disable()\n profiler.create_stats()\n profiler.dump_stats('profiles/{}'.format(filename))",
"def list_profiles(self) -> dict:\n wsc = self.read_ws_configuration()\n out = OrderedDict()\n for name, json in wsc.profiles.items():\n out[name] = Profile(name, self.ws_data_folder / name, json)\n # Try to find current profile\n try:\n out[self.current_profile_name].is_current = True\n except Exception:\n pass\n return out",
"def from_file(cls, file_path, ngram_sizes, profile_len):\n profile = cls.from_files((file_path, ), ngram_sizes, profile_len)\n return profile",
"def profiles(self):\n if not self._profiles:\n self.GetAllProfiles()\n return self._profiles",
"def profile_files(profile):\n flist = os.listdir(osp.join(profile, 'startup'))\n profile_path = osp.join(osp.abspath('.'), profile)\n return [osp.join(profile_path, 'startup', x) for x in flist]",
"def GetAllProfiles(self):\n profiles = []\n feed_uri = self._gd_client.GetFeedUri('profiles')\n while feed_uri:\n feed = self._gd_client.GetProfilesFeed(uri=feed_uri)\n profiles.extend(feed.entry)\n feed_uri = feed.FindNextLink()\n self._profiles = profiles",
"def load_people(self, file_path):\n pass",
"def read_profiles(filename):\n profiles = []\n with gzip.open(filename, mode='rt', encoding='utf8') as infile:\n for line in infile:\n profiles.append(Counter(line.split()))\n return profiles",
"def have_profile_dir(path, maxdepth=3, filename=\"profiles.desc\"):\n\twhile path != \"/\" and maxdepth:\n\t\tif os.path.exists(os.path.join(path, \"profiles\", filename)):\n\t\t\treturn normalize_path(path)\n\t\tpath = normalize_path(path + \"/..\")\n\t\tmaxdepth -= 1",
"def parse_file(file_path):\n with open(file_path) as f:\n return XmlPropertyListParser().parse(f)",
"def profiles_path(self) -> Path:\n return self._config.data_path / \"hmm\" / \"profiles.hmm\"",
"def _load_profile(self, profile_name, cwd):\n\n profile_path = self.get_profile_path(profile_name, cwd)\n try:\n text = load_user_encoded(profile_path)\n except Exception as e:\n raise ConanException(f\"Cannot load profile:\\n{e}\")\n\n # All profiles will be now rendered with jinja2 as first pass\n base_path = os.path.dirname(profile_path)\n file_path = os.path.basename(profile_path)\n context = {\"platform\": platform,\n \"os\": os,\n \"profile_dir\": base_path,\n \"profile_name\": file_path,\n \"conan_version\": conan_version}\n rtemplate = Environment(loader=FileSystemLoader(base_path)).from_string(text)\n text = rtemplate.render(context)\n\n try:\n return self._recurse_load_profile(text, profile_path)\n except ConanException as exc:\n raise ConanException(\"Error reading '%s' profile: %s\" % (profile_name, exc))",
"def read_pardus_profiles(self):\n\n self.lan_config = ConfigParser.ConfigParser()\n self.lan_config.read(self.lan_config_path)\n connection_type = \"802-3-ethernet\"\n for section in self.lan_config.sections():\n lan_settings = {}\n for option in self.lan_config.options(section):\n if option == \"device\":\n #To strip device name from long device string\n lan_settings[option] = self.lan_config.get(section, option).split(\"_\")[-1]\n else:\n lan_settings[option] = self.lan_config.get(section, option)\n p = PardusNetworkProfile(section, connection_type, lan_settings)\n self.pardus_profiles.append(p)\n\n self.wlan_config = ConfigParser.ConfigParser()\n self.wlan_config.read(self.wlan_config_path)\n connection_type = \"802-11-wireless\"\n for section in self.wlan_config.sections():\n wlan_settings = {}\n for option in self.wlan_config.options(section):\n if option == \"device\":\n wlan_settings[option] = self.wlan_config.get(section, option).split(\"_\")[-1]\n else:\n wlan_settings[option] = self.wlan_config.get(section, option)\n p = PardusNetworkProfile(section, connection_type, wlan_settings)\n self.pardus_profiles.append(p)",
"def _find_cb_profiles():\n dir_locations = [\".carbonblack\", os.path.join(os.path.expanduser(\"~\"), \".carbonblack\")]\n cred_file = \"credentials.response\"\n profiles = []\n\n for dir in dir_locations:\n cred_file_path = os.path.join(dir, cred_file)\n _MOD_LOGGER.debug(\"Searching CB profiles on '%s'\", cred_file_path)\n if os.path.exists(cred_file_path):\n _MOD_LOGGER.debug(\"File exists, parsing...\")\n config = configparser.ConfigParser(default_section=\"cbbackend\", strict=True)\n config.read(cred_file_path)\n profiles += [sec_name for sec_name in config.keys() if sec_name != \"cbbackend\"]\n\n if profiles:\n _MOD_LOGGER.debug(\"Requested to read 'all' profiles. Found: %s\", \",\".join(profiles))\n\n return profiles",
"def get_profile(self, profiles, settings=None, options=None, conf=None, cwd=None):\n assert isinstance(profiles, list), \"Please provide a list of profiles\"\n cache = ClientCache(self._conan_api.cache_folder)\n loader = ProfileLoader(cache)\n profile = loader.from_cli_args(profiles, settings, options, conf, cwd)\n profile.conf.validate()\n cache.new_config.validate()\n # Apply the new_config to the profiles the global one, so recipes get it too\n profile.conf.rebase_conf_definition(cache.new_config)\n return profile",
"def loadProfile(fname):\n \n x = np.loadtxt(fname)\n return x[:,1]"
] | [
"0.81233364",
"0.77697146",
"0.67844415",
"0.6566505",
"0.636305",
"0.62747896",
"0.6168049",
"0.6128615",
"0.61190236",
"0.6096743",
"0.5969697",
"0.5946075",
"0.59331304",
"0.5897405",
"0.58071595",
"0.57577217",
"0.5669829",
"0.564856",
"0.55757815",
"0.5557105",
"0.55390906",
"0.55295134",
"0.5503458",
"0.5449346",
"0.5407596",
"0.5406341",
"0.5399971",
"0.53979677",
"0.53647906",
"0.5352732"
] | 0.7774989 | 1 |
Loads profiles from a file. file_path>The absolute path the xml file Returns a dict of the profiles | def load_profiles_from_file(file_path):
    profiles = {}
    lastused = ""
    if File.Exists(file_path):
        try:
            with StreamReader(file_path) as xmlfile:
                xmldoc = XmlDocument()
                xmldoc.Load(xmlfile)
                if xmldoc.DocumentElement.Name == "Profiles":
                    nodes = xmldoc.SelectNodes("Profiles/Profile")
                #Individual exported profiles are saved with the document element as Profile
                elif xmldoc.DocumentElement.Name == "Profile":
                    nodes = xmldoc.SelectNodes("Profile")
                #Changed from 1.7 to 2.0 to use Profiles/Profile instead of Settings/Setting
                elif xmldoc.DocumentElement.Name == "Settings":
                    nodes = xmldoc.SelectNodes("Settings/Setting")
                elif xmldoc.DocumentElement.Name == "Setting":
                    nodes = xmldoc.SelectNodes("Setting")
                #No valid root elements
                else:
                    MessageBox.Show(file_path + " is not a valid Library Organizer profile file.", "Not a valid profile file", MessageBoxButtons.OK, MessageBoxIcon.Error)
                    return profiles, lastused
                if nodes.Count > 0:
                    for node in nodes:
                        profile = Profile()
                        profile.Name = node.Attributes["Name"].Value
                        result = profile.load_from_xml(node)
                        #Error loading the profile
                        if result == False:
                            MessageBox.Show("An error occured loading the profile " + profile.Name + ". That profile has been skipped.")
                        else:
                            profiles[profile.Name] = profile
                #Load the last used profile
                rootnode = xmldoc.DocumentElement
                if rootnode.HasAttribute("LastUsed"):
                    lastused = rootnode.Attributes["LastUsed"].Value.split(",")
        except Exception, ex:
            MessageBox.Show("Something seems to have gone wrong loading the xml file.\n\nThe error was:\n" + str(ex), "Error loading file", MessageBoxButtons.OK, MessageBoxIcon.Error)
    return profiles, lastused | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def import_profiles(file_path):\r\n profiles, lastused = load_profiles_from_file(file_path)\r\n\r\n return profiles",
"def load_profiles(file_path):\r\n profiles, lastused = load_profiles_from_file(file_path)\r\n\r\n if len(profiles) == 0:\r\n #Just in case\r\n profiles[\"Default\"] = Profile()\r\n profiles[\"Default\"].Name = \"Default\"\r\n #Some default templates\r\n profiles[\"Default\"].FileTemplate = \"{<series>}{ Vol.<volume>}{ #<number2>}{ (of <count2>)}{ ({<month>, }<year>)}\"\r\n profiles[\"Default\"].FolderTemplate = \"{<publisher>}\\{<imprint>}\\{<series>}{ (<startyear>{ <format>})}\"\r\n \r\n if not lastused:\r\n lastused = [profiles.keys()[0]]\r\n \r\n return profiles, lastused",
"def load(path):\n\n parser = ConfigParser()\n parser.read(str(path))\n\n def _get(section, option):\n try:\n return parser.get(section, option)\n except (NoSectionError, NoOptionError):\n return None\n\n profiles = {}\n\n for section in parser.sections():\n profiles[section] = Profile(\n domain=_get(section, \"domain\"),\n protocol=_get(section, \"protocol\"),\n client_id=_get(section, \"client_id\"),\n client_secret=_get(section, \"client_secret\"),\n )\n\n return profiles",
"def loadProfiles():\n with open(userProfilesDir, \"r\") as infile:\n profiles = json.loads(\"\\n\".join(infile.readlines()))\n infile.close()\n return profiles",
"def load_profiles(profiles_file: TextIO) -> Tuple[Dict[str, List[str]],\n Dict[str, List[str]]]:\n individuals = [[]]\n sublist = 0\n content = profiles_file.readlines()\n for i in content:\n if i != '\\n':\n individuals[sublist].append(i)\n else:\n sublist += 1\n individuals.append([])\n return sort_profile(individuals)",
"def load_people(self, file_path):\n pass",
"def load_profile(path, profile):\n profiles = load(path)\n try:\n return profiles[profile]\n except KeyError:\n return Profile(None, None, None, None)",
"def get_profile(path=\"~\"):\n global profiles\n profile = profiles.get(path,None)\n if not profile:\n profile = InitFileConfig(os.path.join(path,\".myradioprofile\"), {} )\n profiles[path] = profile\n return profile",
"def _read_profiles(profile_directory):\n # Initialize key variables\n profiles = defaultdict(\n lambda: defaultdict(lambda: defaultdict()))\n\n # Read the yaml files in the profiles directory\n files = os.listdir(profile_directory)\n filenames = ['{}{}{}'.format(\n profile_directory, os.sep, nextfile) for nextfile in files]\n\n for _filename in sorted(filenames):\n # Get rid of excess os.sep separators\n pathitems = _filename.split(os.sep)\n filename = os.sep.join(pathitems)\n\n # Skip obvious\n if os.path.isfile(filename) is False:\n continue\n if filename.lower().endswith('.yaml') is False:\n continue\n\n with open(filename, 'r') as stream:\n try:\n _profiles = yaml.load(stream)['data']\n except yaml.YAMLError as exc:\n print(exc)\n\n # Create dictionary\n for item in _profiles:\n firstname = item['firstname']\n lastname = item['lastname']\n height = item['height']\n weight = item['weight']\n birthdate = item['birthdate']\n profiles[lastname][firstname][birthdate] = {\n 'height': height, 'weight': weight}\n\n return profiles",
"def parse_file(file_path):\n with open(file_path) as f:\n return XmlPropertyListParser().parse(f)",
"def from_file(cls, file_path, ngram_sizes, profile_len):\n profile = cls.from_files((file_path, ), ngram_sizes, profile_len)\n return profile",
"def _recurse_load_profile(self, text, profile_path):\n try:\n inherited_profile = Profile()\n cwd = os.path.dirname(os.path.abspath(profile_path)) if profile_path else None\n profile_parser = _ProfileParser(text)\n # Iterate the includes and call recursive to get the profile and variables\n # from parent profiles\n for include in profile_parser.includes:\n # Recursion !!\n profile = self._load_profile(include, cwd)\n inherited_profile.compose_profile(profile)\n\n # Current profile before update with parents (but parent variables already applied)\n inherited_profile = _ProfileValueParser.get_profile(profile_parser.profile_text,\n inherited_profile)\n return inherited_profile\n except ConanException:\n raise\n except Exception as exc:\n raise ConanException(\"Error parsing the profile text file: %s\" % str(exc))",
"def loadProfile(fname):\n \n x = np.loadtxt(fname)\n return x[:,1]",
"def _load_profile(self, profile_name, cwd):\n\n profile_path = self.get_profile_path(profile_name, cwd)\n try:\n text = load_user_encoded(profile_path)\n except Exception as e:\n raise ConanException(f\"Cannot load profile:\\n{e}\")\n\n # All profiles will be now rendered with jinja2 as first pass\n base_path = os.path.dirname(profile_path)\n file_path = os.path.basename(profile_path)\n context = {\"platform\": platform,\n \"os\": os,\n \"profile_dir\": base_path,\n \"profile_name\": file_path,\n \"conan_version\": conan_version}\n rtemplate = Environment(loader=FileSystemLoader(base_path)).from_string(text)\n text = rtemplate.render(context)\n\n try:\n return self._recurse_load_profile(text, profile_path)\n except ConanException as exc:\n raise ConanException(\"Error reading '%s' profile: %s\" % (profile_name, exc))",
"def _sloppy_parse_profiles (self, contents):\n profile_start = contents.find('profiles\":')\n profile_list_start = contents.find('profilesList')\n if int(profile_start) > -1 and int(profile_list_start) > -1:\n try:\n try:\n return json.loads('{\"a\":{\"' + contents[profile_start:profile_list_start-2].decode('string_escape') + '}}').get('a').get('profiles')\n except ValueError, e:\n return None\n except TypeError, e:\n return None\n return None",
"def _load_data(self, filename):\n if not os.path.isfile(filename):\n return False\n\n with open(filename) as f:\n data = pickle.load(f)\n if data:\n self.profiles = data['profiles']\n self.user_data = data['user_data']\n self.api_data = data['api_data']\n else:\n return False",
"def populate_profiles_from_directory(self):\n\n self.profiles = []\n\n # Go over all the files and create a profile object\n _profile_files = listdir(self.save_dir)\n\n for profile_filename in _profile_files:\n # Only check for .yaml files\n if path.splitext(profile_filename)[1] == '.yaml':\n # Get the data and create a new profile\n _file_data = self._load_data_from_file(path.join(self.save_dir, profile_filename))\n _profile = Profile(path.splitext(profile_filename)[0])\n _profile.player_data = _file_data\n self.profiles.append(_profile)",
"def read_plist(path: str) -> dict:\n return _read_plist(path, plistlib.FMT_XML)",
"def _load_file(self, file_path: str) -> dict:\n raise NotImplementedError()",
"def load(cls, filepath):\n return BaseProfiler.load(filepath)",
"def read_profiles(filename):\n profiles = []\n with gzip.open(filename, mode='rt', encoding='utf8') as infile:\n for line in infile:\n profiles.append(Counter(line.split()))\n return profiles",
"def save_profiles(file_path, profiles, lastused=\"\"):\r\n try:\r\n xSettings = XmlWriterSettings()\r\n xSettings.Indent = True\r\n with XmlWriter.Create(file_path, xSettings) as writer:\r\n writer.WriteStartElement(\"Profiles\")\r\n if lastused:\r\n writer.WriteAttributeString(\"LastUsed\", \",\".join(lastused))\r\n for profile in profiles:\r\n profiles[profile].save_to_xml(writer)\r\n writer.WriteEndElement()\r\n except Exception, ex:\r\n MessageBox.Show(\"An error occured writing the settings file. The error was:\\n\\n\" + ex.message, \"Error saving settings file\", MessageBoxButtons.OK, MessageBoxIcon.Error)",
"def profiles_path(self) -> Path:\n return self._config.data_path / \"hmm\" / \"profiles.hmm\"",
"def get_resolved_profile_catalog(trestle_root: pathlib.Path, profile_path: pathlib.Path) -> cat.Catalog:\n logger.debug(f'get resolved profile catalog for {profile_path} via generated Import.')\n import_ = prof.Import(href=str(profile_path), include_all={})\n import_filter = ProfileResolver.Import(trestle_root, import_)\n logger.debug('launch pipeline')\n result = next(import_filter.process())\n return result",
"def profile_directory_path(request, file):\n return directory_path('profile', file)",
"def get_profiles(profile_file_directory):\r\n\t\r\n\tprofile_file_path = profile_file_directory+ \"/profiles.txt\"\r\n\tlist_of_all_allele_numbers_tuple = []\r\n\tdatabase = None\r\n\tlocusList = []\r\n\t\r\n\tfor l in open(profile_file_path):\r\n\t if database is None:\r\n\t\tdatabase = {}\r\n\t\tlocusList = l.split()[1:]\r\n\t\tcontinue\r\n\t t = l.split()\r\n\t st = t[0]\r\n\t v = ' '.join([s for s in t[1:]])\r\n\t if v in database:\r\n\t\tprint >> sys.stderr, 'sequence type ' + str(st) + ' is a duplicate of ' + str(database[v])\r\n\t database[v] = st\r\n\t covert_string_to_tuple_list_of_allele_numbers = tuple(int(x) for x in re.findall(\"[0-9]+\", v)) \r\n\t list_of_all_allele_numbers_tuple.append(covert_string_to_tuple_list_of_allele_numbers)\r\n\t\t\r\n\treturn (database, locusList, list_of_all_allele_numbers_tuple)",
"def list_profiles(self) -> dict:\n wsc = self.read_ws_configuration()\n out = OrderedDict()\n for name, json in wsc.profiles.items():\n out[name] = Profile(name, self.ws_data_folder / name, json)\n # Try to find current profile\n try:\n out[self.current_profile_name].is_current = True\n except Exception:\n pass\n return out",
"def _read_profile_file(cls, profile_file):\n qid2title: Dict[str, str] = {}\n qid2desc: Dict[str, str] = {}\n alias2qids: Dict[str, list] = {}\n type_systems: Dict[str, Dict[str, List[str]]] = {}\n qid2relations: Dict[str, Dict[str, List[str]]] = {}\n\n num_lines = sum(1 for _ in open(profile_file))\n with open(profile_file, \"r\") as in_f:\n for line in tqdm(in_f, total=num_lines, desc=\"Reading profile\"):\n line = ujson.loads(line)\n\n # Check keys and schema\n assert all(\n k in line.keys() for k in REQUIRED_KEYS\n ), f\"A key from {REQUIRED_KEYS} was not in {line}\"\n try:\n # Asserts the types are correct\n ent = EntityObj(\n entity_id=line[\"entity_id\"],\n mentions=line[\"mentions\"],\n title=line.get(\"title\", line[\"entity_id\"]),\n description=line.get(\"description\", \"\"),\n types=line.get(\"types\", {}),\n relations=line.get(\"relations\", []),\n )\n except ValidationError as e:\n print(e.json())\n raise e\n if ent.entity_id in qid2title:\n raise ValueError(f\"{ent.entity_id} is already in our dump\")\n qid2title[ent.entity_id] = ent.title\n qid2desc[ent.entity_id] = ent.description\n # For each [mention, score] value, create a value of mention -> [qid, score] in the alias2qid dict\n for men_pair in ent.mentions:\n # Lower case mentions for mention extraction\n new_men = get_lnrm(men_pair[0], strip=True, lower=True)\n if new_men not in alias2qids:\n alias2qids[new_men] = []\n alias2qids[new_men].append([ent.entity_id, men_pair[1]])\n # Add type systems of type_sys -> QID -> list of type names\n for type_sys in ent.types:\n if type_sys not in type_systems:\n type_systems[type_sys] = {}\n type_systems[type_sys][ent.entity_id] = ent.types[type_sys]\n # Add kg relations QID -> relation -> list of object QIDs\n for rel_pair in ent.relations:\n if \"relation\" not in rel_pair or \"object\" not in rel_pair:\n raise ValueError(\n \"For each value in relations, it must be a JSON with keys relation and object\"\n )\n if ent.entity_id not in qid2relations:\n qid2relations[ent.entity_id] = {}\n if rel_pair[\"relation\"] not in qid2relations[ent.entity_id]:\n qid2relations[ent.entity_id][rel_pair[\"relation\"]] = []\n qid2relations[ent.entity_id][rel_pair[\"relation\"]].append(\n rel_pair[\"object\"]\n )\n\n # Sort mentions based on score, highest first\n for al in list(alias2qids.keys()):\n alias2qids[al] = sorted(alias2qids[al], key=lambda x: x[1], reverse=True)\n # Add all qids to the type systems and KG connections with empty values\n # This isn't strictly required but can make the sets more clean as they'll have consistent keys\n for qid in qid2title:\n for type_sys in type_systems:\n if qid not in type_systems[type_sys]:\n type_systems[type_sys][qid] = []\n if qid not in qid2relations:\n qid2relations[qid] = {}\n return qid2title, qid2desc, alias2qids, type_systems, qid2relations",
"def get_profiles(self):\n # print(self.uir) #checkpoint\n if os.path.isdir(self.uir+\"/profiles\"):\n profiles=os.listdir(self.uir+\"/profiles\")\n # print(profiles) #checkpoint\n for profile in profiles:\n wsadmin=self.uir+\"/profiles/\"+profile+\"/bin/wsadmin.bat\"\n if os.path.isfile(wsadmin): #check for wsadmin.bat.\n self.profiles.append(self.uir+\"/profiles/\"+profile)\n\n else: print(self.uir+' Instance does not have \"profile\" folder in '+self.uir)\n return",
"def profile(filename: str) -> 'Iterator[None]':\n profiler = Profile()\n profiler.enable()\n\n yield\n\n profiler.disable()\n profiler.create_stats()\n profiler.dump_stats('profiles/{}'.format(filename))"
] | [
"0.7816373",
"0.7494791",
"0.69231015",
"0.68052244",
"0.669596",
"0.6337234",
"0.6331917",
"0.60487854",
"0.6018389",
"0.59737855",
"0.59296596",
"0.59177107",
"0.57330143",
"0.57177866",
"0.5666612",
"0.5610871",
"0.56013894",
"0.5478734",
"0.54241526",
"0.5402424",
"0.5382448",
"0.5379713",
"0.5368854",
"0.5361319",
"0.5332461",
"0.53192705",
"0.53029263",
"0.529909",
"0.5262802",
"0.525197"
] | 0.755472 | 1 |
Load profiles from an xml file. If no profiles are found it returns an empty dict. file_path > The absolute path to the profile file. Returns a dict of the found profiles. | def import_profiles(file_path):
profiles, lastused = load_profiles_from_file(file_path)
return profiles | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_profiles_from_file(file_path):\r\n profiles = {}\r\n\r\n lastused = \"\"\r\n\r\n if File.Exists(file_path):\r\n try:\r\n with StreamReader(file_path) as xmlfile:\r\n xmldoc = XmlDocument()\r\n xmldoc.Load(xmlfile)\r\n\r\n if xmldoc.DocumentElement.Name == \"Profiles\":\r\n nodes = xmldoc.SelectNodes(\"Profiles/Profile\")\r\n #Individual exported profiles are saved with the document element as Profile\r\n elif xmldoc.DocumentElement.Name == \"Profile\":\r\n nodes = xmldoc.SelectNodes(\"Profile\")\r\n\r\n #Changed from 1.7 to 2.0 to use Profiles/Profile instead of Settings/Setting\r\n elif xmldoc.DocumentElement.Name == \"Settings\":\r\n nodes = xmldoc.SelectNodes(\"Settings/Setting\")\r\n elif xmldoc.DocumentElement.Name == \"Setting\":\r\n nodes = xmldoc.SelectNodes(\"Setting\")\r\n\r\n #No valid root elements\r\n else:\r\n MessageBox.Show(file_path + \" is not a valid Library Organizer profile file.\", \"Not a valid profile file\", MessageBoxButtons.OK, MessageBoxIcon.Error)\r\n return profiles, lastused\r\n\r\n if nodes.Count > 0:\r\n for node in nodes: \r\n profile = Profile()\r\n profile.Name = node.Attributes[\"Name\"].Value\r\n result = profile.load_from_xml(node)\r\n\r\n #Error loading the profile\r\n if result == False:\r\n MessageBox.Show(\"An error occured loading the profile \" + profile.Name + \". That profile has been skipped.\")\r\n\r\n else:\r\n profiles[profile.Name] = profile\r\n\r\n\r\n #Load the last used profile\r\n rootnode = xmldoc.DocumentElement\r\n if rootnode.HasAttribute(\"LastUsed\"):\r\n lastused = rootnode.Attributes[\"LastUsed\"].Value.split(\",\")\r\n\r\n except Exception, ex:\r\n MessageBox.Show(\"Something seems to have gone wrong loading the xml file.\\n\\nThe error was:\\n\" + str(ex), \"Error loading file\", MessageBoxButtons.OK, MessageBoxIcon.Error)\r\n\r\n return profiles, lastused",
"def load_profiles(file_path):\r\n profiles, lastused = load_profiles_from_file(file_path)\r\n\r\n if len(profiles) == 0:\r\n #Just in case\r\n profiles[\"Default\"] = Profile()\r\n profiles[\"Default\"].Name = \"Default\"\r\n #Some default templates\r\n profiles[\"Default\"].FileTemplate = \"{<series>}{ Vol.<volume>}{ #<number2>}{ (of <count2>)}{ ({<month>, }<year>)}\"\r\n profiles[\"Default\"].FolderTemplate = \"{<publisher>}\\{<imprint>}\\{<series>}{ (<startyear>{ <format>})}\"\r\n \r\n if not lastused:\r\n lastused = [profiles.keys()[0]]\r\n \r\n return profiles, lastused",
"def load(path):\n\n parser = ConfigParser()\n parser.read(str(path))\n\n def _get(section, option):\n try:\n return parser.get(section, option)\n except (NoSectionError, NoOptionError):\n return None\n\n profiles = {}\n\n for section in parser.sections():\n profiles[section] = Profile(\n domain=_get(section, \"domain\"),\n protocol=_get(section, \"protocol\"),\n client_id=_get(section, \"client_id\"),\n client_secret=_get(section, \"client_secret\"),\n )\n\n return profiles",
"def load_profiles(profiles_file: TextIO) -> Tuple[Dict[str, List[str]],\n Dict[str, List[str]]]:\n individuals = [[]]\n sublist = 0\n content = profiles_file.readlines()\n for i in content:\n if i != '\\n':\n individuals[sublist].append(i)\n else:\n sublist += 1\n individuals.append([])\n return sort_profile(individuals)",
"def loadProfiles():\n with open(userProfilesDir, \"r\") as infile:\n profiles = json.loads(\"\\n\".join(infile.readlines()))\n infile.close()\n return profiles",
"def _read_profiles(profile_directory):\n # Initialize key variables\n profiles = defaultdict(\n lambda: defaultdict(lambda: defaultdict()))\n\n # Read the yaml files in the profiles directory\n files = os.listdir(profile_directory)\n filenames = ['{}{}{}'.format(\n profile_directory, os.sep, nextfile) for nextfile in files]\n\n for _filename in sorted(filenames):\n # Get rid of excess os.sep separators\n pathitems = _filename.split(os.sep)\n filename = os.sep.join(pathitems)\n\n # Skip obvious\n if os.path.isfile(filename) is False:\n continue\n if filename.lower().endswith('.yaml') is False:\n continue\n\n with open(filename, 'r') as stream:\n try:\n _profiles = yaml.load(stream)['data']\n except yaml.YAMLError as exc:\n print(exc)\n\n # Create dictionary\n for item in _profiles:\n firstname = item['firstname']\n lastname = item['lastname']\n height = item['height']\n weight = item['weight']\n birthdate = item['birthdate']\n profiles[lastname][firstname][birthdate] = {\n 'height': height, 'weight': weight}\n\n return profiles",
"def load_profile(path, profile):\n profiles = load(path)\n try:\n return profiles[profile]\n except KeyError:\n return Profile(None, None, None, None)",
"def get_profile(path=\"~\"):\n global profiles\n profile = profiles.get(path,None)\n if not profile:\n profile = InitFileConfig(os.path.join(path,\".myradioprofile\"), {} )\n profiles[path] = profile\n return profile",
"def _recurse_load_profile(self, text, profile_path):\n try:\n inherited_profile = Profile()\n cwd = os.path.dirname(os.path.abspath(profile_path)) if profile_path else None\n profile_parser = _ProfileParser(text)\n # Iterate the includes and call recursive to get the profile and variables\n # from parent profiles\n for include in profile_parser.includes:\n # Recursion !!\n profile = self._load_profile(include, cwd)\n inherited_profile.compose_profile(profile)\n\n # Current profile before update with parents (but parent variables already applied)\n inherited_profile = _ProfileValueParser.get_profile(profile_parser.profile_text,\n inherited_profile)\n return inherited_profile\n except ConanException:\n raise\n except Exception as exc:\n raise ConanException(\"Error parsing the profile text file: %s\" % str(exc))",
"def _sloppy_parse_profiles (self, contents):\n profile_start = contents.find('profiles\":')\n profile_list_start = contents.find('profilesList')\n if int(profile_start) > -1 and int(profile_list_start) > -1:\n try:\n try:\n return json.loads('{\"a\":{\"' + contents[profile_start:profile_list_start-2].decode('string_escape') + '}}').get('a').get('profiles')\n except ValueError, e:\n return None\n except TypeError, e:\n return None\n return None",
"def populate_profiles_from_directory(self):\n\n self.profiles = []\n\n # Go over all the files and create a profile object\n _profile_files = listdir(self.save_dir)\n\n for profile_filename in _profile_files:\n # Only check for .yaml files\n if path.splitext(profile_filename)[1] == '.yaml':\n # Get the data and create a new profile\n _file_data = self._load_data_from_file(path.join(self.save_dir, profile_filename))\n _profile = Profile(path.splitext(profile_filename)[0])\n _profile.player_data = _file_data\n self.profiles.append(_profile)",
"def load_people(self, file_path):\n pass",
"def parse_file(file_path):\n with open(file_path) as f:\n return XmlPropertyListParser().parse(f)",
"def list_profiles(self) -> dict:\n wsc = self.read_ws_configuration()\n out = OrderedDict()\n for name, json in wsc.profiles.items():\n out[name] = Profile(name, self.ws_data_folder / name, json)\n # Try to find current profile\n try:\n out[self.current_profile_name].is_current = True\n except Exception:\n pass\n return out",
"def from_file(cls, file_path, ngram_sizes, profile_len):\n profile = cls.from_files((file_path, ), ngram_sizes, profile_len)\n return profile",
"def get_profiles(self):\n # print(self.uir) #checkpoint\n if os.path.isdir(self.uir+\"/profiles\"):\n profiles=os.listdir(self.uir+\"/profiles\")\n # print(profiles) #checkpoint\n for profile in profiles:\n wsadmin=self.uir+\"/profiles/\"+profile+\"/bin/wsadmin.bat\"\n if os.path.isfile(wsadmin): #check for wsadmin.bat.\n self.profiles.append(self.uir+\"/profiles/\"+profile)\n\n else: print(self.uir+' Instance does not have \"profile\" folder in '+self.uir)\n return",
"def _load_profile(self, profile_name, cwd):\n\n profile_path = self.get_profile_path(profile_name, cwd)\n try:\n text = load_user_encoded(profile_path)\n except Exception as e:\n raise ConanException(f\"Cannot load profile:\\n{e}\")\n\n # All profiles will be now rendered with jinja2 as first pass\n base_path = os.path.dirname(profile_path)\n file_path = os.path.basename(profile_path)\n context = {\"platform\": platform,\n \"os\": os,\n \"profile_dir\": base_path,\n \"profile_name\": file_path,\n \"conan_version\": conan_version}\n rtemplate = Environment(loader=FileSystemLoader(base_path)).from_string(text)\n text = rtemplate.render(context)\n\n try:\n return self._recurse_load_profile(text, profile_path)\n except ConanException as exc:\n raise ConanException(\"Error reading '%s' profile: %s\" % (profile_name, exc))",
"def read_plist(path: str) -> dict:\n return _read_plist(path, plistlib.FMT_XML)",
"def save_profiles(file_path, profiles, lastused=\"\"):\r\n try:\r\n xSettings = XmlWriterSettings()\r\n xSettings.Indent = True\r\n with XmlWriter.Create(file_path, xSettings) as writer:\r\n writer.WriteStartElement(\"Profiles\")\r\n if lastused:\r\n writer.WriteAttributeString(\"LastUsed\", \",\".join(lastused))\r\n for profile in profiles:\r\n profiles[profile].save_to_xml(writer)\r\n writer.WriteEndElement()\r\n except Exception, ex:\r\n MessageBox.Show(\"An error occured writing the settings file. The error was:\\n\\n\" + ex.message, \"Error saving settings file\", MessageBoxButtons.OK, MessageBoxIcon.Error)",
"def profiles(self):\n if not self._profiles:\n self.GetAllProfiles()\n return self._profiles",
"def get_profiles(profile_file_directory):\r\n\t\r\n\tprofile_file_path = profile_file_directory+ \"/profiles.txt\"\r\n\tlist_of_all_allele_numbers_tuple = []\r\n\tdatabase = None\r\n\tlocusList = []\r\n\t\r\n\tfor l in open(profile_file_path):\r\n\t if database is None:\r\n\t\tdatabase = {}\r\n\t\tlocusList = l.split()[1:]\r\n\t\tcontinue\r\n\t t = l.split()\r\n\t st = t[0]\r\n\t v = ' '.join([s for s in t[1:]])\r\n\t if v in database:\r\n\t\tprint >> sys.stderr, 'sequence type ' + str(st) + ' is a duplicate of ' + str(database[v])\r\n\t database[v] = st\r\n\t covert_string_to_tuple_list_of_allele_numbers = tuple(int(x) for x in re.findall(\"[0-9]+\", v)) \r\n\t list_of_all_allele_numbers_tuple.append(covert_string_to_tuple_list_of_allele_numbers)\r\n\t\t\r\n\treturn (database, locusList, list_of_all_allele_numbers_tuple)",
"def _load_data(self, filename):\n if not os.path.isfile(filename):\n return False\n\n with open(filename) as f:\n data = pickle.load(f)\n if data:\n self.profiles = data['profiles']\n self.user_data = data['user_data']\n self.api_data = data['api_data']\n else:\n return False",
"def profile(filename: str) -> 'Iterator[None]':\n profiler = Profile()\n profiler.enable()\n\n yield\n\n profiler.disable()\n profiler.create_stats()\n profiler.dump_stats('profiles/{}'.format(filename))",
"def loadProfile(fname):\n \n x = np.loadtxt(fname)\n return x[:,1]",
"def get_user():\n with open(app.config['DATA_XML'], 'r') as xmlfile:\n root = ElementTree.parse(xmlfile).getroot()\n\n for item in root.iter('server'):\n result = '{}://{}'.format(\n item.find('protocol').text,\n item.find('host').text\n )\n\n data = {\n user.attrib['id']: {\n 'name': user.find('name').text,\n 'avatar': '{}{}'.format(\n result,\n user.find('avatar').text\n )\n }\n for user in root.iter('user')\n }\n return OrderedDict(\n sorted(\n data.items(),\n key=lambda result: itemgetter('name')(itemgetter(1)(result)),\n cmp=locale.strcoll\n )\n )",
"def GetAllProfiles(self):\n profiles = []\n feed_uri = self._gd_client.GetFeedUri('profiles')\n while feed_uri:\n feed = self._gd_client.GetProfilesFeed(uri=feed_uri)\n profiles.extend(feed.entry)\n feed_uri = feed.FindNextLink()\n self._profiles = profiles",
"def ReadWiredNetworkProfile(self, profilename):\n profile = {}\n profilename = misc.to_unicode(profilename)\n config = ConfigParser.ConfigParser()\n config.read(self.wired_conf)\n if config.has_section(profilename):\n for x in config.options(profilename):\n profile[x] = misc.Noneify(config.get(profilename, x))\n profile['use_global_dns'] = bool(profile.get('use_global_dns'))\n profile['use_static_dns'] = bool(profile.get('use_static_dns'))\n self.WiredNetwork = profile\n return \"100: Loaded Profile\"\n else:\n self.WiredNetwork = None\n return \"500: Profile Not Found\"",
"def profiles_path(self) -> Path:\n return self._config.data_path / \"hmm\" / \"profiles.hmm\"",
"def read_pardus_profiles(self):\n\n self.lan_config = ConfigParser.ConfigParser()\n self.lan_config.read(self.lan_config_path)\n connection_type = \"802-3-ethernet\"\n for section in self.lan_config.sections():\n lan_settings = {}\n for option in self.lan_config.options(section):\n if option == \"device\":\n #To strip device name from long device string\n lan_settings[option] = self.lan_config.get(section, option).split(\"_\")[-1]\n else:\n lan_settings[option] = self.lan_config.get(section, option)\n p = PardusNetworkProfile(section, connection_type, lan_settings)\n self.pardus_profiles.append(p)\n\n self.wlan_config = ConfigParser.ConfigParser()\n self.wlan_config.read(self.wlan_config_path)\n connection_type = \"802-11-wireless\"\n for section in self.wlan_config.sections():\n wlan_settings = {}\n for option in self.wlan_config.options(section):\n if option == \"device\":\n wlan_settings[option] = self.wlan_config.get(section, option).split(\"_\")[-1]\n else:\n wlan_settings[option] = self.wlan_config.get(section, option)\n p = PardusNetworkProfile(section, connection_type, wlan_settings)\n self.pardus_profiles.append(p)",
"def have_profile_dir(path, maxdepth=3, filename=\"profiles.desc\"):\n\twhile path != \"/\" and maxdepth:\n\t\tif os.path.exists(os.path.join(path, \"profiles\", filename)):\n\t\t\treturn normalize_path(path)\n\t\tpath = normalize_path(path + \"/..\")\n\t\tmaxdepth -= 1"
] | [
"0.7626121",
"0.73465997",
"0.66420317",
"0.64548403",
"0.6422596",
"0.61153334",
"0.60498744",
"0.59523565",
"0.5855331",
"0.57868946",
"0.5670626",
"0.56380814",
"0.5517114",
"0.5462929",
"0.54440254",
"0.54290825",
"0.5421971",
"0.5367816",
"0.53443223",
"0.53429097",
"0.52782893",
"0.52520305",
"0.5212792",
"0.5212367",
"0.5189856",
"0.51876795",
"0.5160056",
"0.5152693",
"0.5114987",
"0.5097414"
] | 0.74111307 | 1 |
Saves the profiles to an xml file. | def save_profiles(file_path, profiles, lastused=""):
try:
xSettings = XmlWriterSettings()
xSettings.Indent = True
with XmlWriter.Create(file_path, xSettings) as writer:
writer.WriteStartElement("Profiles")
if lastused:
writer.WriteAttributeString("LastUsed", ",".join(lastused))
for profile in profiles:
profiles[profile].save_to_xml(writer)
writer.WriteEndElement()
except Exception, ex:
MessageBox.Show("An error occured writing the settings file. The error was:\n\n" + ex.message, "Error saving settings file", MessageBoxButtons.OK, MessageBoxIcon.Error) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_profile(file_path, profile):\r\n try:\r\n xSettings = XmlWriterSettings()\r\n xSettings.Indent = True\r\n with XmlWriter.Create(file_path, xSettings) as writer:\r\n profile.save_to_xml(writer)\r\n except Exception, ex:\r\n MessageBox.Show(\"An error occured writing the settings file. The error was:\\n\\n\" + ex.message, \"Error saving settings file\", MessageBoxButtons.OK, MessageBoxIcon.Error)",
"def save_to_xml(self, xwriter):\r\n\r\n xwriter.WriteStartElement(\"Profile\")\r\n xwriter.WriteAttributeString(\"Name\", self.Name)\r\n xwriter.WriteStartAttribute(\"Version\")\r\n xwriter.WriteValue(self.Version)\r\n xwriter.WriteEndAttribute()\r\n\r\n for var_name in self.__dict__:\r\n var_type = type(getattr(self, var_name))\r\n\r\n if var_type is str and var_name != \"Name\":\r\n self.write_string_to_xml(var_name, xwriter)\r\n\r\n elif var_type is bool:\r\n self.write_bool_to_xml(var_name, xwriter)\r\n\r\n elif var_type is dict:\r\n self.write_dict_to_xml(var_name, xwriter)\r\n\r\n elif var_type is list and var_name != \"ExcludeRules\":\r\n self.write_list_to_xml(var_name, xwriter)\r\n\r\n xwriter.WriteStartElement(\"ExcludeRules\")\r\n xwriter.WriteAttributeString(\"Operator\", self.ExcludeOperator)\r\n xwriter.WriteAttributeString(\"ExcludeMode\", self.ExcludeMode)\r\n for rule in self.ExcludeRules:\r\n if rule:\r\n rule.save_xml(xwriter)\r\n xwriter.WriteEndElement()\r\n \r\n xwriter.WriteEndElement()",
"def saving_file(xml):\r\n\r\n xml_string = etree.tostring(xml)\r\n parsed = minidom.parseString(xml_string)\r\n with open(self.app_path + \"\\\\temp_\\\\\" + file_name + \".xml\", \"w\") as file:\r\n file.write(parsed.toprettyxml(indent=\" \"))",
"def save_xml(self, filename):\n if \".xml\" not in filename:\n filename = filename + \".xml\"\n\n shutil.copyfile(self.env.model_file, filename)",
"def save_profile(self):\n self.save()",
"def save_profile(self, dir):\n filename = \"profile.pkl\"\n with open(osp.join(dir, filename), \"wb\") as f:\n pickle.dump(self, f)",
"def Save_xml(self, accounts):\n try:\n\n self.extension = \".xml\"\n\n colors.info(\"Saving as XML in {}{}\".format(self.file, self.extension))\n\n Main = ET.Element(\"SpotCheck\")\n\n SpotifyFree = ET.SubElement(Main, 'SpotifyFree')\n SpotifyPremium = ET.SubElement(Main, 'SpotifyPremium')\n PremiumFamily = ET.SubElement(Main, 'PremiumFamily')\n AdminPremiumFamily = ET.SubElement(Main, 'AdminPremiumFamily')\n BadAccounts = ET.SubElement(Main, 'BadAccounts')\n\n for account in accounts:\n if account.get(\"account_login\") == \"error\":\n temp = ET.SubElement(BadAccounts, \"account\")\n temp.set(\"Username\", account[\"Username\"])\n temp.set(\"Password\", account[\"Password\"])\n else:\n if account.get(\"AccountType\") == \"Spotify Free\":\n temp = ET.SubElement(SpotifyFree, \"account\")\n temp.set(\"Username\", account[\"Username\"])\n temp.set(\"Password\", account[\"Password\"])\n temp.set(\"Country\", account[\"Country\"])\n elif account.get(\"AccountType\") == \"Spotify Premium\":\n temp = ET.SubElement(SpotifyPremium, \"account\")\n temp.set(\"Username\", account[\"Username\"])\n temp.set(\"Password\", account[\"Password\"])\n temp.set(\"Country\", account[\"Country\"])\n elif account.get(\"AccountType\") == \"Premium Family\":\n if account.get(\"Admin\"):\n temp = ET.SubElement(AdminPremiumFamily, \"account\")\n temp.set(\"Username\", account[\"Username\"])\n temp.set(\"Password\", account[\"Password\"])\n temp.set(\"Country\", account[\"Country\"])\n else:\n temp = ET.SubElement(PremiumFamily, \"account\")\n temp.set(\"Username\", account[\"Username\"])\n temp.set(\"Password\", account[\"Password\"])\n temp.set(\"Country\", account[\"Country\"])\n XML = ET.tostring(Main)\n with open(self.file + self.extension, \"w\") as output_:\n output_.write(XML)\n colors.correct(\"Done! All saved successfully\")\n except Exception as e:\n colors.error(str(e))\n _exit(1)",
"def Save_Current_Profile(self):\r\n #name = tkFileDialog.asksaveasfilename()\r\n #if( name == \"\" ):\r\n # return\r\n #self.system.Save_Current_Profile(name)\r\n self.system.Save_Current_Profile()",
"def saveSessionToXML(self, filename):\r\n xmlStr = self.createXMLStr()\r\n \r\n #Write to the file\r\n #xml.dom.ext.PrettyPrint(doc, open(filename, 'w'))\r\n xmlFile = open(filename, 'w')\r\n xmlFile.write(xmlStr)\r\n xmlFile.close()",
"def save(self, save_dir):\n ProfileManager.save_data_to_disk(self.player_data, path.join(save_dir, self.player_name + '.yaml'))",
"def apply(self):\r\n\r\n file_name = str(sum([ord(i) for i in self.ssid.get()]))\r\n\r\n def saving_file(xml):\r\n \"\"\" Save user profile in xml format to temp_ dir.\"\"\"\r\n\r\n xml_string = etree.tostring(xml)\r\n parsed = minidom.parseString(xml_string)\r\n with open(self.app_path + \"\\\\temp_\\\\\" + file_name + \".xml\", \"w\") as file:\r\n file.write(parsed.toprettyxml(indent=\" \"))\r\n\r\n parse_xml = etree.parse(os.path.dirname(os.path.realpath(__file__)) +\r\n \"/data/sampleProfile.xml\")\r\n\r\n # The below code will parse the sample xml file\r\n # and fill important details entered by the user.\r\n root_tree = parse_xml.getroot()\r\n root_tree[0].text = self.ssid.get()\r\n root_tree[1][0][0].text = self.ssid.get()\r\n root_tree[3].text = self.connection_mode.get().lower()\r\n security = root_tree[4][0]\r\n security[0][0].text = self.authentication.get()\r\n security[0][1].text = self.encryption.get()\r\n if self.authentication.get() != \"open\":\r\n etree.SubElement(security, \"sharedKey\")\r\n etree.SubElement(security[1], \"keyType\").text = \"passPhrase\"\r\n etree.SubElement(security[1], \"protected\").text = \"false\"\r\n etree.SubElement(security[1], \"keyMaterial\").text = self.password.get()\r\n\r\n # Save the xml file\r\n saving_file(root_tree)\r\n\r\n # Add profile to the system.\r\n temp_path = 'netsh wlan add profile filename=\"' + self.app_path + \"\\\\temp_\\\\\"\r\n output_ = subprocess.run(temp_path + file_name + '.xml\"', shell=True,\r\n capture_output=True, text=True)\r\n os.remove(self.app_path + \"\\\\temp_\\\\\" + file_name + \".xml\")\r\n\r\n # If unable to add profile.\r\n if output_.returncode != 0:\r\n message = \"Sorry, Unable to add profile.\\n(You entered wrong details \" \\\r\n \"or else you don't have admin rights.)\"\r\n image_ = \"error\"\r\n\r\n else:\r\n message = \"Profile added successfully (Please Refresh)\"\r\n image_ = \"warning\"\r\n\r\n MessageBox(self.parent, message, image_)",
"def saveFile(self, filename):\n ret = libxml2mod.xmlSaveFile(filename, self._o)\n return ret",
"def save(self, filename=None):\n f = filename if filename else self.path\n etree.register_namespace('', TEI)\n etree.register_namespace('mith', MITH)\n self.doc.write(f, xml_declaration=True, encoding='utf-8', method='xml')",
"def save(self, save_path=None):\n if self._xml is None:\n raise IOError(\"There's nothing to save\")\n\n path = self._path_to_xml if save_path is None else save_path\n\n with open(path, 'w') as f:\n rough_string = Et.tostring(self._xml, 'utf-8')\n par = etree.XMLParser(remove_blank_text=True)\n elem = etree.XML(rough_string, parser=par)\n parsed = minidom.parseString(etree.tostring(elem))\n f.write(parsed.toprettyxml(indent=\" \"))",
"def save_profiles(self, fout, save_hybrid_meta=True):\n\n self._init_h5_out(fout, save_hybrid_meta=save_hybrid_meta)\n self._write_h5_out(fout, save_hybrid_meta=save_hybrid_meta)",
"def save(self):\n # TODO: save the file",
"def write_tessprofiles(tessprofiles: Dict, online=False):\n logger.info(f'Writing tessprofiles')\n profile = TESSPROFILE_ONLINE_PATH if online else TESSPROFILE_PATH\n with open(profile, 'w') as f:\n json.dump(tessprofiles, f, indent=4)",
"def save(self, filename):\n pass",
"def SaveXMLToDB(xmlFileName):",
"def save(self):\n with self.open(self.filename, 'wt') as fd:\n for node in self.elements:\n fd.write(node.text)",
"def save(self, fname):\n pass",
"def save(self):\n self.lock.acquire()\n try:\n self.xml.set(\"name\",self.name)\n self.xml.set(\"room\",self.room)\n self.xml.set(\"type\",self.type)\n self.xml.find(\"address\").text = \":\".join([str(x) for x in self.address])\n if self.pos is not None:\n self.xml.find(\"pos\").text = \" \".join([str(x) for x in self.pos])\n self.xml.find(\"icon\").text = self.icon\n \n finally:\n self.lock.release()\n \n self.house.save_devices()",
"def dumpProfiles(profiles):\n with open(userProfilesDir, \"w\") as outfile:\n outfile.writelines(json.dumps(profiles, indent=4))\n outfile.close()",
"def saveUsersProfiles_(self, plist):\r\n \r\n LogInfo(u\"Saving update profiles with PublicationDate %@\", plist[u\"PublicationDate\"])\r\n if not plist.writeToFile_atomically_(self.userUpdateProfilesPath, False):\r\n LogError(u\"Failed to write %@\", self.userUpdateProfilesPath)",
"def save_file(self):\n if self.select_path.text() != \"\":\n filepath = self.select_path.text()\n road_network = self.map_selection.currentText()\n if self.map_selection.currentText() == \"User Defined\":\n road_network = self.map_selection_user_defined.text()\n gen_xml = GenerateXML(filepath, road_network)\n gen_xml.main()\n # remember Road Network for future\n set_metadata(road_network_filepath=road_network)\n else:\n message = \"No export path was selected\"\n iface.messageBar().pushMessage(\"Warning\", message, level=Qgis.Warning)\n QgsMessageLog.logMessage(message, level=Qgis.Warning)",
"def saveSettings(self):\n self.userFiles.applyData()\n self.userPersonal.applyData()",
"def _save(self):\n file = open(\"settings.ini\", \"w\")\n self._parser.write(file)\n file.close()",
"def save_as(self, fname, base = None, indent = '', topns = True, namespaces = {}):\n with codecs.open(fname, \"w\", encoding=\"utf-8\") as outf:\n self.serialize_xml(outf.write, base=base, indent=indent, topns=topns, namespaces=namespaces)",
"def save(self, filename: str):\n dump(self, filename)",
"def save(self, pretty=True):\n self.endInstance()\n if pretty:\n _indent(self.root, whitespace=self._whiteSpace)\n tree = ET.ElementTree(self.root)\n tree.write(self.path, encoding=\"utf-8\", method='xml', xml_declaration=True)\n if self.logger:\n self.logger.info(\"Writing %s\", self.path)"
] | [
"0.77402055",
"0.7153504",
"0.68901503",
"0.65989304",
"0.6585187",
"0.6567334",
"0.65197617",
"0.64477056",
"0.6292227",
"0.62706214",
"0.62006843",
"0.61452764",
"0.6128959",
"0.610915",
"0.6092176",
"0.6072949",
"0.60326505",
"0.60216737",
"0.5989765",
"0.5988323",
"0.5958493",
"0.59531224",
"0.594053",
"0.5936368",
"0.5935535",
"0.5903034",
"0.5895587",
"0.5894408",
"0.5844191",
"0.58375555"
] | 0.76685476 | 1 |
Saves a single profile to an xml file. | def save_profile(file_path, profile):
try:
xSettings = XmlWriterSettings()
xSettings.Indent = True
with XmlWriter.Create(file_path, xSettings) as writer:
profile.save_to_xml(writer)
except Exception, ex:
MessageBox.Show("An error occured writing the settings file. The error was:\n\n" + ex.message, "Error saving settings file", MessageBoxButtons.OK, MessageBoxIcon.Error) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_profile(self):\n self.save()",
"def save_profiles(file_path, profiles, lastused=\"\"):\r\n try:\r\n xSettings = XmlWriterSettings()\r\n xSettings.Indent = True\r\n with XmlWriter.Create(file_path, xSettings) as writer:\r\n writer.WriteStartElement(\"Profiles\")\r\n if lastused:\r\n writer.WriteAttributeString(\"LastUsed\", \",\".join(lastused))\r\n for profile in profiles:\r\n profiles[profile].save_to_xml(writer)\r\n writer.WriteEndElement()\r\n except Exception, ex:\r\n MessageBox.Show(\"An error occured writing the settings file. The error was:\\n\\n\" + ex.message, \"Error saving settings file\", MessageBoxButtons.OK, MessageBoxIcon.Error)",
"def save_profile(self, dir):\n filename = \"profile.pkl\"\n with open(osp.join(dir, filename), \"wb\") as f:\n pickle.dump(self, f)",
"def save_to_xml(self, xwriter):\r\n\r\n xwriter.WriteStartElement(\"Profile\")\r\n xwriter.WriteAttributeString(\"Name\", self.Name)\r\n xwriter.WriteStartAttribute(\"Version\")\r\n xwriter.WriteValue(self.Version)\r\n xwriter.WriteEndAttribute()\r\n\r\n for var_name in self.__dict__:\r\n var_type = type(getattr(self, var_name))\r\n\r\n if var_type is str and var_name != \"Name\":\r\n self.write_string_to_xml(var_name, xwriter)\r\n\r\n elif var_type is bool:\r\n self.write_bool_to_xml(var_name, xwriter)\r\n\r\n elif var_type is dict:\r\n self.write_dict_to_xml(var_name, xwriter)\r\n\r\n elif var_type is list and var_name != \"ExcludeRules\":\r\n self.write_list_to_xml(var_name, xwriter)\r\n\r\n xwriter.WriteStartElement(\"ExcludeRules\")\r\n xwriter.WriteAttributeString(\"Operator\", self.ExcludeOperator)\r\n xwriter.WriteAttributeString(\"ExcludeMode\", self.ExcludeMode)\r\n for rule in self.ExcludeRules:\r\n if rule:\r\n rule.save_xml(xwriter)\r\n xwriter.WriteEndElement()\r\n \r\n xwriter.WriteEndElement()",
"def Save_Current_Profile(self):\r\n #name = tkFileDialog.asksaveasfilename()\r\n #if( name == \"\" ):\r\n # return\r\n #self.system.Save_Current_Profile(name)\r\n self.system.Save_Current_Profile()",
"def save_profile(sender, instance, **kwargs):\n instance.profile.save()",
"def saveProfile(self, request):\n return self._doProfile(request)",
"def saving_file(xml):\r\n\r\n xml_string = etree.tostring(xml)\r\n parsed = minidom.parseString(xml_string)\r\n with open(self.app_path + \"\\\\temp_\\\\\" + file_name + \".xml\", \"w\") as file:\r\n file.write(parsed.toprettyxml(indent=\" \"))",
"def save_user_profile(instance, **_):\n instance.profile.save()",
"def save_current_to_profile(self, profile_name, prof_desc='', prof_path='',\n self_contained=False):\n # Open the already existing profile\n new_profile = profile(profile_name, workdir=os.path.dirname(prof_path))\n\n # shortcut\n w3af_plugins = self._w3af_core.plugins\n\n # Save the enabled plugins\n for plugin_type in w3af_plugins.get_plugin_types():\n enabled_plugins = []\n for plugin_name in w3af_plugins.get_enabled_plugins(plugin_type):\n enabled_plugins.append(plugin_name)\n new_profile.set_enabled_plugins(plugin_type, enabled_plugins)\n\n # Save the plugin options\n for plugin_type in w3af_plugins.get_plugin_types():\n for plugin_name in w3af_plugins.get_enabled_plugins(plugin_type):\n plugin_options = w3af_plugins.get_plugin_options(plugin_type,\n plugin_name)\n if plugin_options:\n new_profile.set_plugin_options(plugin_type,\n plugin_name,\n plugin_options,\n self_contained=self_contained)\n\n # Save the profile targets\n targets = cf.cf.get('targets')\n if targets:\n new_profile.set_target(' , '.join(t.url_string for t in targets))\n\n # Save the misc and http settings\n misc_settings = MiscSettings()\n new_profile.set_misc_settings(misc_settings.get_options())\n new_profile.set_http_settings(\n self._w3af_core.uri_opener.settings.get_options())\n\n # Save the profile name and description\n new_profile.set_desc(prof_desc)\n new_profile.set_name(profile_name)\n\n # Save the profile to the file\n new_profile.save(profile_name)\n\n return new_profile",
"def save(self, save_dir):\n ProfileManager.save_data_to_disk(self.player_data, path.join(save_dir, self.player_name + '.yaml'))",
"def save_profile(self, request):\n return self.profile_service.do_profile(request)",
"def save(self, save_path=None):\n if self._xml is None:\n raise IOError(\"There's nothing to save\")\n\n path = self._path_to_xml if save_path is None else save_path\n\n with open(path, 'w') as f:\n rough_string = Et.tostring(self._xml, 'utf-8')\n par = etree.XMLParser(remove_blank_text=True)\n elem = etree.XML(rough_string, parser=par)\n parsed = minidom.parseString(etree.tostring(elem))\n f.write(parsed.toprettyxml(indent=\" \"))",
"def save(self, fname):\n pass",
"def save_profile():\n state = request.get_json()\n logger.debug(\"Roast Profile: %s\" % state)\n c = mongo.db[app.config['PROFILE_COLLECTION']]\n item = {'coffee': state.get('coffee'), 'roast': state.get('roast'),\n 'drop_temp': state.get('drop_temp'),\n 'brew_methods': state.get('brew_methods'),\n 'notes': state.get('notes'), 'datetime': now_time(),\n 'user': current_user.get_id()}\n _id = c.insert(item)\n return jsonify({'success': True})",
"def save_xml(self, filename):\n if \".xml\" not in filename:\n filename = filename + \".xml\"\n\n shutil.copyfile(self.env.model_file, filename)",
"def save_user_profile(sender, instance, **kwargs):\n instance.profile.save()",
"def save_user_profile(sender, instance, **kwargs):\n instance.profile.save()",
"def save_calibration_profile(self, filename: str) -> None:\n pass",
"def saveSessionToXML(self, filename):\r\n xmlStr = self.createXMLStr()\r\n \r\n #Write to the file\r\n #xml.dom.ext.PrettyPrint(doc, open(filename, 'w'))\r\n xmlFile = open(filename, 'w')\r\n xmlFile.write(xmlStr)\r\n xmlFile.close()",
"def saveFile(self, filename):\n ret = libxml2mod.xmlSaveFile(filename, self._o)\n return ret",
"def save(self, filename):\n pass",
"def save():",
"def apply(self):\r\n\r\n file_name = str(sum([ord(i) for i in self.ssid.get()]))\r\n\r\n def saving_file(xml):\r\n \"\"\" Save user profile in xml format to temp_ dir.\"\"\"\r\n\r\n xml_string = etree.tostring(xml)\r\n parsed = minidom.parseString(xml_string)\r\n with open(self.app_path + \"\\\\temp_\\\\\" + file_name + \".xml\", \"w\") as file:\r\n file.write(parsed.toprettyxml(indent=\" \"))\r\n\r\n parse_xml = etree.parse(os.path.dirname(os.path.realpath(__file__)) +\r\n \"/data/sampleProfile.xml\")\r\n\r\n # The below code will parse the sample xml file\r\n # and fill important details entered by the user.\r\n root_tree = parse_xml.getroot()\r\n root_tree[0].text = self.ssid.get()\r\n root_tree[1][0][0].text = self.ssid.get()\r\n root_tree[3].text = self.connection_mode.get().lower()\r\n security = root_tree[4][0]\r\n security[0][0].text = self.authentication.get()\r\n security[0][1].text = self.encryption.get()\r\n if self.authentication.get() != \"open\":\r\n etree.SubElement(security, \"sharedKey\")\r\n etree.SubElement(security[1], \"keyType\").text = \"passPhrase\"\r\n etree.SubElement(security[1], \"protected\").text = \"false\"\r\n etree.SubElement(security[1], \"keyMaterial\").text = self.password.get()\r\n\r\n # Save the xml file\r\n saving_file(root_tree)\r\n\r\n # Add profile to the system.\r\n temp_path = 'netsh wlan add profile filename=\"' + self.app_path + \"\\\\temp_\\\\\"\r\n output_ = subprocess.run(temp_path + file_name + '.xml\"', shell=True,\r\n capture_output=True, text=True)\r\n os.remove(self.app_path + \"\\\\temp_\\\\\" + file_name + \".xml\")\r\n\r\n # If unable to add profile.\r\n if output_.returncode != 0:\r\n message = \"Sorry, Unable to add profile.\\n(You entered wrong details \" \\\r\n \"or else you don't have admin rights.)\"\r\n image_ = \"error\"\r\n\r\n else:\r\n message = \"Profile added successfully (Please Refresh)\"\r\n image_ = \"warning\"\r\n\r\n MessageBox(self.parent, message, image_)",
"def save(self, filename=None):\n f = filename if filename else self.path\n etree.register_namespace('', TEI)\n etree.register_namespace('mith', MITH)\n self.doc.write(f, xml_declaration=True, encoding='utf-8', method='xml')",
"def write_tessprofiles(tessprofiles: Dict, online=False):\n logger.info(f'Writing tessprofiles')\n profile = TESSPROFILE_ONLINE_PATH if online else TESSPROFILE_PATH\n with open(profile, 'w') as f:\n json.dump(tessprofiles, f, indent=4)",
"def Save_xml(self, accounts):\n try:\n\n self.extension = \".xml\"\n\n colors.info(\"Saving as XML in {}{}\".format(self.file, self.extension))\n\n Main = ET.Element(\"SpotCheck\")\n\n SpotifyFree = ET.SubElement(Main, 'SpotifyFree')\n SpotifyPremium = ET.SubElement(Main, 'SpotifyPremium')\n PremiumFamily = ET.SubElement(Main, 'PremiumFamily')\n AdminPremiumFamily = ET.SubElement(Main, 'AdminPremiumFamily')\n BadAccounts = ET.SubElement(Main, 'BadAccounts')\n\n for account in accounts:\n if account.get(\"account_login\") == \"error\":\n temp = ET.SubElement(BadAccounts, \"account\")\n temp.set(\"Username\", account[\"Username\"])\n temp.set(\"Password\", account[\"Password\"])\n else:\n if account.get(\"AccountType\") == \"Spotify Free\":\n temp = ET.SubElement(SpotifyFree, \"account\")\n temp.set(\"Username\", account[\"Username\"])\n temp.set(\"Password\", account[\"Password\"])\n temp.set(\"Country\", account[\"Country\"])\n elif account.get(\"AccountType\") == \"Spotify Premium\":\n temp = ET.SubElement(SpotifyPremium, \"account\")\n temp.set(\"Username\", account[\"Username\"])\n temp.set(\"Password\", account[\"Password\"])\n temp.set(\"Country\", account[\"Country\"])\n elif account.get(\"AccountType\") == \"Premium Family\":\n if account.get(\"Admin\"):\n temp = ET.SubElement(AdminPremiumFamily, \"account\")\n temp.set(\"Username\", account[\"Username\"])\n temp.set(\"Password\", account[\"Password\"])\n temp.set(\"Country\", account[\"Country\"])\n else:\n temp = ET.SubElement(PremiumFamily, \"account\")\n temp.set(\"Username\", account[\"Username\"])\n temp.set(\"Password\", account[\"Password\"])\n temp.set(\"Country\", account[\"Country\"])\n XML = ET.tostring(Main)\n with open(self.file + self.extension, \"w\") as output_:\n output_.write(XML)\n colors.correct(\"Done! All saved successfully\")\n except Exception as e:\n colors.error(str(e))\n _exit(1)",
"def save(self, filename):\n with open(filename, \"w\") as fp:\n dump(self, fp)",
"def save(self):\n # TODO: save the file",
"def save_as(self, fname, base = None, indent = '', topns = True, namespaces = {}):\n with codecs.open(fname, \"w\", encoding=\"utf-8\") as outf:\n self.serialize_xml(outf.write, base=base, indent=indent, topns=topns, namespaces=namespaces)"
] | [
"0.720293",
"0.71852773",
"0.6845692",
"0.6742488",
"0.6558711",
"0.6505063",
"0.645329",
"0.64384377",
"0.6373711",
"0.63664985",
"0.6296266",
"0.6280068",
"0.62594324",
"0.6223609",
"0.6212394",
"0.6104943",
"0.60820746",
"0.60820746",
"0.6042483",
"0.60255325",
"0.60242546",
"0.60145825",
"0.5931385",
"0.59309375",
"0.59156597",
"0.5862243",
"0.5861292",
"0.5856361",
"0.5822992",
"0.5806487"
] | 0.8002179 | 0 |
This function returns the softmax derivative value for the given input | def softmax_derivative(x):
der = derivative(softmax,x,dx=1e-9)
return der | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def softmax_derivative(Z):\n\treturn None",
"def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0) # only difference",
"def softmax(x):\r\n e_x = np.exp(x - np.max(x))\r\n return e_x / e_x.sum(axis=0) # only difference\r",
"def softmax(x):\r\n e_x = np.exp(x - np.expand_dims(np.max(x, axis=-1), axis=-1))\r\n return e_x / np.expand_dims(e_x.sum(axis=-1), axis=-1) # only difference\r",
"def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0) # only difference",
"def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0) # only difference",
"def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0) # only difference",
"def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0) # only difference",
"def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0) # only difference",
"def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0)",
"def softmax(x):\n \"\"\"\"\"\"\n return exp(x) / sum(exp(x), axis=0)",
"def softmax(x): \n e_x = np.exp(x - np.max(x)) \n return e_x / e_x.sum()",
"def softmax(x):\n #pass # TODO: Compute and return softmax(x)\n return np.exp(x) / np.sum(np.exp(x), axis=0)",
"def softmax(X):\n num = np.exp(X)\n den = np.sum(np.exp(X))\n return num / den",
"def softmax(x):\n x_exp = (x - x.max(1)[0].view(-1, 1)).exp()\n return x_exp / x_exp.sum(1).view(-1, 1)",
"def softmax(x):\n return np.exp(x) / np.sum(np.exp(x), axis=0)\n # return ( x / np.sum(x, axis=0) )",
"def softmax(x):\n e_x = np.exp(x - np.max(x))\n return old_div(e_x, e_x.sum())",
"def softmax(x):\r\n e_x = np.exp(x - np.max(x))\r\n return e_x / e_x.sum()",
"def softmax(x):\r\n e_x = np.exp(x - np.max(x))\r\n return e_x / e_x.sum()",
"def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum()",
"def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0)",
"def softmax(x):\n return np.exp(x)/np.sum(np.exp(x),axis=0)",
"def softmax(x):\n return np.exp(x)/np.sum(np.exp(x),axis=0)",
"def softmax(x):\n pass # TODO: Compute and return softmax(x)\n\n exp_x = np.exp(x)\n sum_x = np.sum(exp_x, axis=0)\n softmax = exp_x/sum_x\n \n return softmax",
"def softmax_gradient(softmax_result):\r\n\r\n s = softmax_result.reshape(-1, 1)\r\n return np.diagflat(s) - np.dot(s, s.T)",
"def softmax(x):\n num = np.exp(x)\n den = np.sum(np.exp(x), axis=1)\n output = (num.T / den).T\n return output",
"def softmax(x):\n\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum()",
"def softmax(x): \n return np.exp(x) / np.sum(np.exp(x), axis=0)",
"def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum()",
"def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum()"
] | [
"0.78823423",
"0.7861486",
"0.7818649",
"0.78063107",
"0.7801125",
"0.7801125",
"0.7801125",
"0.7801125",
"0.77747434",
"0.7771414",
"0.7755482",
"0.7754241",
"0.76962405",
"0.7687193",
"0.7682934",
"0.76776224",
"0.76399696",
"0.7632133",
"0.7632133",
"0.76276577",
"0.7626131",
"0.7624954",
"0.7624954",
"0.76234233",
"0.7621945",
"0.7616067",
"0.7615478",
"0.760707",
"0.759843",
"0.759843"
] | 0.8873611 | 0 |
AppendRows(numRows=1) > bool Append additional rows at the end of the table. | def AppendRows(self, numRows=1): # real signature unknown; restored from __doc__
return (self.GetNumberRows() + numRows) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def AppendRows(self, numRows = 1):\n for i in range(numRows):\n self.data = numpy.vstack((self.data,\n numpy.array([''] * self.data.shape[1], dtype = numpy.object),\n ))\n self.rowmask = numpy.append(self.rowmask, numpy.zeros((numRows,), dtype = numpy.bool))\n\n msg = wx.grid.GridTableMessage(self,\n wx.grid.GRIDTABLE_NOTIFY_ROWS_APPENDED,\n numRows)\n #if not self._batchcount:\n # self.GetView().ProcessTableMessage(msg)\n self.GetView().ProcessTableMessage(msg)\n return True",
"def AppendRows(self, numRows=1): # real signature unknown; restored from __doc__\n return (self.GetNumberRows() + numRows)",
"def AppendRows(self, numRows=1): # real signature unknown; restored from __doc__\n return (self.GetNumberRows() + numRows)",
"def append_rows(self, rows):\n for row in rows:\n self.append_row(row)",
"def add_rows(self):\n for row in self.rows:\n self.table.add_row(row)",
"def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False",
"def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False",
"def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False",
"def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False",
"def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False",
"def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False",
"def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False",
"def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False",
"def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False",
"def appendRow(self, contents = None):\n\n\t\t\t\t#Find the last row\n\t\t\t\trow = len(tuple(self.thing.iter_rows())) + 1\n\n\t\t\t\t#Write to cells\n\t\t\t\tif ((contents != None) and (len(contents) != 0)):\n\t\t\t\t\tfor column, item in enumerate(contents):\n\t\t\t\t\t\tself.setCell(row, column + 1, item)\n\t\t\t\telse:\n\t\t\t\t\tself.setCell(row, 1, \" \")",
"def Append(self, row):\n self._rows.append(row)",
"def append_row(self, row=None):\n self.set_row(self.size, row)",
"def add_section(self) -> None:\n\n if self.rows:\n self.rows[-1].end_section = True",
"def _fcn_add_score_row(self):\n # Increase length :\n self._scoreTable.setRowCount(self._scoreTable.rowCount() + 1)",
"def add_row(self, row):\n ...",
"def append_row(self, values):\n self.range(self._op.max_row + 1, 1, len(values)).values = values",
"def append(self, row_or_table):\n row, table, inc = row_or_table, row_or_table, 1\n if not row:\n return\n if isinstance(table, Table):\n row, inc = table.get_columns(*self.column_labels), table.num_rows\n for i, column in enumerate(self._columns):\n self._columns[column] = np.append(self[column], row[i])\n self._num_rows = self.num_rows + inc\n return self",
"def __multi_append_row(self, data, path):\n #Write the row to the data page file ('a' positions the stream at the end of the file).\n temp_current_row = self.current_row\n with open(path, 'a') as f:\n f.write(data)\n #if self.__check_write_success_insert(new_data, path):\n # return True\n #else:\n # print('Data was corrupted at row: ' + temp_current_row)\n # return False\n return True",
"def add_row(self):\n if len(self._grid) == 0 or len(self._grid[0]) == 1:\n self._grid.append([None])\n elif len(self._grid[0]) > 1:\n row = [None for _ in range(len(self._grid[0]))]\n self._grid.append(row)\n return True",
"def nextRow(self) -> bool:\n if self.hasNextRow():\n self.__currentRow += 1\n return True\n\n return False",
"def _add_rows(df, num, alloc_id, constraint, stuff=False):\n if num == 0:\n return df.copy()\n\n to_add = np.random.choice(df.index.values, num)\n rows_to_add = df.loc[to_add]\n\n # update the new rows' index\n max_idx = df.index.max()\n rows_to_add.index = range(max_idx + 1, max_idx + len(rows_to_add) + 1)\n\n # allocate rows to containers\n _allocate_rows(rows_to_add, alloc_id, constraint, stuff)\n\n return pd.concat([df, rows_to_add])",
"def add_row(\n self,\n *renderables: Optional[\"RenderableType\"],\n style: Optional[StyleType] = None,\n end_section: bool = False,\n ) -> None:\n\n def add_cell(column: Column, renderable: \"RenderableType\") -> None:\n column._cells.append(renderable)\n\n cell_renderables: List[Optional[\"RenderableType\"]] = list(renderables)\n\n columns = self.columns\n if len(cell_renderables) < len(columns):\n cell_renderables = [\n *cell_renderables,\n *[None] * (len(columns) - len(cell_renderables)),\n ]\n for index, renderable in enumerate(cell_renderables):\n if index == len(columns):\n column = Column(_index=index)\n for _ in self.rows:\n add_cell(column, Text(\"\"))\n self.columns.append(column)\n else:\n column = columns[index]\n if renderable is None:\n add_cell(column, \"\")\n elif is_renderable(renderable):\n add_cell(column, renderable)\n else:\n raise errors.NotRenderableError(\n f\"unable to render {type(renderable).__name__}; a string or other renderable object is required\"\n )\n self.rows.append(Row(style=style, end_section=end_section))",
"def num_rows(self):\n return (len(self.rows))",
"def add_rows(data, nrows, starting_index=None, accounting_column=None):\n logger.debug('start: adding {} rows in transition model'.format(nrows))\n if nrows == 0:\n return data, _empty_index(), _empty_index()\n\n if not starting_index:\n starting_index = data.index.values.max() + 1\n\n new_rows = sample_rows(nrows, data, accounting_column=accounting_column)\n copied_index = new_rows.index\n added_index = pd.Index(np.arange(\n starting_index, starting_index + len(new_rows.index), dtype=np.int))\n new_rows.index = added_index\n\n logger.debug(\n 'finish: added {} rows in transition model'.format(len(new_rows)))\n return pd.concat([data, new_rows]), added_index, copied_index",
"def _add_row(self, index):\n if index is None:\n index = self.size\n\n if index < self.size:\n raise ValueError(f\"Duplicate row index: {index}\")\n\n for empty in range(self.size, index):\n self._add_row(empty)\n\n self._data.append([None] * len(self._columns))\n\n return self.size - 1"
] | [
"0.82234913",
"0.7680864",
"0.7680864",
"0.67422974",
"0.66286564",
"0.65267634",
"0.65267634",
"0.65267634",
"0.65267634",
"0.65267634",
"0.65267634",
"0.65267634",
"0.65267634",
"0.65267634",
"0.62137854",
"0.60128397",
"0.60119236",
"0.5857733",
"0.5790014",
"0.5778878",
"0.5717909",
"0.56991786",
"0.5677284",
"0.5644456",
"0.5630746",
"0.55628234",
"0.5558119",
"0.5548274",
"0.55403",
"0.5494752"
] | 0.7819538 | 1 |
DeleteRows(pos=0, numRows=1) > bool Delete rows from the table. | def DeleteRows(self, pos=0, numRows=1): # real signature unknown; restored from __doc__
if self.data is None or len(self.data) == 0:
return False
for rowNum in range(0,numRows):
self.data.remove(self.data[numRows-1-pos-rowNum])
gridView=self.GetView()
gridView.BeginBatch()
deleteMsg=wx.grid.GridTableMessage(self,wx.grid.GRIDTABLE_NOTIFY_ROWS_DELETED,pos,numRows)
gridView.ProcessTableMessage(deleteMsg)
gridView.EndBatch()
getValueMsg=wx.grid.GridTableMessage(self,wx.grid.GRIDTABLE_REQUEST_VIEW_GET_VALUES)
gridView.ProcessTableMessage(getValueMsg)
# if self.onGridValueChanged:
# self.onGridValueChanged()
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def DeleteRows(self, pos=0, numRows=1): # real signature unknown; restored from __doc__\n if self.data is None or len(self.data) == 0:\n return False\n for rowNum in range(0, numRows):\n self.data.remove(self.data[numRows - 1 - pos - rowNum])\n gridView = self.GetView()\n gridView.BeginBatch()\n deleteMsg = wx.grid.GridTableMessage(self, wx.grid.GRIDTABLE_NOTIFY_ROWS_DELETED, pos, numRows)\n gridView.ProcessTableMessage(deleteMsg)\n gridView.EndBatch()\n getValueMsg = wx.grid.GridTableMessage(self, wx.grid.GRIDTABLE_REQUEST_VIEW_GET_VALUES)\n gridView.ProcessTableMessage(getValueMsg)\n # if self.onGridValueChanged:\n # self.onGridValueChanged()\n return True",
"def DeleteRows(self, pos=0, numRows=1): # real signature unknown; restored from __doc__\n if self.data is None or len(self.data) == 0:\n return False\n for rowNum in range(0, numRows):\n self.data.remove(self.data[numRows - 1 - pos - rowNum])\n gridView = self.GetView()\n gridView.BeginBatch()\n deleteMsg = wx.grid.GridTableMessage(self, wx.grid.GRIDTABLE_NOTIFY_ROWS_DELETED, pos, numRows)\n gridView.ProcessTableMessage(deleteMsg)\n gridView.EndBatch()\n getValueMsg = wx.grid.GridTableMessage(self, wx.grid.GRIDTABLE_REQUEST_VIEW_GET_VALUES)\n gridView.ProcessTableMessage(getValueMsg)\n # if self.onGridValueChanged:\n # self.onGridValueChanged()\n return True",
"def Delete(self, rows):\n query=pgQuery(self.tableSpecs.tabName, self.tableSpecs.GetCursor())\n allWhere=[]\n for row in rows:\n wh=[]\n for colname in self.tableSpecs.keyCols:\n wh.append(\"%s=%s\" % (quoteIdent(colname), quoteValue(self.rows[row][colname])))\n allWhere.append(\"(%s)\" % \" AND \".join(wh))\n query.AddWhere(\"\\n OR \".join(allWhere))\n rc=query.Delete()\n \n self.grid.Freeze()\n self.grid.BeginBatch()\n for row in rows:\n self.grid.DeleteRows(row, 1, True)\n\n# msg=wx.grid.GridTableMessage(self, wx.grid.GRIDTABLE_NOTIFY_ROWS_APPENDED)\n\n self.grid.EndBatch()\n self.grid.ForceRefresh()\n self.grid.Thaw()\n return rc",
"def removeRows(self, position, rows=1, index:QtCore.QModelIndex=QtCore.QModelIndex()):\n self.beginRemoveRows(QtCore.QModelIndex(), position, position + rows - 1)\n\n del self.__checknode.params[position:position + rows]\n self.__checknode._commit()\n\n self.endRemoveRows()\n return True",
"def removeRows(self, position, rows=1, index:QtCore.QModelIndex=QtCore.QModelIndex()):\n self.beginRemoveRows(QtCore.QModelIndex(), position, position + rows - 1)\n\n del self.__checknode.params[position:position + rows]\n self.__checknode._commit()\n\n self.endRemoveRows()\n return True",
"def removeRows(self, position, rows=1, index=QModelIndex()):\n self.beginRemoveRows(QModelIndex(), position, position + rows - 1)\n\n del self.Grains[position:position+rows]\n\n self.endRemoveRows()\n self.dataChanged.emit(index, index) \n return True",
"def deleteRecords(table: db.Table, colIx: int, addrMap: ghidra.program.database.map.AddressMap, start: ghidra.program.model.address.Address, end: ghidra.program.model.address.Address, filter: ghidra.program.database.util.RecordFilter) -> bool:\n ...",
"def delete_rows(self, table_model, row, count):\n self.undostack.push(DeleteRowCommand(table_model, row, table_model.get_rows(row, count=count)))",
"def remove_rows(self, rows, regroup=False):\n self.table.remove_rows(np.atleast_1d(rows))\n if regroup:\n for col in ['setup', 'calib', 'calibbit', 'comb_id', 'bkg_id']:\n if col in self.keys():\n del self.table[col]\n self.set_configurations()\n self.set_calibration_groups()\n self.set_combination_groups()",
"def delete(self, table, **kwargs):\n cols, values = self.get_cols_and_values(table, kwargs)\n where_clause = self.get_where_clause_pattern(cols)\n sql = \"DELETE FROM %s %s;\" % (table, where_clause)\n self.c.execute(sql, values)\n return self.c.rowcount # number of rows deleted",
"def delete(self, predicate=lambda row: True):\n self.rows = [row for row in self.rows if not predicate(row)]",
"def deleteSelectedRows(self):\n # Get unique row number (user can select multiple cells in one row)\n uniqRows = set([idx.row() for idx in self.view.selectedIndexes()])\n # It's necessary to remove rows from the end, otherwise indexes become\n # outdated and useless.\n revRovs = sorted(list(uniqRows), reverse=True)\n for row in revRovs:\n self.model.removeRow(row)",
"def test_delete_rows(self, source):\n widget = self.makeTableWidget()\n widget.request = TestRequest(rows=[1, 3, 5])\n widget.delete_rows()\n\n source.delete_rows.assert_called_once_with([1, 3, 5])",
"def deleteRecords(table: db.Table, addrMap: ghidra.program.database.map.AddressMap, start: ghidra.program.model.address.Address, end: ghidra.program.model.address.Address) -> bool:\n ...",
"def del_row(self, row_index):\n ...",
"def delete(self, predicate: WhereClause = lambda row: True) -> None:\n self.rows = [row for row in self.rows if not predicate(row)]",
"def row_delete(self,sql):\n self.connect.execute(sql)\n self.commit()",
"def AppendRows(self, numRows = 1):\n for i in range(numRows):\n self.data = numpy.vstack((self.data,\n numpy.array([''] * self.data.shape[1], dtype = numpy.object),\n ))\n self.rowmask = numpy.append(self.rowmask, numpy.zeros((numRows,), dtype = numpy.bool))\n\n msg = wx.grid.GridTableMessage(self,\n wx.grid.GRIDTABLE_NOTIFY_ROWS_APPENDED,\n numRows)\n #if not self._batchcount:\n # self.GetView().ProcessTableMessage(msg)\n self.GetView().ProcessTableMessage(msg)\n return True",
"def delete_all_rows(table_widget: QTableWidget):\n row_count = table_widget.rowCount()\n table_widget.setSelectionMode(QAbstractItemView.ExtendedSelection)\n setSel(list(range(row_count)), table_widget)\n remove_row_all_table(table_widget)\n table_widget.setSelectionMode(QAbstractItemView.ExtendedSelection)",
"def remove_rows(data, nrows, accounting_column=None):\n logger.debug('start: removing {} rows in transition model'.format(nrows))\n nrows = abs(nrows) # in case a negative number came in\n unit_check = data[accounting_column].sum() if accounting_column else len(data)\n if nrows == 0:\n return data, _empty_index()\n elif nrows > unit_check:\n raise ValueError('Number of rows to remove exceeds number of records in table.')\n\n remove_rows = sample_rows(nrows, data, accounting_column=accounting_column, replace=False)\n remove_index = remove_rows.index\n\n logger.debug('finish: removed {} rows in transition model'.format(nrows))\n return data.loc[data.index.difference(remove_index)], remove_index",
"def delete_row(self, pos):\n del self._grid[pos]",
"def delete(self, table, condition='1==1'):\n return True",
"def _deleteRows(self, startIndex, endIndex, gridID):\n body = {\n \"requests\": [\n { \"deleteDimension\": {\n \"range\": {\n \"sheetId\": gridID,\n \"dimension\": \"ROWS\",\n \"startIndex\": startIndex,\n \"endIndex\": endIndex,\n }\n }},\n ],\n \"includeSpreadsheetInResponse\": False,\n \"responseIncludeGridData\": False,\n }\n\n result = self.service.spreadsheets().batchUpdate(\n spreadsheetId=self.SPREADSHEETID, body=body).execute()\n return result",
"def rpc_database_delete_rows_by_id(self, row_ids):\n\t\ttable = DATABASE_TABLE_OBJECTS.get(self.path.split('/')[-3])\n\t\tassert table\n\t\tdeleted_rows = []\n\t\tsession = db_manager.Session()\n\t\ttry:\n\t\t\tfor row_id in row_ids:\n\t\t\t\trow = db_manager.get_row_by_id(session, table, row_id)\n\t\t\t\tif not row:\n\t\t\t\t\tcontinue\n\t\t\t\tsession.delete(row)\n\t\t\t\tdeleted_rows.append(row_id)\n\t\t\tsession.commit()\n\t\tfinally:\n\t\t\tsession.close()\n\t\treturn deleted_rows",
"async def delete_records(self, table_name: str, conditions_list=None):\n if conditions_list:\n conditions = LemkPgUtils.get_conditions(conditions_list)\n query = f\"\"\"DELETE FROM {table_name} WHERE {\" \".join(conditions)}\"\"\"\n else:\n query = f\"\"\"DELETE FROM {table_name}\"\"\"\n await LemkPgUtils.execute_query(self.dsn, query)\n return True",
"def delete_rows(self, row_numbers):\n\n for row in row_numbers:\n if row not in self.row_cache:\n print(\"Not deleting unknown row %s\" % row)\n continue\n\n self[self.row_cache[row]] = None\n del self.row_cache[row]\n print(\"Deleted row %s\" % row)\n\n if self.persistent:\n self.write_cache()",
"def delete_row(self, row_id):\n data = self._run_query(\n f\"\"\"SELECT id\n FROM {self.table}\n WHERE id = {row_id}\n \"\"\")\n if data:\n self._run_query(\n f\"\"\"DELETE\n FROM {self.table}\n WHERE id = {row_id}\n \"\"\")\n exit_code = 0\n else:\n exit_code = 1\n\n return exit_code",
"def delete(self, query_conditions):\n matched_queries = self.__return_query('query', query_conditions)\n if matched_queries == None:\n raise Exception('Sorry, your query did not match any data.')\n else:\n #Loop through and update each row where the query returned true\n for found_row in matched_queries:\n row_id = found_row['row_id']\n self.delete_row(row_id)",
"def ok_to_delete_row(self, row):\n if self.is_new_row(row):\n return False, _('Unable to delete new row')\n elif row == 0:\n return False, _('Unable to delete sofa id row')\n elif self.new_is_dirty:\n return (False, _(\n 'Cannot delete a row while in the middle of making a new one'))\n else:\n return True, None",
"def remove_row_all_table(table_widget):\n table_widget: QTableWidget\n selected_rows = table_widget.selectionModel().selectedRows()\n count = 0\n if selected_rows:\n row_indices = []\n for row_index in selected_rows:\n row_indices.append(row_index.row())\n row_indices.sort(key=lambda x: -1 * x)\n for row in row_indices: # sorted in descending order\n table_widget.removeRow(row)\n count += 1\n return count"
] | [
"0.8509247",
"0.8509247",
"0.7138309",
"0.6614276",
"0.6614276",
"0.6300265",
"0.6087969",
"0.59195846",
"0.59115934",
"0.5869983",
"0.58482474",
"0.57762325",
"0.57336473",
"0.570933",
"0.56701875",
"0.56412894",
"0.56196254",
"0.5617777",
"0.56165785",
"0.56080425",
"0.5603893",
"0.55413026",
"0.55400604",
"0.5479219",
"0.5467647",
"0.5454702",
"0.53956205",
"0.53947675",
"0.5383723",
"0.5338224"
] | 0.85211456 | 0 |
DeleteRows(pos=0, numRows=1) -> bool Delete rows from the table. | def DeleteRows(self, pos=0, numRows=1): # real signature unknown; restored from __doc__
if self.data is None or len(self.data) == 0:
return False
for rowNum in range(0,numRows):
self.data.remove(self.data[numRows-1-pos-rowNum])
gridView=self.GetView()
gridView.BeginBatch()
deleteMsg=wx.grid.GridTableMessage(self,wx.grid.GRIDTABLE_NOTIFY_ROWS_DELETED,pos,numRows)
gridView.ProcessTableMessage(deleteMsg)
gridView.EndBatch()
getValueMsg=wx.grid.GridTableMessage(self,wx.grid.GRIDTABLE_REQUEST_VIEW_GET_VALUES)
gridView.ProcessTableMessage(getValueMsg)
# if self.onGridValueChanged:
# self.onGridValueChanged()
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def DeleteRows(self, pos=0, numRows=1): # real signature unknown; restored from __doc__\n if self.data is None or len(self.data) == 0:\n return False\n for rowNum in range(0, numRows):\n self.data.remove(self.data[numRows - 1 - pos - rowNum])\n gridView = self.GetView()\n gridView.BeginBatch()\n deleteMsg = wx.grid.GridTableMessage(self, wx.grid.GRIDTABLE_NOTIFY_ROWS_DELETED, pos, numRows)\n gridView.ProcessTableMessage(deleteMsg)\n gridView.EndBatch()\n getValueMsg = wx.grid.GridTableMessage(self, wx.grid.GRIDTABLE_REQUEST_VIEW_GET_VALUES)\n gridView.ProcessTableMessage(getValueMsg)\n # if self.onGridValueChanged:\n # self.onGridValueChanged()\n return True",
"def DeleteRows(self, pos=0, numRows=1): # real signature unknown; restored from __doc__\n if self.data is None or len(self.data) == 0:\n return False\n for rowNum in range(0, numRows):\n self.data.remove(self.data[numRows - 1 - pos - rowNum])\n gridView = self.GetView()\n gridView.BeginBatch()\n deleteMsg = wx.grid.GridTableMessage(self, wx.grid.GRIDTABLE_NOTIFY_ROWS_DELETED, pos, numRows)\n gridView.ProcessTableMessage(deleteMsg)\n gridView.EndBatch()\n getValueMsg = wx.grid.GridTableMessage(self, wx.grid.GRIDTABLE_REQUEST_VIEW_GET_VALUES)\n gridView.ProcessTableMessage(getValueMsg)\n # if self.onGridValueChanged:\n # self.onGridValueChanged()\n return True",
"def Delete(self, rows):\n query=pgQuery(self.tableSpecs.tabName, self.tableSpecs.GetCursor())\n allWhere=[]\n for row in rows:\n wh=[]\n for colname in self.tableSpecs.keyCols:\n wh.append(\"%s=%s\" % (quoteIdent(colname), quoteValue(self.rows[row][colname])))\n allWhere.append(\"(%s)\" % \" AND \".join(wh))\n query.AddWhere(\"\\n OR \".join(allWhere))\n rc=query.Delete()\n \n self.grid.Freeze()\n self.grid.BeginBatch()\n for row in rows:\n self.grid.DeleteRows(row, 1, True)\n\n# msg=wx.grid.GridTableMessage(self, wx.grid.GRIDTABLE_NOTIFY_ROWS_APPENDED)\n\n self.grid.EndBatch()\n self.grid.ForceRefresh()\n self.grid.Thaw()\n return rc",
"def removeRows(self, position, rows=1, index:QtCore.QModelIndex=QtCore.QModelIndex()):\n self.beginRemoveRows(QtCore.QModelIndex(), position, position + rows - 1)\n\n del self.__checknode.params[position:position + rows]\n self.__checknode._commit()\n\n self.endRemoveRows()\n return True",
"def removeRows(self, position, rows=1, index:QtCore.QModelIndex=QtCore.QModelIndex()):\n self.beginRemoveRows(QtCore.QModelIndex(), position, position + rows - 1)\n\n del self.__checknode.params[position:position + rows]\n self.__checknode._commit()\n\n self.endRemoveRows()\n return True",
"def removeRows(self, position, rows=1, index=QModelIndex()):\n self.beginRemoveRows(QModelIndex(), position, position + rows - 1)\n\n del self.Grains[position:position+rows]\n\n self.endRemoveRows()\n self.dataChanged.emit(index, index) \n return True",
"def deleteRecords(table: db.Table, colIx: int, addrMap: ghidra.program.database.map.AddressMap, start: ghidra.program.model.address.Address, end: ghidra.program.model.address.Address, filter: ghidra.program.database.util.RecordFilter) -> bool:\n ...",
"def delete_rows(self, table_model, row, count):\n self.undostack.push(DeleteRowCommand(table_model, row, table_model.get_rows(row, count=count)))",
"def remove_rows(self, rows, regroup=False):\n self.table.remove_rows(np.atleast_1d(rows))\n if regroup:\n for col in ['setup', 'calib', 'calibbit', 'comb_id', 'bkg_id']:\n if col in self.keys():\n del self.table[col]\n self.set_configurations()\n self.set_calibration_groups()\n self.set_combination_groups()",
"def delete(self, table, **kwargs):\n cols, values = self.get_cols_and_values(table, kwargs)\n where_clause = self.get_where_clause_pattern(cols)\n sql = \"DELETE FROM %s %s;\" % (table, where_clause)\n self.c.execute(sql, values)\n return self.c.rowcount # number of rows deleted",
"def delete(self, predicate=lambda row: True):\n self.rows = [row for row in self.rows if not predicate(row)]",
"def deleteSelectedRows(self):\n # Get unique row number (user can select multiple cells in one row)\n uniqRows = set([idx.row() for idx in self.view.selectedIndexes()])\n # It's necessary to remove rows from the end, otherwise indexes become\n # outdated and useless.\n revRovs = sorted(list(uniqRows), reverse=True)\n for row in revRovs:\n self.model.removeRow(row)",
"def test_delete_rows(self, source):\n widget = self.makeTableWidget()\n widget.request = TestRequest(rows=[1, 3, 5])\n widget.delete_rows()\n\n source.delete_rows.assert_called_once_with([1, 3, 5])",
"def deleteRecords(table: db.Table, addrMap: ghidra.program.database.map.AddressMap, start: ghidra.program.model.address.Address, end: ghidra.program.model.address.Address) -> bool:\n ...",
"def del_row(self, row_index):\n ...",
"def delete(self, predicate: WhereClause = lambda row: True) -> None:\n self.rows = [row for row in self.rows if not predicate(row)]",
"def row_delete(self,sql):\n self.connect.execute(sql)\n self.commit()",
"def AppendRows(self, numRows = 1):\n for i in range(numRows):\n self.data = numpy.vstack((self.data,\n numpy.array([''] * self.data.shape[1], dtype = numpy.object),\n ))\n self.rowmask = numpy.append(self.rowmask, numpy.zeros((numRows,), dtype = numpy.bool))\n\n msg = wx.grid.GridTableMessage(self,\n wx.grid.GRIDTABLE_NOTIFY_ROWS_APPENDED,\n numRows)\n #if not self._batchcount:\n # self.GetView().ProcessTableMessage(msg)\n self.GetView().ProcessTableMessage(msg)\n return True",
"def delete_all_rows(table_widget: QTableWidget):\n row_count = table_widget.rowCount()\n table_widget.setSelectionMode(QAbstractItemView.ExtendedSelection)\n setSel(list(range(row_count)), table_widget)\n remove_row_all_table(table_widget)\n table_widget.setSelectionMode(QAbstractItemView.ExtendedSelection)",
"def remove_rows(data, nrows, accounting_column=None):\n logger.debug('start: removing {} rows in transition model'.format(nrows))\n nrows = abs(nrows) # in case a negative number came in\n unit_check = data[accounting_column].sum() if accounting_column else len(data)\n if nrows == 0:\n return data, _empty_index()\n elif nrows > unit_check:\n raise ValueError('Number of rows to remove exceeds number of records in table.')\n\n remove_rows = sample_rows(nrows, data, accounting_column=accounting_column, replace=False)\n remove_index = remove_rows.index\n\n logger.debug('finish: removed {} rows in transition model'.format(nrows))\n return data.loc[data.index.difference(remove_index)], remove_index",
"def delete_row(self, pos):\n del self._grid[pos]",
"def delete(self, table, condition='1==1'):\n return True",
"def _deleteRows(self, startIndex, endIndex, gridID):\n body = {\n \"requests\": [\n { \"deleteDimension\": {\n \"range\": {\n \"sheetId\": gridID,\n \"dimension\": \"ROWS\",\n \"startIndex\": startIndex,\n \"endIndex\": endIndex,\n }\n }},\n ],\n \"includeSpreadsheetInResponse\": False,\n \"responseIncludeGridData\": False,\n }\n\n result = self.service.spreadsheets().batchUpdate(\n spreadsheetId=self.SPREADSHEETID, body=body).execute()\n return result",
"def rpc_database_delete_rows_by_id(self, row_ids):\n\t\ttable = DATABASE_TABLE_OBJECTS.get(self.path.split('/')[-3])\n\t\tassert table\n\t\tdeleted_rows = []\n\t\tsession = db_manager.Session()\n\t\ttry:\n\t\t\tfor row_id in row_ids:\n\t\t\t\trow = db_manager.get_row_by_id(session, table, row_id)\n\t\t\t\tif not row:\n\t\t\t\t\tcontinue\n\t\t\t\tsession.delete(row)\n\t\t\t\tdeleted_rows.append(row_id)\n\t\t\tsession.commit()\n\t\tfinally:\n\t\t\tsession.close()\n\t\treturn deleted_rows",
"async def delete_records(self, table_name: str, conditions_list=None):\n if conditions_list:\n conditions = LemkPgUtils.get_conditions(conditions_list)\n query = f\"\"\"DELETE FROM {table_name} WHERE {\" \".join(conditions)}\"\"\"\n else:\n query = f\"\"\"DELETE FROM {table_name}\"\"\"\n await LemkPgUtils.execute_query(self.dsn, query)\n return True",
"def delete_rows(self, row_numbers):\n\n for row in row_numbers:\n if row not in self.row_cache:\n print(\"Not deleting unknown row %s\" % row)\n continue\n\n self[self.row_cache[row]] = None\n del self.row_cache[row]\n print(\"Deleted row %s\" % row)\n\n if self.persistent:\n self.write_cache()",
"def delete_row(self, row_id):\n data = self._run_query(\n f\"\"\"SELECT id\n FROM {self.table}\n WHERE id = {row_id}\n \"\"\")\n if data:\n self._run_query(\n f\"\"\"DELETE\n FROM {self.table}\n WHERE id = {row_id}\n \"\"\")\n exit_code = 0\n else:\n exit_code = 1\n\n return exit_code",
"def delete(self, query_conditions):\n matched_queries = self.__return_query('query', query_conditions)\n if matched_queries == None:\n raise Exception('Sorry, your query did not match any data.')\n else:\n #Loop through and update each row where the query returned true\n for found_row in matched_queries:\n row_id = found_row['row_id']\n self.delete_row(row_id)",
"def ok_to_delete_row(self, row):\n if self.is_new_row(row):\n return False, _('Unable to delete new row')\n elif row == 0:\n return False, _('Unable to delete sofa id row')\n elif self.new_is_dirty:\n return (False, _(\n 'Cannot delete a row while in the middle of making a new one'))\n else:\n return True, None",
"def remove_row_all_table(table_widget):\n table_widget: QTableWidget\n selected_rows = table_widget.selectionModel().selectedRows()\n count = 0\n if selected_rows:\n row_indices = []\n for row_index in selected_rows:\n row_indices.append(row_index.row())\n row_indices.sort(key=lambda x: -1 * x)\n for row in row_indices: # sorted in descending order\n table_widget.removeRow(row)\n count += 1\n return count"
] | [
"0.8509247",
"0.8509247",
"0.7138309",
"0.6614276",
"0.6614276",
"0.6300265",
"0.6087969",
"0.59195846",
"0.59115934",
"0.5869983",
"0.58482474",
"0.57762325",
"0.57336473",
"0.570933",
"0.56701875",
"0.56412894",
"0.56196254",
"0.5617777",
"0.56165785",
"0.56080425",
"0.5603893",
"0.55413026",
"0.55400604",
"0.5479219",
"0.5467647",
"0.5454702",
"0.53956205",
"0.53947675",
"0.5383723",
"0.5338224"
] | 0.85211456 | 1 |
AppendCols(numCols=1) -> bool Exactly the same as AppendRows() but for columns. | def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_new_cols(cat, prefix=\"\", floatcols=None, boolcols=None):\n\t\n\tif floatcols != None:\n\t\tfor col in floatcols:\n\t\t\tcat.add_column(astropy.table.MaskedColumn(name=prefix+col, dtype=float, length=len(cat)))\n\t\t\tcat[prefix+col].mask = [True] * len(cat)\n\tif boolcols != None:\n\t\tfor col in boolcols:\n\t\t\tcat.add_column(astropy.table.MaskedColumn(name=prefix+col, dtype=bool, length=len(cat)))\n\t\t\tcat[prefix+col].mask = [True] * len(cat)",
"def add_column(self):\n if len(self._grid) == 1:\n self._grid[0].append(None)\n elif len(self._grid) > 1:\n for i in range(len(self._grid)):\n self._grid[i].append(None)\n return True",
"def getNumCols(self):\n return self.__cols",
"def add_column(matrix):\n import numpy as np\n shape = np.shape(matrix)\n if matrix is np.zeros(shape):\n pass",
"def appendColumn(self, contents = None):\n\n\t\t\t\t#Find the last column\n\t\t\t\tcolumn = len(tuple(self.thing.iter_cols())) + 1\n\n\t\t\t\t#Write to cells\n\t\t\t\tif ((contents != None) and (len(contents) != 0)):\n\t\t\t\t\tfor row, item in enumerate(contents):\n\t\t\t\t\t\tself.setCell(row + 1, column, item)\n\t\t\t\telse:\n\t\t\t\t\tself.setCell(1, column, \" \")",
"def add_columns(array, cols=1):\n # TODO: error handling\n rows = array.shape[0]\n new_cols = np.empty((rows, cols), dtype=np.object)\n new_array = np.concatenate((array, new_cols),\n axis=1)\n return new_array",
"def test_num_columns(self):\n pass",
"def AppendRows(self, numRows = 1):\n for i in range(numRows):\n self.data = numpy.vstack((self.data,\n numpy.array([''] * self.data.shape[1], dtype = numpy.object),\n ))\n self.rowmask = numpy.append(self.rowmask, numpy.zeros((numRows,), dtype = numpy.bool))\n\n msg = wx.grid.GridTableMessage(self,\n wx.grid.GRIDTABLE_NOTIFY_ROWS_APPENDED,\n numRows)\n #if not self._batchcount:\n # self.GetView().ProcessTableMessage(msg)\n self.GetView().ProcessTableMessage(msg)\n return True",
"def append_columns(classdict, shape=()):\n heavy = common.heavy\n for (itype, type_) in enumerate(sorted(type_info)):\n if not heavy and type_ in heavy_types:\n continue # skip heavy type in non-heavy mode\n colpos = itype + 1\n colname = 'c_%s' % type_\n if type_ == 'enum':\n base = tb.Atom.from_sctype(sctype_from_type[type_])\n col = tb.EnumCol(enum, enum(0), base, shape=shape, pos=colpos)\n else:\n sctype = sctype_from_type[type_]\n dtype = np.dtype((sctype, shape))\n col = tb.Col.from_dtype(dtype, pos=colpos)\n classdict[colname] = col\n ncols = colpos\n return ncols",
"def num_cols(self):\n return len(self.column_names())",
"def no_of_columns(self): \n return len(self.columns) + (1 if self.serialize else 0)",
"def newrow(self):\n maxlen = 0\n for colbuf in self.colbufs:\n maxlen = max(maxlen, len(colbuf))\n\n for i in range(maxlen):\n first = True\n for colbuf in self.colbufs:\n if first:\n first = False\n else:\n sys.stdout.write(self.sepstr)\n if i < len(colbuf):\n sys.stdout.write(colbuf[i])\n else:\n sys.stdout.write(\" \"*self.colwidth)\n sys.stdout.write(\"\\n\")\n\n self.colbufs = []\n for i in range(self.ncolumns):\n self.colbufs.append([])",
"def add_feature_columns(self, feature_columns: typing.List[str]):\n self.feature_columns += feature_columns",
"def _add_cols(df: pandas.DataFrame, scope = (globals(), locals())) -> None:\n command : str = input(\"\\nAdd a column:\\n\")\n if command.lower() in ['n', 'no', 'quit()', 'exit', 'return']:\n return\n\n col_name : str = command[ \\\n re.search(r'[\\w\\.\\(\\)]+', command).start(): \\\n re.search(r'[\\w\\.\\(\\)]+', command).end() \\\n ]\n # new column's name\n\n arg : str = command[re.search(r'[=,;]', command).end():]\n # the new column's \"function\"\n ref_cols = re.findall(r'(?<=\\{)\\w[\\w\\.\\(\\)]*(?=\\})', arg)\n # df column names that are referenced to create new columns\n\n for i in range(len(ref_cols)):\n arg = re.sub(\n f'{{{ref_cols[i]}}}',\n f'df[\\'{ref_cols[i]}\\']',\n arg\n )\n # substituting references\n\n scope[0].update(globals())\n scope[1].update(locals())\n\n col_arg = eval(arg, scope[0], scope[1])\n # pandas.Series for type checking\n df[col_name] = col_arg\n # creating column\n\n more : str = input(\"\\nWould you like to add more columns?\\n\")\n if more.lower() in ['y', 'yes', 'continue', 'true']:\n return _add_cols(df)\n return",
"def append_columns(cls, columns, grid=None, grid_url=None):\n grid_id = parse_grid_id_args(grid, grid_url)\n\n grid_ops.ensure_uploaded(grid_id)\n\n # Verify unique column names\n column_names = [c.name for c in columns]\n if grid:\n existing_column_names = [c.name for c in grid]\n column_names.extend(existing_column_names)\n duplicate_name = utils.get_first_duplicate(column_names)\n if duplicate_name:\n err = exceptions.NON_UNIQUE_COLUMN_MESSAGE.format(duplicate_name)\n raise exceptions.InputError(err)\n\n # This is sorta gross, we need to double-encode this.\n body = {\"cols\": _json.dumps(columns, cls=PlotlyJSONEncoder)}\n fid = grid_id\n response = v2.grids.col_create(fid, body)\n parsed_content = response.json()\n\n cls._fill_in_response_column_ids(columns, parsed_content[\"cols\"], fid)\n\n if grid:\n grid.extend(columns)",
"def addemptycolumn(self, colname, coltype):\n setattr(self,colname,N.zeros((len(self),),coltype))\n self._modflag=True\n self._type[colname]=coltype\n\n #Looks strange here because we count columns from 1 but\n #Python counts them from 0\n self._ncolumns+=1\n self._d[colname]=self._ncolumns\n self._colnames.append(colname)\n self._header+='# %d %s\\n'%(self._ncolumns,colname)",
"def configcols(self,cols,coltype=None,colformat=None,visible=None,latexphantomflag=False,defaultvalue=None):\n\n if type(cols) is str:\n cols=[cols,]\n for col in cols: # test if the column already exist\n if col == None: continue\n if col in self.colinfo:\n newcolflag=0\n oldcoltype=self.colinfo[col]['type'] # save the previous setting\n # if the coltype is changed, and no new format given: give it default format\n if (not (oldcoltype == coltype)) and colformat==None and (not (coltype==None)):\n colformat='default'\n else:\n newcolflag=1\n self.cols.append(col)\n self.colinfo[col]={} #initialize colinfo\n oldcoltype=''\n # as default: columns are type string\n if coltype==None:\n coltype='s'\n # new col: give it default format if none given\n if colformat==None:\n colformat='default'\n # set the type self.colinfo[col]['type'] and self.colinfo[col]['format']\n self.setcoltype(col,coltype,colformat=colformat)\n\n # set if the column is visible, i.e. if it is printed by default by printtexttable\n self.setcol2visible(col,visible)\n\n # latex table: set if instead of spaces you want to use phantom{0}\n self.setcol2latexphantom(col,latexphantomflag)\n\n # set column to the defaultvalue if necessary\n if newcolflag or (not defaultvalue==None):\n self.setcol2value(col,defaultvalue)\n self.colinfo[col]['autoformat']='%s'\n else:\n # redo typecasting if necessary\n if (not newcolflag) and (not coltype==oldcoltype) :\n self.redotypecasting(col)",
"def augment (self, *args):\n cols = list(self.columns())\n for aug in args:\n try:\n cols.extend(aug.columns())\n except AttributeError:\n cols.append(aug)\n return Matrix(*cols, columns=True)",
"def cols(self, col):\n self.col += col",
"def setOptionalColumns(self, colnames):\n # Make sure all column names are lower case so comparisons in _TableRow\n # are not case sensitive. From a modularity standpoint, this should be\n # done in _TableRow, but it is more efficient to do it here, since the\n # conversion need be done only once.\n if colnames == [0]:\n self.optional_cols = colnames\n else:\n self.optional_cols = [colname.lower() for colname in colnames]",
"def _modify_columns(self, cols, X, y=None):",
"def columns(self):\n \n pass",
"def has_group_cols(self):\n return len(self.group_cols) != 0",
"def add_columns(self, **columns):\n return self.as_dataframe(self.data.assign(**columns))",
"def add_col(self):\r\n reader = csv.reader(open(self.in_csvfile, newline=''))\r\n rows = list(reader)\r\n rows[0].append(self.col_name)\r\n for i in range(1, len(rows)):\r\n rows[i].append(self.cell_filler(rows[i]))\r\n writer = csv.writer(open(self.out_csvfile, 'w', newline=''))\r\n writer.writerows(rows)",
"def number_of_columns(self):\n return len(self._columns)",
"def GetNumColumns(self):\n return len(self.columns)",
"def AddColumnsInRow(self, r, ncol):\n return _table.Table_AddColumnsInRow(self, r, ncol)",
"def AppendRows(self, numRows=1): # real signature unknown; restored from __doc__\n return (self.GetNumberRows() + numRows)",
"def AppendRows(self, numRows=1): # real signature unknown; restored from __doc__\n return (self.GetNumberRows() + numRows)"
] | [
"0.6017477",
"0.5631939",
"0.55924374",
"0.5548952",
"0.5529502",
"0.5455568",
"0.5444327",
"0.54101974",
"0.53786486",
"0.53754896",
"0.53557205",
"0.5345275",
"0.53391767",
"0.53190124",
"0.5316484",
"0.5307784",
"0.528852",
"0.5280459",
"0.5254802",
"0.525032",
"0.5224798",
"0.5210471",
"0.519142",
"0.5182929",
"0.5171422",
"0.51590073",
"0.5138467",
"0.511951",
"0.5107517",
"0.5107517"
] | 0.89480335 | 0 |
AppendCols(numCols=1) -> bool Exactly the same as AppendRows() but for columns. | def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_new_cols(cat, prefix=\"\", floatcols=None, boolcols=None):\n\t\n\tif floatcols != None:\n\t\tfor col in floatcols:\n\t\t\tcat.add_column(astropy.table.MaskedColumn(name=prefix+col, dtype=float, length=len(cat)))\n\t\t\tcat[prefix+col].mask = [True] * len(cat)\n\tif boolcols != None:\n\t\tfor col in boolcols:\n\t\t\tcat.add_column(astropy.table.MaskedColumn(name=prefix+col, dtype=bool, length=len(cat)))\n\t\t\tcat[prefix+col].mask = [True] * len(cat)",
"def add_column(self):\n if len(self._grid) == 1:\n self._grid[0].append(None)\n elif len(self._grid) > 1:\n for i in range(len(self._grid)):\n self._grid[i].append(None)\n return True",
"def getNumCols(self):\n return self.__cols",
"def add_column(matrix):\n import numpy as np\n shape = np.shape(matrix)\n if matrix is np.zeros(shape):\n pass",
"def appendColumn(self, contents = None):\n\n\t\t\t\t#Find the last column\n\t\t\t\tcolumn = len(tuple(self.thing.iter_cols())) + 1\n\n\t\t\t\t#Write to cells\n\t\t\t\tif ((contents != None) and (len(contents) != 0)):\n\t\t\t\t\tfor row, item in enumerate(contents):\n\t\t\t\t\t\tself.setCell(row + 1, column, item)\n\t\t\t\telse:\n\t\t\t\t\tself.setCell(1, column, \" \")",
"def add_columns(array, cols=1):\n # TODO: error handling\n rows = array.shape[0]\n new_cols = np.empty((rows, cols), dtype=np.object)\n new_array = np.concatenate((array, new_cols),\n axis=1)\n return new_array",
"def test_num_columns(self):\n pass",
"def AppendRows(self, numRows = 1):\n for i in range(numRows):\n self.data = numpy.vstack((self.data,\n numpy.array([''] * self.data.shape[1], dtype = numpy.object),\n ))\n self.rowmask = numpy.append(self.rowmask, numpy.zeros((numRows,), dtype = numpy.bool))\n\n msg = wx.grid.GridTableMessage(self,\n wx.grid.GRIDTABLE_NOTIFY_ROWS_APPENDED,\n numRows)\n #if not self._batchcount:\n # self.GetView().ProcessTableMessage(msg)\n self.GetView().ProcessTableMessage(msg)\n return True",
"def append_columns(classdict, shape=()):\n heavy = common.heavy\n for (itype, type_) in enumerate(sorted(type_info)):\n if not heavy and type_ in heavy_types:\n continue # skip heavy type in non-heavy mode\n colpos = itype + 1\n colname = 'c_%s' % type_\n if type_ == 'enum':\n base = tb.Atom.from_sctype(sctype_from_type[type_])\n col = tb.EnumCol(enum, enum(0), base, shape=shape, pos=colpos)\n else:\n sctype = sctype_from_type[type_]\n dtype = np.dtype((sctype, shape))\n col = tb.Col.from_dtype(dtype, pos=colpos)\n classdict[colname] = col\n ncols = colpos\n return ncols",
"def num_cols(self):\n return len(self.column_names())",
"def no_of_columns(self): \n return len(self.columns) + (1 if self.serialize else 0)",
"def newrow(self):\n maxlen = 0\n for colbuf in self.colbufs:\n maxlen = max(maxlen, len(colbuf))\n\n for i in range(maxlen):\n first = True\n for colbuf in self.colbufs:\n if first:\n first = False\n else:\n sys.stdout.write(self.sepstr)\n if i < len(colbuf):\n sys.stdout.write(colbuf[i])\n else:\n sys.stdout.write(\" \"*self.colwidth)\n sys.stdout.write(\"\\n\")\n\n self.colbufs = []\n for i in range(self.ncolumns):\n self.colbufs.append([])",
"def add_feature_columns(self, feature_columns: typing.List[str]):\n self.feature_columns += feature_columns",
"def _add_cols(df: pandas.DataFrame, scope = (globals(), locals())) -> None:\n command : str = input(\"\\nAdd a column:\\n\")\n if command.lower() in ['n', 'no', 'quit()', 'exit', 'return']:\n return\n\n col_name : str = command[ \\\n re.search(r'[\\w\\.\\(\\)]+', command).start(): \\\n re.search(r'[\\w\\.\\(\\)]+', command).end() \\\n ]\n # new column's name\n\n arg : str = command[re.search(r'[=,;]', command).end():]\n # the new column's \"function\"\n ref_cols = re.findall(r'(?<=\\{)\\w[\\w\\.\\(\\)]*(?=\\})', arg)\n # df column names that are referenced to create new columns\n\n for i in range(len(ref_cols)):\n arg = re.sub(\n f'{{{ref_cols[i]}}}',\n f'df[\\'{ref_cols[i]}\\']',\n arg\n )\n # substituting references\n\n scope[0].update(globals())\n scope[1].update(locals())\n\n col_arg = eval(arg, scope[0], scope[1])\n # pandas.Series for type checking\n df[col_name] = col_arg\n # creating column\n\n more : str = input(\"\\nWould you like to add more columns?\\n\")\n if more.lower() in ['y', 'yes', 'continue', 'true']:\n return _add_cols(df)\n return",
"def append_columns(cls, columns, grid=None, grid_url=None):\n grid_id = parse_grid_id_args(grid, grid_url)\n\n grid_ops.ensure_uploaded(grid_id)\n\n # Verify unique column names\n column_names = [c.name for c in columns]\n if grid:\n existing_column_names = [c.name for c in grid]\n column_names.extend(existing_column_names)\n duplicate_name = utils.get_first_duplicate(column_names)\n if duplicate_name:\n err = exceptions.NON_UNIQUE_COLUMN_MESSAGE.format(duplicate_name)\n raise exceptions.InputError(err)\n\n # This is sorta gross, we need to double-encode this.\n body = {\"cols\": _json.dumps(columns, cls=PlotlyJSONEncoder)}\n fid = grid_id\n response = v2.grids.col_create(fid, body)\n parsed_content = response.json()\n\n cls._fill_in_response_column_ids(columns, parsed_content[\"cols\"], fid)\n\n if grid:\n grid.extend(columns)",
"def addemptycolumn(self, colname, coltype):\n setattr(self,colname,N.zeros((len(self),),coltype))\n self._modflag=True\n self._type[colname]=coltype\n\n #Looks strange here because we count columns from 1 but\n #Python counts them from 0\n self._ncolumns+=1\n self._d[colname]=self._ncolumns\n self._colnames.append(colname)\n self._header+='# %d %s\\n'%(self._ncolumns,colname)",
"def configcols(self,cols,coltype=None,colformat=None,visible=None,latexphantomflag=False,defaultvalue=None):\n\n if type(cols) is str:\n cols=[cols,]\n for col in cols: # test if the column already exist\n if col == None: continue\n if col in self.colinfo:\n newcolflag=0\n oldcoltype=self.colinfo[col]['type'] # save the previous setting\n # if the coltype is changed, and no new format given: give it default format\n if (not (oldcoltype == coltype)) and colformat==None and (not (coltype==None)):\n colformat='default'\n else:\n newcolflag=1\n self.cols.append(col)\n self.colinfo[col]={} #initialize colinfo\n oldcoltype=''\n # as default: columns are type string\n if coltype==None:\n coltype='s'\n # new col: give it default format if none given\n if colformat==None:\n colformat='default'\n # set the type self.colinfo[col]['type'] and self.colinfo[col]['format']\n self.setcoltype(col,coltype,colformat=colformat)\n\n # set if the column is visible, i.e. if it is printed by default by printtexttable\n self.setcol2visible(col,visible)\n\n # latex table: set if instead of spaces you want to use phantom{0}\n self.setcol2latexphantom(col,latexphantomflag)\n\n # set column to the defaultvalue if necessary\n if newcolflag or (not defaultvalue==None):\n self.setcol2value(col,defaultvalue)\n self.colinfo[col]['autoformat']='%s'\n else:\n # redo typecasting if necessary\n if (not newcolflag) and (not coltype==oldcoltype) :\n self.redotypecasting(col)",
"def augment (self, *args):\n cols = list(self.columns())\n for aug in args:\n try:\n cols.extend(aug.columns())\n except AttributeError:\n cols.append(aug)\n return Matrix(*cols, columns=True)",
"def cols(self, col):\n self.col += col",
"def setOptionalColumns(self, colnames):\n # Make sure all column names are lower case so comparisons in _TableRow\n # are not case sensitive. From a modularity standpoint, this should be\n # done in _TableRow, but it is more efficient to do it here, since the\n # conversion need be done only once.\n if colnames == [0]:\n self.optional_cols = colnames\n else:\n self.optional_cols = [colname.lower() for colname in colnames]",
"def _modify_columns(self, cols, X, y=None):",
"def columns(self):\n \n pass",
"def has_group_cols(self):\n return len(self.group_cols) != 0",
"def add_columns(self, **columns):\n return self.as_dataframe(self.data.assign(**columns))",
"def add_col(self):\r\n reader = csv.reader(open(self.in_csvfile, newline=''))\r\n rows = list(reader)\r\n rows[0].append(self.col_name)\r\n for i in range(1, len(rows)):\r\n rows[i].append(self.cell_filler(rows[i]))\r\n writer = csv.writer(open(self.out_csvfile, 'w', newline=''))\r\n writer.writerows(rows)",
"def number_of_columns(self):\n return len(self._columns)",
"def GetNumColumns(self):\n return len(self.columns)",
"def AddColumnsInRow(self, r, ncol):\n return _table.Table_AddColumnsInRow(self, r, ncol)",
"def AppendRows(self, numRows=1): # real signature unknown; restored from __doc__\n return (self.GetNumberRows() + numRows)",
"def AppendRows(self, numRows=1): # real signature unknown; restored from __doc__\n return (self.GetNumberRows() + numRows)"
] | [
"0.6017477",
"0.5631939",
"0.55924374",
"0.5548952",
"0.5529502",
"0.5455568",
"0.5444327",
"0.54101974",
"0.53786486",
"0.53754896",
"0.53557205",
"0.5345275",
"0.53391767",
"0.53190124",
"0.5316484",
"0.5307784",
"0.528852",
"0.5280459",
"0.5254802",
"0.525032",
"0.5224798",
"0.5210471",
"0.519142",
"0.5182929",
"0.5171422",
"0.51590073",
"0.5138467",
"0.511951",
"0.5107517",
"0.5107517"
] | 0.89480335 | 1 |
Reset all noisy layers. | def reset_noise(self):
self.advantage_hidden_layer.reset_noise()
self.advantage_layer.reset_noise()
self.value_hidden_layer.reset_noise()
self.value_layer.reset_noise() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset(self):\n for layer in self.network:\n layer.clean()",
"def reset_layers(self, rov_id): # Clear hidden layers and output layers\n for i in range(self.n_nodes):\n self.hid_layer[rov_id, i] = 0.0\n\n for j in range(self.n_outputs):\n self.out_layer[rov_id, j] = 0.0",
"def reset(self):\n self.noise.reset()",
"def reset_noise(self):\n try:\n self.head.reset_noise()\n except:\n pass\n\n try:\n for m in self.vhead.children():\n try:\n m.reset_noise()\n except:\n pass\n except:\n pass\n\n try:\n for m in self.ahead.children():\n try:\n m.reset_noise()\n except:\n pass\n except:\n pass",
"def reset_nn(self): # Clear current network\n self.weights = np.zeros((p.num_rovers, self.n_weights))\n self.in_layer = np.zeros((p.num_rovers, self.n_inputs))\n self.hid_layer = np.zeros((p.num_rovers, self.n_nodes))\n self.out_layer = np.zeros((p.num_rovers, self.n_outputs))",
"def reset(self):\n self.data = {}\n self.pf.reset()\n\n self.tc.reset()\n # Reset the neuron grid\n (self.n_n, XE, YE, IE, _, _) = self.init_pix_rf_centers(\n self.l_n, self.l_i, self.ds, self.de, mode=self.neuron_layout,\n drop_prob=self.drop_prob\n )\n self.tc.t_XE.set_value(XE)\n self.tc.t_YE.set_value(YE)\n self.tc.t_IE.set_value(IE)\n self.pf = self.init_particle_filter(self.motion_prior, self.n_p)",
"def reset_pooling_layer(self):\n self._aspp.reset_pooling_layer()",
"def reset(self):\n self.reset_image_estimate()\n self.init_m_aux()\n self.reset_hessian_and_bias()\n self.reset_adadelta_variables()",
"def reset(self):\n self.train_loss.reset_states()\n self.train_accuracy.reset_states()\n self.val_loss.reset_states()\n self.val_accuracy.reset_states()\n self.train_mIoU.reset_states()\n self.val_mIoU.reset_states()",
"def reset_parameters(self):\n\n for layer in self.layers:\n layer.reset_parameters()",
"def reset(self):\n self.__init__() # Reset all variables\n self.stitch = self.convertNumpy2Image(np.zeros([750,850,3], dtype=\"uint8\"))\n self.capture = self.convertNumpy2Image(np.zeros([320,408, 3],dtype=\"uint8\"))",
"def reset(self):\n\n def reset_function(module):\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n m.reset_parameters()\n\n self.apply(reset_function)",
"def reset(self, fullreset=True):\n self.controlpoints = []\n self.contour = []\n self.ext_energies = []\n self.update()\n if fullreset:\n self.optimized = False",
"def reset(self):\n self.mode = 0\n self.graphs = [[], [], []]\n self.coefficients = []\n self.sample = []",
"def reset(self):\n self.loss = []\n self.funcargs = []\n self.nSteps = 0 \n self.converged = False",
"def reset(self):\n for i in range(0, len(self.current_state)):\n self.current_state[i] = 0\n\n for i in range(0, len(self.weights)):\n self.weights[i] = 0",
"def reset_model(model):\n\n\tfor layer in model.layers:\n\t\t# Note: these are custom depending on the layer type\n\t\tif '.MoleculeConv' in str(layer):\n\t\t\tW_inner = layer.init_inner((layer.inner_dim, layer.inner_dim))\n\t\t\tb_inner = np.zeros((1, layer.inner_dim))\n\t\t\t# Inner weights\n\t\t\tlayer.W_inner.set_value((T.tile(W_inner, (layer.depth + 1, 1, 1)).eval() + \\\n\t\t\t\tinitializations.uniform((layer.depth + 1, layer.inner_dim, layer.inner_dim)).eval()).astype(np.float32))\n\t\t\tlayer.b_inner.set_value((T.tile(b_inner, (layer.depth + 1, 1, 1)).eval() + \\\n\t\t\t\tinitializations.uniform((layer.depth + 1, 1, layer.inner_dim)).eval()).astype(np.float32))\n\n\t\t\t# Outer weights\n\t\t\tW_output = layer.init_output((layer.inner_dim, layer.units), scale = layer.scale_output)\n\t\t\tb_output = np.zeros((1, layer.units))\n\t\t\t# Initialize weights tensor\n\t\t\tlayer.W_output.set_value((T.tile(W_output, (layer.depth + 1, 1, 1)).eval()).astype(np.float32))\n\t\t\tlayer.b_output.set_value((T.tile(b_output, (layer.depth + 1, 1, 1)).eval()).astype(np.float32))\n\t\t\tlogging.info('graphFP layer reset')\n\n\t\telif '.Dense' in str(layer):\n\t\t\tlayer.W.set_value((layer.init(layer.W.shape.eval()).eval()).astype(np.float32))\n\t\t\tlayer.b.set_value(np.zeros(layer.b.shape.eval(), dtype=np.float32))\n\t\t\tlogging.info('dense layer reset')\n\n\t\telif '.Dropout' in str(layer):\n\t\t\tlogging.info('dropout unchanged')\n\t\telse:\n\t\t\traise ValueError('Unknown layer {}, cannot reset weights'.format(str(layer)))\n\tlogging.info('Reset model weights')\n\treturn model",
"def reset(self):\n self.loss = 0\n self.cnt = 0",
"def reset_pooling_layer(self):\n self._semantic_decoder.reset_pooling_layer()\n if self._instance_decoder is not None:\n self._instance_decoder.reset_pooling_layer()",
"def reset(self):\n if self._key:\n self._lib.StObjectReset(self._key)\n os.chdir(self._cwd)\n self._layers.clear() # layer: index\n self._substrate = None\n self._experiments.clear() # analyzed experiments\n self._tmpstandards.clear()",
"def reset(self):\n self._weights.clear()",
"def reset(self):\n # must NOT reset color map here, otherwise we loose provided configs by user,\n # which are more important in this case for result images vs whatever the model task specified\n self.class_names = None\n self._map = None",
"def _reset(self):\n self.loss_history = []\n self.optim_configs = {}\n for p in self.model.params:\n d = {k: v for k, v in self.optim_config.items()}\n self.optim_configs[p] = d",
"def reset(self):\n self.pred_classes.clear()\n self.gold_classes.clear()\n self.pred_probas.clear()\n self.gold_probas.clear()\n self.loss = 0\n self.nb_batches = 0\n self.prec_rec_f1 = None\n self.acc = None\n self.mcc = None",
"def _reset(self) -> None:\n self.images = []\n self.activations = []\n self.labels = []\n self.preds = []\n self.n_found = 0",
"def reset_parameters(self):\n self.conv_in.reset_parameters()\n self.conv_out.reset_parameters()\n if self.lin is not None:\n self.lin.reset_parameters()",
"def reset(self):\r\n self._p = self._p_init\r\n self._r = self._r_init\r\n self._v = self._v_init\r\n self._w = self._w_init\r\n self._a = self._a_init\r\n self._alpha = self._alpha_init",
"def reset(self):\n\t\t\n\t\t# The measured information, from the shape measurement on the observed image\n\t\tself.mes_x = 0.0\n\t\tself.mes_y = 0.0\n\t\tself.mes_a = 0.0\n\t\tself.mes_b = 0.0\n\t\tself.mes_theta = 0.0 # Sextractor : from -90 to 90 deg\n\t\tself.mes_fwhm = 0.0\n\t\tself.mes_flux = 0.0\n\t\tself.mes_fluxerr = 0.0\n\t\tself.mes_flux_max = 0.0\n\n\t\tself.mes_sky = 0.0\n\t\tself.mes_sig = 0.0",
"def reset_params(self):\n self.blur = -1\n self.closing = -1\n self.thresh = -1",
"def reset(self):\n self.sample['masked'] = [False]*len(self.sample.index)\n self.sample['colour'] = ['undefined']*len(self.sample.index)"
] | [
"0.75135684",
"0.7388977",
"0.7320769",
"0.7070128",
"0.6976212",
"0.69198513",
"0.69061995",
"0.6805137",
"0.67520696",
"0.67174494",
"0.6668943",
"0.6648948",
"0.6637794",
"0.66241336",
"0.6584105",
"0.65818655",
"0.6574278",
"0.65249395",
"0.6519634",
"0.6511302",
"0.6481823",
"0.647396",
"0.64525056",
"0.642967",
"0.6413875",
"0.6410909",
"0.64100945",
"0.64065164",
"0.6375765",
"0.63543725"
] | 0.8044907 | 0 |
Given an undefined output folder path, we return the blank string | def test_make_output_folder_undefined_path(self):
test_object = Maic()
expected_result = ""
self.assertEqual(expected_result,
test_object.make_output_folder(output_folder=None),
"Should get back an empty string for an undefined "
"output folder") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_make_output_folder_blank_path(self):\n test_object = Maic()\n expected_result = \"\"\n self.assertEqual(expected_result,\n test_object.make_output_folder(output_folder=\"\"),\n \"Should get back an empty string for an output \"\n \"folder specified as ''\")",
"def get_output_path():\n return os.getcwd() + \"/output/\"",
"def GetOutputPath(self):\n self.outputDir = raw_input(\"What path should be outputted to?\\n\\r>>> \")\n if self.outputDir is \"\":\n self.outputDir = \"C:\\Users\\Lucas\\Pictures\\GraphOutput\"\n bob = os.path.isabs(self.inputDir)\n if not bob:\n print \"that was not an excepted path name. Try again\"\n self.GetOutputPath()",
"def name_final_path(out_img_folder):\n if out_img_folder == None:\n return \"./.out_hidden_images\"\n else:\n return out_img_folder",
"def outpath(self):\n return None",
"def get_output_dir(direct=\"default\"):\n result = \"\"\n if(direct == \"default\"):\n result = \"\"\n else:\n result = direct\n return result",
"def get_output_folder(self):\n return os.path.join(self.root_output_folder, self.base_fish_folder)",
"def output_path():\n folder = path.join(path.curdir, \"stages\")\n folder = path.abspath(folder)\n return ensure_path(folder)",
"def get_output_folder_name(argi=2, root_folder=\"\"):\n # First tries to read the output folder name from argv[2]\n try:\n output_folder = sys.argv[argi]\n except IndexError:\n # If argv[argi] was not passed, asks the user for the output folder.\n output_folder = root_folder\n output_folder += input(\"Output folder path was not informed. Please inform:\\n\"\n \"{}\".format(root_folder))\n\n # Adds the SEP (/ or \\\\) character to the end of the folder name.\n if output_folder[-len(SEP):] != SEP:\n output_folder += SEP\n\n # Checks if the folder does not exist. Creates it, in this case.\n if not os.path.exists(output_folder):\n os.system(\"mkdir -p '{}'\".format(output_folder))\n\n return output_folder",
"def breseq_pipeline_output_empty(tmp_path)->Path:\n\n\tparent_folder = checkdir(tmp_path / \"parent_folder\")\n\n\tsample_1_folder = checkdir(parent_folder / \"sample1\")\n\tsample_1_folder_output = checkdir(sample_1_folder / \"output\")\n\tsample_1_folder_data = checkdir(sample_1_folder / \"data\")\n\n\tsample_2_folder = checkdir(parent_folder / \"sample2\")\n\tsample_2_folder_breseq = checkdir(sample_2_folder / \"breseq\")\n\tsample_2_folder_output = checkdir(sample_2_folder_breseq / \"output\")\n\tsample_2_folder_data = checkdir(sample_2_folder_breseq / \"data\")\n\n\tsample_3_folder = checkdir(parent_folder / \"AU1234_ABC\")\n\tsample_3_folder_breseq = checkdir(sample_3_folder / \"breseq_output\")\n\tsample_3_folder_output = checkdir(sample_3_folder_breseq / \"output\")\n\tsample_3_folder_data = checkdir(sample_3_folder_breseq / \"data\")\n\t\n\treturn parent_folder",
"def _output_path(name):\n output = Path(\"../Analysis Results/\").joinpath(str(date.today()))\n output.mkdir(parents=True, exist_ok=True)\n return output.joinpath(f\"{name}.png\")",
"def outputdir():\n return __OUTPUT_DIR__",
"def _out(self, *args):\n suffix = '_'.join(map(str, args))\n return os.path.join(self._out_folder, suffix )",
"def GetOutSubDir(cls):\n return PipelineConfig.Instance().pipeline_subdirs().get('PIPELINE_OUT_DIR', '')",
"def output_path(self):\r\n return '%s/%s' % (os.path.abspath(os.path.dirname(__file__) + 'outputs'),\r\n self.identifier)",
"def output_dir(self):\n ep, pp = (\n maybe_path(os.getenv(\"BRIGHTWAY2_OUTPUT_DIR\")),\n maybe_path(config.p.get(\"output_dir\")),\n )\n if ep and ep.is_dir():\n return ep\n elif pp and pp.is_dir():\n return pp\n else:\n return self.request_directory(\"output\")",
"def folder(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"folder\")",
"def output_path(self):\n\n output_path = stringify(self._output_path)\n if output_path is None:\n with current_context() as ctx:\n output_path_relative = stringify(self.output_path_relative)\n if output_path_relative is not None:\n output_path = join_path(ctx.paths.output, output_path_relative)\n else:\n output_path = ctx.current.project.get_output_path(self.executor.output_type)\n return output_path",
"def get_output_path(self):\n output_path = '%s/%s' % (\n os.path.expanduser(JOB_OUTPUT_PATH), self.get_unique_name())\n return output_path",
"def get_dummy_folder() -> str:\n dummy_folder = os.path.join(os.getcwd(), 'dummy_test_folder')\n return dummy_folder",
"def get_path():\n\n output_path = None\n while output_path is None:\n print question + \"Please enter the directory where you would like the file saved?\"\n output_path = raw_input()\n if os.path.isdir(os.path.expanduser(output_path)):\n pass\n else:\n os.system('clear')\n print warn + \"%s is not valid, please try again: \" % str(output_path)\n output_path = None\n return os.path.expanduser(output_path)",
"def get_output_path(backup_file, output_root):\n dir_path = backup_file.translated_path()\n full_output_path = os.path.join(output_root, dir_path)\n return os.path.normpath(full_output_path)",
"def get_output_dir(imdb, net):\n path = os.path.abspath(os.path.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))\n if net is None:\n return path\n else:\n return os.path.join(path, net.name)",
"def _dir_out(self):\n ens_label = utils.io.ens_label(self.ens_mem)\n return os.path.join(self.proj.direc_out, self.proj.name,\n self.model.name, self.run.name,\n ens_label, self.name)",
"def output_path(self) -> str:\n if self._output_path is None:\n if not self._root_folder:\n self._root_folder = self._env.experiments_folder\n folder = os.path.join(self._root_folder, self.key)\n\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n self._output_path = folder\n\n return self._output_path",
"def getOutputFolder(analysesFolder):\n i = 1\n outputFolder = os.path.join(analysesFolder, \"Output_\" + str(i))\n while os.path.exists(outputFolder):\n i += 1\n outputFolder = os.path.join(analysesFolder, \"Output_\" + str(i))\n\n os.mkdir(outputFolder)\n return outputFolder",
"def filter_pathdir(val: Optional[str]) -> str:\n return os.path.dirname(val or '')",
"def create_output_loc(self):\n self.output_name = [self.args.xml_out, 'gatk4_' + self.json_file['name'].lower().split(' ')[0] + '.xml']\n if not self.args.xml_out.endswith('/'):\n return '/'.join(self.output_name)\n else:\n return ''.join(self.output_name)",
"def fake_full_path(self) -> PurePath:\n if self.category:\n # Giza wrote out yaml file artifacts under a directory. e.g. steps-foo.yaml becomes\n # steps/foo.rst\n return self.source_path.parent.joinpath(\n PurePath(self.category), self.output_filename\n )\n return self.source_path",
"def get_output_path():\n\n path = rs.DocumentPath()\n name = rs.DocumentName()\n \n if gc.operating_system == \"mac\":\n\n path = path[:-len(name)] + \"_system.dat\"\n\n elif gc.operating_system == \"win\":\n\n i = path.rfind(\"\\\\\")\n\n path = path[:i] + \"/_system.dat\" \n\n return path"
] | [
"0.76985216",
"0.70029175",
"0.69376665",
"0.6890628",
"0.68563265",
"0.6776184",
"0.6774517",
"0.6737329",
"0.6688305",
"0.6654316",
"0.66089475",
"0.6563189",
"0.6542934",
"0.6504976",
"0.64609385",
"0.63817656",
"0.6361122",
"0.6351268",
"0.63438004",
"0.63324255",
"0.6330289",
"0.6289684",
"0.62757605",
"0.62707746",
"0.62662804",
"0.6264529",
"0.62470067",
"0.6246883",
"0.6242412",
"0.6235085"
] | 0.78773004 | 0 |
Given an empty output folder path, we return the blank string | def test_make_output_folder_blank_path(self):
test_object = Maic()
expected_result = ""
self.assertEqual(expected_result,
test_object.make_output_folder(output_folder=""),
"Should get back an empty string for an output "
"folder specified as ''") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_make_output_folder_undefined_path(self):\n test_object = Maic()\n expected_result = \"\"\n self.assertEqual(expected_result,\n test_object.make_output_folder(output_folder=None),\n \"Should get back an empty string for an undefined \"\n \"output folder\")",
"def breseq_pipeline_output_empty(tmp_path)->Path:\n\n\tparent_folder = checkdir(tmp_path / \"parent_folder\")\n\n\tsample_1_folder = checkdir(parent_folder / \"sample1\")\n\tsample_1_folder_output = checkdir(sample_1_folder / \"output\")\n\tsample_1_folder_data = checkdir(sample_1_folder / \"data\")\n\n\tsample_2_folder = checkdir(parent_folder / \"sample2\")\n\tsample_2_folder_breseq = checkdir(sample_2_folder / \"breseq\")\n\tsample_2_folder_output = checkdir(sample_2_folder_breseq / \"output\")\n\tsample_2_folder_data = checkdir(sample_2_folder_breseq / \"data\")\n\n\tsample_3_folder = checkdir(parent_folder / \"AU1234_ABC\")\n\tsample_3_folder_breseq = checkdir(sample_3_folder / \"breseq_output\")\n\tsample_3_folder_output = checkdir(sample_3_folder_breseq / \"output\")\n\tsample_3_folder_data = checkdir(sample_3_folder_breseq / \"data\")\n\t\n\treturn parent_folder",
"def get_output_path():\n return os.getcwd() + \"/output/\"",
"def GetOutputPath(self):\n self.outputDir = raw_input(\"What path should be outputted to?\\n\\r>>> \")\n if self.outputDir is \"\":\n self.outputDir = \"C:\\Users\\Lucas\\Pictures\\GraphOutput\"\n bob = os.path.isabs(self.inputDir)\n if not bob:\n print \"that was not an excepted path name. Try again\"\n self.GetOutputPath()",
"def name_final_path(out_img_folder):\n if out_img_folder == None:\n return \"./.out_hidden_images\"\n else:\n return out_img_folder",
"def outpath(self):\n return None",
"def get_output_folder(self):\n return os.path.join(self.root_output_folder, self.base_fish_folder)",
"def output_path():\n folder = path.join(path.curdir, \"stages\")\n folder = path.abspath(folder)\n return ensure_path(folder)",
"def get_output_folder_name(argi=2, root_folder=\"\"):\n # First tries to read the output folder name from argv[2]\n try:\n output_folder = sys.argv[argi]\n except IndexError:\n # If argv[argi] was not passed, asks the user for the output folder.\n output_folder = root_folder\n output_folder += input(\"Output folder path was not informed. Please inform:\\n\"\n \"{}\".format(root_folder))\n\n # Adds the SEP (/ or \\\\) character to the end of the folder name.\n if output_folder[-len(SEP):] != SEP:\n output_folder += SEP\n\n # Checks if the folder does not exist. Creates it, in this case.\n if not os.path.exists(output_folder):\n os.system(\"mkdir -p '{}'\".format(output_folder))\n\n return output_folder",
"def get_output_dir(direct=\"default\"):\n result = \"\"\n if(direct == \"default\"):\n result = \"\"\n else:\n result = direct\n return result",
"def GetOutSubDir(cls):\n return PipelineConfig.Instance().pipeline_subdirs().get('PIPELINE_OUT_DIR', '')",
"def outputdir():\n return __OUTPUT_DIR__",
"def get_dummy_folder() -> str:\n dummy_folder = os.path.join(os.getcwd(), 'dummy_test_folder')\n return dummy_folder",
"def _output_path(name):\n output = Path(\"../Analysis Results/\").joinpath(str(date.today()))\n output.mkdir(parents=True, exist_ok=True)\n return output.joinpath(f\"{name}.png\")",
"def folder(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"folder\")",
"def get_path():\n\n output_path = None\n while output_path is None:\n print question + \"Please enter the directory where you would like the file saved?\"\n output_path = raw_input()\n if os.path.isdir(os.path.expanduser(output_path)):\n pass\n else:\n os.system('clear')\n print warn + \"%s is not valid, please try again: \" % str(output_path)\n output_path = None\n return os.path.expanduser(output_path)",
"def _out(self, *args):\n suffix = '_'.join(map(str, args))\n return os.path.join(self._out_folder, suffix )",
"def getOutputFolder(analysesFolder):\n i = 1\n outputFolder = os.path.join(analysesFolder, \"Output_\" + str(i))\n while os.path.exists(outputFolder):\n i += 1\n outputFolder = os.path.join(analysesFolder, \"Output_\" + str(i))\n\n os.mkdir(outputFolder)\n return outputFolder",
"def filter_pathdir(val: Optional[str]) -> str:\n return os.path.dirname(val or '')",
"def output_dir(self):\n ep, pp = (\n maybe_path(os.getenv(\"BRIGHTWAY2_OUTPUT_DIR\")),\n maybe_path(config.p.get(\"output_dir\")),\n )\n if ep and ep.is_dir():\n return ep\n elif pp and pp.is_dir():\n return pp\n else:\n return self.request_directory(\"output\")",
"def get_output_path(self):\n output_path = '%s/%s' % (\n os.path.expanduser(JOB_OUTPUT_PATH), self.get_unique_name())\n return output_path",
"def output_path(self) -> str:\n if self._output_path is None:\n if not self._root_folder:\n self._root_folder = self._env.experiments_folder\n folder = os.path.join(self._root_folder, self.key)\n\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n self._output_path = folder\n\n return self._output_path",
"def fake_full_path(self) -> PurePath:\n if self.category:\n # Giza wrote out yaml file artifacts under a directory. e.g. steps-foo.yaml becomes\n # steps/foo.rst\n return self.source_path.parent.joinpath(\n PurePath(self.category), self.output_filename\n )\n return self.source_path",
"def output_path(self):\n\n output_path = stringify(self._output_path)\n if output_path is None:\n with current_context() as ctx:\n output_path_relative = stringify(self.output_path_relative)\n if output_path_relative is not None:\n output_path = join_path(ctx.paths.output, output_path_relative)\n else:\n output_path = ctx.current.project.get_output_path(self.executor.output_type)\n return output_path",
"def get_output_path(backup_file, output_root):\n dir_path = backup_file.translated_path()\n full_output_path = os.path.join(output_root, dir_path)\n return os.path.normpath(full_output_path)",
"def output_path(self):\r\n return '%s/%s' % (os.path.abspath(os.path.dirname(__file__) + 'outputs'),\r\n self.identifier)",
"def _dir_out(self):\n ens_label = utils.io.ens_label(self.ens_mem)\n return os.path.join(self.proj.direc_out, self.proj.name,\n self.model.name, self.run.name,\n ens_label, self.name)",
"def _prepare_subject_output_path(output_root, subject_id):\n output_dir = output_root / subject_id\n output_dir.mkdir(parents=True, exist_ok=True)\n return output_dir / f\"{subject_id}_task-tapping_nirs.nwb\"",
"def create_output_loc(self):\n self.output_name = [self.args.xml_out, 'gatk4_' + self.json_file['name'].lower().split(' ')[0] + '.xml']\n if not self.args.xml_out.endswith('/'):\n return '/'.join(self.output_name)\n else:\n return ''.join(self.output_name)",
"def out_dir(self) -> str:\n return self._out_dir"
] | [
"0.7781955",
"0.6907767",
"0.66690165",
"0.6645506",
"0.6641241",
"0.66131055",
"0.65263367",
"0.63996685",
"0.6386103",
"0.6343422",
"0.6288943",
"0.62462056",
"0.6244957",
"0.61959714",
"0.6165891",
"0.61632746",
"0.6150932",
"0.61507374",
"0.60942763",
"0.6087422",
"0.6048714",
"0.6048673",
"0.6048656",
"0.60459405",
"0.602275",
"0.60113746",
"0.59912485",
"0.5983808",
"0.59736365",
"0.59356844"
] | 0.78897774 | 0 |
Given a complex folder path with multiple embedded slashes, check that the code tries to make the folder and returns the path with a single trailing '/' appended only if required | def test_make_output_folder_path_with_multi_slashes(self, mock_makedirs):
mock_makedirs.return_value = True
test_object = Maic()
path = '/c/o/m/p/l/e/x_p/a/t/h/'
expected_result = path
self.assertEqual(expected_result,
test_object.make_output_folder(output_folder=path),
"Should get back '"
+ expected_result
+ "' for an output folder specified as '"
+ path
+ "'")
self.assertTrue(mock_makedirs.called_once_with(path)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def format_folder_path(folder_path):\n if folder_path[-1] != '/':\n folder_path += '/'\n\n return folder_path",
"def test_fix_path(self):\n\n expected = \"hello\" + PyFunceble.directory_separator + \"world\" + PyFunceble.directory_separator # pylint: disable=line-too-long\n actual = Directory(\"/hello/world\").fix_path()\n\n self.assertEqual(expected, actual)\n\n actual = Directory(\"\\\\hello\\\\world\").fix_path()\n self.assertEqual(expected, actual)\n\n actual = Directory(\"hello\\\\world\").fix_path()\n self.assertEqual(expected, actual)\n\n actual = Directory(r\"hello\\world\").fix_path()\n self.assertEqual(expected, actual)\n\n actual = Directory(r\"hello/world/\").fix_path()\n self.assertEqual(expected, actual)",
"def chkPath(fullPath: str) -> None:\n\n # Check if path already exist.\n p = os.path.split(fullPath)\n exists = os.path.exists(p[0])\n # If not then create it.\n if exists == False:\n try:\n os.makedirs(p[0])\n except:\n print(\"Failed to create requested path.\")",
"def dir_path(path):\n pattern='^(.*)[/]$'\n matchobj=re.match(pattern,path)\n if matchobj:\n return path\n else:\n return path+'/'",
"def _normalize_path(path):\n if path is None:\n directory = BASE_PATH\n path = ''\n else:\n path = op.normpath(path)\n directory = op.normpath(op.join(BASE_PATH, path))\n\n if not is_in_folder(BASE_PATH, directory):\n abort(404)\n\n if not op.exists(directory):\n abort(404)\n\n return BASE_PATH, directory, path",
"def directory_slash(destination):\n\n if destination[-1] != '/':\n return destination + '/'\n\n return destination",
"def test_make_output_folder_simple_path_with_slash(self, mock_makedirs):\n mock_makedirs.return_value = True\n test_object = Maic()\n path = 'simple_path/'\n expected_result = path\n self.assertEqual(expected_result,\n test_object.make_output_folder(output_folder=path),\n \"Should get back '\"\n + expected_result\n + \"' for an output folder specified as '\"\n + path\n + \"'\")\n self.assertTrue(mock_makedirs.called_once_with(path))",
"def normdirpath(path):\n if not path.endswith('/') and path != '':\n path += '/'\n return path",
"def build_path(obj_name: str, is_folder: bool=False) -> str:\n\n return validate_path_or_name(\n obj_name if obj_name.startswith('/') else '/{}'.format(obj_name),\n is_folder=is_folder\n )",
"def concat_folder(folder, element):\n if folder[-1] == \"/\":\n return folder + element\n return folder + \"/\" + element",
"def ensure_path(path):\n\n path = os.path.expanduser(path)\n #Do not take into consideration the last path element\n #Unless it end with '/'\n os.makedirs('/'.join(path.split('/')[:-1]), exist_ok=True)\n return path",
"def format_path(path):\n return path if path.endswith('/') else path + '/'",
"def makepath(plname,root):\n if (root.endswith('/') and not plname[0] =='/' ) or ( not root.endswith('/') and plname[0] =='/') :\n return root+plname\n elif root.endswith('/') and plname[0] =='/' :\n return root+plname[1:]\n else:\n return root+\"/\"+plname",
"def clean_path(path: str) -> str:\n previous_path = \"\"\n next_path = path\n while next_path != previous_path:\n previous_path = next_path\n next_path = copy_annotations(path, next_path.replace(\"//\", \"/\"))\n while next_path.endswith(\"/\"):\n next_path = next_path[:-1]\n return next_path",
"def clean_folder_name(folder_name):\n folder_name = folder_name.strip('/')\n if folder_name != '':\n folder_name = os.path.normpath(folder_name)\n return folder_name",
"def validate_path_or_name(path_or_name: str, is_folder: bool=False) -> str:\n\n if is_folder:\n assert path_or_name.endswith('/')\n else:\n assert not path_or_name.endswith('/')\n\n return path_or_name",
"def test_predicates_on_unsanitized_paths(self):\n self.mfs.add_entries({'/just/another/pythonista': ''})\n\n self.assertTrue(os.path.isdir('///just'))\n self.assertTrue(os.path.isdir('///just/////another'))\n self.assertTrue(os.path.exists('///just////another////////pythonista'))\n self.assertTrue(os.path.isfile('///just////another////////pythonista'))",
"def test_make_output_folder_simple_path(self, mock_makedirs):\n mock_makedirs.return_value = True\n test_object = Maic()\n path = 'simple_path'\n expected_result = '{path}{sep}'.format(path=path, sep=os.sep)\n self.assertTrue(mock_makedirs.called_once_with(path))\n self.assertEqual(expected_result,\n test_object.make_output_folder(output_folder=path),\n \"Should get back '{expected}' for an output folder\"\n \" specified as '{path}'\".format(\n expected=expected_result, path=path)\n )",
"def StripFolder(path):\n\n if not path.endswith((\"/\", \"\\\\\")):\n path = path + \"\\\\\"\n folders = [path]\n allf = []\n while folders:\n folder = folders.pop(0)\n allf.append(folder)\n for lister in os.listdir(folder):\n if os.path.isdir(folder + lister):\n folders.append(folder + lister + \"\\\\\")\n elif not path == folder:\n CopyFolder(folder, path)\n shutil.rmtree(folder)\n\n return tuple(allf)",
"def checkfolder(paths):\n\tpaths = paths if isinstance(paths, list) else [paths]\n\t\n\tdef creat_dir(x):\n\t\tx = Path(x)\n\t\tif x.is_dir():\n\t\t\tprint(f\"Dir {x} already exists\")\n\t\telse:\n\t\t\tPath.mkdir(x)\n\t\t\tprint(f\"Created new dir {x}\")\n\t\n\tlist(map(creat_dir, paths))",
"def fix_path(path):\n path = os.path.normpath(path)\n os.makedirs(path, exist_ok=True)\n return path",
"def fix_length(path):\n if len(path) > MAX_PATH_LENGTH+FOLDER_NAME_LENGTH:\n folder, post_folder_path = path.split(\"/\")\n post_folder_path = post_folder_path[-MAX_PATH_LENGTH:]\n path = folder+\"/\"+post_folder_path\n return path",
"def test_buildArn_with_folder_with_slashes(self):\n\n expected = 'arn:aws:s3:::my_bucket/some/folder/*'\n actual = TileBucket.buildArn('my_bucket', '/some/folder/')\n assert(expected == actual)",
"def create_nested_catalog(path_as_string):\n path_as_list = path_as_string.replace(\"\\\"\", \"\").replace(\"\\\\\", \"/\").split(\"/\")\n if path_as_list[0].endswith(\":\"):\n path_as_list[0] = path_as_list[0] + \"\\\\\"\n\n next_nested_folder = ''\n for folder in path_as_list:\n next_nested_folder = os.path.join(next_nested_folder, folder)\n if os.path.exists(next_nested_folder):\n if os.path.isdir(next_nested_folder):\n print(f\"Creation of the directory skipped: \\\"{next_nested_folder}\\\" already exists\")\n else:\n print(\"Invalid input\")\n return False\n else:\n try:\n os.mkdir(next_nested_folder)\n except OSError:\n print(f\"Creation of the directory \\\"{next_nested_folder}\\\" failed\")\n return False\n print(f\"\\\"{next_nested_folder}\\\" created\")\n return True",
"def add_trailing_slash(path):\n if len(path) > 0:\n if path[len(path) - 1] == \"/\":\n return path\n else:\n return path + \"/\"\n else:\n return path + \"/\"",
"def make_dir_if_need(path):\n folder, filename, ext = split_path(path)\n if len(folder) > 0 and not os.path.exists(folder):\n try:\n os.makedirs(folder)\n except Exception as e:\n print(e)\n sys.stderr.write('folder:{0} is not valid path'.format(folder))\n return sanitize_path(path)",
"def test_buildArn_with_folder_no_slashes(self):\n\n expected = 'arn:aws:s3:::my_bucket/some/folder/*'\n actual = TileBucket.buildArn('my_bucket', 'some/folder')\n assert(expected == actual)",
"def make_full_path(self, path, name):\n full_path = (path + \"/\" + name) if path != '' else name\n # remove any duplicate slashes\n full_path = re.sub(r'//+',r'/', full_path)\n self.validate_path(full_path)\n return full_path",
"def fix_path(name):\n saveslash = \"/\" if (name[0] == \"/\") else \"\"\n name = re.split(\"\\\\\\|/\", name)\n new = name[0]\n for i in range(1,len(name)):\n new = os.path.join(new, name[i])\n new = \"%s%s\" % (saveslash, new)\n return new",
"def test_make_output_folder_blank_path(self):\n test_object = Maic()\n expected_result = \"\"\n self.assertEqual(expected_result,\n test_object.make_output_folder(output_folder=\"\"),\n \"Should get back an empty string for an output \"\n \"folder specified as ''\")"
] | [
"0.7249269",
"0.6797798",
"0.6691372",
"0.6648862",
"0.6491748",
"0.6453401",
"0.6421338",
"0.63004524",
"0.62997335",
"0.6285595",
"0.62594306",
"0.6247562",
"0.62401736",
"0.62397146",
"0.6239207",
"0.6219014",
"0.62041944",
"0.61918926",
"0.6191478",
"0.61791414",
"0.61717826",
"0.6157799",
"0.61163014",
"0.6114561",
"0.60982865",
"0.6082216",
"0.6078282",
"0.6071894",
"0.6061808",
"0.6054187"
] | 0.6893744 | 1 |
Check that an output folder path that exists but does not end with something that looks like a timestamp gets a timestamp added | def test_make_output_folder_exists_no_timestamp(self, mock_makedirs,
mock_logger):
mock_makedirs.side_effect = [OSError, True]
test_object = Maic()
path = "my_path"
sep = os.sep
if os.sep == '\\':
# we've got a backslash which causes havoc in a regex so we need
# to escape the backslash twice
sep = '\\\\'
result = test_object.make_output_folder(output_folder=path)
match_string = r'^my_path-\d{4}(-\d{2}){2}-(-\d{2}){2}' + sep + '$'
self.assertTrue(
re.search(match_string, result,
re.S),
"Should have got a path with a Timestamp attached")
mock_logger.assert_called_with(
"Specified folder (my_path) already exists - trying to create "
"one with a timestamp") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_make_output_folder_exists_with_timestamp_fails(self,\n mock_makedirs):\n mock_makedirs.side_effect = [OSError]\n test_object = Maic()\n path = \"my_path-1960-04-04--15-00\"\n try:\n test_object.make_output_folder(output_folder=path)\n except OSError:\n pass\n except BaseException:\n self.fail(\"Should get an OSError\")",
"def prerun(timestamp):\r\n if not os.path.isdir('log'):\r\n os.makedirs('log')\r\n if not os.path.isdir('collected'):\r\n os.makedirs('collected')\r\n if not os.path.isdir('done'):\r\n os.makedirs('done')\r\n time_stamped_folder = os.path.join('collected', timestamp)\r\n if not os.path.isdir(time_stamped_folder):\r\n os.makedirs(time_stamped_folder)\r\n return time_stamped_folder",
"def check_outpath(self, outpath):\n if not os.path.isdir(outpath+str(self.ar_no)):\n ar_outpath = os.path.join(outpath,str(self.ar_no))\n ar_outpath_video = os.path.join(outpath,str(self.ar_no)+'_video')\n os.makedirs(ar_outpath)\n os.makedirs(ar_outpath_video)\n print(\"Path does not exist, create: \")\n print(ar_outpath)\n print(ar_outpath_video)",
"def test_directory_path_without_calendar():\n downloader = WallpaperDownloader(\"08-2020\")\n directory_path = downloader._get_directory_path(with_calendar=False)\n exist_directory_path = os.path.join(\n downloader.destination_directory_path,\n \"august-2020 (without-calendar)\",\n )\n assert directory_path == exist_directory_path",
"def test_make_final_path_datetime(self):\n with DataArchive(self.user, DATA_DOWNLOADS_WORKING_DIR) as archive:\n now = timezone.now()\n final_path = archive.make_final_path(date=now)\n \n valid_path = os.path.join(\n archive.data_dir_path,\n now.strftime('%Y'),\n now.strftime('%m'),\n now.strftime('%d')\n )\n\n self.assertEqual(final_path, valid_path)",
"def make_sure_path_exists(out_path):\n try:\n os.makedirs(out_path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n print \"Errors in output folder path! please change the output path or analysis name\\n\"\n exit()",
"def check_base_filename(self, record):\n time_tuple = time.localtime()\n\n if self.suffix_time != time.strftime(self.suffix, time_tuple) or not os.path.exists(\n self.baseFilename):\n # if self.suffix_time != time.strftime(self.suffix, time_tuple) or not os.path.exists(\n # self.baseFilename + '.' + self.suffix_time):\n\n return 1\n else:\n return 0",
"def verify_folder_name(folder):\n regex = re.compile(\"\\/([0-9]{8})_([0-9]{6})_(\\w+)$\")\n find = regex.search(folder)\n if find:\n date = f\"{find.group(1)}_{find.group(2)}\"\n name = find.group(3)\n folder = f\"{date}_{name}\"\n try:\n ctime = datetime.datetime.strptime(date, \"%Y%m%d_%H%M%S\")\n return (folder, name, ctime)\n except:\n return False",
"def create_report(folderpath):\n\n outputfolder = create_folder(DEFAULT_OUTPUT_FOLDER)\n\n folderpath = os.path.expanduser(folderpath)\n updatesByHour = collections.defaultdict(list)\n\n now = datetime.now()\n\n for root, folders, files in os.walk(folderpath, followlinks=False):\n for filename in files:\n if filename not in IGNORE_THESE_FILES:\n filepath = pathlib.Path(root, filename)\n mtime = datetime.fromtimestamp(filepath.stat().st_mtime)\n\n if mtime.year == now.year and mtime.month == now.month:\n # For now only deal with this month\n mtime_str = mtime.strftime(\"%Y-%m-%d %H:00\")\n updatesByHour[mtime_str].append((root,filename))\n\n outputFilePath = pathlib.Path(outputfolder, now.strftime(\"%Y-%m.md\"))\n\n with open(outputFilePath, \"w\") as output_file:\n output_file.write(\"# \"+folderpath+\"\\n\")\n for updateTime in sorted(updatesByHour.keys()):\n output_file.write(\"## \"+updateTime+\"\\n\")\n previous_root = None\n previous_pattern=None\n s=\"\"\n for root, filename in sorted(updatesByHour[updateTime]):\n if not previous_root == root:\n # Print a Directory heading\n this_folder=root[len(folderpath):]\n if not len(this_folder.strip()):\n this_folder=folderpath\n output_file.write(\"### \"+this_folder+\" \\n\")\n this_pattern=re.sub(\"[0-9]\",\"x\",filename)\n if not previous_pattern==this_pattern:\n if len(s):\n listItem = \"* \" + s \n output_file.write(listItem[:-2]+\"\\n\")\n s=\"\"\n s=s+str(filename)+\", \"\n previous_root = root\n previous_pattern=this_pattern",
"def program_out_of_date(self, stamp_path):\n if not os.path.exists(stamp_path) or self.clean:\n return True\n with open(stamp_path, 'r') as stamp:\n return self.date != stamp.read()",
"def check_already_extracted(video_parts):\n filename_no_ext, _ = video_parts\n return bool(os.path.exists(os.path.join(output_dir,\n filename_no_ext + '-0030.jpg')))",
"def check_base_filename(self, record):\n time_tuple = time.localtime()\n\n if self.file_name_format:\n pass\n\n if self.suffix_time != time.strftime(self.suffix, time_tuple) or not os.path.exists(\n self._get_format_filename()):\n return 1\n else:\n return 0",
"def duplicate_timestamp_path(existing_path):\n logfile = parse.parse_filename(existing_path)\n index = 0\n while index < 25:\n if index == 0:\n suffix = ''\n else:\n suffix = '-%02d' % index\n\n new_path = parse.unparse_filename(\n (\n logfile.prefix +\n '-logjam-compress-duplicate-timestamp' +\n suffix\n ),\n logfile.timestamp,\n logfile.suffix,\n logfile.extension\n )\n if not os.path.exists(new_path):\n return new_path\n\n index += 1\n\n raise Exception('%d duplicate timestamp paths detected.' % index)",
"def _is_path_inside_output_dir(self, path: str) -> bool:\n real_output_dir = os.path.realpath(self._output_dir)\n real_file_path = os.path.realpath(path)\n return os.path.commonpath([real_output_dir, real_file_path]) == real_output_dir",
"def checkIfExist(foamCase):\n if os.path.isdir(foamCase):\n# endTime=0.4 #hardcode the endtime for cases which don't have a system folder\n try:\n endTime = readInput('controlDict', 'endTime', foamCase=foamCase)\n if os.path.isdir(foamCase+'/' + endTime):\n return 0\n else:\n return 1\n except:\n return 3\n else:\n return 2",
"def test_make_final_path_date(self):\n with DataArchive(self.user, DATA_DOWNLOADS_WORKING_DIR) as archive:\n now = timezone.now().date()\n final_path = archive.make_final_path(date=now)\n \n valid_path = os.path.join(\n archive.data_dir_path,\n now.strftime('%Y'),\n now.strftime('%m'),\n now.strftime('%d')\n )\n\n self.assertEqual(final_path, valid_path)",
"def output_out_of_date(self):\n if not os.path.exists(self.output_file):\n logging.info(\"will generate, missing binding output file\")\n return True\n output_mtime = os.path.getmtime(self.output_file)\n if self._any_files_newer(self.header_files, output_mtime):\n logging.info(\"will generate, header files newer\")\n return True\n if self._any_files_newer(self.interface_files, output_mtime):\n logging.info(\"will generate, interface files newer\")\n return True\n if self._file_newer(self.input_file, output_mtime):\n logging.info(\"will generate, swig input file newer\")\n return True\n if self._file_newer(self.extensions_file, output_mtime):\n logging.info(\"will generate, swig extensions file newer\")\n return True\n if self._file_newer(self.wrapper_file, output_mtime):\n logging.info(\"will generate, swig wrapper file newer\")\n return True\n if self._file_newer(self.typemaps_file, output_mtime):\n logging.info(\"will generate, swig typemaps file newer\")\n return True\n if self._file_newer(self.safecast_file, output_mtime):\n logging.info(\"will generate, swig safecast file newer\")\n return True\n\n # If we made it here, nothing is newer than the output file.\n # Thus, the output file is not out of date.\n return False",
"def test_make_output_folder_undefined_path(self):\n test_object = Maic()\n expected_result = \"\"\n self.assertEqual(expected_result,\n test_object.make_output_folder(output_folder=None),\n \"Should get back an empty string for an undefined \"\n \"output folder\")",
"def out_of_date(original, derived):\r\n return (not os.path.exists(derived) or\r\n (os.path.exists(original) and\r\n os.stat(derived).st_mtime < os.stat(original).st_mtime))",
"def precheck(self):\n if (not dfs.exists(self.outputpath)):\n logger.debug(\"precheck(%s): outputpath %s does not exist, ready to run.\" \n % (self, self.outputpath))\n return 'ready'\n inTSs = [dfs.modtime(file) for file in self.inputpaths]\n outTS = dfs.modtime(self.outputpath)\n newer = reduce(lambda x,y: x or y, [(inTS>outTS) for inTS in inTSs])\n logger.debug(\"Input timestamps: %s\" % inTSs)\n logger.debug(\"Output timestamp: %s\" % outTS)\n if newer:\n logger.debug(\"At least one input file is newer than outputfile, ready to run.\")\n dfs.delete(self.outputpath)\n return 'ready'\n else:\n logger.debug(\"All input files are newer than outputfile, skipping.\")\n return 'skip'",
"def test_directory_path_with_calendar():\n downloader = WallpaperDownloader(\"08-2020\")\n directory_path = downloader._get_directory_path(with_calendar=True)\n exist_directory_path = os.path.join(\n downloader.destination_directory_path,\n \"august-2020 (with-calendar)\",\n )\n assert directory_path == exist_directory_path",
"def create_output_folder(self):\n if not os.path.exists(self.current_path):\n os.mkdir(self.current_path)\n data_dir_by_date = datetime.datetime.now().strftime(\n \"data-%d-%b_%H-%M-%S\")\n self.date_path = os.path.join(self.current_path, data_dir_by_date)\n if not os.path.exists(self.date_path):\n os.mkdir(self.date_path)",
"def get_run_directory(output_root: Union[str, Path]) -> Path:\n output_root = Path(output_root).resolve()\n launch_time = datetime.datetime.now().strftime(\"%Y_%m_%d\")\n today_runs = [\n int(run_dir.name.split(\".\")[1])\n for run_dir in output_root.iterdir()\n if run_dir.name.startswith(launch_time)\n ]\n run_version = max(today_runs) + 1 if today_runs else 1\n datetime_dir = output_root / f\"{launch_time}.{run_version:0>2}\"\n return datetime_dir",
"def test_files_missing():\n\tfiledir = \"./goes_files/%Y_events/%Y%m%devents.txt\"\n\tt0 = timerange.start.datetime\n\tdays = [t0]\n\twhile timerange.end.datetime > t0:\n\t\tt0 = t0 + relativedelta(days=1)\n\t\tdays.append(t0)\n\n\tmissing_files = []\n\tfor d in days:\n\t\tif not os.path.exists(d.strftime(filedir)):\n\t\t\tmissing_files.append(d.strftime(filedir))\n\tprint(missing_files)",
"def test_make_output_folder_blank_path(self):\n test_object = Maic()\n expected_result = \"\"\n self.assertEqual(expected_result,\n test_object.make_output_folder(output_folder=\"\"),\n \"Should get back an empty string for an output \"\n \"folder specified as ''\")",
"def test_02_not_exist(self):\n with self.assertLogs(\"borg\", \"WARNING\") as logger:\n self.api.extract(self.archive, self.file_3)\n message = logger.records[0].getMessage()\n self.assertRegex(\n message,\n r\".*?file_3.*never\",\n \"Warning not logged for bad path\",\n )",
"def _validate_output_file_path(file_path: str):\n file_dir = os.path.dirname(file_path)\n if not os.path.isdir(file_dir):\n try:\n os.makedirs(file_dir)\n except Exception as e:\n utils.error(f\"Failed to create parent directory {file_dir} for file {file_path}. Reason: {e}\")\n if not os.access(file_dir, os.W_OK):\n utils.error(f\"Cannot write file: {file_path}. {file_dir} is not writeable.\")",
"def check_for_preexisting_output_file(output_file_path):\n if path.exists(f\"{output_file_path}\"):\n print(\"Output file at specified save location file path already exists!\")\n print(\"Aborting operation!\")\n sys.exit()",
"def create_experiment_folder(path_out, dir_name, name='', stamp_unique=True):\n assert os.path.exists(path_out), 'missing base folder \"%s\"' % path_out\n date = time.gmtime()\n if isinstance(name, str) and name:\n dir_name = '{}_{}'.format(dir_name, name)\n path_exp = os.path.join(path_out, dir_name)\n if stamp_unique:\n path_exp += '_' + time.strftime(FORMAT_DATE_TIME, date)\n path_created = None\n while not path_created:\n logging.warning('particular out folder already exists')\n if path_created is not None:\n path_exp += '-' + str(np.random.randint(0, 100))\n path_created = create_folder(path_exp, ok_existing=False)\n else:\n path_created = create_folder(path_exp, ok_existing=False)\n logging.info('created experiment folder \"%r\"', path_created)\n return path_exp",
"def check_output(self):\n directory, file = split(self.target)\n if not exists(directory):\n mkdir(directory)\n if exists(self.target):\n unlink(self.target)"
] | [
"0.65769327",
"0.6254106",
"0.62120444",
"0.61580235",
"0.60756755",
"0.602552",
"0.60144717",
"0.6014027",
"0.6002626",
"0.59853584",
"0.5975292",
"0.5899892",
"0.58989424",
"0.5844146",
"0.5834909",
"0.5810283",
"0.5796441",
"0.5729634",
"0.5713246",
"0.57086563",
"0.5672583",
"0.5655928",
"0.56004196",
"0.55908096",
"0.5575804",
"0.55558354",
"0.55340415",
"0.55174387",
"0.5511181",
"0.55028975"
] | 0.6856277 | 0 |
Check that an output folder path that exists and does end with something that looks like a timestamp raises an exception | def test_make_output_folder_exists_with_timestamp_fails(self,
mock_makedirs):
mock_makedirs.side_effect = [OSError]
test_object = Maic()
path = "my_path-1960-04-04--15-00"
try:
test_object.make_output_folder(output_folder=path)
except OSError:
pass
except BaseException:
self.fail("Should get an OSError") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_make_output_folder_exists_no_timestamp(self, mock_makedirs,\n mock_logger):\n mock_makedirs.side_effect = [OSError, True]\n test_object = Maic()\n path = \"my_path\"\n sep = os.sep\n if os.sep == '\\\\':\n # we've got a backslash which causes havoc in a regex so we need\n # to escape the backslash twice\n sep = '\\\\\\\\'\n result = test_object.make_output_folder(output_folder=path)\n match_string = r'^my_path-\\d{4}(-\\d{2}){2}-(-\\d{2}){2}' + sep + '$'\n self.assertTrue(\n re.search(match_string, result,\n re.S),\n \"Should have got a path with a Timestamp attached\")\n mock_logger.assert_called_with(\n \"Specified folder (my_path) already exists - trying to create \"\n \"one with a timestamp\")",
"def program_out_of_date(self, stamp_path):\n if not os.path.exists(stamp_path) or self.clean:\n return True\n with open(stamp_path, 'r') as stamp:\n return self.date != stamp.read()",
"def make_sure_path_exists(out_path):\n try:\n os.makedirs(out_path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n print \"Errors in output folder path! please change the output path or analysis name\\n\"\n exit()",
"def test_directory_path_without_calendar():\n downloader = WallpaperDownloader(\"08-2020\")\n directory_path = downloader._get_directory_path(with_calendar=False)\n exist_directory_path = os.path.join(\n downloader.destination_directory_path,\n \"august-2020 (without-calendar)\",\n )\n assert directory_path == exist_directory_path",
"def test_make_final_path_datetime(self):\n with DataArchive(self.user, DATA_DOWNLOADS_WORKING_DIR) as archive:\n now = timezone.now()\n final_path = archive.make_final_path(date=now)\n \n valid_path = os.path.join(\n archive.data_dir_path,\n now.strftime('%Y'),\n now.strftime('%m'),\n now.strftime('%d')\n )\n\n self.assertEqual(final_path, valid_path)",
"def check_base_filename(self, record):\n time_tuple = time.localtime()\n\n if self.suffix_time != time.strftime(self.suffix, time_tuple) or not os.path.exists(\n self.baseFilename):\n # if self.suffix_time != time.strftime(self.suffix, time_tuple) or not os.path.exists(\n # self.baseFilename + '.' + self.suffix_time):\n\n return 1\n else:\n return 0",
"def check_already_extracted(video_parts):\n filename_no_ext, _ = video_parts\n return bool(os.path.exists(os.path.join(output_dir,\n filename_no_ext + '-0030.jpg')))",
"def check_base_filename(self, record):\n time_tuple = time.localtime()\n\n if self.file_name_format:\n pass\n\n if self.suffix_time != time.strftime(self.suffix, time_tuple) or not os.path.exists(\n self._get_format_filename()):\n return 1\n else:\n return 0",
"def checkIfExist(foamCase):\n if os.path.isdir(foamCase):\n# endTime=0.4 #hardcode the endtime for cases which don't have a system folder\n try:\n endTime = readInput('controlDict', 'endTime', foamCase=foamCase)\n if os.path.isdir(foamCase+'/' + endTime):\n return 0\n else:\n return 1\n except:\n return 3\n else:\n return 2",
"def verify_folder_name(folder):\n regex = re.compile(\"\\/([0-9]{8})_([0-9]{6})_(\\w+)$\")\n find = regex.search(folder)\n if find:\n date = f\"{find.group(1)}_{find.group(2)}\"\n name = find.group(3)\n folder = f\"{date}_{name}\"\n try:\n ctime = datetime.datetime.strptime(date, \"%Y%m%d_%H%M%S\")\n return (folder, name, ctime)\n except:\n return False",
"def test_02_not_exist(self):\n with self.assertLogs(\"borg\", \"WARNING\") as logger:\n self.api.extract(self.archive, self.file_3)\n message = logger.records[0].getMessage()\n self.assertRegex(\n message,\n r\".*?file_3.*never\",\n \"Warning not logged for bad path\",\n )",
"def check_outpath(self, outpath):\n if not os.path.isdir(outpath+str(self.ar_no)):\n ar_outpath = os.path.join(outpath,str(self.ar_no))\n ar_outpath_video = os.path.join(outpath,str(self.ar_no)+'_video')\n os.makedirs(ar_outpath)\n os.makedirs(ar_outpath_video)\n print(\"Path does not exist, create: \")\n print(ar_outpath)\n print(ar_outpath_video)",
"def out_of_date(original, derived):\r\n return (not os.path.exists(derived) or\r\n (os.path.exists(original) and\r\n os.stat(derived).st_mtime < os.stat(original).st_mtime))",
"def check_for_preexisting_output_file(output_file_path):\n if path.exists(f\"{output_file_path}\"):\n print(\"Output file at specified save location file path already exists!\")\n print(\"Aborting operation!\")\n sys.exit()",
"def _validate_output_file_path(file_path: str):\n file_dir = os.path.dirname(file_path)\n if not os.path.isdir(file_dir):\n try:\n os.makedirs(file_dir)\n except Exception as e:\n utils.error(f\"Failed to create parent directory {file_dir} for file {file_path}. Reason: {e}\")\n if not os.access(file_dir, os.W_OK):\n utils.error(f\"Cannot write file: {file_path}. {file_dir} is not writeable.\")",
"def test_directory_path_with_calendar():\n downloader = WallpaperDownloader(\"08-2020\")\n directory_path = downloader._get_directory_path(with_calendar=True)\n exist_directory_path = os.path.join(\n downloader.destination_directory_path,\n \"august-2020 (with-calendar)\",\n )\n assert directory_path == exist_directory_path",
"def test_files_missing():\n\tfiledir = \"./goes_files/%Y_events/%Y%m%devents.txt\"\n\tt0 = timerange.start.datetime\n\tdays = [t0]\n\twhile timerange.end.datetime > t0:\n\t\tt0 = t0 + relativedelta(days=1)\n\t\tdays.append(t0)\n\n\tmissing_files = []\n\tfor d in days:\n\t\tif not os.path.exists(d.strftime(filedir)):\n\t\t\tmissing_files.append(d.strftime(filedir))\n\tprint(missing_files)",
"def test_make_final_path_date(self):\n with DataArchive(self.user, DATA_DOWNLOADS_WORKING_DIR) as archive:\n now = timezone.now().date()\n final_path = archive.make_final_path(date=now)\n \n valid_path = os.path.join(\n archive.data_dir_path,\n now.strftime('%Y'),\n now.strftime('%m'),\n now.strftime('%d')\n )\n\n self.assertEqual(final_path, valid_path)",
"def duplicate_timestamp_path(existing_path):\n logfile = parse.parse_filename(existing_path)\n index = 0\n while index < 25:\n if index == 0:\n suffix = ''\n else:\n suffix = '-%02d' % index\n\n new_path = parse.unparse_filename(\n (\n logfile.prefix +\n '-logjam-compress-duplicate-timestamp' +\n suffix\n ),\n logfile.timestamp,\n logfile.suffix,\n logfile.extension\n )\n if not os.path.exists(new_path):\n return new_path\n\n index += 1\n\n raise Exception('%d duplicate timestamp paths detected.' % index)",
"def _has_valid_save_as(self):\n try:\n output_path = self.settings[\"OUTPUT_PATH\"]\n except KeyError:\n # we cannot check\n return True\n\n try:\n sanitised_join(output_path, self.save_as)\n except RuntimeError: # outside output_dir\n logger.error(\n \"Skipping %s: file %r would be written outside output path\",\n self,\n self.save_as,\n )\n return False\n\n return True",
"def test_log_filenames_invalid_timestamp(self):\n with self.assertRaises(Exception):\n self.app.log_filenames(self.track_path('silence.mp3'), timestamp='foo')\n self.assertEqual(self.get_track_count(), 0)",
"def prerun(timestamp):\r\n if not os.path.isdir('log'):\r\n os.makedirs('log')\r\n if not os.path.isdir('collected'):\r\n os.makedirs('collected')\r\n if not os.path.isdir('done'):\r\n os.makedirs('done')\r\n time_stamped_folder = os.path.join('collected', timestamp)\r\n if not os.path.isdir(time_stamped_folder):\r\n os.makedirs(time_stamped_folder)\r\n return time_stamped_folder",
"def test_output_exists():\n global out_dir, cor_dir\n assert(path.exists(path.join(out_dir, 'oshea_similarity.json')))",
"def _is_path_inside_output_dir(self, path: str) -> bool:\n real_output_dir = os.path.realpath(self._output_dir)\n real_file_path = os.path.realpath(path)\n return os.path.commonpath([real_output_dir, real_file_path]) == real_output_dir",
"def output_out_of_date(self):\n if not os.path.exists(self.output_file):\n logging.info(\"will generate, missing binding output file\")\n return True\n output_mtime = os.path.getmtime(self.output_file)\n if self._any_files_newer(self.header_files, output_mtime):\n logging.info(\"will generate, header files newer\")\n return True\n if self._any_files_newer(self.interface_files, output_mtime):\n logging.info(\"will generate, interface files newer\")\n return True\n if self._file_newer(self.input_file, output_mtime):\n logging.info(\"will generate, swig input file newer\")\n return True\n if self._file_newer(self.extensions_file, output_mtime):\n logging.info(\"will generate, swig extensions file newer\")\n return True\n if self._file_newer(self.wrapper_file, output_mtime):\n logging.info(\"will generate, swig wrapper file newer\")\n return True\n if self._file_newer(self.typemaps_file, output_mtime):\n logging.info(\"will generate, swig typemaps file newer\")\n return True\n if self._file_newer(self.safecast_file, output_mtime):\n logging.info(\"will generate, swig safecast file newer\")\n return True\n\n # If we made it here, nothing is newer than the output file.\n # Thus, the output file is not out of date.\n return False",
"def test_timestamp_not_found(self, l):\n extract_columns(data=self.data, columns=['a'], timestamps=['timestamp'])\n l.check(\n ('pynts.util', 'WARNING', \"Couldn't find timestamps '['timestamp']' in data, using 'ts' instead\"),\n )",
"def test_non_existing_directory_raises_when_metavar_is_dir_for_db_export_cleaned(self):\n with contextlib.redirect_stderr(io.StringIO()) as stderr:\n with pytest.raises(SystemExit):\n parser = cli_parser.get_parser()\n parser.parse_args([\"db\", \"export-archived\", \"--output-path\", \"/non/existing/directory\"])\n error_msg = stderr.getvalue()\n\n assert error_msg == (\n \"\\nairflow db export-archived command error: The directory \"\n \"'/non/existing/directory' does not exist!, see help above.\\n\"\n )",
"def test_make_output_fail():\n with pytest.raises(ValueError):\n make_output_format('dummy_format', LOG_DIR)",
"def is_crashing_test(path):\n if not path.endswith('expected.txt'):\n if 'crash' in path.lower():\n if 'svn' not in path.lower():\n return True\n return False",
"def is_timeseries(filepath):\n\n if os.path.isdir(os.path.dirname(filepath)):\n\n if len(os.listdir(os.path.dirname(filepath))) > 1:\n ts = True\n else:\n ts = False\n else:\n ts = None\n\n return ts"
] | [
"0.66987944",
"0.63590485",
"0.6132183",
"0.60929406",
"0.604455",
"0.6034466",
"0.60191363",
"0.60158616",
"0.59803444",
"0.59565026",
"0.59523237",
"0.5931433",
"0.5840402",
"0.57827204",
"0.5757492",
"0.5711939",
"0.5690312",
"0.5685456",
"0.5668919",
"0.566759",
"0.5656807",
"0.56350976",
"0.5620488",
"0.5618824",
"0.56136435",
"0.559473",
"0.5565586",
"0.55533695",
"0.5543662",
"0.54868793"
] | 0.68613607 | 0 |
Function to remove the line numbers from the debug output of gyp and thus reduce the extreme fragility of the stdout comparison tests. | def remove_debug_line_numbers(contents):
lines = contents.splitlines()
# split each line on ":"
lines = [l.split(":", 3) for l in lines]
# join each line back together while ignoring the
# 3rd column which is the line number
lines = [len(l) > 3 and ":".join(l[3:]) or l for l in lines]
return "\n".join(lines) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_curl_debug_lines(text: str) -> str:\n lines = text.split(\"\\n\")\n lines = [line for line in lines if not line.startswith(\"**\")]\n return \"\\n\".join(lines)",
"def lines_without_stdlib(self):\n prev_line = None\n current_module_path = inspect.getabsfile(inspect.currentframe())\n for module_path, lineno, runtime in self.lines:\n module_abspath = os.path.abspath(module_path)\n if not prev_line:\n prev_line = [module_abspath, lineno, runtime]\n else:\n if (not check_standard_dir(module_path) and\n module_abspath != current_module_path):\n yield prev_line\n prev_line = [module_abspath, lineno, runtime]\n else:\n prev_line[2] += runtime\n yield prev_line",
"def delete_line_numbers(text):\n text = re.sub(r\"(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}):(\\d+)\", r\"\\g<1>#\\g<2>\", text)\n\n res = re.sub(r\"(?<=:)\\d+(?=\\)?\\]?(\\n|\\r\\n|$))\", \" \", text)\n res = re.sub(r\"((?<=line )|(?<=line))\\s*\\d+\\s*((?=, in)|(?=,in)|(?=\\n)|(?=\\r\\n)|(?=$))\",\n \" \", res, flags=re.I)\n res = re.sub(\"|\".join([r\"\\.%s(?!\\.)\\b\" % ext for ext in file_extensions]), \" \", res, flags=re.I)\n res = re.sub(r\"(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})#(\\d+)\", r\"\\g<1>:\\g<2>\", res)\n result = re.search(r\"^\\s*at\\s+.*\\(.*?\\)[\\s]*$\", res)\n if result and result.group(0) == res:\n res = re.sub(r\"\\d\", \"\", res)\n res = \"# \" + res\n else:\n result = re.search(r\"^\\s*\\w+([\\.\\/]\\s*\\w+)+\\s*\\(.*?\\)[\\s]*$\", res)\n if result and result.group(0) == res:\n res = \"# \" + res\n return res",
"def strip_python_stderr(stderr):\n stderr = re.sub(br\"\\[\\d+ refs, \\d+ blocks\\]\\r?\\n?\", b\"\", stderr).strip()\n return stderr",
"def strip_warnings(self, line):\n if line[0] == \"|\":\n return \"\"\n else:\n return line",
"def _strip_position(line: str) -> str:\n line = \".py\".join(line.split(\".py:\")[1:])\n line = \" \".join(line.split(\" \")[1:])\n return line",
"def ugly():\n\n global _pretty\n _pretty = False",
"def __remove_line_numbers(file_contents: str) -> str:\n\n spaces = ' ' * 6\n result = ''\n\n for line in file_contents.splitlines():\n new_line = spaces + line[6:72].rstrip()\n result += new_line + '\\n'\n\n return result",
"def hide_magic(source: str) -> str:\n\n def _hide_magic_line(line: str) -> str:\n return f\"###MAGIC###{line}\" if contains_magic(line) else line\n\n return \"\\n\".join(_hide_magic_line(line) for line in source.split(\"\\n\"))",
"def remove_firebug_calls(js_data):\n js_data = re.compile('console\\.[^(]*?\\([^()]*?\\);').sub(\"\", js_data)\n return js_data",
"def test_very_verbose_output_not_truncated(self, monkeypatch):\n hooks = setup_hooks(very_verbose=True)\n line_length = 20\n monkeypatch.setattr(\n \"repobee_junit4._output._truncate_lines\",\n partial(_output._truncate_lines, max_len=line_length),\n )\n\n result = hooks.act_on_cloned_repo(FAIL_REPO)\n\n lines = result.msg.split(os.linesep)\n assert len(lines) > 1\n # the first line can be somewhat longer due to staus message\n # and color codes\n assert any([len(line) > line_length for line in lines[1:]])",
"def remove_warnings(self, program):\n lines = program.split(\"\\n\")\n clean = []\n for line in lines:\n if line.startswith(\"Dafny program verifier finished\"):\n pass\n elif re.search(\"Warning: .*No terms found\", line):\n pass\n elif re.search(\"Warning: the type of the other operand\", line):\n pass\n else:\n clean.append(line)\n return \"\\n\".join(clean)",
"def remLines(origFile):\n\n noLineFile = origFile + \".noline\"\n\n # Generate no line file\n cmd = \"cat %s | sed -e '/^\\s*\\.line.*$/d' | sed -e 's/\\/jumbo//' > %s\" % \\\n (commands.mkarg(origFile), commands.mkarg(noLineFile))\n commands.getstatusoutput(cmd)\n\n if not os.path.exists(noLineFile):\n return None\n\n # Generate line patch\n linesPatch = origFile + \".linepatch\"\n cmd = \"diff -B -u %s %s > %s\" % \\\n (commands.mkarg(noLineFile), commands.mkarg(origFile), commands.mkarg(linesPatch))\n commands.getstatusoutput(cmd)\n\n shutil.move(noLineFile, origFile)\n\n return linesPatch",
"def test_clean_lines(self):\n before_b = \"\"\"\\\n # Should remove all trailing whitespace.\n\n a = 2 \n \n b = 3\n c = 4 \n d = 5\n e = 6 \n x\n \"\"\"\n after_b = \"\"\"\\\n # Should remove all trailing whitespace.\n\n a = 2\n\n b = 3\n c = 4\n d = 5\n e = 6\n x\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"1.0\"),\n after_sel=(\"1.0\", \"1.0\"),\n command_name=\"clean-lines\",\n )",
"def suppressMessages():\n dislin.unit(0)",
"def __clean_line_comments(self):\n self.lines = [l for l in self.lines if not l.startswith(\"//\") and len(l) != 0]",
"def __clean_line_comments(self):\n self.lines = [l for l in self.lines if not l.startswith(\"//\") and len(l) != 0]",
"def _chop_end_misc(line):\n return re.sub(r\"\\s+\\d\\d-\\w\\w\\w-\\d\\d\\s+[1-9][0-9A-Z]{3}\\s*\\Z\", \"\", line)",
"def debug_dump(black_chunks: List[DiffChunk], edited_linenums: List[int]) -> None:\n if logger.getEffectiveLevel() > logging.DEBUG:\n return\n for offset, old_lines, new_lines in black_chunks:\n print(80 * \"-\")\n for delta, old_line in enumerate(old_lines):\n linenum = offset + delta\n edited = \"*\" if linenum in edited_linenums else \" \"\n print(f\"{edited}-{linenum:4} {old_line}\")\n for _, new_line in enumerate(new_lines):\n print(f\" + {new_line}\")\n print(80 * \"-\")",
"def _chop_end_codes(line):\n return re.sub(r\"\\s\\s\\s\\s+[\\w]{4}.\\s+\\d*\\Z\", \"\", line)",
"def _filter_codesign_output(codesign_output):\n filtered_lines = []\n for line in codesign_output.splitlines():\n if line and not _BENIGN_CODESIGN_OUTPUT_REGEX.search(line):\n filtered_lines.append(line)\n return \"\\n\".join(filtered_lines)",
"def _trunc_lines(self):\n\t\tif self._appendMessages:\n\t\t\tself._trunc_lines_append()\n\t\telse:\n\t\t\tself._trunc_lines_prepend()",
"def strip_output(nb):\n nb.metadata.pop(\"signature\", None)\n for cell in _cells(nb):\n if \"outputs\" in cell:\n cell[\"outputs\"] = []\n if \"prompt_number\" in cell:\n cell[\"prompt_number\"] = None\n return nb",
"def test_remove_blank_lines(self):\n before_b = \"\"\"\\\n first line\n\n line 1\n line a\n line b\n\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"9.0\"),\n after_sel=(\"1.0\", \"6.9\"),\n command_name=\"remove-blank-lines\",\n )",
"def remove_lines():\n work_folder = os.path.join(CURRENT_FOLDER, \"..\\\\Data\\\\weather_data\")\n with open(os.path.join(work_folder, \"filtered_merged_history_KMDW.csv\"), \"w\") as outfile:\n with open(os.path.join(work_folder, \"merged_history_KMDW.csv\")) as infile:\n outfile.write(infile.next())\n for line in infile:\n if line[0].isdigit():\n outfile.write(line)",
"def prolog(out):\n print(lstrip(\"\"\"\n // Copyright 2021 The Chromium Authors\n // Use of this source code is governed by a BSD-style license that can be\n // found in the LICENSE file.\n\n // This file is automatically generated. Do not edit. Just generate.\n // $ ninja -C ... generate_sanitizer_builtins\n\n #include \"third_party/blink/renderer/modules/sanitizer_api/builtins/sanitizer_builtins.h\"\n \"\"\"),\n file=out)",
"def debug():",
"def getDebugLines(self):\n return self._get_table_info() + self._get_avatar_info() + self._get_player_info()",
"def degsOutput(err, globalNameSpace):\n lineNumber = err.lineNumber\n columnNumber = err.columnNumber\n err.msg = '\\n' + err.msg + '\\n'\n print(err.msg, file=sys.stderr)\n if not lineNumber == None:\n positionReference = [\"Error caused at line %(lineNumber)i\" % locals()]\n if not columnNumber == None:\n positionReference.append(\", column %(columnNumber)i\" % locals())\n positionReference.append(\":\\n\")\n positionReference.append(globalNameSpace['inputScript'].splitlines(True)[lineNumber-1])\n if not columnNumber == None:\n positionReference.append(\" \"*(columnNumber-1) + \"^~~ here.\")\n print(''.join(positionReference) + '\\n', file=sys.stderr)\n if err.element:\n print(\"In element: \" + err.element.userUnderstandableXPath(), file=sys.stderr)\n else:\n print(\"Unknown element. Please report this error to %s\" % globalNameSpace['bugReportAddress'], file=sys.stderr)",
"def clean_diff(diff):\n res = []\n skip = True\n for line in diff.split('\\n'):\n if line.startswith('diff --git'):\n skip = True\n if line.startswith('@@ '):\n skip = False\n if not skip:\n res.append(line)\n return '\\n'.join(res)"
] | [
"0.6622459",
"0.59901255",
"0.59256816",
"0.58421296",
"0.581115",
"0.57030994",
"0.5660609",
"0.55992305",
"0.55508304",
"0.5550596",
"0.552097",
"0.55145377",
"0.54679954",
"0.5458479",
"0.5454181",
"0.54463637",
"0.54463637",
"0.53545886",
"0.53511184",
"0.5350658",
"0.5345324",
"0.53434503",
"0.532315",
"0.53153664",
"0.53137153",
"0.52972585",
"0.5243674",
"0.5236876",
"0.5231545",
"0.5224446"
] | 0.7173805 | 0 |
File contents matcher that ignores line numbers. | def match_modulo_line_numbers(contents_a, contents_b):
contents_a = remove_debug_line_numbers(contents_a)
contents_b = remove_debug_line_numbers(contents_b)
return TestCommon.match_exact(contents_a, contents_b) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_line_count(self):\n self.assertEqual(analyze_text(self.filename)[0], 11)",
"def test_line_count(self):\n self.assertEqual(analyze_text(self.filename)[0], 4)",
"def test_line_count(self):\n self.assertEqual(analyze_text(self.filename)[0], 4)",
"def _MatchPatternLines(self, in_stream, re_pattern, num_lines=None):\n num_read = 0\n while True:\n line = in_stream.readline()\n if not line:\n return None\n num_read += 1\n m = re_pattern.match(line)\n if m is not None:\n return m\n if num_lines is not None and num_read >= num_lines:\n return None",
"def test_basic_dummy_match(self):\n self.assertLines(\n [\"a\", r\"\\d\", \"examples/dummy.csv\"], [\"a,b,c,a_xfind\", \"1,2,3,1\",]\n )",
"def test_line_count(self):\n\t\tself.assertEqual(analyse_text(self.filename)[0], 4)",
"def _actual_lines(file_handle):\n for line in file_handle:\n yield line",
"def commentOutLineMatching(pattern,fileName,maxOccurs=None):\n \n file=open(fileName,mode='r')\n pattern=re.compile(pattern)\n fileText=\"\"\n numMatches=0\n if maxOccurs==None:\n maxOccurs=sys.maxsize\n \n for line in file:\n \n if pattern.match(line) and numMatches<maxOccurs:\n fileText+=\"#\"+line\n numMatches+=1\n else:\n fileText+=line\n file.close()\n file=open(fileName,mode='w')\n file.write(fileText)\n file.close()\n return numMatches",
"def test_file_readlines(self):\n FileWriter(self.multiline_path).write(self.multiline_string)\n line_list = FileReader(self.multiline_path).readlines()\n self.assertEqual(line_list, self.multiline_list)",
"def _expected_lines_and_line_numbers(path, check_prefix):\n with open(path) as f:\n for index, line in enumerate(f):\n if 'RUN:' in line:\n # Ignore lit directives, which may include a call to\n # xctest_checker that specifies a check prefix.\n continue\n\n # Note that line numbers are not zero-indexed; we must add one to\n # the loop index.\n line_number = index + 1\n\n components = line.split(check_prefix)\n if len(components) == 2:\n yield (replace_offsets(components[1].strip(), line_number),\n line_number)\n elif len(components) > 2:\n # Include a newline, then the file name and line number in the\n # exception in order to have it appear as an inline failure in\n # Xcode.\n raise XCTestCheckerError(\n path, line_number,\n 'Usage violation: prefix \"{}\" appears twice in the same '\n 'line.'.format(check_prefix))",
"def test_file_read():\n expected = [\"scorevideo LOG\\n\", \"File: log.mat\"]\n with open(TEST_RES + \"/file_read.txt\", 'r') as file:\n actual = file.readlines()\n assert expected == actual",
"def exercise_lines(path):\n with open(path) as fin:\n within_exercise = False\n for line, line_number in zip(fin, count(1)):\n line = line.lstrip()\n\n if within_exercise and line.startswith('#'):\n yield line_number\n elif not within_exercise and line.startswith('#') and 'EXERCISE:' in line:\n within_exercise = True\n yield line_number\n else:\n within_exercise = False",
"def test_file_iterator_strips_newlines(self):\n for line in file_iterator('example_module.py'):\n self.assertFalse(line.endswith('\\n'))",
"def test_file_iterator_removes_leading_whitespace(self):\n for line in file_iterator('example_module.py'):\n self.assertFalse(line.startswith(' '))",
"def matchlines(self, file, text):\n escaped_text = text\n # Replace escaped Jinja blocks with the same number of empty lines\n for match in self.jinja_escape_regex.finditer(text):\n start = match.start()\n end = match.end()\n # Get the number of newlines in the escaped match\n lines = text[start:end].splitlines()\n num_of_lines = len(lines) - 1\n\n # Replace escaped Jinja block in the escaped text by newlines to\n # keep all the line numbers consistent\n pre_text = escaped_text[:start]\n post_text = escaped_text[end:]\n newlines = '\\n' * num_of_lines\n escaped_text = pre_text + newlines + post_text\n\n # Call the matchlines() on the parent class with the escaped text\n matches = super().matchlines(file, escaped_text)\n return matches",
"def fileReSeek(fh, regex):\n\n p = re.compile(regex)\n while True:\n line = fh.readline()\n if line == '':\n return None\n match = p.match(line)\n if match:\n return match",
"def scanpatch(fp):\n lr = patch.linereader(fp)\n\n def scanwhile(first, p):\n \"\"\"scan lr while predicate holds\"\"\"\n lines = [first]\n while True:\n line = lr.readline()\n if not line:\n break\n if p(line):\n lines.append(line)\n else:\n lr.push(line)\n break\n return lines\n\n while True:\n line = lr.readline()\n if not line:\n break\n if line.startswith('diff --git a/'):\n def notheader(line):\n s = line.split(None, 1)\n return not s or s[0] not in ('---', 'diff')\n header = scanwhile(line, notheader)\n fromfile = lr.readline()\n if fromfile.startswith('---'):\n tofile = lr.readline()\n header += [fromfile, tofile]\n else:\n lr.push(fromfile)\n yield 'file', header\n elif line[0] == ' ':\n yield 'context', scanwhile(line, lambda l: l[0] in ' \\\\')\n elif line[0] in '-+':\n yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\\\')\n else:\n m = lines_re.match(line)\n if m:\n yield 'range', m.groups()\n else:\n raise patch.PatchError('unknown patch content: %r' % line)",
"def _GetExpectationLine(self, expectation: data_types.Expectation,\n file_contents: str, expectation_file: str\n ) -> Union[Tuple[None, None], Tuple[str, int]]:\n # We have all the information necessary to recreate the expectation line and\n # line number can be pulled during the initial expectation parsing. However,\n # the information we have is not necessarily in the same order as the\n # text file (e.g. tag ordering), and line numbers can change pretty\n # dramatically between the initial parse and now due to stale expectations\n # being removed. So, parse this way in order to improve the user experience.\n file_lines = file_contents.splitlines()\n for line_number, line in enumerate(file_lines):\n if _IsCommentOrBlankLine(line.strip()):\n continue\n current_expectation = self._CreateExpectationFromExpectationFileLine(\n line, expectation_file)\n if expectation == current_expectation:\n return line, line_number + 1\n return None, None",
"def _whitelist_reader(path: str) -> Iterator[str]:\n with open(path, \"r\") as source:\n for line in source:\n line = re.sub(r\"\\s*#.*$\", \"\", line) # Removes comments from line.\n yield line.rstrip()",
"def _parse_ach_file(self, contents):\n file_length = len(contents)\n\n for index in range(0, file_length, self.LINE_LENGTH):\n line = contents[index:index + self.LINE_LENGTH]\n\n if line.startswith('1'):\n self._read_header(line)\n elif line.startswith('5'):\n self._read_batch_header(line)\n elif line.startswith('6'):\n self._read_entry_detail(line)\n elif line.startswith('7'):\n self._read_addenda_record(line)\n elif line.startswith('8'):\n self._read_batch_control_record(line)\n elif line.startswith('9'):\n if line == '9' * 94:\n continue\n self._read_file_control_record(line)",
"def test_onePerLine(self):\n fp = FilePath(self.mktemp())\n fp.setContent('something\\ncool')\n self.assertEqual(list(inventoryReader(fp.path)), ['something', 'cool'])",
"def filtered_lines():\n\n with open(filename, 'r') as cds_file:\n for line in cds_file:\n if(\n line[0] not in '#-'\n and\n line[:2] != ' |'\n ):\n yield line",
"def test_file_iterator_removes_all_whitespace(self):\n for line in file_iterator('example_module.py'):\n self.assertEqual(line, line.strip())",
"def _header_transformer(self, lines):\n needle = b'--%s\\n' % self.boundary\n in_header = False\n for line in lines:\n if line == needle:\n in_header = True\n if in_header:\n assert line[-1] == b'\\n'\n line = line[:-1] + b'\\r\\n'\n if line == b'\\r\\n':\n in_header = False\n yield line",
"def test_file_readlines_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).readlines()",
"def search_in(self, file_object):\n for line_num, line in enumerate(file_object.readlines()):\n line = line.replace(\"\\n\", \"\").replace(\"\\r\", \"\") # remove new line char\n if re.match(self.regex, line):\n result = f\"~{os.path.abspath(file_object.name)}: {line} (line {line_num})\"\n if self.colored:\n result = self.highlight_phrase(result)\n print(result, file=sys.stdout)",
"def assert_file_equals(self, actual, fn):\n with open(fn, 'r') as expected:\n self.assertEquals(\n manage.to_unicode(expected.read()).split('\\n'),\n self.filter_log(actual.split('\\n')))",
"def test_basic_dummy_no_match(self):\n self.assertLines([\"a\", \";\", \"examples/dummy.csv\"], [\"a,b,c,a_xfind\", \"1,2,3,\",])",
"def check_line_in(filename, line):\n with open(filename, \"r\") as f:\n for l in f:\n if l.rstrip() == line:\n break\n else:\n assert False, \"Could not find {} in {}:\\n{}\".format(\n repr(line), filename, content_of(filename)\n )",
"def test_do_not_ignore_empty_files(self):\n\n node_mock = MagicMock()\n node_mock.stream.return_value.__enter__.return_value.read.return_value.decode.return_value = ''\n with self.assertAddsMessages(pylint.testutils.Message(\n msg_id='invalid-file-header',\n line=1,\n args=self.EXPECTED_HEADER)):\n self.checker.process_module(node_mock)"
] | [
"0.6075021",
"0.6011866",
"0.6011866",
"0.5980236",
"0.58842176",
"0.5875963",
"0.5659142",
"0.5647999",
"0.5642151",
"0.5629507",
"0.5616189",
"0.560516",
"0.55971444",
"0.55460477",
"0.5533876",
"0.55265915",
"0.5506669",
"0.5466076",
"0.54504406",
"0.54157746",
"0.5407447",
"0.5405313",
"0.5389636",
"0.5375038",
"0.5368304",
"0.5366325",
"0.5359834",
"0.5359733",
"0.5358477",
"0.5357334"
] | 0.6234986 | 0 |
Fails the test if the specified built file name does not exist. | def built_file_must_exist(self, name, type=None, **kw):
return self.must_exist(self.built_file_path(name, type, **kw)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def built_file_must_not_exist(self, name, type=None, **kw):\n return self.must_not_exist(self.built_file_path(name, type, **kw))",
"def check_build_exists(self):\n path = self.base_dir + \"/\" + self.app_name + \"/\" + \"build\"\n from django_swagger_utils.core.utils.check_path_exists import check_path_exists\n build_dir = check_path_exists(path)\n if build_dir:\n raise Exception(\"Build Directory Already Exist, please run update_specs_build\")",
"def built_file_must_not_match(self, name, contents, **kw):\n return self.must_not_match(self.built_file_path(name, **kw), contents)",
"def test_not_present_file(self):\n\t\ttry:\n\t\t\tmain.Main(['input/abc.txt']).run()\n\t\texcept:\n\t\t\tself.assertTrue(True)",
"def test_missing_file(self):\r\n bundle = self.mkbundle('xyz', output=\"out\")\r\n assert_raises_regexp(\r\n BundleError, 'using staticfiles finders', bundle.build)",
"def test_failToBuild(self):\n # note no fake sphinx project is created\n self.assertRaises(CalledProcessError, self.builder.build, self.sphinxDir)",
"def test_exists(self):\n self.assertTrue(os.path.exists(__file__) == self._system.exists(__file__))",
"def BinaryExists(filename):\n return os.path.exists(os.path.join(self.options.build_dir, filename))",
"def built_file_must_match(self, name, contents, **kw):\n return self.must_match(self.built_file_path(name, **kw), contents)",
"def test_build_dir(self):\n build_dir = local.path(str(CFG['build_dir']))\n self.assertTrue(build_dir.exists())",
"def built_file_must_not_contain(self, name, contents, **kw):\n return self.must_not_contain(self.built_file_path(name, **kw), contents)",
"def _check_build(self, gppkg_file, gppkg_spec):\n return gppkg_file == gppkg_spec.get_filename()",
"def test_buildings_file_path(self):\n self.assertRaises(ValueError, buildings_clean, \"not_a_file_path\")",
"def checkPath(filename, projectSource):\n filePath = os.path.join(projectSource, filename)\n if os.path.exists(filePath):\n pass\n else:\n sys.stderr.write(\"Error: \" + filePath + \" not found\")\n sys.exit(1)\n return filePath",
"def test_raise_error_unknown_file():\n\n options = {'input_files': ['Sparta.lol']}\n\n with pytest.raises(KeyError) as excinfo:\n process.Packager(options)\n excinfo.match(r'File ([a-zA-Z_\\.\\'].*) not found in file list.')",
"def checkPath(self, filename):\r\n if (not os.path.exists(filename)):\r\n filename = os.getenv('MDLROOT')+'/'+filename\r\n if (not os.path.exists(filename)):\r\n print \"[MDL] ERROR, FILE\", filename, \"DOES NOT EXIST.\"\r\n sys.exit(1)\r\n return filename",
"def BuildExists(buildname):\n for platform in platforms:\n if not os.path.exists(builds_basedir+'/'+platform+'/'+buildname):\n return False\n return True",
"def check_for_assemble_file(task_file):\n if not os.path.exists(task_file):\n print_failure_msg(\"{} file is missing\".format(task_file))\n exit(127)\n return True",
"def test_missing_file(self):\n with self.assertRaises(subprocess.CalledProcessError) as cm:\n subprocess.check_output(\n [sys.executable, idf_py_path, '--version', '@args_non_existent'],\n env=os.environ,\n stderr=subprocess.STDOUT).decode('utf-8', 'ignore')\n self.assertIn('(expansion of @args_non_existent) could not be opened', cm.exception.output.decode('utf-8', 'ignore'))",
"def test_invalid_project_name(self):\n with tempfile.TemporaryDirectory() as tmp_dir:\n self.assertFalse(\n cifuzz.build_fuzzers(\n 'not_a_valid_project',\n 'oss-fuzz',\n tmp_dir,\n commit_sha='0b95fe1039ed7c38fea1f97078316bfc1030c523'))",
"def is_crashing_test(path):\n if not path.endswith('expected.txt'):\n if 'crash' in path.lower():\n if 'svn' not in path.lower():\n return True\n return False",
"def test_not_exectuable(self):\n (status, output, imlog, makelog) = \\\n self.run_instmake_build(log_prefix=\"not-executable\",\n make_opts=[\"not-executable\"])\n\n self.assertEqual(status, util.SUCCESS, output)",
"def _is_valid_file(arg: str) -> str:\n if not os.path.exists(arg):\n raise FileNotFoundError(\"%s does not exist\" % arg)\n return arg",
"def test_no_reuse_existing_build_dir(self, data):\n\n build_dir = os.path.join(self.tempdir, 'build', 'simple')\n os.makedirs(build_dir)\n open(os.path.join(build_dir, \"setup.py\"), 'w')\n reqset = self.basic_reqset()\n req = InstallRequirement.from_line('simple')\n reqset.add_requirement(req)\n finder = PackageFinder([data.find_links], [], session=PipSession())\n assert_raises_regexp(\n PreviousBuildDirError,\n \"pip can't proceed with [\\s\\S]*%s[\\s\\S]*%s\" %\n (req, build_dir.replace('\\\\', '\\\\\\\\')),\n reqset.prepare_files,\n finder,\n )",
"def test_missing_file():\n passed = False\n try:\n x = XPIManager('foo.bar')\n except:\n passed = True\n assert passed",
"def download_build(self, name, dst_directory):\n logging.info('Not downloading build because no Filestore.')",
"def test_py_file(self):\n\n self.assertTrue(os.path.isfile(\n \"{}/{}\".format(self.APP_PATH, self.TARGET_PY_FILE)),\n msg=\"py file does not exist\")",
"def test_construct_payload__file_not_found(self, task):\n task.options[\"name\"] = \"cci-deploy\"\n pkg_zip_file = Path(task.options[\"package_zip_file\"])\n with temporary_dir() as temp_dir:\n with zipfile.ZipFile(pkg_zip_file) as zf:\n zf.extractall(temp_dir)\n\n expected_payload_file = Path(temp_dir + \"/info.json\")\n assert expected_payload_file.is_file()\n Path.unlink(expected_payload_file)\n\n with pytest.raises(DeploymentException):\n task._construct_payload(Path(temp_dir))",
"def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"validate_match_batch.py\", get_files)",
"def test_builder_files_exists(code_builder: dataset_builder.DatasetBuilder):\n # When code is available, and no version specified, load from code\n builder = load.builder(code_builder.name)\n assert isinstance(builder, type(code_builder)) # Check builder is DummyMnist\n assert not isinstance(builder, read_only_builder.ReadOnlyBuilder)\n\n # If the version is specified, load from the files (backward support)\n builder = load.builder(f'{code_builder.name}:*.*.*') # Most recent version\n assert not isinstance(builder, type(code_builder))\n assert isinstance(builder, read_only_builder.ReadOnlyBuilder)\n\n # If the version is specified but files not found, load from the code\n builder = load.builder(\n f'{code_builder.name}:*.*.*', data_dir='/tmp/path/tfds/not-exists'\n )\n assert isinstance(builder, type(code_builder))\n assert not isinstance(builder, read_only_builder.ReadOnlyBuilder)"
] | [
"0.7556298",
"0.68302405",
"0.6742456",
"0.6706596",
"0.66908234",
"0.65641314",
"0.651123",
"0.6510022",
"0.6502727",
"0.64891094",
"0.64801955",
"0.64408535",
"0.6430225",
"0.6354745",
"0.6318534",
"0.6268824",
"0.62088376",
"0.6206341",
"0.6198592",
"0.61985487",
"0.61741894",
"0.6168016",
"0.61611515",
"0.61464095",
"0.6135789",
"0.6105133",
"0.61030424",
"0.6062952",
"0.60628605",
"0.6037928"
] | 0.7726593 | 0 |
Fails the test if the specified built file name exists. | def built_file_must_not_exist(self, name, type=None, **kw):
return self.must_not_exist(self.built_file_path(name, type, **kw)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def built_file_must_exist(self, name, type=None, **kw):\n return self.must_exist(self.built_file_path(name, type, **kw))",
"def check_build_exists(self):\n path = self.base_dir + \"/\" + self.app_name + \"/\" + \"build\"\n from django_swagger_utils.core.utils.check_path_exists import check_path_exists\n build_dir = check_path_exists(path)\n if build_dir:\n raise Exception(\"Build Directory Already Exist, please run update_specs_build\")",
"def built_file_must_not_match(self, name, contents, **kw):\n return self.must_not_match(self.built_file_path(name, **kw), contents)",
"def test_missing_file(self):\r\n bundle = self.mkbundle('xyz', output=\"out\")\r\n assert_raises_regexp(\r\n BundleError, 'using staticfiles finders', bundle.build)",
"def test_exists(self):\n self.assertTrue(os.path.exists(__file__) == self._system.exists(__file__))",
"def BinaryExists(filename):\n return os.path.exists(os.path.join(self.options.build_dir, filename))",
"def test_not_present_file(self):\n\t\ttry:\n\t\t\tmain.Main(['input/abc.txt']).run()\n\t\texcept:\n\t\t\tself.assertTrue(True)",
"def _check_build(self, gppkg_file, gppkg_spec):\n return gppkg_file == gppkg_spec.get_filename()",
"def BuildExists(buildname):\n for platform in platforms:\n if not os.path.exists(builds_basedir+'/'+platform+'/'+buildname):\n return False\n return True",
"def built_file_must_not_contain(self, name, contents, **kw):\n return self.must_not_contain(self.built_file_path(name, **kw), contents)",
"def built_file_must_match(self, name, contents, **kw):\n return self.must_match(self.built_file_path(name, **kw), contents)",
"def test_failToBuild(self):\n # note no fake sphinx project is created\n self.assertRaises(CalledProcessError, self.builder.build, self.sphinxDir)",
"def is_crashing_test(path):\n if not path.endswith('expected.txt'):\n if 'crash' in path.lower():\n if 'svn' not in path.lower():\n return True\n return False",
"def checkPath(filename, projectSource):\n filePath = os.path.join(projectSource, filename)\n if os.path.exists(filePath):\n pass\n else:\n sys.stderr.write(\"Error: \" + filePath + \" not found\")\n sys.exit(1)\n return filePath",
"def test_raise_error_unknown_file():\n\n options = {'input_files': ['Sparta.lol']}\n\n with pytest.raises(KeyError) as excinfo:\n process.Packager(options)\n excinfo.match(r'File ([a-zA-Z_\\.\\'].*) not found in file list.')",
"def _is_valid_file(arg: str) -> str:\n if not os.path.exists(arg):\n raise FileNotFoundError(\"%s does not exist\" % arg)\n return arg",
"def test_missing_file(self):\n with self.assertRaises(subprocess.CalledProcessError) as cm:\n subprocess.check_output(\n [sys.executable, idf_py_path, '--version', '@args_non_existent'],\n env=os.environ,\n stderr=subprocess.STDOUT).decode('utf-8', 'ignore')\n self.assertIn('(expansion of @args_non_existent) could not be opened', cm.exception.output.decode('utf-8', 'ignore'))",
"def check_exists(self, name):\n if self.pyload.config.get(\"download\", \"skip_existing\"):\n download_folder = self.pyload.config.get(\n 'general', 'download_folder')\n dest_file = fsjoin(download_folder,\n self.pyfile.package().folder if self.pyload.config.get(\n \"general\", \"folder_per_package\") else \"\",\n name)\n if exists(dest_file):\n self.pyfile.name = name\n self.skip(_(\"File exists.\"))",
"def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"validate_match_batch.py\", get_files)",
"def test_missing_file():\n passed = False\n try:\n x = XPIManager('foo.bar')\n except:\n passed = True\n assert passed",
"def test_invalid_project_name(self):\n with tempfile.TemporaryDirectory() as tmp_dir:\n self.assertFalse(\n cifuzz.build_fuzzers(\n 'not_a_valid_project',\n 'oss-fuzz',\n tmp_dir,\n commit_sha='0b95fe1039ed7c38fea1f97078316bfc1030c523'))",
"def check_for_assemble_file(task_file):\n if not os.path.exists(task_file):\n print_failure_msg(\"{} file is missing\".format(task_file))\n exit(127)\n return True",
"def test_buildings_file_path(self):\n self.assertRaises(ValueError, buildings_clean, \"not_a_file_path\")",
"def checkPath(self, filename):\r\n if (not os.path.exists(filename)):\r\n filename = os.getenv('MDLROOT')+'/'+filename\r\n if (not os.path.exists(filename)):\r\n print \"[MDL] ERROR, FILE\", filename, \"DOES NOT EXIST.\"\r\n sys.exit(1)\r\n return filename",
"def shouldhave(self, thisfile):\n if not os.path.isfile(thisfile):\n self.logtxt(\"ERROR: expected file (%s/%s) does not exist!\" %\n (os.getcwd(), thisfile), 'error')",
"def test_not_exectuable(self):\n (status, output, imlog, makelog) = \\\n self.run_instmake_build(log_prefix=\"not-executable\",\n make_opts=[\"not-executable\"])\n\n self.assertEqual(status, util.SUCCESS, output)",
"def checkExists(fileName):\n if fileName == '' or not pathlib.Path(fileName).exists():\n print('Error: {} is not found !!!'.format(fileName))\n exit()",
"def fileCheck(filename):\n if not os.path.isfile(filename):\n print('File: ' + filename + ' not found. Exiting...', file=sys.stderr)\n sys.exit(1)",
"def test_file_exists(self):\n self.assertTrue(os.path.exists(\"file.json\"))",
"def test_build_dir(self):\n build_dir = local.path(str(CFG['build_dir']))\n self.assertTrue(build_dir.exists())"
] | [
"0.7764489",
"0.6900003",
"0.6773992",
"0.67330974",
"0.67238265",
"0.6665234",
"0.6645847",
"0.6642983",
"0.6570576",
"0.65533084",
"0.65091807",
"0.6487374",
"0.6403167",
"0.6386307",
"0.63263017",
"0.632228",
"0.62877107",
"0.6276694",
"0.62678105",
"0.6252823",
"0.6243601",
"0.6240356",
"0.6239068",
"0.62112933",
"0.621037",
"0.6191118",
"0.61604273",
"0.61575973",
"0.6154464",
"0.6133825"
] | 0.75279915 | 1 |
Fails the test if the contents of the specified built file name do not match the specified contents. | def built_file_must_match(self, name, contents, **kw):
return self.must_match(self.built_file_path(name, **kw), contents) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def built_file_must_not_match(self, name, contents, **kw):\n return self.must_not_match(self.built_file_path(name, **kw), contents)",
"def built_file_must_not_contain(self, name, contents, **kw):\n return self.must_not_contain(self.built_file_path(name, **kw), contents)",
"def test_buildings_file_path(self):\n self.assertRaises(ValueError, buildings_clean, \"not_a_file_path\")",
"def built_file_must_exist(self, name, type=None, **kw):\n return self.must_exist(self.built_file_path(name, type, **kw))",
"def _check_build(self, gppkg_file, gppkg_spec):\n return gppkg_file == gppkg_spec.get_filename()",
"def built_file_must_not_exist(self, name, type=None, **kw):\n return self.must_not_exist(self.built_file_path(name, type, **kw))",
"def should_build(target_platform, changed_files):\n return any(_should_file_trigger_build(target_platform, file) for file in changed_files)",
"def test_invalidFile(self):\n self.assertRaises(cesmEnvLib.checkFile(\"blah\", \"write\"))",
"def test_input_files(self):\n files = list_files_folder(data_dir + \"build-custom/files/\", ext=\"fna.gz\")\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_input_files\"\n params[\"input\"] = files\n params[\"input_extension\"] = \"\"\n cfg = Config(\"build-custom\", **params)\n self.assertTrue(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom run failed\")\n res = build_sanity_check_and_parse(vars(cfg))\n self.assertIsNotNone(res, \"ganon build-custom sanity check failed\")\n\n self.assertTrue(res[\"target\"][\"file\"].isin(files).all(), \"Files missing from target\")\n self.assertEqual(len(files), res[\"target\"].shape[0], \"Wrong number of files on target\")\n self.assertTrue(res[\"info\"][\"file\"].isin(files).all(), \"Files missing from info\")\n self.assertEqual(len(files), res[\"info\"].shape[0], \"Wrong number of files on info\")\n\n # All files are invalid\n files = [f+\".xxx\" for f in files]\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_input_files_invalid\"\n params[\"input\"] = files\n params[\"input_extension\"] = \"\"\n cfg = Config(\"build-custom\", **params)\n self.assertFalse(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom ran but it should fail\")",
"def test_create_SHA_256_hash_of_file_matches_cosmic_build_tool(\n file_name, expected_hash\n):\n file_path = str(Path(__file__).parent.parent / \"steps/component1\" / file_name)\n hash = utils.create_SHA_256_hash_of_file(file_path)\n\n assert hash == expected_hash",
"def is_crashing_test(path):\n if not path.endswith('expected.txt'):\n if 'crash' in path.lower():\n if 'svn' not in path.lower():\n return True\n return False",
"def test_basic_validate_build_command_build():\n t = TestClient()\n conanfile = textwrap.dedent(\"\"\"\n from conan import ConanFile\n from conan.errors import ConanInvalidConfiguration\n\n class myConan(ConanFile):\n settings = \"os\"\n\n def validate_build(self):\n if self.settings.os == \"Windows\":\n raise ConanInvalidConfiguration(\"This doesn't build in Windows\")\n \"\"\")\n\n t.save({\"conanfile.py\": conanfile})\n t.run(f\"build . -s os=Windows\", assert_error=True)\n assert \"ERROR: conanfile.py: Cannot build for this configuration: \" \\\n \"This doesn't build in Windows\" in t.out\n t.run(\"build . -s os=Linux\")\n # It doesn't fail",
"def test_raise_error_unknown_file():\n\n options = {'input_files': ['Sparta.lol']}\n\n with pytest.raises(KeyError) as excinfo:\n process.Packager(options)\n excinfo.match(r'File ([a-zA-Z_\\.\\'].*) not found in file list.')",
"def test_build(self):\r\n self.mkbundle('file1', 'file2', output=\"out\").build()\r\n assert self.get(\"media/out\") == \"foo\\nbar\"",
"def _raise_incorrect_address_error(self, spec_path, wrong_target_name, addresses):\n was_not_found_message = '{target_name} was not found in BUILD files from {spec_path}'.format(\n target_name=wrong_target_name, spec_path=spec_path)\n\n if not addresses:\n raise self.EmptyBuildFileError(\n '{was_not_found_message}, because that directory contains no BUILD files defining addressable entities.'\n .format(was_not_found_message=was_not_found_message))\n # Print BUILD file extensions if there's more than one BUILD file with targets only.\n if (any(not hasattr(address, 'build_file') for address in addresses) or\n len(set([address.build_file for address in addresses])) == 1):\n specs = [':{}'.format(address.target_name) for address in addresses]\n else:\n specs = [':{} (from {})'.format(address.target_name, os.path.basename(address.build_file.relpath))\n for address in addresses]\n\n # Might be neat to sort by edit distance or something, but for now alphabetical is fine.\n specs = [''.join(pair) for pair in sorted(specs)]\n\n # Give different error messages depending on whether BUILD file was empty.\n one_of = ' one of' if len(specs) > 1 else '' # Handle plurality, just for UX.\n raise self.AddressNotInBuildFile(\n '{was_not_found_message}. Perhaps you '\n 'meant{one_of}: \\n {specs}'.format(was_not_found_message=was_not_found_message,\n one_of=one_of,\n specs='\\n '.join(specs)))",
"def _raise_incorrect_address_error(self, spec_path, wrong_target_name, addresses):\n was_not_found_message = '{target_name} was not found in BUILD files from {spec_path}'.format(\n target_name=wrong_target_name, spec_path=spec_path)\n\n if not addresses:\n raise self.EmptyBuildFileError(\n '{was_not_found_message}, because that directory contains no BUILD files defining addressable entities.'\n .format(was_not_found_message=was_not_found_message))\n # Print BUILD file extensions if there's more than one BUILD file with targets only.\n if (any(not hasattr(address, 'rel_path') for address in addresses) or\n len(set(address.rel_path for address in addresses)) == 1):\n specs = [':{}'.format(address.target_name) for address in addresses]\n else:\n specs = [':{} (from {})'.format(address.target_name, os.path.basename(address.rel_path))\n for address in addresses]\n\n # Might be neat to sort by edit distance or something, but for now alphabetical is fine.\n specs.sort()\n\n # Give different error messages depending on whether BUILD file was empty.\n one_of = ' one of' if len(specs) > 1 else '' # Handle plurality, just for UX.\n raise self.AddressNotInBuildFile(\n '{was_not_found_message}. Perhaps you '\n 'meant{one_of}: \\n {specs}'.format(was_not_found_message=was_not_found_message,\n one_of=one_of,\n specs='\\n '.join(specs)))",
"def test_mismatching_releases_raises_error(self):\n\n # The failure message that we expect to see\n expected_fail_regex = (\n f\"Provided release (.*) does not match release found in VersionInfo.xml\"\n )\n\n with self.assertRaisesRegex(\n docker.errors.BuildError,\n expected_fail_regex,\n ):\n # Build the Docker image using the default value for MATLAB_RELEASE,\n # which does not match with the one in mocks/matlab-install/VersionInfo.xml\n self.client.images.build(\n path=self.dockerfile_dirpath,\n forcerm=True,\n buildargs={\"MATLAB_RELEASE\": self.old_matlab_release},\n )",
"def test_verify_corrupt_archive(self):\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n output_files = os.listdir(\"testfiles/output\")\n archives = [elem for elem in output_files if \"vol\" in elem]\n for archive in archives:\n # Edit source file\n with open(\"testfiles/output/\" + archive, 'r+') as f:\n f.write('This writes text into each archive file to corrupt it.')\n # Test verify for the file\n try:\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable', options=[])\n except CmdError as e:\n # Should return a 21 error code for \"hash mismatch\"\n self.assertEqual(e.exit_status, 21, str(e))\n else:\n self.fail('Expected Hash Mismatch Error not thrown')",
"def match(goal, built, verbose):\n if not path.exists(built):\n return False, built + ' was not built (required by ' + goal + ')'\n exc, diff_file, diff = capture(['diff', built, goal], built + '.diff')\n if exc != 0:\n _, word_diff_file, word_diff = capture(\n ['git', 'diff', '--word-diff=color', '--no-index', built, goal],\n built + '.word.diff')\n msg = built + ' != ' + goal + '\\n' + diff_file + '\\n' + word_diff_file\n if verbose:\n for contents in [diff, word_diff]:\n msg += '\\n' + indent(contents())\n return False, msg\n else:\n return True, built + ' == ' + goal",
"def _compare(got, expected_filename):\n with open(os.path.join(TEST_DIR, expected_filename), 'r') \\\n as expected_file:\n expected = expected_file.read()\n assert got == expected",
"def file_name_check(file_name):\n # Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4\n # END OF CONTEXT\n print(\"9\")\n # END OF SOLUTION",
"def test_not_present_file(self):\n\t\ttry:\n\t\t\tmain.Main(['input/abc.txt']).run()\n\t\texcept:\n\t\t\tself.assertTrue(True)",
"def test_archivename(self):\n\n for testfile in ['6mbzipattachment.eml', '6mbrarattachment.eml']:\n try:\n # copy file rules\n tmpfile = tempfile.NamedTemporaryFile(\n suffix='virus', prefix='fuglu-unittest', dir='/tmp')\n shutil.copy(\"%s/%s\" % (TESTDATADIR, testfile), tmpfile.name)\n\n user = '[email protected]'\n conffile = self.tempdir + \"/%s-archivenames.conf\" % user\n open(conffile, 'w').write(\n \"deny largefile user does not like the largefile within a zip\\ndeny 6mbfile user does not like the largefile within a zip\")\n self.rulescache._loadrules()\n suspect = Suspect(\n '[email protected]', user, tmpfile.name)\n\n result = self.candidate.examine(suspect)\n if type(result) is tuple:\n result, message = result\n self.assertEqual(\n result, DELETE, 'archive containing blocked filename was not blocked')\n finally:\n tmpfile.close()\n os.remove(conffile)",
"def test_verify_corrupt_archive_compare_data(self):\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n output_files = os.listdir(\"testfiles/output\")\n archives = [elem for elem in output_files if \"vol\" in elem]\n for archive in archives:\n # Edit source file\n with open(\"testfiles/output/\" + archive, 'r+') as f:\n f.write('This writes text into each archive file to corrupt it.')\n # Test verify for the file\n try:\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable',\n options=[u\"--compare-data\"])\n except CmdError as e:\n # Should return a 21 error code for \"hash mismatch\"\n self.assertEqual(e.exit_status, 21, str(e))\n else:\n self.fail('Expected Hash Mismatch Error not thrown')",
"def test_003(compiler, temp_builds_dir):\n filepath = temp_builds_dir.join(\"foo/bar/home.txt\")\n\n content = \"\"\"Some sample unicode text: フランス Furansu\"\"\"\n\n compiler.write_content(content, filepath.strpath)\n\n # Read file to compare\n with io.open(filepath.strpath, \"r\", encoding=\"utf-8\") as f:\n result = f.read()\n\n assert content == result",
"def fail_check(version, num):\n f1 = open(\"replace/outputs/t\" + str(num), 'r')\n f2 = open(\"replace/outputs/v\" + str(version) + \"/t\" + str(num), 'r')\n ret = f1.readlines() != f2.readlines()\n f1.close()\n f2.close()\n return ret",
"def test_raise_error_unknown_field_filtered_files():\n\n files = ['Unihan_Variants.txt']\n\n options = {'input_files': files, 'fields': ['kDefinition']}\n\n with pytest.raises(KeyError) as excinfo:\n process.Packager(options)\n excinfo.match('Field ([a-zA-Z].*) not found in file list.')",
"def test_correct_fuzzer_build(self):\n test_fuzzer_dir = os.path.join(TEST_FILES_PATH, 'out')\n self.assertTrue(cifuzz.check_fuzzer_build(test_fuzzer_dir))",
"def test_build_command(self):\n output = name_pdfs.build_cmd(\"ChupStudent\", \"somefile.tex\")\n self.assertIsInstance(output, list)\n self.assertGreater(len(output), 0)\n for chunk in output:\n self.assertIsInstance(chunk, str)\n name = [8675309, \"ChupStudent\"]\n filename = [\"somefile.tex\", 8675309]\n for i in range(2):\n with self.subTest():\n with self.assertRaises(TypeError):\n name_pdfs.build_cmd(name[i], filename[i])",
"def test_invalid_project_name(self):\n with tempfile.TemporaryDirectory() as tmp_dir:\n self.assertFalse(\n cifuzz.build_fuzzers(\n 'not_a_valid_project',\n 'oss-fuzz',\n tmp_dir,\n commit_sha='0b95fe1039ed7c38fea1f97078316bfc1030c523'))"
] | [
"0.7865123",
"0.725757",
"0.6427956",
"0.6343546",
"0.61661303",
"0.6108487",
"0.6048125",
"0.6037537",
"0.60135037",
"0.59942937",
"0.5991819",
"0.59909046",
"0.5990231",
"0.5943843",
"0.5930705",
"0.5888282",
"0.5878586",
"0.58620876",
"0.5855489",
"0.5848895",
"0.58476365",
"0.5839241",
"0.58386844",
"0.58237296",
"0.5806284",
"0.58033",
"0.5792295",
"0.5769559",
"0.5768521",
"0.5763535"
] | 0.8206071 | 0 |
Fails the test if the contents of the specified built file name match the specified contents. | def built_file_must_not_match(self, name, contents, **kw):
return self.must_not_match(self.built_file_path(name, **kw), contents) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def built_file_must_match(self, name, contents, **kw):\n return self.must_match(self.built_file_path(name, **kw), contents)",
"def built_file_must_not_contain(self, name, contents, **kw):\n return self.must_not_contain(self.built_file_path(name, **kw), contents)",
"def built_file_must_exist(self, name, type=None, **kw):\n return self.must_exist(self.built_file_path(name, type, **kw))",
"def _check_build(self, gppkg_file, gppkg_spec):\n return gppkg_file == gppkg_spec.get_filename()",
"def should_build(target_platform, changed_files):\n return any(_should_file_trigger_build(target_platform, file) for file in changed_files)",
"def test_buildings_file_path(self):\n self.assertRaises(ValueError, buildings_clean, \"not_a_file_path\")",
"def match(goal, built, verbose):\n if not path.exists(built):\n return False, built + ' was not built (required by ' + goal + ')'\n exc, diff_file, diff = capture(['diff', built, goal], built + '.diff')\n if exc != 0:\n _, word_diff_file, word_diff = capture(\n ['git', 'diff', '--word-diff=color', '--no-index', built, goal],\n built + '.word.diff')\n msg = built + ' != ' + goal + '\\n' + diff_file + '\\n' + word_diff_file\n if verbose:\n for contents in [diff, word_diff]:\n msg += '\\n' + indent(contents())\n return False, msg\n else:\n return True, built + ' == ' + goal",
"def is_crashing_test(path):\n if not path.endswith('expected.txt'):\n if 'crash' in path.lower():\n if 'svn' not in path.lower():\n return True\n return False",
"def test_create_SHA_256_hash_of_file_matches_cosmic_build_tool(\n file_name, expected_hash\n):\n file_path = str(Path(__file__).parent.parent / \"steps/component1\" / file_name)\n hash = utils.create_SHA_256_hash_of_file(file_path)\n\n assert hash == expected_hash",
"def built_file_must_not_exist(self, name, type=None, **kw):\n return self.must_not_exist(self.built_file_path(name, type, **kw))",
"def Validate(self, relative_file, contents):\n pass",
"def test_invalidFile(self):\n self.assertRaises(cesmEnvLib.checkFile(\"blah\", \"write\"))",
"def is_built(args, task_name: str, artifact_name: str) -> bool:\n if task_name not in args._artifacts:\n return False\n\n for a in args._artifacts[task_name]:\n if a.name == artifact_name and a.built:\n return True\n elif a.name == artifact_name and not a.built:\n return False\n return False",
"def file_name_check(file_name):\n # Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4\n # END OF CONTEXT\n print(\"9\")\n # END OF SOLUTION",
"def test_raise_error_unknown_file():\n\n options = {'input_files': ['Sparta.lol']}\n\n with pytest.raises(KeyError) as excinfo:\n process.Packager(options)\n excinfo.match(r'File ([a-zA-Z_\\.\\'].*) not found in file list.')",
"def test_archivename(self):\n\n for testfile in ['6mbzipattachment.eml', '6mbrarattachment.eml']:\n try:\n # copy file rules\n tmpfile = tempfile.NamedTemporaryFile(\n suffix='virus', prefix='fuglu-unittest', dir='/tmp')\n shutil.copy(\"%s/%s\" % (TESTDATADIR, testfile), tmpfile.name)\n\n user = '[email protected]'\n conffile = self.tempdir + \"/%s-archivenames.conf\" % user\n open(conffile, 'w').write(\n \"deny largefile user does not like the largefile within a zip\\ndeny 6mbfile user does not like the largefile within a zip\")\n self.rulescache._loadrules()\n suspect = Suspect(\n '[email protected]', user, tmpfile.name)\n\n result = self.candidate.examine(suspect)\n if type(result) is tuple:\n result, message = result\n self.assertEqual(\n result, DELETE, 'archive containing blocked filename was not blocked')\n finally:\n tmpfile.close()\n os.remove(conffile)",
"def fail_check(version, num):\n f1 = open(\"replace/outputs/t\" + str(num), 'r')\n f2 = open(\"replace/outputs/v\" + str(version) + \"/t\" + str(num), 'r')\n ret = f1.readlines() != f2.readlines()\n f1.close()\n f2.close()\n return ret",
"def _compare(got, expected_filename):\n with open(os.path.join(TEST_DIR, expected_filename), 'r') \\\n as expected_file:\n expected = expected_file.read()\n assert got == expected",
"def test_003(compiler, temp_builds_dir):\n filepath = temp_builds_dir.join(\"foo/bar/home.txt\")\n\n content = \"\"\"Some sample unicode text: フランス Furansu\"\"\"\n\n compiler.write_content(content, filepath.strpath)\n\n # Read file to compare\n with io.open(filepath.strpath, \"r\", encoding=\"utf-8\") as f:\n result = f.read()\n\n assert content == result",
"def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"validate_match_batch.py\", get_files)",
"def test_basic_validate_build_command_build():\n t = TestClient()\n conanfile = textwrap.dedent(\"\"\"\n from conan import ConanFile\n from conan.errors import ConanInvalidConfiguration\n\n class myConan(ConanFile):\n settings = \"os\"\n\n def validate_build(self):\n if self.settings.os == \"Windows\":\n raise ConanInvalidConfiguration(\"This doesn't build in Windows\")\n \"\"\")\n\n t.save({\"conanfile.py\": conanfile})\n t.run(f\"build . -s os=Windows\", assert_error=True)\n assert \"ERROR: conanfile.py: Cannot build for this configuration: \" \\\n \"This doesn't build in Windows\" in t.out\n t.run(\"build . -s os=Linux\")\n # It doesn't fail",
"def test_not_present_file(self):\n\t\ttry:\n\t\t\tmain.Main(['input/abc.txt']).run()\n\t\texcept:\n\t\t\tself.assertTrue(True)",
"def test_build(self):\r\n self.mkbundle('file1', 'file2', output=\"out\").build()\r\n assert self.get(\"media/out\") == \"foo\\nbar\"",
"def test_sanitize_content_filename(filename, expected):\n assert sanitize_content_filename(filename) == expected",
"def test_input_files(self):\n files = list_files_folder(data_dir + \"build-custom/files/\", ext=\"fna.gz\")\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_input_files\"\n params[\"input\"] = files\n params[\"input_extension\"] = \"\"\n cfg = Config(\"build-custom\", **params)\n self.assertTrue(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom run failed\")\n res = build_sanity_check_and_parse(vars(cfg))\n self.assertIsNotNone(res, \"ganon build-custom sanity check failed\")\n\n self.assertTrue(res[\"target\"][\"file\"].isin(files).all(), \"Files missing from target\")\n self.assertEqual(len(files), res[\"target\"].shape[0], \"Wrong number of files on target\")\n self.assertTrue(res[\"info\"][\"file\"].isin(files).all(), \"Files missing from info\")\n self.assertEqual(len(files), res[\"info\"].shape[0], \"Wrong number of files on info\")\n\n # All files are invalid\n files = [f+\".xxx\" for f in files]\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_input_files_invalid\"\n params[\"input\"] = files\n params[\"input_extension\"] = \"\"\n cfg = Config(\"build-custom\", **params)\n self.assertFalse(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom ran but it should fail\")",
"def test_verify_corrupt_archive(self):\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n output_files = os.listdir(\"testfiles/output\")\n archives = [elem for elem in output_files if \"vol\" in elem]\n for archive in archives:\n # Edit source file\n with open(\"testfiles/output/\" + archive, 'r+') as f:\n f.write('This writes text into each archive file to corrupt it.')\n # Test verify for the file\n try:\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable', options=[])\n except CmdError as e:\n # Should return a 21 error code for \"hash mismatch\"\n self.assertEqual(e.exit_status, 21, str(e))\n else:\n self.fail('Expected Hash Mismatch Error not thrown')",
"def _raise_incorrect_address_error(self, spec_path, wrong_target_name, addresses):\n was_not_found_message = '{target_name} was not found in BUILD files from {spec_path}'.format(\n target_name=wrong_target_name, spec_path=spec_path)\n\n if not addresses:\n raise self.EmptyBuildFileError(\n '{was_not_found_message}, because that directory contains no BUILD files defining addressable entities.'\n .format(was_not_found_message=was_not_found_message))\n # Print BUILD file extensions if there's more than one BUILD file with targets only.\n if (any(not hasattr(address, 'build_file') for address in addresses) or\n len(set([address.build_file for address in addresses])) == 1):\n specs = [':{}'.format(address.target_name) for address in addresses]\n else:\n specs = [':{} (from {})'.format(address.target_name, os.path.basename(address.build_file.relpath))\n for address in addresses]\n\n # Might be neat to sort by edit distance or something, but for now alphabetical is fine.\n specs = [''.join(pair) for pair in sorted(specs)]\n\n # Give different error messages depending on whether BUILD file was empty.\n one_of = ' one of' if len(specs) > 1 else '' # Handle plurality, just for UX.\n raise self.AddressNotInBuildFile(\n '{was_not_found_message}. Perhaps you '\n 'meant{one_of}: \\n {specs}'.format(was_not_found_message=was_not_found_message,\n one_of=one_of,\n specs='\\n '.join(specs)))",
"def test_verify_corrupt_archive_compare_data(self):\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n output_files = os.listdir(\"testfiles/output\")\n archives = [elem for elem in output_files if \"vol\" in elem]\n for archive in archives:\n # Edit source file\n with open(\"testfiles/output/\" + archive, 'r+') as f:\n f.write('This writes text into each archive file to corrupt it.')\n # Test verify for the file\n try:\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable',\n options=[u\"--compare-data\"])\n except CmdError as e:\n # Should return a 21 error code for \"hash mismatch\"\n self.assertEqual(e.exit_status, 21, str(e))\n else:\n self.fail('Expected Hash Mismatch Error not thrown')",
"def _is_valid_file(arg: str) -> str:\n if not os.path.exists(arg):\n raise FileNotFoundError(\"%s does not exist\" % arg)\n return arg",
"def assertFilesEqual(self, name1, name2, msg=None):\n self.assertEqual(name1.getContent(), name2.getContent(), msg)"
] | [
"0.85351104",
"0.7359012",
"0.62920475",
"0.6136921",
"0.60466075",
"0.60382736",
"0.59580076",
"0.59560263",
"0.5939516",
"0.58847064",
"0.5883172",
"0.58079237",
"0.57745993",
"0.57733077",
"0.57392937",
"0.5728668",
"0.5728633",
"0.56980926",
"0.5695948",
"0.56950855",
"0.56655717",
"0.56486404",
"0.5646517",
"0.5636702",
"0.56356937",
"0.56082225",
"0.5601944",
"0.5600846",
"0.55949247",
"0.55886394"
] | 0.79406774 | 1 |
Copies the test configuration from the specified source_dir (the directory in which the test script lives) to the specified dest_dir (a temporary working directory). This ignores all files and directories that begin with the string 'gyptest', and all '.svn' subdirectories. | def copy_test_configuration(self, source_dir, dest_dir):
for root, dirs, files in os.walk(source_dir):
if '.svn' in dirs:
dirs.remove('.svn')
dirs = [ d for d in dirs if not d.startswith('gyptest') ]
files = [ f for f in files if not f.startswith('gyptest') ]
for dirname in dirs:
source = os.path.join(root, dirname)
destination = source.replace(source_dir, dest_dir)
os.mkdir(destination)
if sys.platform != 'win32':
shutil.copystat(source, destination)
for filename in files:
source = os.path.join(root, filename)
destination = source.replace(source_dir, dest_dir)
shutil.copy2(source, destination) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def copy_dir(source, dest, vars, verbosity=1, simulate=False, indent=0,\n sub_vars=True, interactive=False, overwrite=True,\n template_renderer=None, out_=sys.stdout):\n def out(msg):\n out_.write(msg)\n out_.write('\\n')\n out_.flush()\n # This allows you to use a leading +dot+ in filenames which would\n # otherwise be skipped because leading dots make the file hidden:\n vars.setdefault('dot', '.')\n vars.setdefault('plus', '+')\n use_pkg_resources = isinstance(source, tuple)\n if use_pkg_resources:\n names = sorted(pkg_resources.resource_listdir(source[0], source[1]))\n else:\n names = sorted(os.listdir(source))\n pad = ' '*(indent*2)\n if not os.path.exists(dest):\n if verbosity >= 1:\n out('%sCreating %s/' % (pad, dest))\n if not simulate:\n makedirs(dest, verbosity=verbosity, pad=pad)\n elif verbosity >= 2:\n out('%sDirectory %s exists' % (pad, dest))\n for name in names:\n if use_pkg_resources:\n full = '/'.join([source[1], name])\n else:\n full = os.path.join(source, name)\n reason = should_skip_file(name)\n if reason:\n if verbosity >= 2:\n reason = pad + reason % {'filename': full}\n out(reason)\n continue # pragma: no cover\n if sub_vars:\n dest_full = os.path.join(dest, substitute_filename(name, vars))\n sub_file = False\n if dest_full.endswith('_tmpl'):\n dest_full = dest_full[:-5]\n sub_file = sub_vars\n if use_pkg_resources and pkg_resources.resource_isdir(source[0], full):\n if verbosity:\n out('%sRecursing into %s' % (pad, os.path.basename(full)))\n copy_dir((source[0], full), dest_full, vars, verbosity, simulate,\n indent=indent+1,\n sub_vars=sub_vars, interactive=interactive,\n template_renderer=template_renderer, out_=out_)\n continue\n elif not use_pkg_resources and os.path.isdir(full):\n if verbosity:\n out('%sRecursing into %s' % (pad, os.path.basename(full)))\n copy_dir(full, dest_full, vars, verbosity, simulate,\n indent=indent+1,\n sub_vars=sub_vars, interactive=interactive,\n template_renderer=template_renderer, out_=out_)\n continue\n elif use_pkg_resources:\n content = pkg_resources.resource_string(source[0], full)\n else:\n f = open(full, 'rb')\n content = f.read()\n f.close()\n if sub_file:\n try:\n content = substitute_content(\n content, vars, filename=full,\n template_renderer=template_renderer\n )\n except SkipTemplate:\n continue # pragma: no cover\n if content is None:\n continue # pragma: no cover\n already_exists = os.path.exists(dest_full)\n if already_exists:\n f = open(dest_full, 'rb')\n old_content = f.read()\n f.close()\n if old_content == content:\n if verbosity:\n out('%s%s already exists (same content)' %\n (pad, dest_full))\n continue # pragma: no cover\n if interactive:\n if not query_interactive(\n native_(full, fsenc), native_(dest_full, fsenc),\n native_(content, fsenc), native_(old_content, fsenc),\n simulate=simulate, out_=out_):\n continue\n elif not overwrite:\n continue # pragma: no cover\n if verbosity and use_pkg_resources:\n out('%sCopying %s to %s' % (pad, full, dest_full))\n elif verbosity:\n out(\n '%sCopying %s to %s' % (pad, os.path.basename(full),\n dest_full))\n if not simulate:\n f = open(dest_full, 'wb')\n f.write(content)\n f.close()",
"def _copy_dir(\n source_dir: str,\n target_dir: str,\n *,\n exclude: Optional[List] = None,\n _retry: bool = True,\n) -> None:\n target_dir = os.path.normpath(target_dir)\n try:\n # Timeout 0 means there will be only one attempt to acquire\n # the file lock. If it cannot be aquired, a TimeoutError\n # will be thrown.\n with TempFileLock(f\"{target_dir}.lock\", timeout=0):\n _delete_path_unsafe(target_dir)\n\n _ignore = None\n if exclude:\n\n def _ignore(path, names):\n ignored_names = set()\n rel_path = os.path.relpath(path, source_dir)\n for name in names:\n candidate = os.path.join(rel_path, name)\n for excl in exclude:\n if fnmatch.fnmatch(candidate, excl):\n ignored_names.add(name)\n break\n return ignored_names\n\n shutil.copytree(source_dir, target_dir, ignore=_ignore)\n except TimeoutError:\n # wait, but do not do anything\n with TempFileLock(f\"{target_dir}.lock\"):\n pass\n # if the dir was locked due to being deleted,\n # recreate\n if not os.path.exists(target_dir):\n if _retry:\n _copy_dir(source_dir, target_dir, _retry=False)\n else:\n raise RuntimeError(\n f\"Target directory {target_dir} does not exist \"\n \"and couldn't be recreated. \"\n \"Please raise an issue on GitHub: \"\n \"https://github.com/ray-project/ray/issues\"\n )",
"def main(source_dir, dest_dir):\n\n paths = []\n for root, _, files in os.walk(source_dir):\n paths.extend([os.path.join(root, f) for f in files])\n\n def copy(source_path, skip_existing=True):\n \"\"\"Copies a file from source_path to source_path with\n source_dir replaced by dest_dir.\n\n Arguments:\n source_path(str): Path to a file to be copied.\n skip_existing(bool): True to skip copying files\n when the destination file already exists.\n \"\"\"\n\n dest_path = source_path.replace(source_dir.strip('/'), dest_dir.strip('/'))\n\n # Skip if dest file already exists\n if skip_existing and os.path.exists(dest_path):\n return\n\n # Create directory if necessary\n os.makedirs(os.path.dirname(dest_path), exist_ok=True)\n\n copyfile(source_path, dest_path)\n\n p_umap(copy, paths)",
"def clean_test_files(dest_dir):\n\n print 'Cleaning data files'\n folders = [os.path.join(dest_dir, 'testdata'),\n os.path.join(dest_dir, 'logs')]\n for the_folder in folders:\n if os.path.isdir(the_folder):\n for the_file in os.listdir(the_folder):\n file_path = os.path.join(the_folder, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except IOError, exception:\n print exception\n for the_folder in folders:\n if not os.path.isdir(the_folder):\n try:\n os.makedirs(the_folder)\n except OSError:\n print 'ERROR Could not create directory structure for tests.'",
"def copy_to_cwd(source_dir, source_name, dest_name):\n source_path = os.path.join(source_dir, source_name)\n dest_path = os.path.join(os.getcwd(), dest_name)\n if os.path.isfile(dest_path):\n raise OSError(dest_name + ' file exists in current directory.')\n shutil.copy2(source_path, dest_path)",
"def config_dir(tmpdir):\n test_dir = tmpdir.mkdir(\"config\")\n test_dir.join('config.cfg').write('')\n test_dir.join('extra.ini').write('')\n test_dir.join('module.cfg').write('')\n test_dir.join('README').write('')\n\n return test_dir",
"def copy_all_paths_to_sourcedata(input_dir: Path, raw_dir: Path):\n\n user_wants_to_continue = \"y\"\n\n if raw_dir.exists():\n print(f\"{raw_dir} already exists. Do you want to overwrite it?\")\n user_wants_to_continue = input(\"(y/n): \")\n \n if user_wants_to_continue == \"y\":\n rmtree(raw_dir, ignore_errors=True)\n print(f\"Copying {input_dir.name} to {raw_dir}\")\n print(\"This will probably take a really long time.\")\n copytree(src=input_dir, dst=raw_dir, dirs_exist_ok=True)\n print(\"Copying complete.\")\n\n else:\n print(f\"OK. I won't overwrite {raw_dir.name}, but I'll try bidsifying what's already inside it.\")",
"def copy(source_path, skip_existing=True):\n\n dest_path = source_path.replace(source_dir.strip('/'), dest_dir.strip('/'))\n\n # Skip if dest file already exists\n if skip_existing and os.path.exists(dest_path):\n return\n\n # Create directory if necessary\n os.makedirs(os.path.dirname(dest_path), exist_ok=True)\n\n copyfile(source_path, dest_path)",
"def copydir(source, dest):\n dest_par = os.path.dirname(dest)\n for root, dirs, files in os.walk(source):\n if not os.path.isdir(root):\n os.makedirs(root)\n\n for mdir in dirs:\n try:\n dest_path = os.path.join(dest_par, root, mdir)\n if not os.path.isdir(dest_path):\n os.makedirs(dest_path)\n except:\n pass\n for file in files:\n rel_path = root.replace(source, '').lstrip(os.sep)\n dest_path = os.path.join(dest, rel_path)\n if not os.path.isdir(dest_path):\n os.makedirs(dest_path)\n cpy_src = os.path.join(root, file)\n cpy_dest = os.path.join(dest_path, file)\n shutil.copyfile(cpy_src, cpy_dest)\n shutil.copymode(cpy_src, cpy_dest)",
"def fresh_copy_dir(source_path, target_path):\n os.mkdir(target_path)\n for item in os.listdir(source_path):\n s = os.path.join(source_path, item)\n t = os.path.join(target_path, item)\n if os.path.isdir(s):\n fresh_copy_dir(s, t)\n else:\n shutil.copyfile(s, t)",
"def copyDir(src, dst, includes, excludes = []):\n\tmultiFilesReplacements([], dst, src, includes, excludes)",
"def assert_destination(config: Config) -> Config:\n assert_directories(config.destination)\n\n return config",
"def copyDir(srcPath, destPath):\n shutil.copytree(srcPath, destPath)",
"def copy_dir(src=\"\", dst=\"\", header=\"\", footer=\"\", clip=0, ext=\"\", test=False):\n failed = []\n nfiles = 0\n if not os.path.exists(dst):\n os.makedirs(dst)\n if not os.path.exists(src):\n raise argparse.ArgumentError(\"source does not exist! It must be a directory.\")\n else:\n for root, dirs, files in os.walk(src, topdown=False):\n for name in files:\n name_wo_ext, file_ext = os.path.splitext(name)\n\n src_path = os.path.join(root, name)\n dstfilename = header + os.path.join(root[len(src)+1:], name_wo_ext[clip:]) + footer + file_ext\n dst_path = os.path.join(dst, dstfilename)\n\n dst_pdir = os.path.dirname(dst_path)\n if not os.path.exists(dst_pdir):\n os.makedirs(dst_pdir)\n\n if not os.path.exists(dst_path):\n if ext == \"\" or ext == file_ext[1:]:\n try:\n shutil.copy(src_path, dst_path)\n except:\n failed.append(src_path)\n print(f\"... {src_path} failed\")\n else:\n print(f\"... {dst_path} already exists'. Skipping\")\n nfiles += 1\n\n if test:\n break\n if test:\n break\n print(f\"{nfiles - len(failed)} / {nfiles} files were copied.\")\n return failed",
"def force_copy(src, dest):\r\n if os.path.isfile(dest):\r\n os.remove(dest)\r\n if os.path.isdir(dest):\r\n dest = os.path.join(dest, os.path.basename(src))\r\n shutil.copyfile(src, dest)\r\n return dest",
"def pre_install(self, dest_dir):\n pass",
"def change_dir(self, src: str = None, dest: str = None):\n\n if not is_empty(src):\n self._srcDir = src\n\n if not is_empty(dest):\n self._destDir = dest",
"def copy_fixture(src: Path, dest: Path) -> Path:\n return shutil.copy(src.absolute(), dest.absolute())",
"def _clone_defaults(self, source, dest, context):\n\n for base, dirs, files in os.walk(source):\n relative = os.path.relpath(base, source)\n\n for d in dirs:\n os.makedirs(os.path.join(dest, relative, d))\n\n for filename in files:\n\n if not filename.endswith(self.valid_extensions):\n continue\n\n with open(os.path.join(base, filename), 'r') as f:\n data = f.read()\n\n with open(os.path.join(dest, relative, filename), 'w') as f:\n data = jinja2.Template(data).render(**context)\n f.write(data)",
"def copy(self, src, dest):\n\n src = os.path.join(os.path.dirname(__file__), \"collections\", \"kitchensink\", src)\n dest = os.path.join(self.checkout, dest)\n if os.path.isdir(src):\n shutil.copytree(src, dest)\n else:\n shutil.copy(src, dest)\n return dest",
"def deploy_conf(self, source_path, dest_path):\n if not os.path.exists(source_path):\n raise RuntimeError('Expected configuration file to exist in {}, but does not.'.format(source_path))\n\n self._shell_client.copy(source_path, dest_path)\n # Must set permissions of conf to '600' for security purposes.\n self._shell_client.exec_command('chmod 600 {}'.format(dest_path), error_on_failure=True)",
"def create_dir(self):\n\n os.makedirs(self.path)\n\n instance_config_dir = p.abspath(p.join(self.path, \"configs\"))\n os.makedirs(instance_config_dir)\n\n print(\n f\"Copy common default production configuration from {self.base_config_dir}. Files: {self.main_config_name}, {self.users_config_name}\"\n )\n\n shutil.copyfile(\n p.join(self.base_config_dir, self.main_config_name),\n p.join(instance_config_dir, self.main_config_name),\n )\n shutil.copyfile(\n p.join(self.base_config_dir, self.users_config_name),\n p.join(instance_config_dir, self.users_config_name),\n )\n\n logging.debug(\"Create directory for configuration generated in this helper\")\n # used by all utils with any config\n conf_d_dir = p.abspath(p.join(instance_config_dir, \"conf.d\"))\n os.mkdir(conf_d_dir)\n\n logging.debug(\"Create directory for common tests configuration\")\n # used by server with main config.xml\n self.config_d_dir = p.abspath(p.join(instance_config_dir, \"config.d\"))\n os.mkdir(self.config_d_dir)\n users_d_dir = p.abspath(p.join(instance_config_dir, \"users.d\"))\n os.mkdir(users_d_dir)\n dictionaries_dir = p.abspath(p.join(instance_config_dir, \"dictionaries\"))\n os.mkdir(dictionaries_dir)\n extra_conf_dir = p.abspath(p.join(instance_config_dir, \"extra_conf.d\"))\n os.mkdir(extra_conf_dir)\n\n def write_embedded_config(name, dest_dir, fix_log_level=False):\n with open(p.join(HELPERS_DIR, name), \"r\") as f:\n data = f.read()\n data = data.replace(\"clickhouse\", self.config_root_name)\n if fix_log_level:\n data = data.replace(\"<level>test</level>\", \"<level>trace</level>\")\n with open(p.join(dest_dir, name), \"w\") as r:\n r.write(data)\n\n logging.debug(\"Copy common configuration from helpers\")\n # The file is named with 0_ prefix to be processed before other configuration overloads.\n if self.copy_common_configs:\n write_embedded_config(\n \"0_common_instance_config.xml\",\n self.config_d_dir,\n self.with_installed_binary,\n )\n\n write_embedded_config(\"0_common_instance_users.xml\", users_d_dir)\n if (\n os.environ.get(\"CLICKHOUSE_USE_NEW_ANALYZER\") is not None\n and self.allow_analyzer\n ):\n write_embedded_config(\"0_common_enable_analyzer.xml\", users_d_dir)\n\n if len(self.custom_dictionaries_paths):\n write_embedded_config(\"0_common_enable_dictionaries.xml\", self.config_d_dir)\n\n logging.debug(\"Generate and write macros file\")\n macros = self.macros.copy()\n macros[\"instance\"] = self.name\n with open(p.join(conf_d_dir, \"macros.xml\"), \"w\") as macros_config:\n macros_config.write(self.dict_to_xml({\"macros\": macros}))\n\n # Put ZooKeeper config\n if self.with_zookeeper:\n shutil.copy(self.zookeeper_config_path, conf_d_dir)\n\n if self.with_secrets:\n if self.with_kerberos_kdc:\n base_secrets_dir = self.cluster.instances_dir\n else:\n base_secrets_dir = self.path\n from_dir = self.secrets_dir\n to_dir = p.abspath(p.join(base_secrets_dir, \"secrets\"))\n logging.debug(f\"Copy secret from {from_dir} to {to_dir}\")\n shutil.copytree(\n self.secrets_dir,\n p.abspath(p.join(base_secrets_dir, \"secrets\")),\n dirs_exist_ok=True,\n )\n\n if self.with_coredns:\n shutil.copytree(\n self.coredns_config_dir, p.abspath(p.join(self.path, \"coredns_config\"))\n )\n\n # Copy config.d configs\n logging.debug(\n f\"Copy custom test config files {self.custom_main_config_paths} to {self.config_d_dir}\"\n )\n for path in self.custom_main_config_paths:\n shutil.copy(path, self.config_d_dir)\n\n # Copy users.d configs\n for path in self.custom_user_config_paths:\n shutil.copy(path, 
users_d_dir)\n\n # Copy dictionaries configs to configs/dictionaries\n for path in self.custom_dictionaries_paths:\n shutil.copy(path, dictionaries_dir)\n for path in self.custom_extra_config_paths:\n shutil.copy(path, extra_conf_dir)\n\n db_dir = p.abspath(p.join(self.path, \"database\"))\n logging.debug(f\"Setup database dir {db_dir}\")\n if self.clickhouse_path_dir is not None:\n logging.debug(f\"Database files taken from {self.clickhouse_path_dir}\")\n shutil.copytree(self.clickhouse_path_dir, db_dir)\n logging.debug(\n f\"Database copied from {self.clickhouse_path_dir} to {db_dir}\"\n )\n else:\n os.mkdir(db_dir)\n\n logs_dir = p.abspath(p.join(self.path, \"logs\"))\n logging.debug(f\"Setup logs dir {logs_dir}\")\n os.mkdir(logs_dir)\n self.logs_dir = logs_dir\n\n depends_on = []\n\n if self.with_mysql_client:\n depends_on.append(self.cluster.mysql_client_host)\n\n if self.with_mysql:\n depends_on.append(\"mysql57\")\n\n if self.with_mysql8:\n depends_on.append(\"mysql80\")\n\n if self.with_mysql_cluster:\n depends_on.append(\"mysql57\")\n depends_on.append(\"mysql2\")\n depends_on.append(\"mysql3\")\n depends_on.append(\"mysql4\")\n\n if self.with_postgres_cluster:\n depends_on.append(\"postgres2\")\n depends_on.append(\"postgres3\")\n depends_on.append(\"postgres4\")\n\n if self.with_kafka:\n depends_on.append(\"kafka1\")\n depends_on.append(\"schema-registry\")\n\n if self.with_kerberized_kafka:\n depends_on.append(\"kerberized_kafka1\")\n\n if self.with_kerberos_kdc:\n depends_on.append(\"kerberoskdc\")\n\n if self.with_kerberized_hdfs:\n depends_on.append(\"kerberizedhdfs1\")\n\n if self.with_rabbitmq:\n depends_on.append(\"rabbitmq1\")\n\n if self.with_nats:\n depends_on.append(\"nats1\")\n\n if self.with_zookeeper:\n depends_on.append(\"zoo1\")\n depends_on.append(\"zoo2\")\n depends_on.append(\"zoo3\")\n\n if self.with_minio:\n depends_on.append(\"minio1\")\n\n if self.with_azurite:\n depends_on.append(\"azurite1\")\n\n self.cluster.env_variables.update(self.env_variables)\n\n odbc_ini_path = \"\"\n if self.odbc_ini_path:\n self._create_odbc_config_file()\n odbc_ini_path = \"- \" + self.odbc_ini_path\n\n entrypoint_cmd = self.clickhouse_start_command\n\n if self.stay_alive:\n entrypoint_cmd = self.clickhouse_stay_alive_command.replace(\n \"{main_config_file}\", self.main_config_name\n )\n else:\n entrypoint_cmd = (\n \"[\"\n + \", \".join(map(lambda x: '\"' + x + '\"', entrypoint_cmd.split()))\n + \"]\"\n )\n\n logging.debug(\"Entrypoint cmd: {}\".format(entrypoint_cmd))\n\n networks = app_net = ipv4_address = ipv6_address = net_aliases = net_alias1 = \"\"\n if (\n self.ipv4_address is not None\n or self.ipv6_address is not None\n or self.hostname != self.name\n ):\n networks = \"networks:\"\n app_net = \"default:\"\n if self.ipv4_address is not None:\n ipv4_address = \"ipv4_address: \" + self.ipv4_address\n if self.ipv6_address is not None:\n ipv6_address = \"ipv6_address: \" + self.ipv6_address\n if self.hostname != self.name:\n net_aliases = \"aliases:\"\n net_alias1 = \"- \" + self.hostname\n\n if not self.with_installed_binary:\n binary_volume = \"- \" + self.server_bin_path + \":/usr/bin/clickhouse\"\n odbc_bridge_volume = (\n \"- \" + self.odbc_bridge_bin_path + \":/usr/bin/clickhouse-odbc-bridge\"\n )\n library_bridge_volume = (\n \"- \"\n + self.library_bridge_bin_path\n + \":/usr/bin/clickhouse-library-bridge\"\n )\n else:\n binary_volume = \"- \" + self.server_bin_path + \":/usr/share/clickhouse_fresh\"\n odbc_bridge_volume = (\n \"- \"\n + 
self.odbc_bridge_bin_path\n + \":/usr/share/clickhouse-odbc-bridge_fresh\"\n )\n library_bridge_volume = (\n \"- \"\n + self.library_bridge_bin_path\n + \":/usr/share/clickhouse-library-bridge_fresh\"\n )\n\n external_dirs_volumes = \"\"\n if self.external_dirs:\n for external_dir in self.external_dirs:\n external_dir_abs_path = p.abspath(\n p.join(self.cluster.instances_dir, external_dir.lstrip(\"/\"))\n )\n logging.info(f\"external_dir_abs_path={external_dir_abs_path}\")\n os.makedirs(external_dir_abs_path, exist_ok=True)\n external_dirs_volumes += (\n \"- \" + external_dir_abs_path + \":\" + external_dir + \"\\n\"\n )\n\n with open(self.docker_compose_path, \"w\") as docker_compose:\n docker_compose.write(\n DOCKER_COMPOSE_TEMPLATE.format(\n image=self.image,\n tag=self.tag,\n name=self.name,\n hostname=self.hostname,\n binary_volume=binary_volume,\n odbc_bridge_volume=odbc_bridge_volume,\n library_bridge_volume=library_bridge_volume,\n instance_config_dir=instance_config_dir,\n config_d_dir=self.config_d_dir,\n db_dir=db_dir,\n external_dirs_volumes=external_dirs_volumes,\n tmpfs=str(self.tmpfs),\n logs_dir=logs_dir,\n depends_on=str(depends_on),\n user=os.getuid(),\n env_file=self.env_file,\n odbc_ini_path=odbc_ini_path,\n keytab_path=self.keytab_path,\n krb5_conf=self.krb5_conf,\n entrypoint_cmd=entrypoint_cmd,\n networks=networks,\n app_net=app_net,\n ipv4_address=ipv4_address,\n ipv6_address=ipv6_address,\n net_aliases=net_aliases,\n net_alias1=net_alias1,\n )\n )",
"def test_dry_run(self):\n with TemporaryDirectory() as temporary_directory:\n source = os.path.join(temporary_directory, 'source')\n destination = os.path.join(temporary_directory, 'destination')\n latest_directory = os.path.join(destination, 'latest')\n os.makedirs(latest_directory)\n # Create a source for testing.\n self.create_source(source)\n # Run the program through the command line interface.\n exit_code, output = run_cli(\n '--dry-run', '--no-sudo',\n source, latest_directory,\n )\n assert exit_code == 0\n # Make sure no backup was created.\n assert len(os.listdir(latest_directory)) == 0\n # Make sure no snapshot was created.\n assert len(find_snapshots(destination)) == 0",
"def move_from_temp_directory(self):",
"def move(self,src,dst):\n src = os.path.join(self.testpath,src)\n dst = os.path.join(self.testpath,dst)\n directory = os.path.split(dst)[0]\n try:\n os.makedirs(directory)\n except OSError:\n pass\n\n shutil.move(src,dst)",
"def test_absolute_outdir(tmp_path):\n # Create destination directory.\n tempdir = tmp_path / \"outdir\"\n tempdir.mkdir(mode=0o700)\n assert tempdir.exists()\n assert tempdir.is_absolute()\n assert len(list(tempdir.glob(\"**/*.*\"))) == 0, \"Must be empty.\"\n # Create a new configuration file with an absolute output_directory.\n # We are cheating a little by writing it to the same directory\n # where the test files will be saved.\n config_file = tempdir / Path(\"rewritten.cfg\")\n contents = Path(\"tests/generate.cfg\").read_text(encoding=\"utf-8\")\n contents = contents.replace(\".gendir-suite-cfg\", str(tempdir))\n contents = contents.replace(\"print = filename, summary\", \"print = summary\")\n _ = config_file.write_text(contents, encoding=\"utf-8\")\n phmdoctest.main.generate_using(config_file=config_file)\n assert config_file.exists(), \"In output_directory and didn't get wiped.\"\n assert (Path(tempdir) / \"test_project.py\").exists()\n assert (Path(tempdir) / \"test_doc__directive1.py\").exists()\n assert (Path(tempdir) / \"test_doc__directive2.py\").exists()\n assert (Path(tempdir) / \"test_doc__directive3.py\").exists()\n assert (Path(tempdir) / \"test_doc__example1.py\").exists()\n assert (Path(tempdir) / \"test_doc__example2.py\").exists()\n assert (Path(tempdir) / \"test_doc__inline_example.py\").exists()\n assert (Path(tempdir) / \"test_tests__managenamespace.py\").exists()\n assert (Path(tempdir) / \"test_tests__one_code_block.py\").exists()\n assert (Path(tempdir) / \"test_tests__output_has_blank_lines.py\").exists()\n assert (Path(tempdir) / \"test_tests__setup_only.py\").exists()\n assert (Path(tempdir) / \"test_tests__twentysix_session_blocks.py\").exists()\n assert len(list(tempdir.glob(\"**/*.*\"))) == 13, \"12 test files and .cfg file.\"",
"def test_ignore_non_configs_from_current_dir(tmp_path: pathlib.Path) -> None:\n\n cli.startup(tmp_path)\n\n junk_config = tmp_path / \"myconfig.psd\"\n junk_config.touch()\n conf = tmp_path / \"watmyconfig.json\"\n conf.touch()\n configs_found = in_dir(tmp_path)\n assert len(configs_found) == 1",
"def copy_source():\n shutil.copytree(\"src\", os.path.join(BUILD_DIR, \"src\"))\n for file in os.listdir(\".\"):\n if os.path.isfile(file):\n shutil.copyfile(file, os.path.join(BUILD_DIR, file))",
"def oh_folders(src, dest=dest):\n copytree(src, dest, ignore=ignore_patterns(*ignore_list), dirs_exist_ok=True)",
"def __init__(self, destpath=os.curdir):\n if destpath:\n self._destpath = os.path.abspath(destpath)\n self._istmpdest = False\n else:\n import tempfile # deferring import to improve startup time\n self._destpath = tempfile.mkdtemp()\n self._istmpdest = True"
] | [
"0.5968082",
"0.5623668",
"0.55685383",
"0.5543154",
"0.54757017",
"0.53514683",
"0.5298348",
"0.5297488",
"0.5291825",
"0.5271045",
"0.5267549",
"0.52317536",
"0.51011693",
"0.5092042",
"0.5072532",
"0.5053451",
"0.50515586",
"0.5050869",
"0.50469726",
"0.5045372",
"0.5012634",
"0.5010097",
"0.50014925",
"0.49909496",
"0.49824238",
"0.49695733",
"0.49667978",
"0.49592772",
"0.49166262",
"0.49116614"
] | 0.86488324 | 0 |
Initializes the .build_tool attribute. Searches the .build_tool_list for an executable name on the user's $PATH. The first tool on the list is used asis if nothing is found on the current $PATH. | def initialize_build_tool(self):
for build_tool in self.build_tool_list:
if not build_tool:
continue
if os.path.isabs(build_tool):
self.build_tool = build_tool
return
build_tool = self.where_is(build_tool)
if build_tool:
self.build_tool = build_tool
return
if self.build_tool_list:
self.build_tool = self.build_tool_list[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_tool_path(self):",
"def __find_tool_path(self):\n tool_path = Path(os.path.dirname(os.path.realpath(__file__)))\n # We asume the installion path is relative to our installation path\n tool_path = tool_path / '../../../bin'\n if os.name == 'posix':\n ret = tool_path / 'fast-discovery-server'\n if not os.path.exists(ret):\n print('fast-discovery-server tool not installed')\n sys.exit(1)\n elif os.name == 'nt':\n ret = tool_path / 'fast-discovery-server.exe'\n if not os.path.exists(ret):\n ret = tool_path / 'fast-discovery-server.bat'\n if not os.path.exists(ret):\n print('fast-discovery-server tool not installed')\n sys.exit(1)\n else:\n print(f'{os.name} not supported')\n sys.exit(1)\n\n return ret",
"def SetToolPaths(toolpaths):\n global tool_search_paths\n\n tool_search_paths = toolpaths",
"def get_toolkit(tool_list):\n best_choice = None \n for exe in tool_list:\n if which(exe):\n best_choice = exe\n break\n \n # Did not find any tools\n # to potentially use\n if not best_choice:\n err(\n 'Error: Did not find any tools to get job information!'\n )\n fatal(\n 'Expected one of the following tools to be in $PATH:'\n '\\t{0}'.format(tool_list)\n )\n \n return best_choice",
"def __init__(self, toolName):\n\t\tself.toolName = toolName",
"def tool(self):\n tool_type = self.__class__.__module__.split('.')[-1]\n return g.config.tools[tool_type]",
"def calrissian_make_tool(spec, loadingContext):\n if \"class\" in spec and spec[\"class\"] == \"CommandLineTool\":\n return CalrissianCommandLineTool(spec, loadingContext)\n else:\n return default_make_tool(spec, loadingContext)",
"def setFDKToolsPath(toolName):\n\ttoolPath = 0\n\tif sys.platform == \"darwin\":\n\t\tpaths = os.environ[\"PATH\"]\n\t\tif \"FDK/Tools/osx\" not in paths:\n\t\t\thome = os.environ[\"HOME\"]\n\t\t\tfdkPath = \":%s/bin/FDK/Tools/osx\" % (home)\n\t\t\tos.environ[\"PATH\"] = paths + fdkPath\n\t\n\tif os.name == \"nt\":\n\t\tp = os.popen(\"for %%i in (%s) do @echo. %%~$PATH:i\" % (toolName))\n\t\tlog = p.read()\n\t\tp.close()\n\t\tlog = log.strip()\n\t\tif log:\n\t\t\ttoolPath = log\t\n\telse:\n\t\tp = os.popen(\"which %s\" % (toolName))\n\t\tlog = p.read()\n\t\tp.close()\n\t\tlog = log.strip()\n\t\tif log:\n\t\t\ttoolPath = log\t\n\t\n\tif not toolPath:\n\t\tprint \"\"\"\nThe script cannot run the command-line program '%s'. Please make sure the AFDKO is installed, and the system environment variable PATH\ncontains the path the to FDK sub-directory containing '%s'.\"\"\" % (toolName, toolName)\n\n\treturn toolPath # get reid of new-line",
"def _set_executables(self):\n\n # add path from argument to env\n if self.home_path:\n if self.env:\n self.env += f\":{self.home_path}\"\n else:\n self.env = self.home_path\n\n # set fuzzer_exe \n self.fuzzer_exe = self._search_for_executable(self.fuzzer_exe)\n L.debug(\"Will use %s as fuzzer executable.\", self.fuzzer_exe)\n\n # set compiler_exe\n if self.compiler_exe:\n self.compiler_exe = self._search_for_executable(self.compiler_exe)\n L.debug(\"Will use %s as fuzzer compiler.\", self.compiler_exe)\n\n # set additional executables\n for exe_name, exe_file in self.EXECUTABLES.items():\n self.EXECUTABLES[exe_name] = self._search_for_executable(exe_file)",
"def __init__(self, argv):\n tool_path = str(self.__find_tool_path().resolve())\n\n try:\n result = subprocess.run(\n [tool_path],\n stdout=subprocess.PIPE,\n universal_newlines=True\n )\n\n if result.returncode != 0:\n sys.exit(result.returncode)\n\n if (\n len(argv) == 0 or\n (len(argv) == 1 and argv[0] == '-h') or\n (len(argv) == 1 and argv[0] == '--help')\n ):\n print(self.__edit_tool_help(result.stdout))\n else:\n # Call the tool\n result = subprocess.run([tool_path] + argv)\n if result.returncode != 0:\n sys.exit(result.returncode)\n\n except KeyboardInterrupt:\n # it lets the subprocess to handle the exception\n pass\n\n except BaseException as e:\n self.__help_message += str(e)\n self.__help_message += '\\n fast-discovery-server tool not found!'\n print(self.__help_message)\n sys.exit(1)",
"def tool_path(self, tool_name):\n assert tool_name in TOOL_PATHS\n if tool_name not in self._tool_paths:\n return TOOL_PATHS[tool_name]\n\n tool_path = os.path.normpath(self._tool_paths[tool_name])\n return self.expand_vars([tool_path])[0]",
"def build_tool(self, doc, entity):\n match = self.tool_re.match(entity)\n if match and validations.validate_tool_name(match.group(self.TOOL_NAME_GROUP)):\n name = match.group(self.TOOL_NAME_GROUP)\n return creationinfo.Tool(name)\n else:\n raise SPDXValueError('Failed to extract tool name')",
"def __init__(self):\n self.label = \"Python ToolBox\"\n self.alias = \"\"\n\n # List of tool classes associated with this toolbox\n self.tools = [Tool]",
"def __setup(self):\n\n build_environment = []\n\n # The download URL has the format contains vMAJOR.MINOR in the\n # path and the tarball contains MAJOR.MINOR.REVISION, so pull\n # apart the full version to get the MAJOR and MINOR components.\n match = re.match(r'(?P<major>\\d+)\\.(?P<minor>\\d+)', self.version)\n major_minor = 'v{0}.{1}'.format(match.groupdict()['major'],\n match.groupdict()['minor'])\n tarball = 'openmpi-{}.tar.bz2'.format(self.version)\n url = '{0}/{1}/downloads/{2}'.format(self.baseurl, major_minor,\n tarball)\n\n # CUDA\n if self.cuda:\n if self.__toolchain.CUDA_HOME:\n self.configure_opts.append(\n '--with-cuda={}'.format(self.__toolchain.CUDA_HOME))\n else:\n self.configure_opts.append('--with-cuda')\n else:\n self.configure_opts.append('--without-cuda')\n\n # InfiniBand\n if self.infiniband:\n self.configure_opts.append('--with-verbs')\n else:\n self.configure_opts.append('--without-verbs')\n\n # UCX\n if self.__ucx:\n if isinstance(self.__ucx, string_types):\n # Use specified path\n self.configure_opts.append('--with-ucx={}'.format(self.__ucx))\n else:\n self.configure_opts.append('--with-ucx')\n\n # If UCX was built with CUDA support, it is linked with\n # libcuda.so.1, which is not available during the\n # build stage. Assume that if OpenMPI is built with\n # CUDA support, then UCX was as well...\n if self.cuda:\n cuda_home = \"/usr/local/cuda\"\n if self.__toolchain.CUDA_HOME:\n cuda_home = self.__toolchain.CUDA_HOME\n self.__commands.append('ln -s {0} {1}'.format(\n os.path.join(cuda_home, 'lib64', 'stubs', 'libcuda.so'),\n os.path.join(cuda_home, 'lib64', 'stubs', 'libcuda.so.1')))\n if not self.__toolchain.LD_LIBRARY_PATH:\n build_environment.append('LD_LIBRARY_PATH=\"{}:$LD_LIBRARY_PATH\"'.format(os.path.join(cuda_home, 'lib64', 'stubs')))\n\n if self.directory:\n # Use source from local build context\n self.__commands.append(self.configure_step(\n directory=os.path.join(self.__wd, self.directory),\n toolchain=self.__toolchain))\n else:\n # Download source from web\n self.__commands.append(self.download_step(url=url,\n directory=self.__wd))\n self.__commands.append(self.untar_step(\n tarball=os.path.join(self.__wd, tarball), directory=self.__wd))\n self.__commands.append(self.configure_step(\n directory=os.path.join(self.__wd,\n 'openmpi-{}'.format(self.version)),\n environment=build_environment,\n toolchain=self.__toolchain))\n\n self.__commands.append(self.build_step())\n\n if self.__check:\n self.__commands.append(self.check_step())\n\n self.__commands.append(self.install_step())\n\n # Set library path\n libpath = os.path.join(self.prefix, 'lib')\n if self.ldconfig:\n self.__commands.append(self.ldcache_step(directory=libpath))\n else:\n self.__environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(libpath)\n\n if self.directory:\n # Using source from local build context, cleanup directory\n self.__commands.append(self.cleanup_step(\n items=[os.path.join(self.__wd, self.directory)]))\n else:\n # Using downloaded source, cleanup tarball and directory\n self.__commands.append(self.cleanup_step(\n items=[os.path.join(self.__wd, tarball),\n os.path.join(self.__wd,\n 'openmpi-{}'.format(self.version))]))",
"def is_tool(name):\n return find_executable(name) is not None",
"def setup_tool(shell,tool_name,tool_revision,invoke_script,\n test_program_name,test_program_script):\n\n # check that the user is in the apps group\n groups,es = shell.execute(\"echo ${USER} | groups\")\n if \"apps\" not in groups.split():\n # user not in the apps group, bail\n username,es = shell.execute(\"echo ${USER}\")\n# raise RuntimeError(\"user %s not in apps group: %s\" % (username,groups))\n raise GroupMembershipError(\"user %s not in apps group: %s\" % (username,groups))\n\n # become the apps user\n shell.send('sudo su - apps')\n shell.start_bash_shell()\n\n tool_revision_string = \"r%s\" % (tool_revision)\n tool_path = \"/apps/%s/%s\" % (tool_name, tool_revision_string)\n dev_path = \"/apps/%s/dev\" % (tool_name)\n\n # setup the new tool's invoke script\n # mv %(tool_path)s %(tmp_tool_path)s;\n # tmp_tool_path = tool_path + \".old\"\n # \"\"\" % {'tool_path' : tool_path, 'tmp_tool_path' : tmp_tool_path}\n script = \"\"\"\n rm -rf %(tool_path)s;\n mkdir %(tool_path)s;\n rm -f %(dev_path)s;\n ln -s %(tool_path)s %(dev_path)s;\n cd %(tool_path)s;\n mkdir middleware bin;\n \"\"\" % {'tool_path' : tool_path,\n 'dev_path' : dev_path}\n\n commands = script.strip().split('\\n')\n shell.execute(commands)\n\n # write the invoke script to disk\n shell.write_file('middleware/invoke', invoke_script)\n shell.execute('chmod 755 middleware/invoke')\n\n # write the test program to disk\n shell.write_file(\"bin/%s\" % (test_program_name), test_program_script)\n shell.execute(\"chmod 755 bin/%s\" % (test_program_name))\n\n # exit from apps user\n shell.stop_bash_shell()\n shell.send('exit')",
"def find_tool():\n return shutil.which('nm')",
"def __init__(self):\n self.label = \"Create\"\n self.alias = \"\"\n\n # List of tool classes associated with this toolbox\n if core.get_pass():\n self.tools = [Fbound, Roads, Diekdikisi]\n else:\n self.tools = []",
"def get_tool(cls, tool_name):\n if cls.tool_dict is None:\n # Init the module_dict once.\n cls.tool_dict = {tool.name: tool for tool in cls.get_pb().tools}\n return cls.tool_dict.get(tool_name)",
"def tool(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tool\")",
"def tool(self):\n return self._tool",
"def __init__(\n self,\n logger: Log,\n console: Console,\n base_path: Path,\n home_path: Path = None,\n ):\n self.logger = logger\n self.input = console\n self.base_path = Path(base_path)\n self.home_path = Path(\n os.path.expanduser(home_path if home_path else Path.home())\n )\n\n self.host_arch = self.platform.machine()\n self.host_os = self.platform.system()\n\n self.app_tools: DefaultDict[AppConfig, ToolCache] = defaultdict(\n lambda: ToolCache(\n logger=self.logger,\n console=self.input,\n base_path=self.base_path,\n home_path=self.home_path,\n )\n )\n\n # Built-in tools without any external dependencies\n Subprocess.verify(tools=self)\n Download.verify(tools=self)",
"def find_tool():\n return shutil.which('readelf')",
"def get_lex_path(env, append_paths: bool=False) -> Optional[str]:\n for prog in BINS:\n bin_path = SCons.Tool.find_program_path(\n env,\n prog,\n default_paths=DEFAULT_PATHS,\n add_path=append_paths,\n )\n if bin_path:\n return bin_path\n\n SCons.Warnings.warn(\n SCons.Warnings.SConsWarning,\n 'lex tool requested, but lex or flex binary not found in ENV PATH'\n )",
"def CreateTool(tool_name, adb):\n if not tool_name:\n return BaseTool()\n\n ctor = TOOL_REGISTRY.get(tool_name)\n if ctor:\n return ctor(adb)\n else:\n print 'Unknown tool %s, available tools: %s' % (\n tool_name, ', '.join(sorted(TOOL_REGISTRY.keys())))\n sys.exit(1)",
"def find_and_register(cls, name, vers):\n # locate the given tool using its name as an executable,\n # and adjust the version to match the detected version (major.minor)\n\n tc = CLang(name, vers)\n\n if tc.version is None:\n debug(\"looking for {}\".format(tc.name))\n err = tc.detect_version_on_path_or_env('CC', tc.name, needs_version=False,\n allow_unversioned=(vers is None))\n if err is not None:\n return\n\n try:\n comp = subprocess.run([tc.name, '--version'],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n # ignore stray intl chars\n comp.stdout = str(comp.stdout, encoding='ascii', errors='ignore')\n comp.stderr = str(comp.stderr, encoding='ascii', errors='ignore')\n except OSError as e:\n # e.g. PermissionError, from trying to run Cygwin softlink\n debug(\"failed to invoke '{}', error '{}'\".format(tc.name, e))\n return\n\n if comp.returncode == 0:\n # e.g. clang version 3.8.0-2ubuntu3~trusty5 (tags/RELEASE_380/final)\n # e.g. clang version 3.8.1 (branches/release_38)\n # e.g. clang version 5.0.1-svn319952-1~exp1 (branches/release_50)\n # and the outliers:\n # e.g. Apple LLVM version 8.0.0 (clang-800.0.42.1)\n # e.g. Apple LLVM version 10.0.0 (clang-1000.11.45.2)\n # the version does not correspond to any actual upstream LLVM version :-p\n stdout = comp.stdout\n m = re.match(r'.*?\\s+version\\s+(\\d+\\.\\d+)(\\.\\d+)?.*', stdout, re.M)\n if m:\n version = m.group(1)\n debug(\"matched {}\".format(version))\n\n # then rename this tool to be more specific\n tc.name = 'clang-' + version\n tc.version = version\n else:\n debug(\"did not find version information in output: {}\".format(\n comp.stdout + comp.stderr))\n else:\n debug(\"failed to run '{} --version': {}\".format(tc.name, comp.stderr))\n\n if tc.version:\n if version_compare(tc.version, cls.VERSION_CLANG_LATEST) > 0:\n cls.VERSION_CLANG_LATEST = tc.version\n\n Toolchain.register(tc, force=True)",
"def load_environment(self, project, tool_name=None):\n if self._environment_type == 'chiptools':\n log.debug(\n 'Environment for {0} is already initialised.'.format(self)\n )\n return\n simulator, root, libs = ChipToolsTest.get_environment(\n project, \n tool_name\n )\n self.__class__._loaded_path = None\n self.__class__._simulator = simulator\n self.__class__._simulation_root = root\n self.__class__._simulation_libraries = libs\n self.__class__._environment_type = 'chiptools'\n log.debug('Finished load_environment call on {0}'.format(self))",
"def __init__(self, tools: ToolCache, root_path: Path, version: str, arch: str):\n super().__init__(tools=tools)\n self.root_path = root_path\n self.version = version\n self.arch = arch",
"def _add_default_setup_cmd(framework, config):\n if \"setup_cmd\" not in framework:\n framework._setup_cmd = None\n framework.setup_cmd = None\n else:\n framework._setup_cmd = framework.setup_cmd\n if isinstance(framework.setup_cmd, str):\n framework.setup_cmd = [framework.setup_cmd]\n framework.setup_cmd = [\n cmd.format(pip=\"{pip}\", py=\"{py}\", **config.common_dirs)\n for cmd in framework.setup_cmd\n ]",
"def __init__(self):\n self.label = \"Check\"\n self.alias = \"Check Shapefiles\"\n\n # List of tool classes associated with this toolbox\n if core.get_pass():\n self.tools = [Dbound, Overlaps, Numbering, Geometry, Roads, Bld]\n else:\n self.tools = []"
] | [
"0.6117797",
"0.59801966",
"0.57670516",
"0.5763969",
"0.57467836",
"0.5693948",
"0.5670825",
"0.5632699",
"0.55697215",
"0.5522049",
"0.5520909",
"0.5454913",
"0.5439451",
"0.54242945",
"0.54215056",
"0.5409395",
"0.5408753",
"0.5398145",
"0.53854394",
"0.53382486",
"0.5336726",
"0.53256637",
"0.53034323",
"0.5303169",
"0.52970606",
"0.529354",
"0.5287926",
"0.5286085",
"0.5263933",
"0.5254691"
] | 0.84088266 | 0 |
Renames (relocates) the specified source (usually a directory) to the specified destination, creating the destination directory first if necessary. | def relocate(self, source, destination):
destination_dir = os.path.dirname(destination)
if not os.path.exists(destination_dir):
self.subdir(destination_dir)
os.rename(source, destination) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def os_rename(self, source, destination):\n cmd = ['/bin/mv', source, destination]\n process = subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n returncode = subprocess.Popen.wait(process)\n return returncode",
"def hmove(src_path, res_path):\n os.rename(src_path, res_path)",
"def mv(src_path, dest_path):\n try:\n os.rename(src_path, dest_path)\n except OSError:\n # this will happen on windows\n os.remove(dest_path)\n os.rename(src_path, dest_path)",
"def move(source, destination):\n logger.info(\"Move: %s -> %s\" % (source, destination))\n try:\n __create_destdir(destination)\n shutil.move(source, destination)\n return True\n except Exception:\n logger.exception(\"Failed to Move: %s -> %s\" % (source, destination))\n return False",
"def rename(self, src, dst):\n os.rename(src, dst)",
"def safe_move(src: str, dst: str) -> None:\n try:\n os.rename(src, dst)\n except OSError as err:\n\n if err.errno == errno.EXDEV:\n # Generate a unique ID, and copy `<src>` to the target directory\n # with a temporary name `<dst>.<ID>.tmp`. Because we're copying\n # across a filesystem boundary, this initial copy may not be\n # atomic. We intersperse a random UUID so if different processes\n # are copying into `<dst>`, they don't overlap in their tmp copies.\n copy_id = uuid4()\n tmp_dst = \"%s.%s.tmp\" % (dst, copy_id)\n shutil.copyfile(src, tmp_dst)\n\n # Then do an atomic rename onto the new name, and clean up the\n # source image.\n os.rename(tmp_dst, dst)\n os.unlink(src)\n else:\n raise",
"def rename_file(source, destination, alog):\n\n # Some error checking against a legitimate source & destination.\n if not type(source) is str:\n raise CoreError('Source is not of str type.')\n elif not type(destination) is str:\n raise CoreError('Destination is not of str type.')\n elif not os.path.isfile(source):\n raise CoreError(source + ' is not a valid file.')\n\n head, tail = os.path.split(destination)\n if not os.path.isdir(head + '/'):\n try:\n os.makedirs(head + '/')\n except:\n raise CoreError('Failed to create new directory: '\n + (head + '/'))\n\n for i in range(0, len(MuzikArkive.illegal_name_characters)):\n if MuzikArkive.illegal_name_characters[i] in tail:\n tail = tail.replace(MuzikArkive.illegal_name_characters[i], '_')\n alog.rlog = MuzikArkive.illegal_name_characters[i] \\\n + ' was removed from ' + destination\n\n if not os.path.isfile(destination):\n try:\n os.rename(source, destination)\n except:\n raise CoreError('os.rename() Failed.')\n else:\n head, tail = destination.rsplit('.', 1)\n rname = True\n i = 1\n while rname:\n addon = '[' + str(i) + '].'\n if not os.path.isfile(head + addon + tail):\n try:\n os.rename(source, (head + addon + tail))\n except:\n raise CoreError('os.rename() Failed.')\n else:\n rname = False\n else:\n i += 1",
"def move(self, name, source, dest):\n self.m.path.assert_absolute(source)\n self.m.path.assert_absolute(dest)\n self._run(name, ['move', source, dest])\n self.m.path.mock_copy_paths(source, dest)\n self.m.path.mock_remove_paths(source)",
"def mv(self, src: int, dest: int) -> bool:\n url = 'https://webapi.115.com/files/move'\n result = self.s.post(url, data={'pid': dest, 'fid[0]': src}, headers={'Origin': origin['webapi'], 'Referer': referer['115'].format(self.default_dir)}).json()\n if result['errno'] == '':\n _ = functools.reduce(dict.__getitem__, self._dirs_lookup[src], self.dirs) # TODO: need to test\n self._dirs_lookup[src] = self._dirs_lookup[dest].append(dest)\n parent = functools.reduce(dict.__getitem__, self._dirs_lookup[src], self.dirs)\n if src not in parent:\n parent.update({src: _})\n else:\n parent.get(src).update(_)\n return True",
"def relocate(source, destination, move=False):\n venv = api.VirtualEnvironment(source)\n if not move:\n\n venv.relocate(destination)\n return None\n\n venv.move(destination)\n return None",
"def move(self, destination, **kwargs):\n assert _os.path.exists(self.__str__()) == True\n _shutil.move(self.__str__(), destination, **kwargs)",
"def rename(self, name=None, destination=None):\n raise NotImplementedError\n return None",
"def move(self, target):\n if target.relto(self):\n raise error.EINVAL(target, \"cannot move path into a subdirectory of itself\")\n try:\n self.rename(target)\n except error.EXDEV: # invalid cross-device link\n self.copy(target)\n self.remove()",
"def move_file(source, destination):\n shutil.move(source, destination)",
"def move(self,src,dst):\n src = os.path.join(self.testpath,src)\n dst = os.path.join(self.testpath,dst)\n directory = os.path.split(dst)[0]\n try:\n os.makedirs(directory)\n except OSError:\n pass\n\n shutil.move(src,dst)",
"def rename(source_dir,dest_dir):\n keep_going(text=\"This script will backup the original folder to dest_dir/Source/** and remove the original folder. It will make copies of the original files and rename them in directories called Darks, Flats, etc. Do you wish to continue? Answer Y or N.\")\n\n ## Backup Original Source Folder\n dir_util.copy_tree(source_dir, dest_dir + '/Source')\n\n data = []\n for file in os.listdir(\"./\" + source_dir): # put in your path directory\n if file.endswith(\".fits\"): # what does the file end with?\n data.append(os.path.join(source_dir, file))\n\n n = len(data)\n obj, itime, filt, renamed, datemod, count, flatmod, mod = ([] for i in range(8))\n for i in range(0, n):\n header = fits.getheader(data[i])\n Name, Date, Number, Ext = data[i].split(\".\")\n obj.append(header['OBJECT'])\n itime.append(header['ITIME'])\n filt.append(header['FWINAME'])\n mod.append((header['OBJECT'] + header['FWINAME']))\n flatmod.append((header['OBJECT'] + header['FWINAME'] + Date))\n datemod.append(datetime.strptime(Date, \"%Y%m%d\").date())\n if flatmod[i] in flatmod:\n count = flatmod.count(flatmod[i])\n if ('Lamp' in obj[i] or 'Flat' in obj[i]):\n renamed.append((dest_dir + '/Flats/' + str(datemod[i]) + '/' + 'K' + header['OBJECT'] + str(count) + \".fits\"))\n os.makedirs(os.path.dirname(dest_dir + '/Flats/' + str(datemod[i]) + '/'), exist_ok=True)\n elif ('Dark' in obj[i]) or ('dark' in obj[i]):\n renamed.append((dest_dir + '/Darks/' + str(datemod[i]) + '/' + 'K' + header['OBJECT'] + str(count) + \".fits\"))\n os.makedirs(os.path.dirname(dest_dir + '/Darks/' + str(datemod[i]) + '/'), exist_ok=True)\n elif ('Sky' in obj[i]) or ('sky' in obj[i]):\n renamed.append((dest_dir + '/Skys/' + str(datemod[i]) + '/' + 'K' + header['OBJECT'] + header['FWINAME'] + str(\n count) + \".fits\"))\n os.makedirs(os.path.dirname(dest_dir + '/Skys/' + str(datemod[i]) + '/'), exist_ok=True)\n else:\n renamed.append((dest_dir + '/Objects/' + header['OBJECT'].upper() + '/' + str(datemod[i]) + '/' + 'K' + list(header['CAMNAME'])[0].title() + header['OBJECT'].upper() +\n header['FWINAME'] + str(\n count) + \".fits\"))\n os.makedirs(os.path.dirname(dest_dir + '/Objects/' + header['OBJECT'].upper() + '/' + str(datemod[i]) + '/'), exist_ok=True)\n os.rename(data[i], renamed[i])\n\n ## REMOVE LEFT OVER original Folders\n shutil.rmtree(source_dir)\n\n lists = [data, mod, datemod, itime, flatmod, renamed]\n data_headers = pd.concat([pd.Series(x) for x in lists], axis=1)\n\n return data_headers",
"def moveImage(image, dest):\n if not os.path.exists(dest):\n os.mkdir(dest)\n move(image, dest)",
"def move(self,fileName,destDir):\n if not os.path.exists(destDir): \n os.makedirs(destDir)\n srcPath = os.path.join(self.dir,fileName)\n destPath = os.path.join(destDir,fileName)\n renameFile(srcPath,destPath)\n self.refresh()",
"def moveFile(source, dest):\n try:\n shutil.move(source, dest) \n except IOError as e:\n print (\"Unable to move file. %s\" %(e))",
"def copy_rename_file(source_file_path: str, target_dir: str, new_name: str) -> str:\n shutil.copy2(source_file_path, target_dir)\n target_path = os.path.join(target_dir, os.path.basename(source_file_path))\n new_file_name = new_name + get_extension(source_file_path)\n new_file_path = os.path.join(target_dir, new_file_name)\n os.rename(target_path, new_file_path)\n return new_file_path",
"def change_dir(self, src: str = None, dest: str = None):\n\n if not is_empty(src):\n self._srcDir = src\n\n if not is_empty(dest):\n self._destDir = dest",
"def moveDirectoryContents(self, source, target, force=False):\n if source.endswith('/') or source.endswith('\\\\'):\n source += '*'\n else:\n source += os.path.sep + '*'\n if force:\n command = 'mv -f %s %s'\n else:\n command = 'mv %s %s'\n self.communicate(command % (source, target))",
"def update_copy(self, source, dest):\n relsource = os.path.relpath(source, os.path.realpath(self.dirname))\n for copy in self.runscript.copies:\n if copy[1] == dest:\n copy[0] = relsource\n break\n else:\n self.runscript.add_copy(relsource, dest)",
"def change_nm(src,dst):\n\timport os\n\ttry:\n\t\tos.rename(src,dst)\n\texcept:\n\t\tprint \"this is a mistake\"\n\t\treturn -1\n\n\treturn 0",
"def MovePath(options, src, dst):\n # if the destination is not an existing directory, then overwrite it\n if os.path.isdir(dst):\n dst = os.path.join(dst, os.path.basename(src))\n\n # If the destination exists, the remove it\n if os.path.exists(dst):\n if options.force:\n Remove(['-vfr', dst])\n if os.path.exists(dst):\n raise OSError('mv: FAILED TO REMOVE ' + dst)\n else:\n raise OSError('mv: already exists ' + dst)\n for _ in range(5):\n try:\n os.rename(src, dst)\n break\n except OSError as error:\n print('Failed on %s with %s, retrying' % (src, error))\n time.sleep(5)\n else:\n print('Gave up.')\n raise OSError('mv: ' + error)",
"def copyfile(source, dest, newname=None):\n\n if not os.path.exists(source):\n #print 'no such file %s' %source\n return False\n shutil.copy(source, newname)\n dest = os.path.join(dest, newname)\n if os.path.exists(dest):\n os.remove(dest)\n shutil.move(newname, dest)\n return True",
"def do_mv(self, args):\n if args:\n args = args.split()\n\n if not args or len(args) < 2:\n print('Usage: mv source_file target_file')\n return\n\n src = args[0]\n dst = args[1]\n if not (src.startswith('shared/') and dst.startswith('shared/')\n or self._user):\n print('login required for specifying non-shared file with mv')\n return\n\n try:\n new_name = self._qm.rename_file(self._user, src, dst)\n print('renamed file', src, 'to', new_name)\n except Exception as e:\n print('ERROR renaming %s: %s' % (src, e), file=sys.stderr)\n return",
"def mv(self, source: str, filename: str) -> None:\n\n self.cp(source, filename)\n self.rm(source)",
"def _move(self, in_file, dest):\n dest = os.path.abspath(dest)\n _, in_base_name = os.path.split(in_file)\n dest_parent_dir, _ = os.path.split(dest)\n if os.path.exists(dest):\n out_file = os.path.join(dest, in_base_name)\n else:\n if not os.path.exists(dest_parent_dir):\n os.makedirs(dest_parent_dir)\n out_file = dest\n shutil.move(in_file, dest)\n\n return out_file",
"def copy(source_path, skip_existing=True):\n\n dest_path = source_path.replace(source_dir.strip('/'), dest_dir.strip('/'))\n\n # Skip if dest file already exists\n if skip_existing and os.path.exists(dest_path):\n return\n\n # Create directory if necessary\n os.makedirs(os.path.dirname(dest_path), exist_ok=True)\n\n copyfile(source_path, dest_path)"
] | [
"0.7349947",
"0.68736243",
"0.6767133",
"0.6640591",
"0.65091836",
"0.6415128",
"0.6383818",
"0.63748705",
"0.6370775",
"0.63415533",
"0.6336806",
"0.63208145",
"0.63091844",
"0.62984276",
"0.61878335",
"0.6184522",
"0.6032903",
"0.60203373",
"0.6016925",
"0.59881675",
"0.59866506",
"0.5974061",
"0.5958896",
"0.5954288",
"0.5945952",
"0.5932426",
"0.5907843",
"0.58870035",
"0.5856898",
"0.5842453"
] | 0.83426297 | 0 |
Reports that a build is not up-to-date. This provides common reporting for formats that have complicated conditions for checking whether a build is up-to-date. Formats that expect exact output from the command (make) can just set stdout= when they call the run_build() method. | def report_not_up_to_date(self):
print "Build is not up-to-date:"
print self.banner('STDOUT ')
print self.stdout()
stderr = self.stderr()
if stderr:
print self.banner('STDERR ')
print stderr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_build_log(mysettings, out=None):\n\tlogfile = mysettings.get(\"PORTAGE_LOG_FILE\")\n\tif logfile is None:\n\t\treturn\n\ttry:\n\t\tf = open(_unicode_encode(logfile, encoding=_encodings['fs'],\n\t\t\terrors='strict'), mode='rb')\n\texcept EnvironmentError:\n\t\treturn\n\n\tf_real = None\n\tif logfile.endswith('.gz'):\n\t\tf_real = f\n\t\tf = gzip.GzipFile(filename='', mode='rb', fileobj=f)\n\n\tam_maintainer_mode = []\n\tbash_command_not_found = []\n\tbash_command_not_found_re = re.compile(\n\t\tr'(.*): line (\\d*): (.*): command not found$')\n\tcommand_not_found_exclude_re = re.compile(r'/configure: line ')\n\thelper_missing_file = []\n\thelper_missing_file_re = re.compile(\n\t\tr'^!!! (do|new).*: .* does not exist$')\n\n\tconfigure_opts_warn = []\n\tconfigure_opts_warn_re = re.compile(\n\t\tr'^configure: WARNING: [Uu]nrecognized options: ')\n\n\t# Exclude output from dev-libs/yaz-3.0.47 which looks like this:\n\t#\n\t#Configuration:\n\t# Automake: ${SHELL} /var/tmp/portage/dev-libs/yaz-3.0.47/work/yaz-3.0.47/config/missing --run automake-1.10\n\tam_maintainer_mode_re = re.compile(r'/missing --run ')\n\tam_maintainer_mode_exclude_re = \\\n\t\tre.compile(r'(/missing --run (autoheader|autotest|help2man|makeinfo)|^\\s*Automake:\\s)')\n\n\tmake_jobserver_re = \\\n\t\tre.compile(r'g?make\\[\\d+\\]: warning: jobserver unavailable:')\n\tmake_jobserver = []\n\n\tdef _eerror(lines):\n\t\tfor line in lines:\n\t\t\teerror(line, phase=\"install\", key=mysettings.mycpv, out=out)\n\n\ttry:\n\t\tfor line in f:\n\t\t\tline = _unicode_decode(line)\n\t\t\tif am_maintainer_mode_re.search(line) is not None and \\\n\t\t\t\tam_maintainer_mode_exclude_re.search(line) is None:\n\t\t\t\tam_maintainer_mode.append(line.rstrip(\"\\n\"))\n\n\t\t\tif bash_command_not_found_re.match(line) is not None and \\\n\t\t\t\tcommand_not_found_exclude_re.search(line) is None:\n\t\t\t\tbash_command_not_found.append(line.rstrip(\"\\n\"))\n\n\t\t\tif helper_missing_file_re.match(line) is not None:\n\t\t\t\thelper_missing_file.append(line.rstrip(\"\\n\"))\n\n\t\t\tif configure_opts_warn_re.match(line) is not None:\n\t\t\t\tconfigure_opts_warn.append(line.rstrip(\"\\n\"))\n\n\t\t\tif make_jobserver_re.match(line) is not None:\n\t\t\t\tmake_jobserver.append(line.rstrip(\"\\n\"))\n\n\texcept zlib.error as e:\n\t\t_eerror([\"portage encountered a zlib error: '%s'\" % (e,),\n\t\t\t\"while reading the log file: '%s'\" % logfile])\n\tfinally:\n\t\tf.close()\n\n\tdef _eqawarn(lines):\n\t\tfor line in lines:\n\t\t\teqawarn(line, phase=\"install\", key=mysettings.mycpv, out=out)\n\twrap_width = 70\n\n\tif am_maintainer_mode:\n\t\tmsg = [_(\"QA Notice: Automake \\\"maintainer mode\\\" detected:\")]\n\t\tmsg.append(\"\")\n\t\tmsg.extend(\"\\t\" + line for line in am_maintainer_mode)\n\t\tmsg.append(\"\")\n\t\tmsg.extend(wrap(_(\n\t\t\t\"If you patch Makefile.am, \"\n\t\t\t\"configure.in, or configure.ac then you \"\n\t\t\t\"should use autotools.eclass and \"\n\t\t\t\"eautomake or eautoreconf. Exceptions \"\n\t\t\t\"are limited to system packages \"\n\t\t\t\"for which it is impossible to run \"\n\t\t\t\"autotools during stage building. 
\"\n\t\t\t\"See http://www.gentoo.org/p\"\n\t\t\t\"roj/en/qa/autofailure.xml for more information.\"),\n\t\t\twrap_width))\n\t\t_eqawarn(msg)\n\n\tif bash_command_not_found:\n\t\tmsg = [_(\"QA Notice: command not found:\")]\n\t\tmsg.append(\"\")\n\t\tmsg.extend(\"\\t\" + line for line in bash_command_not_found)\n\t\t_eqawarn(msg)\n\n\tif helper_missing_file:\n\t\tmsg = [_(\"QA Notice: file does not exist:\")]\n\t\tmsg.append(\"\")\n\t\tmsg.extend(\"\\t\" + line[4:] for line in helper_missing_file)\n\t\t_eqawarn(msg)\n\n\tif configure_opts_warn:\n\t\tmsg = [_(\"QA Notice: Unrecognized configure options:\")]\n\t\tmsg.append(\"\")\n\t\tmsg.extend(\"\\t\" + line for line in configure_opts_warn)\n\t\t_eqawarn(msg)\n\n\tif make_jobserver:\n\t\tmsg = [_(\"QA Notice: make jobserver unavailable:\")]\n\t\tmsg.append(\"\")\n\t\tmsg.extend(\"\\t\" + line for line in make_jobserver)\n\t\t_eqawarn(msg)\n\n\tf.close()\n\tif f_real is not None:\n\t\tf_real.close()",
"def checkBuildStatus(self):\n pass",
"def test_mismatching_releases_displays_err_msg(self):\n\n # The failure message that we expect to see\n expected_fail_msg = (\n f\"Provided release ({self.old_matlab_release}) does not match \"\n \"release found in VersionInfo.xml\"\n )\n\n build_msg = utils.get_build_output(\n docker_api_client=self.client.api,\n dockerfile_dirpath=self.dockerfile_dirpath,\n release=self.old_matlab_release,\n )\n\n self.assertTrue(\n any([expected_fail_msg in line for line in build_msg]),\n f\"The error message '{expected_fail_msg}' was not displayed\",\n )",
"def report_build_progress(self, build_id, current, total, group_name='',\n status_line=''):\n pass",
"def test_version_check_outdated(self):\n output = self.run_command(\"selfupdate --check bennr01:selfupdate_test_outdated\", exitcode=0)\n self.assertIn(\"Target: bennr01:selfupdate_test_outdated\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertIn(\"Already at latest version\", output)\n self.assertNotIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)",
"def testOutput(self):\n global base_dir\n\n base_dir = tempfile.mkdtemp()\n if not os.path.isdir(base_dir):\n os.mkdir(base_dir)\n build = builder.Builder(self.toolchains, base_dir, None, 1, 2,\n checkout=False, show_unknown=False)\n build.do_make = self.Make\n board_selected = self.boards.GetSelectedDict()\n\n build.BuildBoards(self.commits, board_selected, keep_outputs=False,\n verbose=False)\n lines = terminal.GetPrintTestLines()\n count = 0\n for line in lines:\n if line.text.strip():\n count += 1\n\n # We should get two starting messages, then an update for every commit\n # built.\n self.assertEqual(count, len(commits) * len(boards) + 2)\n build.SetDisplayOptions(show_errors=True);\n build.ShowSummary(self.commits, board_selected)\n #terminal.EchoPrintTestLines()\n lines = terminal.GetPrintTestLines()\n self.assertEqual(lines[0].text, '01: %s' % commits[0][1])\n self.assertEqual(lines[1].text, '02: %s' % commits[1][1])\n\n # We expect all archs to fail\n col = terminal.Color()\n self.assertSummary(lines[2].text, 'sandbox', '+', ['board4'])\n self.assertSummary(lines[3].text, 'arm', '+', ['board1'])\n self.assertSummary(lines[4].text, 'powerpc', '+', ['board2', 'board3'])\n\n # Now we should have the compiler warning\n self.assertEqual(lines[5].text, 'w+%s' %\n errors[0].rstrip().replace('\\n', '\\nw+'))\n self.assertEqual(lines[5].colour, col.MAGENTA)\n\n self.assertEqual(lines[6].text, '03: %s' % commits[2][1])\n self.assertSummary(lines[7].text, 'sandbox', '+', ['board4'])\n self.assertSummary(lines[8].text, 'arm', '', ['board1'], ok=True)\n self.assertSummary(lines[9].text, 'powerpc', '+', ['board2', 'board3'])\n\n # Compiler error\n self.assertEqual(lines[10].text, '+%s' %\n errors[1].rstrip().replace('\\n', '\\n+'))\n\n self.assertEqual(lines[11].text, '04: %s' % commits[3][1])\n self.assertSummary(lines[12].text, 'sandbox', '', ['board4'], ok=True)\n self.assertSummary(lines[13].text, 'powerpc', '', ['board2', 'board3'],\n ok=True)\n\n # Compile error fixed\n self.assertEqual(lines[14].text, '-%s' %\n errors[1].rstrip().replace('\\n', '\\n-'))\n self.assertEqual(lines[14].colour, col.GREEN)\n\n self.assertEqual(lines[15].text, 'w+%s' %\n errors[2].rstrip().replace('\\n', '\\nw+'))\n self.assertEqual(lines[15].colour, col.MAGENTA)\n\n self.assertEqual(lines[16].text, '05: %s' % commits[4][1])\n self.assertSummary(lines[17].text, 'sandbox', '+', ['board4'])\n self.assertSummary(lines[18].text, 'powerpc', '', ['board3'], ok=True)\n\n # The second line of errors[3] is a duplicate, so buildman will drop it\n expect = errors[3].rstrip().split('\\n')\n expect = [expect[0]] + expect[2:]\n self.assertEqual(lines[19].text, '+%s' %\n '\\n'.join(expect).replace('\\n', '\\n+'))\n\n self.assertEqual(lines[20].text, 'w-%s' %\n errors[2].rstrip().replace('\\n', '\\nw-'))\n\n self.assertEqual(lines[21].text, '06: %s' % commits[5][1])\n self.assertSummary(lines[22].text, 'sandbox', '', ['board4'], ok=True)\n\n # The second line of errors[3] is a duplicate, so buildman will drop it\n expect = errors[3].rstrip().split('\\n')\n expect = [expect[0]] + expect[2:]\n self.assertEqual(lines[23].text, '-%s' %\n '\\n'.join(expect).replace('\\n', '\\n-'))\n\n self.assertEqual(lines[24].text, 'w-%s' %\n errors[0].rstrip().replace('\\n', '\\nw-'))\n\n self.assertEqual(lines[25].text, '07: %s' % commits[6][1])\n self.assertSummary(lines[26].text, 'sandbox', '+', ['board4'])\n\n # Pick out the correct error lines\n expect_str = errors[4].rstrip().replace('%(basedir)s', '').split('\\n')\n expect = 
expect_str[3:8] + [expect_str[-1]]\n self.assertEqual(lines[27].text, '+%s' %\n '\\n'.join(expect).replace('\\n', '\\n+'))\n\n # Now the warnings lines\n expect = [expect_str[0]] + expect_str[10:12] + [expect_str[9]]\n self.assertEqual(lines[28].text, 'w+%s' %\n '\\n'.join(expect).replace('\\n', '\\nw+'))\n\n self.assertEqual(len(lines), 29)\n shutil.rmtree(base_dir)",
"def test_check_no_download(self):\n output = self.run_command(\"selfupdate --check\", exitcode=0)\n contains_latest_version = (\"Already at latest version\" in output)\n contains_new_version = (\"New version available\" in output)\n assert (contains_latest_version or contains_new_version)\n self.assertNotIn(\"Url: \", output)\n self.assertNotIn(\"Update completed.\", output)\n self.assertNotIn(\"Failed to update. Please try again.\", output)",
"def test_version_check_does_not_exist(self):\n output = self.run_command(\"selfupdate --check selfupdate_test_does_not_exist\", exitcode=0)\n self.assertIn(\"Target: ywangd:selfupdate_test_does_not_exist\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Already at latest version\", output)\n self.assertNotIn(\"New version available\", output)\n self.assertIn(\"Error: \", output)",
"def check_build_status(owner, repository, ref):\n return get_hvcs().check_build_status(owner, repository, ref)",
"def test_deploy_no_change_log_messages(deploy_no_change_result: Result) -> None:\n expected_lines = [\n \"deployment_1:processing deployment (in progress)\",\n \"deployment_1:processing regions sequentially...\",\n \"\",\n \"deployment_1.test_raw_cfn:processing module in us-east-1 (in progress)\",\n \"cfngin.yml:init (in progress)\",\n \"skipped; cfngin_bucket not defined\",\n \"cfngin.yml:init (complete)\",\n \"cfngin.yml:deploy (in progress)\",\n \"raw-template-vpc:skipped (nochange)\",\n \"cfngin.yml:deploy (complete)\",\n \"deployment_1.test_raw_cfn:processing module in us-east-1 (complete)\",\n \"deployment_1:processing deployment (complete)\",\n ]\n expected = \"\\n\".join(f\"[runway] {msg}\" for msg in expected_lines)\n assert expected in deploy_no_change_result.stdout, (\n \"stdout does not match expected\\n\\nEXPECTED:\\n\"\n f\"{expected}\\n\\nSTDOUT:\\n{deploy_no_change_result.stdout}\"\n )",
"def log_build(self, build):\n with self._conn.begin():\n if build.status:\n build_id = self._conn.execute(\n \"VALUES (log_build_success(%s, %s, %s, %s, %s, %s, \"\n \"CAST(%s AS files ARRAY), CAST(%s AS dependencies ARRAY)\"\n \"))\",\n (\n build.package,\n build.version,\n build.slave_id,\n build.duration,\n build.abi_tag,\n sanitize(build.output),\n [(\n file.filename,\n None,\n file.filesize,\n file.filehash,\n file.package_tag,\n file.package_version_tag,\n file.py_version_tag,\n file.abi_tag,\n file.platform_tag,\n file.requires_python,\n )\n for file in build.files.values()],\n [(\n file.filename,\n tool,\n dependency,\n )\n for file in build.files.values()\n for tool, dependencies in file.dependencies.items()\n for dependency in dependencies]\n )).scalar()\n else:\n build_id = self._conn.execute(\n \"VALUES (log_build_failure(%s, %s, %s, %s, %s, %s))\",\n (\n build.package,\n build.version,\n build.slave_id,\n build.duration,\n build.abi_tag,\n sanitize(build.output),\n )).scalar()\n build.logged(build_id)",
"def ValidateOutput(self, stdout, stderr, result):\n # Store .ref and .log files in a platform-specific subdirectory\n # (avoid possible clashes if several platforms are tested)\n if \"CMTCONFIG\" in os.environ:\n try: os.mkdir( os.environ['CMTCONFIG'] )\n except OSError: pass\n stdout_log_path=os.environ['CMTCONFIG']+os.sep\n else:\n stdout_log_path=''\n # Maybe some verbosity is needed here\n if not(self.stdout_tag==''):\n strlog='the tag is ' + self.stdout_tag\n logger.debug('ExecTestBase2:ValidateOutput: '+strlog)\n if not(self.stdout_tol==0):\n strlog='the tolerance is ' + repr(self.stdout_tol) \n logger.debug('ExecTestBase2:ValidateOutput: '+strlog)\n if not(self.stdout_ref==''):\n if not(self.stdout_ref_path==''):\n self.reference_file=self.stdout_ref_path+os.sep+self.stdout_ref \n else:\n self.reference_file=self.stdout_ref \n if os.path.abspath(self.reference_file) != os.path.abspath(stdout_log_path+str(self.stdout_ref)):\n shutil.copyfile(os.path.abspath(self.reference_file),\n stdout_log_path+str(self.stdout_ref))\n ref_file_stdout=''\n for l in fileinput.input(stdout_log_path+str(self.stdout_ref)): \n ref_file_stdout=ref_file_stdout+l.strip()+'\\n'\n if not(self.excluded_lines==''):\n strlog='the excluded lines are ' + self.excluded_lines \n logger.debug('ExecTestBase2:ValidateOutput: '+strlog) \n # Copy the log for later use as ref \n f_ouput=open(stdout_log_path+self.stdout_ref.rstrip('ref')+'log', 'w')\n f_ouput.write(stdout)\n f_ouput.close()\n # Check to see if the standard output matches.\n self.causes = []\n if not(self.stdout=='*'):\n if not(self.stdout_ref==''):\n # the reference output is described in a\n # external reference file \n if not self.__CompareText1(stdout, ref_file_stdout, result):\n self.causes.append(\"standard output\") \n result[\"ExecTest.expected_stdout\"] = result.Quote(self.stdout)\n else:\n # the reference output is described in the test-case\n if not self.__CompareText1(stdout, self.stdout,result):\n self.causes.append(\"standard output\") \n result[\"ExecTest.expected_stdout\"] = result.Quote(self.stdout)\n else:\n result[\"ExecTest.expected_stdout\"] = result.Quote(self.stdout)\n \n # Check to see if the standard error matches.\n if not(self.stderr=='*'):\n if not self.__CompareText(stderr, self.stderr):\n self.causes.append(\"standard error\")\n result[\"ExecTest.expected_stderr\"] = result.Quote(self.stderr)\n else: \n result[\"ExecTest.expected_stderr\"] = result.Quote(self.stderr)\n #\n return self.causes",
"def info_build_test(self):\n\n self._export(\"H0\", \"0.1\")\n\n self._export(\"H1a\", \"0.1\", deps=[(\"H0/0.1@lu/st\", \"private\")])\n self._export(\"H1b\", \"0.1\", deps=[\"H0/0.1@lu/st\"])\n self._export(\"H1c\", \"0.1\", deps=[(\"H0/0.1@lu/st\", \"private\")])\n\n self._export(\"H2a\", \"0.1\", deps=[\"H1a/0.1@lu/st\"])\n self._export(\"H2c\", \"0.1\", deps=[\"H1c/0.1@lu/st\"])\n\n self._export(\"H3\", \"0.1\", deps=[\"H2a/0.1@lu/st\",\n \"H2c/0.1@lu/st\"])\n\n # If we install H3 we need to build all except H1b\n self.clients[\"H3\"].run(\"info --build missing\")\n self.assert_last_line(self.clients[\"H3\"],\n \"H0/0.1@lu/st, H1a/0.1@lu/st, H1c/0.1@lu/st, H2a/0.1@lu/st, H2c/0.1@lu/st\")\n\n # If we install H0 we need to build nothing (current project)\n self.clients[\"H0\"].run(\"info --build missing\")\n self.assert_last_line(self.clients[\"H0\"], \"\")\n\n # If we install H0 we need to build H0\n self.clients[\"H1a\"].run(\"info --build missing\")\n self.assert_last_line(self.clients[\"H1a\"], \"H0/0.1@lu/st\")\n\n # If we build and upload H1a and H1c, no more H0 (private) is required\n self.clients[\"H3\"].run(\"install H1a/0.1@lu/st --build \")\n self.clients[\"H3\"].run(\"install H1c/0.1@lu/st --build \")\n self.clients[\"H3\"].run(\"upload H1a/0.1@lu/st --all\")\n self.clients[\"H3\"].run(\"upload H1c/0.1@lu/st --all\")\n\n self.clients[\"H3\"].run(\"remove '*' -f\")\n self.clients[\"H3\"].run(\"info --build missing\")\n self.assert_last_line(self.clients[\"H3\"],\n \"H2a/0.1@lu/st, H2c/0.1@lu/st\")\n\n # But if we force to build all, all nodes have to be built\n self.clients[\"H3\"].run(\"remove '*' -f\")\n self.clients[\"H3\"].run(\"info --build\")\n self.assert_last_line(self.clients[\"H3\"],\n \"H0/0.1@lu/st, H1a/0.1@lu/st, H1c/0.1@lu/st, H2a/0.1@lu/st, H2c/0.1@lu/st\")\n\n # Now upgrade the recipe H1a and upload it (but not the package)\n # so the package become outdated\n conanfile_path = os.path.join(self.clients[\"H1a\"].current_folder, CONANFILE)\n conanfile = load(conanfile_path)\n conanfile += \"\\n# MODIFIED\"\n save(conanfile_path, conanfile)\n self.clients[\"H1a\"].run(\"export lu/st\")\n self.clients[\"H1a\"].run(\"upload H1a/0.1@lu/st\") # NOW IS OUTDATED!\n\n # Without build outdated the built packages are the same\n self.clients[\"H3\"].run(\"remove '*' -f\")\n self.clients[\"H3\"].run(\"info --build missing\")\n self.assert_last_line(self.clients[\"H3\"],\n \"H2a/0.1@lu/st, H2c/0.1@lu/st\")\n\n # But with build outdated we have to build the private H0 (but only once) and H1a\n self.clients[\"H3\"].run(\"remove '*' -f\")\n self.clients[\"H3\"].run(\"info --build outdated\")\n self.assert_last_line(self.clients[\"H3\"],\n \"H0/0.1@lu/st, H1a/0.1@lu/st, H2a/0.1@lu/st, H2c/0.1@lu/st\")",
"def _check_nothing_changed(self):\n if self.data['history_file'] is None:\n return\n nothing_yet = self.data['nothing_changed_yet']\n if nothing_yet not in self.data['history_last_release']:\n return\n # We want quotes around the text, but also want to avoid\n # printing text with a u'unicode marker' in front...\n pretty_nothing_changed = '\"{}\"'.format(nothing_yet)\n if not utils.ask(\n \"WARNING: Changelog contains {}. Are you sure you \"\n \"want to release?\".format(pretty_nothing_changed),\n default=False):\n logger.info(\"You can use the 'lasttaglog' command to \"\n \"see the commits since the last tag.\")\n sys.exit(1)",
"def test_do_not_need_alternate(self):\n os.unlink(os.path.join(self.reports_dir,\n 'TEST-testutil.manual_test.LintTest-fail.xml'\n ))\n os.unlink(os.path.join(self.reports_dir,\n 'TEST-testutil.manual_test.LintTest-success.xml'\n ))\n actual = self._analyze_make_output()\n self.assertEqual(1, actual)\n self.assertIn('E999 lint error from txt-file.', self.errors[0])",
"def test__clean_status(self):\n assert not dockerprettyps._clean_status(\"Exited (1) 22 minutes ago\")\n assert dockerprettyps._clean_status(\"Up 12 minutes\")",
"def composeTestingSummaryEmail(self):\r\n brokenPlatforms = 0\r\n for platform in sorted_keys( self.platforms ):\r\n if self.platforms[platform].isBroken():\r\n brokenPlatforms = brokenPlatforms + 1\r\n\r\n if brokenPlatforms == 0:\r\n return None;\r\n \r\n message = \"\"\"From: Douglas Gregor <[email protected]>\r\nTo: [email protected]\r\nReply-To: [email protected]\r\nSubject: [Report] \"\"\"\r\n message += str(brokenPlatforms) + \" potentially broken platforms on \" + branch\r\n if branch != 'trunk':\r\n message += ' branch'\r\n message += \" (\" + str(datetime.date.today()) + \")\"\r\n message += \"\"\"\r\n\r\nPotentially broken platforms for Boost regression testing\r\n\"\"\"\r\n message += \"Report time: \" + self.date + \"\"\"\r\n\r\nThis report lists the high-priority platforms that are exhibiting a\r\nlarge number of regression test failures, which might indicate a problem\r\nwith the test machines or testing harness.\r\n\r\nDetailed report:\r\n\"\"\"\r\n\r\n message += ' ' + self.url + '\\n'\r\n\r\n message += \"\"\"\r\nPlatforms with a large number of failures:\r\n\"\"\"\r\n for platform in sorted_keys( self.platforms ):\r\n if self.platforms[platform].isBroken():\r\n message += (' ' + platform + ' ('\r\n + str(len(self.platforms[platform].failures))\r\n + ' failures)\\n')\r\n\r\n return message",
"def build_report(self, msg=''):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\ts = '\\n'\n\t\ts += '################################################################################\\n'\n\t\ts += '# COMMAND HISTORY BEGIN ' + shutit_global.shutit_global_object.build_id + '\\n'\n\t\ts += self.get_commands()\n\t\ts += '# COMMAND HISTORY END ' + shutit_global.shutit_global_object.build_id + '\\n'\n\t\ts += '################################################################################\\n'\n\t\ts += '################################################################################\\n'\n\t\ts += '# BUILD REPORT FOR BUILD BEGIN ' + shutit_global.shutit_global_object.build_id + '\\n'\n\t\ts += '# ' + msg + '\\n'\n\t\tif self.build['report'] != '':\n\t\t\ts += self.build['report'] + '\\n'\n\t\telse:\n\t\t\ts += '# Nothing to report\\n'\n\t\tif 'container_id' in self.target:\n\t\t\ts += '# CONTAINER_ID: ' + self.target['container_id'] + '\\n'\n\t\ts += '# BUILD REPORT FOR BUILD END ' + shutit_global.shutit_global_object.build_id + '\\n'\n\t\ts += '###############################################################################\\n'\n\t\ts += '# INVOKING COMMAND WAS: ' + sys.executable\n\t\tfor arg in sys.argv:\n\t\t\ts += ' ' + arg\n\t\ts += '\\n'\n\t\ts += '###############################################################################\\n'\n\t\treturn s",
"def html_message_formatter(mode, name, build, results, master_status):\n result = Results[results]\n\n limit_lines = 80\n text = list()\n text.append(u'<h4>Build status: %s</h4>' % result.upper())\n text.append(u'<table cellspacing=\"10\"><tr>')\n text.append(u\"<td>Buildslave for this Build:</td><td><b>%s</b></td></tr>\" % build.getSlavename())\n if master_status.getURLForThing(build):\n text.append(u'<tr><td>Complete logs for all build steps:</td><td><a href=\"%s\">%s</a></td></tr>'\n % (master_status.getURLForThing(build),\n master_status.getURLForThing(build))\n )\n text.append(u'<tr><td>Build Reason:</td><td>%s</td></tr>' % build.getReason())\n source = u\"\"\n for ss in build.getSourceStamps():\n if ss.codebase:\n source += u'%s: ' % ss.codebase\n if ss.branch:\n source += u\"[branch %s] \" % ss.branch\n if ss.revision:\n source += ss.revision\n else:\n source += u\"HEAD\"\n if ss.patch:\n source += u\" (plus patch)\"\n if ss.patch_info: # add patch comment\n source += u\" (%s)\" % ss.patch_info[1]\n text.append(u\"<tr><td>Build Source Stamp:</td><td><b>%s</b></td></tr>\" % source)\n text.append(u\"<tr><td>Blamelist:</td><td>%s</td></tr>\" % \",\".join(build.getResponsibleUsers()))\n text.append(u'</table>')\n if ss.changes:\n text.append(u'<h4>Recent Changes:</h4>')\n for c in ss.changes:\n cd = c.asDict()\n when = datetime.datetime.fromtimestamp(cd['when'] ).ctime()\n text.append(u'<table cellspacing=\"10\">')\n text.append(u'<tr><td>Repository:</td><td>%s</td></tr>' % cd['repository'] )\n text.append(u'<tr><td>Project:</td><td>%s</td></tr>' % cd['project'] )\n text.append(u'<tr><td>Time:</td><td>%s</td></tr>' % when)\n text.append(u'<tr><td>Changed by:</td><td>%s</td></tr>' % cd['who'] )\n text.append(u'<tr><td>Comments:</td><td>%s</td></tr>' % cd['comments'] )\n text.append(u'</table>')\n files = cd['files']\n if files:\n text.append(u'<table cellspacing=\"10\"><tr><th align=\"left\">Files</th></tr>')\n for file in files:\n text.append(u'<tr><td>%s:</td></tr>' % file['name'] )\n text.append(u'</table>')\n text.append(u'<br>')\n # get all the steps in build in reversed order\n rev_steps = reversed(build.getSteps())\n # find the last step that finished\n for step in rev_steps:\n if step.isFinished():\n break\n # get logs for the last finished step\n if step.isFinished():\n logs = step.getLogs()\n # No step finished, loop just exhausted itself; so as a special case we fetch all logs\n else:\n logs = build.getLogs()\n # logs within a step are in reverse order. Search back until we find stdio\n for log in reversed(logs):\n if log.getName() == 'stdio':\n break\n name = \"%s.%s\" % (log.getStep().getName(), log.getName())\n status, dummy = log.getStep().getResults()\n content = log.getText().splitlines() # Note: can be VERY LARGE\n url = u'%s/steps/%s/logs/%s' % (master_status.getURLForThing(build),\n log.getStep().getName(),\n log.getName())\n\n text.append(u'<i>Detailed log of last build step:</i> <a href=\"%s\">%s</a>'\n % (url, url))\n text.append(u'<br>')\n text.append(u'<h4>Last %d lines of \"%s\"</h4>' % (limit_lines, name))\n unilist = list()\n for line in content[len(content)-limit_lines:]:\n unilist.append(cgi.escape(unicode(line,'utf-8')))\n text.append(u'<pre>')\n text.extend(unilist)\n text.append(u'</pre>')\n text.append(u'<br><br>')\n text.append(u'<b>-The Buildbot</b>')\n return {\n 'body': u\"\\n\".join(text),\n 'type': 'html'\n }",
"def test_check_version_non_release(self):\n with self.assertLogs(\"dakara_feeder.version\", \"DEBUG\") as logger:\n with patch.multiple(\n \"dakara_feeder.version\", __version__=\"0.1.0-dev\", __date__=\"1970-01-01\"\n ):\n check_version()\n\n # assert effect on logs\n self.assertListEqual(\n logger.output,\n [\n \"INFO:dakara_feeder.version:\" \"Dakara feeder 0.1.0-dev (1970-01-01)\",\n \"WARNING:dakara_feeder.version:\"\n \"You are running a dev version, use it at your own risks!\",\n ],\n )",
"def print_unidiff(self):\n\n color_stdout(\"\\nTest failed! Result content mismatch:\\n\", schema='error')\n with open(self.result, \"r\") as result:\n with open(self.reject, \"r\") as reject:\n result_time = time.ctime(os.stat(self.result).st_mtime)\n reject_time = time.ctime(os.stat(self.reject).st_mtime)\n diff = difflib.unified_diff(result.readlines(),\n reject.readlines(),\n self.result,\n self.reject,\n result_time,\n reject_time)\n\n color_stdout.writeout_unidiff(diff)",
"def test_failed():\n build()\n sh(\"%s %s --last-failed\" % (PYTHON, RUNNER_PY))",
"def test_nonexistent_report(self):\n command_line = [\"report\", \"notreport\"]\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)",
"def test_no_change_without_enough_results(self):\n MetadataUpdater.min_results_for_update = 2\n self.write_contents(\n 'external/wpt/fail.html.ini', \"\"\"\\\n [fail.html]\n expected: FAIL\n \"\"\")\n self.update({\n 'results': [{\n 'test': '/fail.html',\n 'status': 'PASS',\n 'expected': 'FAIL',\n }],\n })\n self.assert_contents(\n 'external/wpt/fail.html.ini', \"\"\"\\\n [fail.html]\n expected: FAIL\n \"\"\")",
"def test_version_check_update_available(self):\n output = self.run_command(\"selfupdate --check bennr01:selfupdate_test_future\", exitcode=0)\n self.assertIn(\"Target: bennr01:selfupdate_test_future\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Already at latest version\", output)\n self.assertIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)",
"def test_not_exectuable(self):\n (status, output, imlog, makelog) = \\\n self.run_instmake_build(log_prefix=\"not-executable\",\n make_opts=[\"not-executable\"])\n\n self.assertEqual(status, util.SUCCESS, output)",
"def getLastFinishedBuild():",
"def test_make_output_fail():\n with pytest.raises(ValueError):\n make_output_format('dummy_format', LOG_DIR)",
"def get_status():\n\n # pylint: disable=global-statement\n global _version\n # pylint: global-statement\n\n if not _version:\n this_file_dir = os.path.dirname(__file__)\n file_path = os.path.join(this_file_dir, \"../../build_version.txt\")\n with open(file_path, \"r\") as f:\n _version = f.read()\n\n # _print_headers(headers)\n return \"The service version: {}\".format(_version)",
"def test_install_error_message(self):\n\n fail_msg = \"Failure message\"\n\n fail_file = Path(self.dockerfile_dirpath) / \"matlab-install\" / \"FAIL\"\n\n with open(str(fail_file), \"w\") as ff:\n ff.write(fail_msg + \"\\n\")\n self.addCleanup(utils.remove_file, fail_file)\n\n build_msg = utils.get_build_output(\n docker_api_client=self.client.api,\n dockerfile_dirpath=self.dockerfile_dirpath,\n release=\"latest\",\n )\n\n self.assertTrue(any([fail_msg in msg for msg in build_msg]))"
] | [
"0.6245686",
"0.61322933",
"0.5972909",
"0.5953914",
"0.5951226",
"0.5934385",
"0.56919557",
"0.55922455",
"0.5552093",
"0.54457146",
"0.54384977",
"0.5403101",
"0.53969556",
"0.5394229",
"0.5389238",
"0.5372847",
"0.53642845",
"0.5359401",
"0.5345811",
"0.5342945",
"0.5335017",
"0.532175",
"0.53071886",
"0.5304977",
"0.53043556",
"0.52992266",
"0.52423275",
"0.5221551",
"0.51882756",
"0.51634693"
] | 0.7970113 | 0 |
Runs gyp against the specified gyp_file with the specified args. | def run_gyp(self, gyp_file, *args, **kw):
# When running gyp, and comparing its output we use a comparitor
# that ignores the line numbers that gyp logs in its debug output.
if kw.pop('ignore_line_numbers', False):
kw.setdefault('match', match_modulo_line_numbers)
# TODO: --depth=. works around Chromium-specific tree climbing.
depth = kw.pop('depth', '.')
run_args = ['--depth='+depth]
run_args.append(gyp_file)
if self.no_parallel:
run_args += ['--no-parallel']
# TODO: if extra_args contains a '--build' flag
# we really want that to only apply to the last format (self.format).
run_args.extend(self.extra_args)
# Default xcode_ninja_target_pattern to ^.*$ to fix xcode-ninja tests
xcode_ninja_target_pattern = kw.pop('xcode_ninja_target_pattern', '.*')
run_args.extend(
['-G', 'xcode_ninja_target_pattern=%s' % xcode_ninja_target_pattern])
run_args.extend(args)
return self.run(program=self.gyp, arguments=run_args, **kw) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def TestGyp(*args, **kw):\n format = kw.pop('format', os.environ.get('TESTGYP_FORMAT'))\n if format != 'ninja':\n raise Exception(\"unknown format %r\" % format)\n return TestGypNinja(*args, **kw)",
"def build(self, gyp_file, target=None, **kw):\n raise NotImplementedError",
"def run_python_file(python, file_args, directives=None):\n args = []\n if directives:\n for directive in directives:\n args.extend(('-X', directive))\n args.extend(file_args)\n command = (\n \"import Cython.Build.BuildExecutable as bex; \"\n \"bex.DEBUG = False; \"\n \"bex.build_and_run({args!r})\"\n ).format(args=args)\n run_python(python, command)",
"def package_std_dyn(args: Namespace) -> None:\n pass\n p = Packager.from_args(args)\n pkg_dir = getattr(args, \"package-dir\")\n p.package_standalone_dyn(pkg_dir)\n p.package_langkit_support_dyn(pkg_dir)",
"def pants(args):\n os.chdir(git_toplevel())\n return run(\"./pants %s\" % args) if args is not None else None",
"def run_mypy(args: Namespace) -> None:\n # Make sure mypy can find the type hints for the Libpythonlang/Liblktlang\n # Python bindings.\n env = dict(os.environ)\n for prj in (\"python\", \"lkt\"):\n add_to_path(\n env,\n \"MYPYPATH\",\n P.join(LANGKIT_ROOT, \"contrib\", prj, \"build\", \"python\")\n )\n subprocess.check_call([\"mypy\"], cwd=LANGKIT_ROOT, env=env)",
"def run():\n args = parse_args(sys.argv[1:])\n fnames = args.fnames\n runner = PylintRunner(args)\n runner.run(fnames)",
"def _main(args):\n if args.files:\n _update_files()\n\n if args.templates:\n _update_template(args.template_definition)",
"def main(args=None):\n\n args, _ = root_parser.parse_known_args(args=args)\n import_path = ENTRY_POINTS[args.test]\n module = import_module(import_path)\n main_fnc = getattr(module, \"main\")\n _check_main(main_fnc)\n if args.dry:\n return\n main_fnc()",
"def main():\r\n args = getargs()\r\n testng_file = args.testng_file\r\n url = args.url\r\n fetch_testng(testng_file, url)",
"def run(fips_dir, proj_dir, args) :\n if len(args) > 0 :\n proj_name = args[0]\n proj_dir = util.get_project_dir(fips_dir, proj_name)\n dep.fetch_imports(fips_dir, proj_dir)",
"def run(self, *args, **kw):\n if kw.has_key('SYMROOT'):\n del kw['SYMROOT']\n super(TestGypBase, self).run(*args, **kw)",
"def gen(\n file: str,\n infer: bool = typer.Option(\n True, help=\"Whether to run type inference on code examples.\"\n ),\n exec: bool = typer.Option(\n False, help=\"Whether to attempt to execute doctring code.\"\n ),\n experimental: bool = typer.Option(False, help=\"Use experimental Ts parsing\"),\n debug: bool = False,\n dummy_progress: bool = typer.Option(False, help=\"Disable rich progress bar\"),\n):\n _intro()\n from papyri.gen import gen_main\n\n gen_main(\n infer=infer,\n exec_=exec,\n target_file=file,\n experimental=experimental,\n debug=debug,\n dummy_progress=dummy_progress,\n )",
"def run_python_script(package=None, module=None, args=[], p_args=[]):\n assert module is not None\n assert isinstance(args, (tuple, list)) and isinstance(p_args, (tuple, list))\n path = python_script_exists(package, module)\n run_program(sys.executable, p_args + [path] + args)",
"def up_to_date(self, gyp_file, target=None, **kw):\n raise NotImplementedError",
"def main(args):",
"def main(args):",
"def main():\n args = parse_args()\n process_args(args)",
"def test(args):\n try:\n import pytest # pylint: disable=unused-import\n except ImportError:\n raise KedroCliError(NO_DEPENDENCY_MESSAGE.format(\"pytest\"))\n else:\n python_call(\"pytest\", args)",
"def package_deps(args: Namespace) -> None:\n p = Packager.from_args(args)\n p.package_deps(getattr(args, \"package-dir\"))",
"def main():\n opt = parse_opts()\n run(opt)",
"def main():\n opt = parse_opts()\n run(opt)",
"def main(args=None):",
"def main(args=None):",
"def main():\n widget = ParseGrypeJSON()\n logging.debug(f'argv {\",\".join(sys.argv)}')\n\n if len(sys.argv) > 1:\n widget.filename(sys.argv[1])\n\n sys.exit(widget.report())",
"def _Main():\n\n options, args = run_tests_util.ParseArgs('gtest')\n test_runner = run_tests_util.TestRunner(\n script_dir=SCRIPT_DIR,\n build_dir_var_name='GMOCK_BUILD_DIR',\n injected_build_dir_finder=GetGmockBuildDir)\n tests = test_runner.GetTestsToRun(args,\n options.configurations,\n options.built_configurations)\n if not tests:\n sys.exit(1) # Incorrect parameters given, abort execution.\n\n sys.exit(test_runner.RunTests(tests[0], tests[1]))",
"def main(cmd_line_args):\n if cmd_line_args.clean:\n clean_wrapper(cmd_line_args)\n if cmd_line_args.nobuild == False:\n build_wrapper(cmd_line_args)",
"def call_autopep8_executable(file_path):\r\n if os.name == \"nt\":\r\n exepath = \"C:/Program Files/Python35/Scripts/autopep8.exe\"\r\n pypep8rc = os.getenv(\"USERPROFILE\") + \"/Projects/AStyleTest/file-py/pypep8rc\"\r\n else:\r\n exepath = \"autopep8\"\r\n pypep8rc = os.getenv(\"HOME\") + \"/Projects/AStyleTest/file-py/pypep8rc\"\r\n\r\n # couldn't get options file to work \"--global-config=pypep8rc\"\r\n # so get them from the file\r\n options = []\r\n get_options(options, pypep8rc)\r\n print(\"options = {}\".format(options))\r\n print()\r\n\r\n # build the autopep8 call list\r\n autopep8_call = options\r\n autopep8_call.insert(0, exepath)\r\n autopep8_call.append(\"--in-place\")\r\n autopep8_call.append(\"--verbose\")\r\n autopep8_call.append(file_path)\r\n\r\n try:\r\n retval = subprocess.call(autopep8_call)\r\n except FileNotFoundError:\r\n print(\"Cannot find '\" + exepath + \"'\")\r\n os._exit(1)\r\n # a fatal return contains a bit-ORed 1\r\n # a usage error contains a bit-ORed 32\r\n # other bit values indicate messages that were issued\r\n # https://docs.pylint.org/en/latest/user_guide/run.html\r\n if retval and (retval & 1 or retval & 32):\r\n print(\"\\nBad autopep8 return: \" + str(retval))\r\n os._exit(1)",
"def main(args=None):\n pass",
"def main():\n\n # pylint: disable=import-outside-toplevel\n\n import sys\n cmd = ToyMaker()\n sys.exit(cmd.main())"
] | [
"0.65655947",
"0.64958555",
"0.5667241",
"0.56293947",
"0.56070536",
"0.54797554",
"0.5434344",
"0.5331075",
"0.52507114",
"0.5226264",
"0.5204586",
"0.51571316",
"0.5147136",
"0.5133636",
"0.51022816",
"0.5073251",
"0.5073251",
"0.5066566",
"0.50257397",
"0.5001088",
"0.497084",
"0.497084",
"0.4964944",
"0.4964944",
"0.49498487",
"0.49461064",
"0.4945458",
"0.4933639",
"0.492415",
"0.49234763"
] | 0.673332 | 0 |
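The row above documents run_gyp, the driver method of gyp's TestGyp regression-test harness. As a hedged illustration only (not part of the dataset rows), a test script typically exercises that method roughly as follows; the hello.gyp file name and the expected program output are assumptions made for the example.

    # Usage sketch, assuming gyp's test/lib/TestGyp.py is importable,
    # as it is for the scripts under gyp/test/.
    import TestGyp

    test = TestGyp.TestGyp()            # generator chosen via TESTGYP_FORMAT
    test.run_gyp('hello.gyp')           # generate build files (assumed name)
    test.build('hello.gyp')             # drive the underlying build tool
    test.run_built_executable('hello', stdout='Hello, world!\n')
    test.pass_test()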
Runs a build of the specified target against the configuration generated from the specified gyp_file. A 'target' argument of None or the special value TestGyp.DEFAULT specifies the default argument for the underlying build tool. A 'target' argument of TestGyp.ALL specifies the 'all' target (if any) of the underlying build tool. | def build(self, gyp_file, target=None, **kw):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_gyp(self, gyp_file, *args, **kw):\n\n # When running gyp, and comparing its output we use a comparitor\n # that ignores the line numbers that gyp logs in its debug output.\n if kw.pop('ignore_line_numbers', False):\n kw.setdefault('match', match_modulo_line_numbers)\n\n # TODO: --depth=. works around Chromium-specific tree climbing.\n depth = kw.pop('depth', '.')\n run_args = ['--depth='+depth]\n run_args.append(gyp_file)\n if self.no_parallel:\n run_args += ['--no-parallel']\n # TODO: if extra_args contains a '--build' flag\n # we really want that to only apply to the last format (self.format).\n run_args.extend(self.extra_args)\n # Default xcode_ninja_target_pattern to ^.*$ to fix xcode-ninja tests\n xcode_ninja_target_pattern = kw.pop('xcode_ninja_target_pattern', '.*')\n run_args.extend(\n ['-G', 'xcode_ninja_target_pattern=%s' % xcode_ninja_target_pattern])\n run_args.extend(args)\n return self.run(program=self.gyp, arguments=run_args, **kw)",
"def main(argv):\n target = argv[1] if len(argv) >= 2 else 'all'\n if target not in ('all', 'flatbuffers', 'webp', 'clean'):\n sys.stderr.write('No rule to build target %s.\\n' % target)\n\n if target in ('all', 'flatbuffers'):\n try:\n generate_flatbuffer_binaries()\n except BuildError as error:\n handle_build_error(error)\n return 1\n if target in ('all', 'webp'):\n try:\n generate_webp_textures()\n except BuildError as error:\n handle_build_error(error)\n return 1\n if target == 'clean':\n try:\n clean()\n except OSError as error:\n sys.stderr.write('Error cleaning: %s' % str(error))\n return 1\n\n return 0",
"def TestGyp(*args, **kw):\n format = kw.pop('format', os.environ.get('TESTGYP_FORMAT'))\n if format != 'ninja':\n raise Exception(\"unknown format %r\" % format)\n return TestGypNinja(*args, **kw)",
"def _run_pants(\n self,\n pants_repo: pathlib.PosixPath,\n pants_target: str,\n extension: str\n ) -> pathlib.PosixPath:\n\n # Version check for pants v1 vs v2 flags/behavior.\n is_pants_v1 = pants_repo.joinpath('pants.ini').exists()\n if is_pants_v1:\n goal_name = 'binary'\n tmp_root = None\n else:\n goal_name = 'package'\n # N.B. pants v2 doesn't support `--pants-distdir` outside of the build root.\n tmp_root = pants_repo.joinpath('dist')\n # N.B. The dist dir must exist for temporary_dir.\n tmp_root.mkdir(exist_ok=True)\n\n with temporary_dir(root_dir=tmp_root, cleanup=False) as tmp_dir:\n tmp_path = pathlib.PosixPath(tmp_dir)\n title = f'[Build] ./pants {goal_name} {pants_target}'\n cmd = f'cd {pants_repo} && ./pants --pants-distdir=\"{tmp_path}\" {goal_name} {pants_target}'\n return self._stream_binary_build_with_output(cmd, title, tmp_path, extension=extension)",
"def test_run_target(self):\n cmd = GreenTestCommand(Distribution())\n cmd.target = \"test\"\n cmd.ensure_finalized()\n cmd.run()\n self.assertThat(_subprocess_call_args(),\n Contains(\"test\"))",
"def main(target_dir=None, require_sk_user_config=False, gyp_source_dir=None):\n # Create a temporary folder to hold gyp and gypd files. Create it in SKIA_DIR\n # so that it is a sibling of gyp/, so the relationships between gyp files and\n # other files (e.g. platform_tools/android/gyp/dependencies.gypi, referenced\n # by android_deps.gyp as a relative path) is unchanged.\n # Use mkdtemp to find an unused folder name, but then delete it so copytree\n # can be called with a non-existent directory.\n tmp_folder = tempfile.mkdtemp(dir=SKIA_DIR)\n os.rmdir(tmp_folder)\n shutil.copytree(os.path.join(SKIA_DIR, GYP_FOLDER), tmp_folder)\n\n try:\n main_gyp_file = 'android_framework_lib.gyp'\n\n print 'Creating Android.mk',\n\n # Generate a separate VarsDict for each architecture type. For each\n # archtype:\n # 1. call android_framework_gyp.main() to generate gypd files\n # 2. call parse_gypd to read those gypd files into the VarsDict\n # 3. delete the gypd files\n #\n # Once we have the VarsDict for each architecture type, we combine them all\n # into a single Android.mk file, which can build targets of any\n # architecture type.\n\n # The default uses a non-existant archtype, to find all the general\n # variable definitions.\n default_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'other',\n False, False, False, gyp_source_dir)\n arm_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'arm', False,\n False, False, gyp_source_dir)\n arm_neon_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'arm',\n True, False, False, gyp_source_dir)\n x86_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'x86', False,\n False, False, gyp_source_dir)\n x86_64_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'x86_64',\n False, False, False, gyp_source_dir)\n\n mips_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'mips', False,\n False, False, gyp_source_dir)\n\n mips_dspr2_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'mips',\n False, True, False, gyp_source_dir)\n\n mips_dspr1_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'mips',\n False, False, True, gyp_source_dir)\n\n mips64_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'mips64',\n False, False, False, gyp_source_dir)\n\n arm64_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'arm64',\n False, False, False, gyp_source_dir)\n\n # Compute the intersection of all targets. All the files in the intersection\n # should be part of the makefile always. 
Each dict will now contain trimmed\n # lists containing only variable definitions specific to that configuration.\n var_dict_list = [default_var_dict, arm_var_dict, arm_neon_var_dict,\n x86_var_dict, x86_64_var_dict, mips_var_dict,\n mips_dspr1_var_dict, mips_dspr2_var_dict, mips64_var_dict,\n arm64_var_dict]\n common = vars_dict_lib.intersect(var_dict_list)\n\n common.LOCAL_MODULE.add('libskia')\n\n # Create SkUserConfig\n user_config = os.path.join(SKIA_DIR, 'include', 'config', 'SkUserConfig.h')\n if target_dir:\n dst_dir = target_dir\n else:\n dst_dir = os.path.join(SKIA_DIR, 'include', 'core')\n\n generate_user_config.generate_user_config(\n original_sk_user_config=user_config,\n require_sk_user_config=require_sk_user_config, target_dir=dst_dir,\n defines=common.DEFINES)\n\n tool_makefile_writer.generate_tool(gyp_dir=tmp_folder,\n target_file='bench.gyp',\n skia_trunk=target_dir,\n dest_dir='bench',\n skia_lib_var_dict=common,\n local_module_name='skia_nanobench',\n local_module_tags=['tests'],\n desired_targets=['nanobench'],\n gyp_source_dir=gyp_source_dir)\n\n tool_makefile_writer.generate_tool(gyp_dir=tmp_folder,\n target_file='dm.gyp',\n skia_trunk=target_dir,\n dest_dir='dm',\n skia_lib_var_dict=common,\n local_module_name='skia_dm',\n local_module_tags=['tests'],\n desired_targets=['dm'],\n gyp_source_dir=gyp_source_dir)\n\n # Now that the defines have been written to SkUserConfig and they've been\n # used to skip adding them to the tools makefiles, they are not needed in\n # Android.mk. Reset DEFINES.\n common.DEFINES.reset()\n\n # Further trim arm_neon_var_dict with arm_var_dict. After this call,\n # arm_var_dict (which will now be the intersection) includes all definitions\n # used by both arm and arm + neon, and arm_neon_var_dict will only contain\n # those specific to arm + neon.\n arm_var_dict = vars_dict_lib.intersect([arm_var_dict, arm_neon_var_dict])\n\n # Now create a list of VarsDictData holding everything but common.\n deviations_from_common = []\n deviations_from_common.append(makefile_writer.VarsDictData(\n arm_var_dict, 'arm'))\n deviations_from_common.append(makefile_writer.VarsDictData(\n arm_neon_var_dict, 'arm', 'ARCH_ARM_HAVE_NEON'))\n deviations_from_common.append(makefile_writer.VarsDictData(x86_var_dict,\n 'x86'))\n deviations_from_common.append(makefile_writer.VarsDictData(x86_64_var_dict,\n 'x86_64'))\n\n deviations_from_common.append(makefile_writer.VarsDictData(\n mips_dspr2_var_dict, 'mips', 'mips32r2dspr2-fp'))\n\n deviations_from_common.append(makefile_writer.VarsDictData(\n mips_dspr1_var_dict, 'mips', 'mips32r2dsp-fp'))\n\n deviations_from_common.append(makefile_writer.VarsDictData(mips_var_dict,\n 'mips'))\n\n deviations_from_common.append(makefile_writer.VarsDictData(mips64_var_dict,\n 'mips64'))\n\n deviations_from_common.append(makefile_writer.VarsDictData(arm64_var_dict,\n 'arm64'))\n\n makefile_writer.write_android_mk(target_dir=target_dir,\n common=common, deviations_from_common=deviations_from_common)\n\n makefile_writer.write_static_deps_mk(target_dir=target_dir,\n common=common, deviations_from_common=deviations_from_common)\n\n finally:\n shutil.rmtree(tmp_folder)",
"def up_to_date(self, gyp_file, target=None, **kw):\n raise NotImplementedError",
"def __init__(self, gyp_target, gn_target=None):\n if gn_target is None:\n gn_target = gyp_target\n self._gyp_target = gyp_target\n self._gn_target = gn_target\n\n self._skipped = []\n\n self._total_diffs = 0\n\n self._missing_gyp_flags = {}\n self._missing_gn_flags = {}\n\n self._missing_gyp_files = {}\n self._missing_gn_files = {}\n\n self._CompareFiles()",
"def compile(self, targets=None, name=None, out_dir=None,\n target=None, use_goma_module=False, **kwargs):\n\n targets = targets or self.c.compile_py.default_targets.as_jsonish()\n assert isinstance(targets, (list, tuple))\n\n if self.c.gyp_env.GYP_DEFINES.get('clang', 0) == 1:\n # Get the Clang revision before compiling.\n self._clang_version = self.get_clang_version()\n\n goma_env = self.get_env()\n goma_env.update(self.m.context.env)\n ninja_env = goma_env.copy()\n\n goma_env['GOMA_CACHE_DIR'] = self.m.goma.default_cache_path\n\n # Enable goma DepsCache\n goma_env['GOMA_DEPS_CACHE_FILE'] = \"goma_deps_cache\"\n\n if self.c.compile_py.mode:\n if (self.c.compile_py.mode == 'google_chrome' or\n self.c.compile_py.mode == 'official'):\n ninja_env['CHROMIUM_BUILD'] = '_google_chrome'\n\n if self.c.compile_py.mode == 'official':\n # Official builds are always Google Chrome.\n ninja_env['CHROME_BUILD_TYPE'] = '_official'\n\n if self.c.compile_py.goma_hermetic:\n goma_env['GOMA_HERMETIC'] = self.c.compile_py.goma_hermetic\n if self.c.compile_py.goma_enable_remote_link:\n goma_env['GOMA_ENABLE_REMOTE_LINK'] = 'true'\n if self.c.compile_py.goma_enable_localoutputcache:\n # Use per-slave cache. LocalOutputCache could use a lot of disks.\n # To run GC for older caches, we should share the same build\n # among builders.\n goma_env['GOMA_LOCAL_OUTPUT_CACHE_DIR'] = (\n self.m.path.join(self.m.goma.default_cache_path_per_slave,\n \"localoutputcache\"))\n if self.c.compile_py.goma_store_local_run_output:\n goma_env['GOMA_STORE_LOCAL_RUN_OUTPUT'] = 'true'\n if self.c.compile_py.goma_max_active_fail_fallback_tasks:\n goma_env['GOMA_MAX_ACTIVE_FAIL_FALLBACK_TASKS'] = (\n self.c.compile_py.goma_max_active_fail_fallback_tasks)\n if (self.m.tryserver.is_tryserver or\n self.c.compile_py.goma_failfast):\n # We rely on goma to meet cycle time goals on the tryserver. 
It's better\n # to fail early.\n goma_env['GOMA_FAIL_FAST'] = 'true'\n else:\n goma_env['GOMA_ALLOWED_NETWORK_ERROR_DURATION'] = '1800'\n\n if self.c.TARGET_CROS_BOARD:\n # Wrap 'compile' through 'cros chrome-sdk'\n kwargs['wrapper'] = self.get_cros_chrome_sdk_wrapper()\n\n if self.m.platform.is_linux and self.c.TARGET_CROS_BOARD:\n out_dir = 'out_%s' % self.c.TARGET_CROS_BOARD\n elif out_dir is None:\n out_dir = 'out'\n\n target_output_dir = self.m.path.abspath(\n self.m.path.join(self.m.path['checkout'], out_dir,\n target or self.c.build_config_fs))\n\n command = [str(self.m.depot_tools.ninja_path), '-w', 'dupbuild=err',\n '-C', target_output_dir]\n\n if self.c.compile_py.show_ninja_stats:\n command.extend(['-d', 'stats'])\n\n if self.c.compile_py.build_args:\n command.extend(self.c.compile_py.build_args)\n\n # TODO(tikuta): Remove this and let goma module set '-j'\n # inside build_with_goma.\n if use_goma_module:\n # Set -j just before 'with self.m.goma.build_with_goma('\n # for ninja_log_command being set correctly if starting goma\n # fails.\n if self.c.compile_py.goma_high_parallel:\n # This flag is set for experiment.\n command += ['-j', 3 * self.m.goma.recommended_goma_jobs]\n else:\n command += ['-j', self.m.goma.recommended_goma_jobs]\n\n if targets is not None:\n # Add build targets to command ('All', 'chrome' etc).\n command += targets\n\n assert 'env' not in kwargs\n\n assert 'cwd' not in kwargs\n\n\n if not use_goma_module:\n compile_exit_status = 1\n try:\n with self.m.context(cwd=self.m.context.cwd or self.m.path['checkout']):\n self._run_ninja(ninja_command=command,\n name=name or 'compile',\n ninja_env=ninja_env,\n ninja_confirm_noop=self.c.compile_py.ninja_confirm_noop,\n **kwargs)\n compile_exit_status = 0\n except self.m.step.StepFailure as e:\n compile_exit_status = e.retcode\n raise e\n finally:\n upload_ninja_log_args = [\n '--gsutil-py-path', self.m.depot_tools.gsutil_py_path,\n '--skip-sendgomatsmon',\n '--ninja-log-outdir', target_output_dir,\n '--ninja-log-command', str(command),\n '--ninja-log-exit-status', compile_exit_status,\n '--ninja-log-compiler', self.c.compile_py.compiler or 'unknown'\n ]\n self.m.python(\n name='upload_ninja_log',\n script=self.package_repo_resource(\n 'scripts', 'slave', 'upload_goma_logs.py'),\n args=upload_ninja_log_args)\n\n return\n\n try:\n with self.m.context(cwd=self.m.context.cwd or self.m.path['checkout']):\n self._run_ninja_with_goma(\n name=name or 'compile',\n ninja_command=command,\n ninja_env=ninja_env,\n goma_env=goma_env,\n ninja_log_outdir=target_output_dir,\n ninja_log_compiler=self.c.compile_py.compiler or 'goma',\n ninja_confirm_noop=self.c.compile_py.ninja_confirm_noop,\n **kwargs)\n except self.m.step.StepFailure as e:\n # Handle failures caused by goma.\n step_result = self.m.step.active_result\n failure_result_code = ''\n\n json_status = self.m.goma.jsonstatus['notice'][0]\n\n if (not json_status.get('infra_status')):\n failure_result_code = 'GOMA_SETUP_FAILURE'\n elif json_status['infra_status']['ping_status_code'] != 200:\n failure_result_code = 'GOMA_PING_FAILURE'\n elif json_status['infra_status'].get('num_user_error', 0) > 0:\n failure_result_code = 'GOMA_BUILD_ERROR'\n\n if failure_result_code:\n assert len(failure_result_code) <= 20\n properties = self.m.step.active_result.presentation.properties\n if not properties.get('extra_result_code'):\n properties['extra_result_code'] = []\n properties['extra_result_code'].append(failure_result_code)\n raise self.m.step.InfraFailure('Infra compile failure: %s' % 
e)\n\n raise e",
"def build(target_dir):\n prepare_demo_site(target_dir)\n\n patch_config(\n target_dir, (\"# CREATE_FULL_ARCHIVES = False\", \"CREATE_FULL_ARCHIVES = True\")\n )\n\n with cd(target_dir):\n __main__.main([\"build\"])",
"def build(ws, gbp, print_targets, targets):\n if not targets and ws.path == Path.cwd():\n # If run from workspace root with no targets, build all in dependency order\n targets = tuple(nx.dfs_postorder_nodes(ws.builddepends_graph()))\n elif not targets:\n # If run from a directory in the workspace with no targets, build the directory\n targets = (Path.cwd().stem,)\n\n info(\"Building {} repositories: {}\".format(len(targets), \" \".join(targets)))\n if print_targets:\n sys.exit(0)\n\n rc = 0\n remove_container = ws.docker_run()\n\n for t in targets:\n info(\"--- Building {}...\".format(t))\n rc = ws.buildpackage(Path(t), gbp)\n if rc:\n error(\"Building {} failed with return code {}.\".format(t, rc))\n break\n\n if remove_container:\n ws.docker_remove()\n\n sys.exit(rc)",
"def check(self):\n with working_dir(self.build_directory):\n self._if_ninja_target_execute(\"test\", parallel=False)",
"def test_meson_compile(self):\n\n def get_exe_name(basename: str) -> str:\n if is_windows():\n return f'{basename}.exe'\n else:\n return basename\n\n def get_shared_lib_name(basename: str) -> str:\n if mesonbuild.environment.detect_msys2_arch():\n return f'lib{basename}.dll'\n elif is_windows():\n return f'{basename}.dll'\n elif is_cygwin():\n return f'cyg{basename}.dll'\n elif is_osx():\n return f'lib{basename}.dylib'\n else:\n return f'lib{basename}.so'\n\n def get_static_lib_name(basename: str) -> str:\n return f'lib{basename}.a'\n\n # Base case (no targets or additional arguments)\n\n testdir = os.path.join(self.common_test_dir, '1 trivial')\n self.init(testdir)\n\n self._run([*self.meson_command, 'compile', '-C', self.builddir])\n self.assertPathExists(os.path.join(self.builddir, get_exe_name('trivialprog')))\n\n # `--clean`\n\n self._run([*self.meson_command, 'compile', '-C', self.builddir, '--clean'])\n self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))\n\n # Target specified in a project with unique names\n\n testdir = os.path.join(self.common_test_dir, '6 linkshared')\n self.init(testdir, extra_args=['--wipe'])\n # Multiple targets and target type specified\n self._run([*self.meson_command, 'compile', '-C', self.builddir, 'mylib', 'mycpplib:shared_library'])\n # Check that we have a shared lib, but not an executable, i.e. check that target actually worked\n self.assertPathExists(os.path.join(self.builddir, get_shared_lib_name('mylib')))\n self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('prog')))\n self.assertPathExists(os.path.join(self.builddir, get_shared_lib_name('mycpplib')))\n self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('cppprog')))\n\n # Target specified in a project with non unique names\n\n testdir = os.path.join(self.common_test_dir, '185 same target name')\n self.init(testdir, extra_args=['--wipe'])\n self._run([*self.meson_command, 'compile', '-C', self.builddir, './foo'])\n self.assertPathExists(os.path.join(self.builddir, get_static_lib_name('foo')))\n self._run([*self.meson_command, 'compile', '-C', self.builddir, 'sub/foo'])\n self.assertPathExists(os.path.join(self.builddir, 'sub', get_static_lib_name('foo')))\n\n # run_target\n\n testdir = os.path.join(self.common_test_dir, '51 run target')\n self.init(testdir, extra_args=['--wipe'])\n out = self._run([*self.meson_command, 'compile', '-C', self.builddir, 'py3hi'])\n self.assertIn('I am Python3.', out)\n\n # `--$BACKEND-args`\n\n testdir = os.path.join(self.common_test_dir, '1 trivial')\n if self.backend is Backend.ninja:\n self.init(testdir, extra_args=['--wipe'])\n # Dry run - should not create a program\n self._run([*self.meson_command, 'compile', '-C', self.builddir, '--ninja-args=-n'])\n self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))\n elif self.backend is Backend.vs:\n self.init(testdir, extra_args=['--wipe'])\n self._run([*self.meson_command, 'compile', '-C', self.builddir])\n # Explicitly clean the target through msbuild interface\n self._run([*self.meson_command, 'compile', '-C', self.builddir, '--vs-args=-t:{}:Clean'.format(re.sub(r'[\\%\\$\\@\\;\\.\\(\\)\\']', '_', get_exe_name('trivialprog')))])\n self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))",
"def build(working_directory=None, args=None):\n from .buildme import main\n if args is None:\n args = []\n return main(working_directory, args)",
"def build(mcu_switch=None, doxygen=False, supress_output=False):\n cmd = TOOLCHAIN_BASIC_CONFIGURE + ' '\n if mcu_switch is None:\n cmd += 'sphinx'\n elif mcu_switch == '-p' or mcu_switch == '-s' or mcu_switch == '-b':\n cmd += 'build' + ' ' + mcu_switch\n if doxygen is True:\n cmd += ' ' + 'doxygen'\n else:\n logging.error('Invalid build argument: \\'%s\\'', mcu_switch)\n sys.exit(1)\n start_process(cmd, supress_output)",
"def parse(self, **globalargs):\r\n if self.buildfile not in ParseContext._parsed:\r\n buildfile_family = tuple(self.buildfile.family())\r\n\r\n pants_context = self.default_globals(Config.load())\r\n\r\n with ParseContext.activate(self):\r\n for buildfile in buildfile_family:\r\n self._active_buildfile = buildfile\r\n # We may have traversed a sibling already, guard against re-parsing it.\r\n if buildfile not in ParseContext._parsed:\r\n ParseContext._parsed.add(buildfile)\r\n\r\n buildfile_dir = os.path.dirname(buildfile.full_path)\r\n\r\n # TODO(John Sirois): XXX imports are done here to prevent a cycles\r\n from twitter.pants.targets.jvm_binary import Bundle\r\n from twitter.pants.targets.sources import SourceRoot\r\n\r\n class RelativeBundle(Bundle):\r\n def __init__(self, mapper=None, relative_to=None):\r\n super(RelativeBundle, self).__init__(\r\n base=buildfile_dir,\r\n mapper=mapper,\r\n relative_to=relative_to)\r\n\r\n # TODO(John Sirois): This is not build-dictionary friendly - rework SourceRoot to allow\r\n # allow for doc of both register (as source_root) and source_root.here(*types).\r\n class RelativeSourceRoot(object):\r\n @staticmethod\r\n def here(*allowed_target_types):\r\n \"\"\"Registers the cwd as a source root for the given target types.\"\"\"\r\n SourceRoot.register(buildfile_dir, *allowed_target_types)\r\n\r\n def __init__(self, basedir, *allowed_target_types):\r\n SourceRoot.register(os.path.join(buildfile_dir, basedir), *allowed_target_types)\r\n\r\n eval_globals = copy.copy(pants_context)\r\n eval_globals.update({\r\n 'ROOT_DIR': buildfile.root_dir,\r\n '__file__': buildfile.full_path,\r\n 'globs': partial(Fileset.globs, root=buildfile_dir),\r\n 'rglobs': partial(Fileset.rglobs, root=buildfile_dir),\r\n 'zglobs': partial(Fileset.zglobs, root=buildfile_dir),\r\n 'source_root': RelativeSourceRoot,\r\n 'bundle': RelativeBundle\r\n })\r\n eval_globals.update(globalargs)\r\n Compatibility.exec_function(buildfile.code(), eval_globals)",
"def test_build_target(self, mock_run):\n self.args.cmake_source_project_root = '/tmp/falken_src'\n self.args.cmake_build_dir = '/tmp/build_folder'\n self.args.number_of_threads = 7\n\n runner = cmake_runner.CMakeRunner(self.installer.binary_dir,\n self.args.cmake_source_project_root,\n self.args.cmake_build_dir)\n\n build_cmake_project.build_target(runner, self.args, 'Debug')\n\n # Call cmake\n mock_run.assert_called_once_with(\n args='cmake --build /tmp/build_folder --verbose -j 7',\n check=True,\n shell=True)",
"def CreateBuilder(platform, builder_name, target,\n options, tests,\n slavebuilddir=None,\n factory_properties=None,\n annotation_script=None,\n ninja=True,\n goma=False,\n clang=False,\n clobber=False,\n run_default_swarm_tests=None,\n maxTime=8*60*60,\n slave_type='Trybot',\n build_url=None):\n if platform not in ('win32', 'win64', 'linux', 'mac', 'android', 'ios'):\n raise Exception(platform + ' is not a known os type')\n assert tests is not None or annotation_script, (\n 'Must either specify tests or use an annotation script')\n\n factory_properties = (factory_properties or {}).copy()\n run_default_swarm_tests = run_default_swarm_tests or []\n\n factory_properties.setdefault('non_default', [\n 'check_licenses',\n 'chromedriver_tests',\n 'courgette_unittests',\n 'sync_integration_tests',\n 'url_unittests',\n ])\n\n factory_properties.setdefault('gclient_env', {})\n factory_properties['gclient_env'].setdefault('GYP_DEFINES', '')\n factory_properties['gclient_env']['GYP_DEFINES'] += ' dcheck_always_on=1'\n if not 'fastbuild=0' in factory_properties['gclient_env']['GYP_DEFINES']:\n factory_properties['gclient_env']['GYP_DEFINES'] += ' fastbuild=1'\n if platform in ('win32', 'win64'):\n # http://crbug.com/157234\n factory_properties.setdefault('sharded_tests', win_sharded_tests)\n else:\n factory_properties.setdefault('sharded_tests', sharded_tests)\n\n build_tool = []\n if platform in ('win32', 'win64'):\n factory_properties['process_dumps'] = True\n factory_properties['start_crash_handler'] = True\n\n if ninja:\n factory = m_chromium_win_ninja\n factory_properties['gclient_env']['GYP_DEFINES'] += ' chromium_win_pch=0'\n else:\n factory = m_chromium_win\n\n elif platform == 'linux' and slave_type == 'TrybotTester':\n factory = m_chromium_linux_nohooks\n elif platform == 'linux':\n factory = m_chromium_linux\n elif platform == 'android':\n factory = m_chromium_android\n elif platform == 'ios':\n factory = m_chromium_ios\n elif platform == 'mac':\n if ninja:\n factory = m_chromium_mac_ninja\n else:\n factory = m_chromium_mac\n\n if ninja:\n factory_properties['gclient_env']['GYP_GENERATORS'] = 'ninja'\n build_tool.append('--build-tool=ninja')\n if goma:\n if clang:\n build_tool.append('--compiler=goma-clang')\n else:\n build_tool.append('--compiler=goma')\n if clang:\n factory_properties['gclient_env']['GYP_DEFINES'] += ' clang=1'\n\n options = build_tool + ['--clobber-post-fail'] + (options or [])\n\n compile_timeout = 3600\n if annotation_script:\n # Note new slave type AnnotatedTrybot; we don't want a compile step added\n # in gclient_factory.py.\n # TODO(maruel): Support enable_swarm_tests\n builder_factory = factory.ChromiumAnnotationFactory(\n slave_type='AnnotatedTrybot', target=target, tests=tests,\n clobber=clobber,\n options=options,\n compile_timeout=compile_timeout,\n factory_properties=factory_properties,\n annotation_script=annotation_script, maxTime=maxTime)\n else:\n builder_factory = factory.ChromiumFactory(\n slave_type=slave_type, target=target, tests=tests, options=options,\n clobber=clobber,\n compile_timeout=compile_timeout,\n factory_properties=factory_properties,\n # Forcibly disable default swarming tests until the Swarming\n # infrastructure failure rate goes down to a reasonable level.\n # Tracked as http://crbug.com/354263\n # run_default_swarm_tests=run_default_swarm_tests,\n build_url=build_url)\n builder_info = {\n 'name': builder_name,\n 'factory': builder_factory,\n }\n if slavebuilddir:\n builder_info['slavebuilddir'] = slavebuilddir\n return builder_info",
"def configure_and_build_llvm(args: str) -> None:\n ninja = get_cmd_or_die(\"ninja\")\n # Possible values are Release, Debug, RelWithDebInfo and MinSizeRel\n build_type = \"Debug\" if args.debug else \"RelWithDebInfo\"\n ninja_build_file = os.path.join(c.LLVM_BLD, \"build.ninja\")\n with pb.local.cwd(c.LLVM_BLD):\n if os.path.isfile(ninja_build_file):\n prev_build_type = get_ninja_build_type(ninja_build_file)\n run_cmake = prev_build_type != build_type\n else:\n run_cmake = True\n\n if run_cmake:\n cmake = get_cmd_or_die(\"cmake\")\n max_link_jobs = est_parallel_link_jobs()\n assertions = \"1\" if args.assertions else \"0\"\n cargs = [\"-G\", \"Ninja\", c.LLVM_SRC,\n \"-Wno-dev\",\n \"-DCMAKE_C_COMPILER=clang\",\n \"-DCMAKE_CXX_COMPILER=clang++\",\n \"-DCMAKE_C_FLAGS=-I{}/include\".format(c.CBOR_PREFIX),\n \"-DCMAKE_CXX_FLAGS=-I{}/include\".format(c.CBOR_PREFIX),\n \"-DCMAKE_EXE_LINKER_FLAGS=-L{}/lib\".format(c.CBOR_PREFIX),\n \"-DCMAKE_BUILD_TYPE=\" + build_type,\n \"-DLLVM_ENABLE_ASSERTIONS=\" + assertions,\n \"-DLLVM_TARGETS_TO_BUILD=X86\",\n \"-DLLVM_INCLUDE_UTILS=1\",\n \"-DLLVM_BUILD_UTILS=1\",\n \"-DBUILD_SHARED_LIBS=1\",\n \"-DLLVM_PARALLEL_LINK_JOBS={}\".format(max_link_jobs)]\n invoke(cmake[cargs])\n else:\n logging.debug(\"found existing ninja.build, not running cmake\")\n\n ninja_args = ['ast-exporter']\n ninja_args += ['FileCheck', 'count', 'not']\n if args.with_clang:\n ninja_args.append('clang')\n invoke(ninja, *ninja_args)",
"def build(ctx: typer.Context):\n from .tasks import build, main\n\n sys.argv = sys.argv[:1] + (ctx.args or [\"list\"])\n main(vars(build))",
"def build_test_cmake(self, test, opts=\"\", outfile=None):\n\n env = {\"AMReX_ROOT\":self.amrex_install_dir}\n\n # super-builds always need a configure now, all other builds might\n # add additional CMake config options and re-configure on existing configured\n # build directory, if additional build cmakeSetupOpts are set\n if self.isSuperbuild or test.cmakeSetupOpts != \"\":\n builddir, installdir = self.cmake_config(\n name=test.name,\n path=self.source_dir,\n configOpts=self.amrex_cmake_opts + \" \" +\n self.source_cmake_opts + \" \" +\n test.cmakeSetupOpts)\n self.source_build_dir = builddir\n\n # compile\n rc, comp_string = self.cmake_build( name = test.name,\n target = test.target,\n path = self.source_build_dir,\n opts = opts,\n env = env,\n outfile = outfile)\n\n # make returns 0 if everything was good\n if rc != 0:\n self.log.fail(\"Failed to build test \" + test.name)\n\n # if we built a binary executable, we need to rename it into a\n # GNUmake-like naming scheme so that the rest of the test logic can\n # pick it up\n elif not test.run_as_script:\n # Find location of executable\n path_to_exe = None\n\n # search by target name\n for root, dirnames, filenames in os.walk(self.source_build_dir):\n if test.target in filenames:\n path_to_exe = os.path.join(root, test.target)\n break\n\n # fallback: pick first executable in CMake output directory\n if path_to_exe is None:\n path_to_bin = None\n cmake_output_dir = \"CMAKE_RUNTIME_OUTPUT_DIRECTORY:PATH=\"\n cmake_cache = os.path.join(self.source_build_dir, \"CMakeCache.txt\")\n with open(cmake_cache, \"r\") as cc:\n for ln in cc.readlines():\n if ln.startswith(cmake_output_dir):\n path_to_bin = ln[len(cmake_output_dir):].strip()\n break\n\n if path_to_bin is None:\n if not test.customRunCmd:\n self.log.warn(\"build successful but binary directory not found\")\n rc = 1\n else:\n # Find location of executable\n for root, dirnames, filenames in os.walk(path_to_bin):\n for f in filenames:\n f_path = os.path.join(root, f)\n if os.access(f_path, os.X_OK):\n if not Path(f_path).is_symlink():\n path_to_exe = f_path\n break\n if path_to_exe is not None:\n break\n\n if path_to_exe is None:\n if not test.customRunCmd:\n self.log.warn(\"build successful but executable not found\")\n rc = 1\n else:\n # Copy and rename executable to test dir\n shutil.move(f\"{path_to_exe}\",\n f\"{self.source_dir}/{test.buildDir}/{test.name}.ex\")\n\n return comp_string, rc",
"def configure(opts):\n\n if not MESON.exists():\n err('unable to configure package; meson is not installed')\n return False\n\n prefix = opts.prefix\n\n base_locs = []\n if opts.install_type == PackageInstallType.HOST:\n base_locs.append(opts.host_dir)\n else:\n base_locs.append(opts.staging_dir)\n\n # only reference the target directory if this package is\n # aimed to use the target directory\n target_area_types = [\n PackageInstallType.STAGING_AND_TARGET,\n PackageInstallType.TARGET,\n ]\n if opts.install_type in target_area_types:\n base_locs.append(opts.target_dir)\n\n pkgconfig_locs = []\n prefix_locs = []\n for base_loc in base_locs:\n prefixed_base = base_loc + prefix\n pkgconfig_locs.append(\n os.path.join(prefixed_base, DEFAULT_LIB_DIR, 'pkgconfig'))\n prefix_locs.append(prefixed_base)\n\n # definitions\n meson_defs = {\n 'libdir': DEFAULT_LIB_DIR,\n # common paths for releng-tool sysroots\n 'cmake_prefix_path': os.pathsep.join(prefix_locs),\n 'pkg_config_path': os.pathsep.join(pkgconfig_locs),\n # do not permit downloads of dependencies by default; in theory,\n # projects could have a package definition for each dependency needed\n # for a package\n 'wrap_mode': 'nodownload',\n }\n\n if prefix:\n meson_defs['prefix'] = prefix\n\n if opts.conf_defs:\n meson_defs.update(expand(opts.conf_defs))\n\n # options\n meson_opts = {\n '--buildtype': 'debugoptimized',\n }\n if opts.conf_opts:\n meson_opts.update(expand(opts.conf_opts))\n\n # environment\n meson_env = meson_prepare_environment(opts)\n if opts.conf_env:\n meson_env.update(opts.conf_env)\n\n # argument building\n meson_args = [\n 'setup',\n ]\n meson_args.extend(prepare_definitions(meson_defs, '-D'))\n meson_args.extend(prepare_arguments(meson_opts))\n\n # provide build directory\n meson_args.append(opts.build_output_dir)\n\n # if this is a forced reconfiguration, inform meson\n if 'RELENG_RECONFIGURE' in opts.env:\n meson_args.append('--reconfigure')\n\n if not MESON.execute(meson_args, env=expand(meson_env)):\n err('failed to prepare meson project: {}', opts.name)\n return False\n\n return True",
"def test_invalid_target_option(self): # suppress(no-self-use)\n with ExpectedException(DistutilsArgError):\n cmd = GreenTestCommand(Distribution())\n cmd.target = True\n cmd.ensure_finalized()\n cmd.run()",
"def make(\n parser: ArgumentParser,\n *,\n default_targets: Strings = \"all\",\n logger_name: str = \"dynamake\",\n adapter: Optional[Callable[[Namespace], None]] = None,\n) -> None:\n default_targets = flatten(default_targets)\n\n _load_modules()\n\n parser.add_argument(\"TARGET\", nargs=\"*\", help=f'The file or target to make (default: {\" \".join(default_targets)})')\n\n parser.add_argument(\n \"--module\",\n \"-m\",\n metavar=\"MODULE\",\n action=\"append\",\n help=\"A Python module to load (containing function definitions)\",\n )\n\n Parameter.add_to_parser(parser)\n\n parser.add_argument(\n \"--list_steps\",\n \"-ls\",\n default=False,\n action=\"store_true\",\n help=\"List all the build steps and their targets, and exit.\",\n )\n\n args = parser.parse_args()\n Parameter.parse_args(args)\n\n Logger.setup(logger_name)\n\n if adapter is not None:\n adapter(args)\n\n _compute_jobs()\n\n if args.list_steps:\n _list_steps()\n else:\n _build_targets([path for path in args.TARGET if path is not None] or flatten(default_targets))",
"def test_generate_target(self, mock_run, mock_make_dirs):\n self.args.cmake_source_project_root = '/tmp/falken_src'\n self.args.cmake_build_dir = '/tmp/build_folder'\n self.args.falken_json_config_file = '/tmp/config_file.json'\n self.args.cmake_generator = 'Unix Makefiles'\n\n runner = cmake_runner.CMakeRunner(self.installer.binary_dir,\n self.args.cmake_source_project_root,\n self.args.cmake_build_dir)\n\n build_cmake_project.generate_target(runner, self.args, 'Debug')\n\n # Call cmake\n mock_run.assert_called_once_with(\n args='cmake -DFALKEN_JSON_CONFIG_FILE=/tmp/config_file.json '\n '-G \"Unix Makefiles\" -DCMAKE_BUILD_TYPE=Debug -S /tmp/falken_src '\n '-B /tmp/build_folder/Debug',\n check=True,\n shell=True)",
"def testExpandedTargets(self):\n self.all_targets = self.blade.analyze_targets()\n\n sys.stdout.flush()\n sys.stderr.flush()\n\n self.assertTrue(self.blade.get_expanded())\n self.assertTrue(self.all_targets)\n\n system_lib = ('#', 'pthread')\n proto_lib_option = (self.target_path, 'rpc_option_proto')\n proto_lib_meta = (self.target_path, 'rpc_meta_info_proto')\n cc_library_poppy = (self.target_path, 'poppy')\n cc_lib_poppy_mock = (self.target_path, 'poppy_mock')\n static_resource = (self.target_path, 'static_resource')\n cc_test = (self.target_path, 'rpc_channel_test')\n swig_library = (self.target_path, 'poppy_client')\n lex_yacc_library = (self.target_path, 'parser')\n cc_plugin = (self.target_path, 'meter_business')\n gen_rule = (self.target_path, 'search_service_echo')\n java_jar = (os.path.join(self.target_path, 'java'),\n 'poppy_java_client')\n cc_binary = (self.target_path, 'echoserver')\n cc_lib_prebuild = (self.target_path, 'poppy_swig_wrap')\n java_jar_prebuild = (os.path.join(self.target_path, 'java', 'lib'),\n 'protobuf-java')\n\n self.assertTrue(cc_library_poppy in self.all_targets.keys())\n\n poppy_deps = self.all_targets.get(cc_library_poppy, {}).get('deps', [])\n poppy_mock_deps = self.all_targets.get(cc_lib_poppy_mock, {}).get('deps', [])\n self.assertTrue(poppy_deps)\n self.assertTrue(poppy_mock_deps)\n\n self.assertTrue(proto_lib_option in poppy_deps)\n self.assertTrue(proto_lib_meta in poppy_deps)\n self.assertTrue(static_resource in poppy_deps)\n self.assertTrue(system_lib in poppy_deps)\n self.assertTrue(cc_library_poppy in poppy_mock_deps)\n self.assertTrue(proto_lib_meta in poppy_mock_deps)\n\n poppy_client_deps = self.all_targets.get(swig_library, {}).get('deps', [])\n self.assertTrue(poppy_client_deps)\n self.assertTrue(cc_library_poppy in poppy_client_deps)\n self.assertTrue(cc_lib_prebuild in poppy_client_deps)\n\n self.assertTrue(java_jar in self.all_targets.keys())\n java_jar_deps = self.all_targets.get(java_jar, {}).get('deps', [])\n self.assertTrue(java_jar_deps)\n\n self.assertTrue(proto_lib_option in java_jar_deps)\n self.assertTrue(proto_lib_meta in java_jar_deps)\n self.assertTrue(java_jar_prebuild in java_jar_deps)\n self.assertTrue(cc_library_poppy not in java_jar_deps)",
"def gen(\n file: str,\n infer: bool = typer.Option(\n True, help=\"Whether to run type inference on code examples.\"\n ),\n exec: bool = typer.Option(\n False, help=\"Whether to attempt to execute doctring code.\"\n ),\n experimental: bool = typer.Option(False, help=\"Use experimental Ts parsing\"),\n debug: bool = False,\n dummy_progress: bool = typer.Option(False, help=\"Disable rich progress bar\"),\n):\n _intro()\n from papyri.gen import gen_main\n\n gen_main(\n infer=infer,\n exec_=exec,\n target_file=file,\n experimental=experimental,\n debug=debug,\n dummy_progress=dummy_progress,\n )",
"def test_execute_with_single_file_builds(self):\n review, review_file = self.run_tool_execute(\n checkout_dir=self.checkout_dir,\n filename='Hello.java',\n file_contents=(\n b'class Hello {\\n'\n b' int test() {\\n'\n b' String s = null;\\n'\n b' return s.length();\\n'\n b' }\\n'\n b'}\\n'\n ),\n tool_settings={\n 'build_type': 'javac',\n })\n\n self.assertEqual(review.comments, [\n {\n 'filediff_id': review_file.id,\n 'first_line': 4,\n 'issue_opened': True,\n 'num_lines': 1,\n 'rich_text': True,\n 'text': (\n 'object `s` last assigned on line 3 could be null and '\n 'is dereferenced at line 4.\\n'\n '\\n'\n 'Severity: ERROR\\n'\n 'Error code: Null Dereference'\n ),\n },\n ])\n self.assertEqual(review.general_comments, [])\n\n self.assertSpyCalledWith(\n execute,\n [\n self.tool_exe_path,\n 'run',\n '--no-progress-bar',\n '--',\n 'javac',\n 'Hello.java',\n ],\n ignore_errors=True,\n with_errors=True)",
"def h(options, buildout, version, opts):\n cwd = os.getcwd()\n md = options['compile-directory']\n c = os.path.join(md, 'configure.py')\n os.chdir(md)\n p = buildout['p'][version]\n opts = ' '.join(opts.split())\n cmd = [p, c, opts]\n print \"Running: %s\" % ' '.join(cmd)\n ret = os.system(' '.join(cmd))\n if ret > 0: raise Exception,('Cannot confiure')\n os.chdir(cwd)",
"def start_build(args):\n\n path = os.path.join(SCRATCH_DIR, args.project)\n \n # Set up virtual environment\n print(\"Setting up virtual python environment in %s\" % path)\n venv.create(path, clear=True, symlinks=True, with_pip=False)\n\n # Pull in repository data\n sourcepath = os.path.join(path, 'source')\n print(\"Cloning from git repository %s (branch: %s)\" % (args.source, args.sourcebranch))\n subprocess.run((GIT, 'clone', '--branch', args.sourcebranch, '--depth=1', '--no-single-branch', args.source, sourcepath),\n check=True)\n\n # Activate venv and install pips if needed. For dev/test, we will\n # assume that all requirements are available at the system level,\n # rather than needing to install them into the venv.\n ### note: this makes it difficult to test requirements.txt, but it\n ### will do for now. Debugging requirements.txt failures on the\n ### production buildbot is not difficult to correct.\n if IS_PRODUCTION and os.path.exists(os.path.join(sourcepath, 'requirements.txt')):\n print(\"Installing pips\")\n subprocess.run(('/bin/bash', '-c',\n 'source bin/activate; pip3 install -r source/requirements.txt'),\n cwd=path, check=True)\n else:\n print(\"On dev/test requirements.txt is not processed, skipping pip\")\n\n # Where are our tools?\n if IS_PRODUCTION:\n tool_dir = PELICANFILES\n else:\n tool_dir = THIS_DIR\n print(\"TOOLS:\", tool_dir)\n\n pelconf_yaml = os.path.join(sourcepath, AUTO_SETTINGS_YAML)\n if os.path.exists(pelconf_yaml):\n settings_path = os.path.join(path, AUTO_SETTINGS)\n if IS_PRODUCTION:\n builtin_plugins = PLUGINS\n else:\n builtin_plugins = os.path.join(tool_dir, os.pardir, 'plugins')\n generate_settings(pelconf_yaml, settings_path, [ builtin_plugins ], sourcepath)\n else:\n # The default name, but we'll pass it explicitly.\n settings_path = os.path.join(sourcepath, 'pelicanconf.py')\n\n # Set currently supported plugins\n ### this needs to be removed, as it is too indeterminate.\n with open(settings_path, 'a') as f:\n f.write(\"\"\"\ntry:\n PLUGINS += ['toc']\nexcept:\n PLUGINS = ['toc', 'gfm']\n\"\"\")\n\n # Call pelican\n buildpath = os.path.join(path, 'build/output')\n os.makedirs(buildpath, exist_ok = True)\n buildcmd = ('/bin/bash', '-c',\n 'source bin/activate; cd source && '\n ### note: adding --debug can be handy\n f'(pelican content --settings {settings_path} -o {buildpath})',\n )\n print(\"Building web site with:\", buildcmd)\n env = os.environ.copy()\n env['LIBCMARKDIR'] = LIBCMARKDIR\n subprocess.run(buildcmd, cwd=path, check=True, env=env)\n\n count = len(glob.glob(f'{buildpath}/**/*.html', recursive=True))\n print(f\"{count} html files.\")\n if args.count > 0 and args.count > count:\n print(\"Not enough html pages in the Web Site. 
Minimum %s > %s found in the Web Site.\" % (args.count, count))\n sys.exit(4)\n\n # Done for now\n print(\"Web site successfully generated!\")\n\n # It is much easier to do all the below, if we chdir()\n os.chdir(sourcepath)\n\n # Copy to result branch\n print(\"Copying web site to branch:\", args.outputbranch)\n\n try:\n subprocess.run((GIT, 'rev-parse', '--verify', \"origin/%s\" % args.outputbranch),\n check=True)\n print(\"- Doing fresh checkout of branch %s\" % args.outputbranch)\n subprocess.run((GIT, 'checkout', args.outputbranch, '-f'), check=True)\n subprocess.run((GIT, 'pull'), check=True)\n except:\n print(\"- Branch %s does not exist (yet), creating it...\" % args.outputbranch)\n # If .asf.yaml exists, which it should, make a copy of it in memory for later\n asfyml = os.path.join(sourcepath, '.asf.yaml')\n myyaml = None\n if os.path.exists(asfyml):\n myyaml = open(asfyml).read()\n subprocess.run((GIT, 'checkout', '--orphan', args.outputbranch), check=True)\n subprocess.run((GIT, 'rm', '-rf', '.'), check=True)\n # Add .asf.yaml back in if we found it.\n if myyaml:\n open(asfyml, \"w\").write(myyaml)\n subprocess.run((GIT, 'add', '.asf.yaml'), check=True)\n\n print(\"- Adding new content to branch\")\n # RM output dir if it already exists\n outputdir = os.path.join(sourcepath, 'output')\n if os.path.isdir(outputdir):\n print(\"Removing existing output dir %s\" % outputdir)\n shutil.rmtree(outputdir)\n shutil.move(buildpath, outputdir)\n subprocess.run((GIT, 'add', 'output/'), check=True)\n\n # Check if there are any changes.\n cp = subprocess.run((GIT, 'diff', '--cached', '--quiet'))\n if cp.returncode == 0:\n # There were no differences reported.\n print('Nothing new to commit. Ignoring this build.')\n else:\n print(\"- Committing to %s\" % args.source)\n subprocess.run((GIT, 'commit', '-m', 'Automatic Site Publish by Buildbot'), check=True)\n\n # If we're not in production, then avoid pushing changes.\n if IS_PRODUCTION:\n print('- Pushing changes, for publishing')\n subprocess.run((GIT, 'push', args.source, args.outputbranch), check=True)\n\n print('Success. Done.')\n # for dev/test provide viewing instructions\n if not IS_PRODUCTION:\n if args.listen:\n try:\n subprocess.run(('pelican','-l'), check=True)\n except KeyboardInterrupt:\n pass\n else:\n print(f'To test output:\\ncd {sourcepath}; pelican -l')"
] | [
"0.6142762",
"0.5641256",
"0.55775917",
"0.5569497",
"0.53769106",
"0.5277803",
"0.52763253",
"0.5215424",
"0.5061313",
"0.505405",
"0.5053484",
"0.49617815",
"0.49577522",
"0.49448642",
"0.49349135",
"0.4898354",
"0.489373",
"0.48929888",
"0.48899007",
"0.48863566",
"0.4839277",
"0.4834418",
"0.48139337",
"0.48015872",
"0.47732466",
"0.47671786",
"0.4727497",
"0.47251776",
"0.47151375",
"0.46737835"
] | 0.76094955 | 0 |
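The build row above is an abstract hook; a generator-specific subclass of the harness supplies the real implementation. The sketch below is hypothetical (the class name, the plain 'make' build tool, and the argument handling are assumptions) and only shows one plausible way such a subclass could honour the None / DEFAULT / ALL contract described in the docstring.

    # Hypothetical sketch of a make-backed subclass of the TestGypBase
    # harness this row belongs to; not taken from the dataset.
    class TestGypMakeSketch(TestGypBase):
        build_tool = 'make'

        def build(self, gyp_file, target=None, **kw):
            # gyp_file is unused here; make is assumed to run in the
            # directory where the generated Makefile lives.
            arguments = kw.pop('arguments', [])[:]
            if target not in (None, self.DEFAULT):
                # ALL or a concrete target name becomes an explicit make
                # goal; DEFAULT leaves make to pick its own default goal.
                arguments.append(target)
            kw['arguments'] = arguments
            return self.run(program=self.build_tool, **kw)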
Returns the base name of the specified file name, of the specified type. A bare=True keyword argument specifies that prefixes and suffixes shouldn't be applied. | def built_file_basename(self, name, type=None, **kw):
if not kw.get('bare'):
if type == self.EXECUTABLE:
name = name + self._exe
elif type == self.STATIC_LIB:
name = self.lib_ + name + self._lib
elif type == self.SHARED_LIB:
name = self.dll_ + name + self._dll
return name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_fullname(basename, _type=None):\n return '{}.{}'.format(basename, extensions.get(_type, None))",
"def base_name(self):\n return \".\".join(posixpath.basename(self.file_name).split(\".\")[:-1])",
"def basefname(fname):\n return os.path.splitext(fname.split(\"\\\\\")[-1])[0]",
"def base_name(path):\n return os.path.basename(path)",
"def base_filename(self):\n return self.filename.split('.')[0]",
"def purebasename(self):\n return self.namebase",
"def __get_file_type_identifier(file=None, namespace_divider=None):\n return str(file.split(namespace_divider)[-1].replace(\"_\", \".\"))",
"def name_sans_ext(self) -> str:\n return os.path.splitext(self.path)[0]",
"def hap_filename(self, filetype):\n if filetype == 'events':\n return self.folder('events') / 'run_{:07d}_{}_eventlist.fits'.format(self.obs_id, self.hap_config)\n # return self.folder('events') / 'events_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'aeff':\n return self.folder('irfs') / 'aeff_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'edisp':\n return self.folder('irfs') / 'edisp_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'psf_3gauss':\n return self.folder('irfs') / 'psf_3gauss_{:06d}.fits.gz'.format(self.obs_id)\n else:\n raise ValueError('Invalid {} {}'.format(filetype))",
"def hap_filename(self, filetype):\n if filetype == 'events':\n return self.folder('events') / 'run_{:07d}_{}_eventlist.fits'.format(self.obs_id, self.hap_config)\n # return self.folder('events') / 'events_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'aeff':\n return self.folder('irfs') / 'aeff_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'edisp':\n return self.folder('irfs') / 'edisp_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'psf_3gauss':\n return self.folder('irfs') / 'psf_3gauss_{:06d}.fits.gz'.format(self.obs_id)\n else:\n raise ValueError('Invalid {} {}'.format(filetype))",
"def hap_filename(self, filetype):\n if filetype == 'events':\n return self.folder('events') / 'run_{:07d}_{}_eventlist.fits'.format(self.obs_id, self.hap_config)\n # return self.folder('events') / 'events_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'aeff':\n return self.folder('irfs') / 'aeff_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'edisp':\n return self.folder('irfs') / 'edisp_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'psf_3gauss':\n return self.folder('irfs') / 'psf_3gauss_{:06d}.fits.gz'.format(self.obs_id)\n else:\n raise ValueError('Invalid {} {}'.format(filetype))",
"def getBaseName(filepath):\n return os.path.basename(filepath)",
"def get_base_name(path):\n return os.path.basename(path).split('.')[0]",
"def mainTypeName(type_name):\n return mainRender(type_name)",
"def to_full_name(typ: type) -> str:\n return f\"{typ.__module__}.{typ.__qualname__}\"",
"def get_override_name(reference_file_type):\n if not re.match('^[_A-Za-z][_A-Za-z0-9]*$', reference_file_type):\n raise ValueError(\n \"{0!r} is not a valid reference file type name. \"\n \"It must be an identifier\".format(reference_file_type))\n return \"override_{0}\".format(reference_file_type)",
"def purebasename(self):\n return self._getbyspec(\"purebasename\")[0]",
"def genBaseName(fileName):\n return fileName.split(\"_\")[0].split(\".\")[0]",
"def out_filename(self, filetype, format='old', dir=Location.OUT_DIR):\n filename = self.filename(filetype=filetype, format=format)\n # return Path(dir) / filename\n return filename",
"def get_name(name, file: str) -> str:\n return os.path.basename(file) if name == \"__main__\" else name",
"def out_filename(self, filetype, format='old', dir=Location.OUT_DIR):\n filename = self.filename(filetype=filetype, format=format)\n #return Path(dir) / filename\n return filename",
"def basename(self, t):\n t = self.canon(t)\n if isinstance(t, basestring):\n return t\n elif isinstance(t, Sequence):\n t0 = t\n while not isinstance(t0, basestring):\n t0 = t0[0]\n return t0\n else:\n _raise_type_error(t)",
"def GetBase(self, fname, suffix):\n wds = fname.split('/')\n suff = suffix.replace('.BRIK','')\n suff = suff.replace('.HEAD','')\n if len(wds) > 1:\n return '.../%s' % '/'.join(wds[-2:]) + suff\n else:\n return fname + suff",
"def just_the_name(path):\n return os.path.splitext(os.path.basename(path))[0]",
"def name(self):\n #type: ()->Text\n return (\n os.path.splitext(os.path.basename(self.fileName))[0])",
"def out_filename(self, filetype, dir, format='old'):\n filename = self.filename(filetype=filetype, format=format)\n return Path(dir) / filename",
"def get_filename(file_path):\n\n # Get rid of directories and etc\n just_file = os.path.basename(file_path)\n\n # Now we return just the base name\n return os.path.splitext(just_file)[0]",
"def get_base_name(obj):\n return obj.__qualname__.split('.')[0]",
"def get_base_name(file_name, num_banks):\n datatypeutility.check_string_variable('Calibration file name', file_name)\n\n base_name = os.path.basename(file_name).split('.')[0] + '{0}banks'.format(num_banks)\n\n return base_name",
"def _get_disk_name(disk_type, instance, short=False):\n prefix = '%s_' % (disk_type[0] if short else disk_type)\n base = ('%s_%s' % (instance.name[:8], instance.uuid[:4]) if short\n else instance.name)\n return pvm_util.sanitize_file_name_for_api(\n base, prefix=prefix, max_len=pvm_const.MaxLen.VDISK_NAME if short\n else pvm_const.MaxLen.FILENAME_DEFAULT)"
] | [
"0.7135192",
"0.6425883",
"0.63058126",
"0.62789595",
"0.61583006",
"0.615667",
"0.61490166",
"0.60626936",
"0.59788233",
"0.59788233",
"0.59788233",
"0.59684837",
"0.5965139",
"0.59572744",
"0.5927984",
"0.59277415",
"0.5919521",
"0.59044087",
"0.5902169",
"0.5895096",
"0.5872775",
"0.5853605",
"0.5796187",
"0.5770605",
"0.57644576",
"0.57320887",
"0.56912565",
"0.56639403",
"0.5643059",
"0.5627267"
] | 0.76263374 | 0 |
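The prefix/suffix logic in built_file_basename above is easiest to read with concrete values filled in. The expectations below assume POSIX-style fields (_exe = '', lib_ = dll_ = 'lib', _lib = '.a', _dll = '.so'); on Windows the same calls would instead produce .exe, .lib and .dll names.

    # Illustrative expectations only, under the POSIX assumptions above;
    # 'test' is a TestGyp harness instance as in the earlier sketch.
    assert test.built_file_basename('hello', test.EXECUTABLE) == 'hello'
    assert test.built_file_basename('hello', test.STATIC_LIB) == 'libhello.a'
    assert test.built_file_basename('hello', test.SHARED_LIB) == 'libhello.so'
    assert test.built_file_basename('hello', test.SHARED_LIB, bare=True) == 'hello'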
Runs an executable program built from a gypgenerated configuration. The specified name should be independent of any particular generator. Subclasses should find the output executable in the appropriate output build directory, tack on any necessary executable suffix, etc. | def run_built_executable(self, name, *args, **kw):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build(self, progname):\n self.run_programm(self.COMPILED[self.progtype][0], \"%s %s %s\" %\\\n (progname, self.COMPILED[self.progtype][1], COMPILED_FILENAME ))\n\n compiled_progname=COMPILED_FILENAME\n return compiled_progname",
"def exe(self, name):\n\n return name",
"def main(args: List[Union[str, bytes]] = sys.argv,):\n\tprogram_name, *args = args\n\targs = decode_raw_args(args, str)\n\n\tgen = Generator(*args)\n\tgen.generate_data()\n\tgen.print_return_list()",
"def main():\n\n parser = argparse.ArgumentParser(prog='Build', description='Python script for building apps for Pyinstaller')\n # Flag arguments\n parser.add_argument('--version', action='version', version='%(prog)s 1.0.0')\n parser.add_argument('--clean', '-c', action='store_true', default=False, help='Clean build before re-building.')\n parser.add_argument('--portable', '-p', action='store_true', default=False, help='Build with portable python (windows)')\n parser.add_argument('name', default=None, help='Name of app')\n inputs = parser.parse_args()\n if _PLATFORM == \"osx\":\n args = Args(\"Rummage.py\", inputs.name, True, inputs.clean, \".app\", abspath(\"_icons/rummage.icns\"))\n elif _PLATFORM == \"windows\":\n args = Args(\"Rummage.py\", inputs.name, True, inputs.clean, \".exe\", abspath(\"_icons\\\\rummage.ico\"), inputs.portable)\n else:\n args = Args(\n \"Rummage.py\", inputs.name, True, inputs.clean, \"\",\n imports=[\n \"gobject\", \"glib\", \"glib._glib\", \"glib.option\", \"object.constants\",\n \"gobject._gobject\", \"gobject.propertyhelper\", \"gtk\", \"gtk._gtk\"\n ]\n )\n\n # Parse options\n build_params = BuildParams()\n err = parse_options(args, build_params)\n\n # Build executable\n if not err:\n err = build(build_params)\n\n return err",
"def Run(name, *args, **kwargs):\n try:\n binary = kwargs.get('binary')\n env = None\n if tool_search_paths:\n env = dict(os.environ)\n env['PATH'] = ':'.join(tool_search_paths) + ':' + env['PATH']\n all_args = (name,) + args\n result = command.RunPipe([all_args], capture=True, capture_stderr=True,\n env=env, raise_on_error=False, binary=binary)\n if result.return_code:\n raise Exception(\"Error %d running '%s': %s\" %\n (result.return_code,' '.join(all_args),\n result.stderr))\n return result.stdout\n except:\n if env and not PathHasFile(env['PATH'], name):\n msg = \"Please install tool '%s'\" % name\n package = packages.get(name)\n if package:\n msg += \" (e.g. from package '%s')\" % package\n raise ValueError(msg)\n raise",
"def exe(self, name):\n\n if not name.endswith('.exe'):\n name += '.exe'\n\n return name",
"def find_program(name):\r\n return name",
"def main():\n args = get_args()\n prg = args.program\n\n if not os.path.isfile(prg):\n die('Missing expected program \"{}\"'.format(prg))\n\n for name in args.name:\n cmd = '{} \"{}\"'.format(prg, name)\n rv, out = getstatusoutput(cmd)\n if rv != 0:\n warn('Failed to run: {}\\nError: {}'.format(cmd, out))\n else:\n print('Success: \"{}\"'.format(out))\n\n print('Done.')",
"def GenerateExe(config):\n aName = AssemblyName(System.IO.FileInfo(config.output).Name)\n\n if config.file_version is not None:\n aName.Version = Version(config.file_version)\n\n ab = PythonOps.DefineDynamicAssembly(aName, AssemblyBuilderAccess.RunAndSave)\n ab.DefineVersionInfoResource(config.file_info_product,\n config.file_info_product_version,\n config.file_info_company,\n config.file_info_copyright,\n config.file_info_trademark)\n\n mb = ab.DefineDynamicModule(config.output, aName.Name + \".exe\")\n tb = mb.DefineType(\"PythonMain\", TypeAttributes.Public)\n assemblyResolveMethod = None\n # 3/19/2018 # Copyright 2018 - hdunn. Apache 2.0 licensed. Modified from original.\n # --- handle dll and StdLib embed -----------\n dllNames = []\n if config.embed and config.dlls: #not for standalone ?\n config.dlls = list(set(config.dlls))\n opath = System.IO.Path.GetDirectoryName(config.output)\n for dll in config.dlls:\n dpath = System.IO.Path.GetFileName(dll)\n dllNames.append(dpath)\n lpath = System.IO.Path.Combine(opath,dpath)\n if '.dll' not in dll:\n try:\n print 'Adding to Ref: ' + lpath\n clr.AddReferenceToFileAndPath(lpath)\n except Exception as exa:\n msg = ('File | Filepath: \\n {}: ' +\n 'not a DLL file or does not exist.').format(dll)\n raise IOError(str(exa) + '\\n' + msg)\n\n elif '.dll' in dll:\n try:\n print 'Adding .dll to Ref: ' + dll\n clr.AddReferenceToFileAndPath(dll)\n except Exception as exb:\n msg = ('File | Filepath: \\n {}: ' +\n 'not a DLL file or does not exist.').format(dll)\n raise IOError(str(exb) + '\\n' + msg)\n \n outdir = System.IO.Path.GetDirectoryName(config.output)\n if config.standalone or config.libembed or config.embed:\n StdLibOutPath = System.IO.Path.Combine(outdir,'StdLib.dll')\n clrHasStdLib = False\n for clrRef in clr.References:\n if 'StdLib' in str(clrRef):\n clrHasStdLib = True\n # error if already so try\n if System.IO.File.Exists(StdLibOutPath) and not clrHasStdLib:\n try:\n clr.AddReferenceToFileAndPath(StdLibOutPath)\n clrHasStdLib = True\n except(System.IO.IOException, System.IO.FileLoadException) as exd:\n if exd.GetType()==System.IO.IOException:\n msg = ('File | Filepath:\\nStdLib.dll or {}:\\n ' +\n 'Not a DLL file or does not exist.') \\\n .format(config.output + '.dll')\n print msg\n elif exd.GetType()==System.IO.FileLoadException:\n msg = ('File | Filepath: {}\\n' +\n 'Not a clr Loadable file.') \\\n .format(config.output + '.dll')\n print msg\n\n if not clrHasStdLib:\n\n try:\n clr.AddReference(\"StdLib.dll\")\n except (System.IO.IOException, System.IO.FileLoadException) as ex:\n if ex.GetType()==System.IO.IOException:\n msg = ('File | Filepath:\\nStdLib.dll or {}:\\n ' +\n 'Not a DLL file or does not exist.') \\\n .format(config.output + '.dll')\n print msg\n elif ex.GetType()==System.IO.FileLoadException:\n msg = ('File | Filepath: {}\\n' +\n 'Not a clr Loadable file.') \\\n .format(config.output + '.dll')\n print msg\n print\n print 'Trying to finish .... - check compiled function, paths and access'\n print\n\n config.embed = True\n\n # 3/19/2018,4/3/2018 # Copyright 2018 - hdunn. Apache 2.0 licensed. 
Modified from original.\n # ----- handle dll and StdLib embed -----------\n embedDict = {}\n for a in System.AppDomain.CurrentDomain.GetAssemblies():\n n = AssemblyName(a.FullName)\n\n if not a.IsDynamic and not a.EntryPoint:\n if config.standalone:\n if n.Name.StartsWith(\"IronPython\") or \\\n n.Name in ['Microsoft.Dynamic', 'Microsoft.Scripting']:\n embedDict[n] = a\n\n # hdunn 3/15/2018 any(n.Name in dlln for dlln in dllNames) or \\ above\n if any(n.Name in dlln for dlln in dllNames):\n embedDict[n] = a\n if config.libembed and 'StdLib' in n.Name:\n embedDict[n] = a\n\n for name, assem in embedDict.iteritems():\n print \"\\tEmbedding %s %s\" % (name.Name, str(name.Version))\n print ' path:\\n ' + str(assem.Location)\n if assem.Location:\n print 'exists' + str(System.IO.File.Exists(assem.Location))\n if System.IO.File.Exists(assem.Location):\n f = System.IO.FileStream(assem.Location, System.IO.FileMode.Open, System.IO.FileAccess.Read) \n mb.DefineManifestResource(\"Dll.\" + name.Name, f, ResourceAttributes.Public)\n\n # we currently do no error checking on what is passed in to the AssemblyResolve event handler\n assemblyResolveMethod = tb.DefineMethod(\"AssemblyResolve\", MethodAttributes.Public | MethodAttributes.Static, clr.GetClrType(Assembly), (clr.GetClrType(System.Object), clr.GetClrType(System.ResolveEventArgs)))\n gen = assemblyResolveMethod.GetILGenerator()\n s = gen.DeclareLocal(clr.GetClrType(System.IO.Stream)) # resource stream\n gen.Emit(OpCodes.Ldnull)\n gen.Emit(OpCodes.Stloc, s)\n d = gen.DeclareLocal(clr.GetClrType(System.Array[System.Byte])) # data buffer\n gen.EmitCall(OpCodes.Call, clr.GetClrType(Assembly).GetMethod(\"GetEntryAssembly\"), ())\n gen.Emit(OpCodes.Ldstr, \"Dll.\")\n gen.Emit(OpCodes.Ldarg_1) # The event args\n gen.EmitCall(OpCodes.Callvirt, clr.GetClrType(System.ResolveEventArgs).GetMethod(\"get_Name\"), ())\n gen.Emit(OpCodes.Newobj, clr.GetClrType(AssemblyName).GetConstructor((str, )))\n gen.EmitCall(OpCodes.Call, clr.GetClrType(AssemblyName).GetMethod(\"get_Name\"), ())\n gen.EmitCall(OpCodes.Call, clr.GetClrType(str).GetMethod(\"Concat\", (str, str)), ())\n gen.EmitCall(OpCodes.Callvirt, clr.GetClrType(Assembly).GetMethod(\"GetManifestResourceStream\", (str, )), ())\n gen.Emit(OpCodes.Stloc, s)\n gen.Emit(OpCodes.Ldloc, s)\n gen.EmitCall(OpCodes.Callvirt, clr.GetClrType(System.IO.Stream).GetMethod(\"get_Length\"), ())\n gen.Emit(OpCodes.Newarr, clr.GetClrType(System.Byte))\n gen.Emit(OpCodes.Stloc, d)\n gen.Emit(OpCodes.Ldloc, s)\n gen.Emit(OpCodes.Ldloc, d)\n gen.Emit(OpCodes.Ldc_I4_0)\n gen.Emit(OpCodes.Ldloc, s)\n gen.EmitCall(OpCodes.Callvirt, clr.GetClrType(System.IO.Stream).GetMethod(\"get_Length\"), ())\n gen.Emit(OpCodes.Conv_I4)\n gen.EmitCall(OpCodes.Callvirt, clr.GetClrType(System.IO.Stream).GetMethod(\"Read\", (clr.GetClrType(System.Array[System.Byte]), int, int)), ())\n gen.Emit(OpCodes.Pop)\n gen.Emit(OpCodes.Ldloc, d)\n gen.EmitCall(OpCodes.Call, clr.GetClrType(Assembly).GetMethod(\"Load\", (clr.GetClrType(System.Array[System.Byte]), )), ())\n gen.Emit(OpCodes.Ret)\n\n # generate a static constructor to assign the AssemblyResolve handler (otherwise it tries to use IronPython before it adds the handler)\n # the other way of handling this would be to move the call to InitializeModule into a separate method.\n staticConstructor = tb.DefineConstructor(MethodAttributes.Public | MethodAttributes.Static, CallingConventions.Standard, System.Type.EmptyTypes)\n gen = staticConstructor.GetILGenerator()\n gen.EmitCall(OpCodes.Call, 
clr.GetClrType(System.AppDomain).GetMethod(\"get_CurrentDomain\"), ())\n gen.Emit(OpCodes.Ldnull)\n gen.Emit(OpCodes.Ldftn, assemblyResolveMethod)\n gen.Emit(OpCodes.Newobj, clr.GetClrType(System.ResolveEventHandler).GetConstructor((clr.GetClrType(System.Object), clr.GetClrType(System.IntPtr))))\n gen.EmitCall(OpCodes.Callvirt, clr.GetClrType(System.AppDomain).GetMethod(\"add_AssemblyResolve\"), ())\n gen.Emit(OpCodes.Ret)\n\n mainMethod = tb.DefineMethod(\"Main\", MethodAttributes.Public | MethodAttributes.Static, int, ())\n if config.target == System.Reflection.Emit.PEFileKinds.WindowApplication and config.mta:\n mainMethod.SetCustomAttribute(clr.GetClrType(System.MTAThreadAttribute).GetConstructor(()), System.Array[System.Byte](()))\n elif config.target == System.Reflection.Emit.PEFileKinds.WindowApplication:\n mainMethod.SetCustomAttribute(clr.GetClrType(System.STAThreadAttribute).GetConstructor(()), System.Array[System.Byte](()))\n\n gen = mainMethod.GetILGenerator()\n\n # get the ScriptCode assembly...\n if config.embed:\n\n # put the generated DLL into the resources for the stub exe\n w = mb.DefineResource(\"IPDll.resources\", \"Embedded IronPython Generated DLL\")\n # print 'IPDLL NAME: ' + 'IPDLL.' + config.output\n # 4/4/2018 Copyright 2018 - hdunn. Apache 2.0 licensed. Modified from original.----- IPDLL NAME\n strPathRefIPDll = System.IO.DirectoryInfo(config.output).Name\n #--- 'Changed to: ' + \"IPDll.\" + strPathRefIPDll\n # comment out System.IO.File.Exists(config.output + \".dll\"))\n # w.AddResource(\"IPDll.\" + config.output, System.IO.File.ReadAllBytes(config.output + \".IPDLL\"))\n w.AddResource(\"IPDll.\" + strPathRefIPDll, System.IO.File.ReadAllBytes(config.output + \".IPDLL\"))\n #--------------------\n # generate code to load the resource\n gen.Emit(OpCodes.Ldstr, \"IPDll\")\n gen.EmitCall(OpCodes.Call, clr.GetClrType(Assembly).GetMethod(\"GetEntryAssembly\"), ())\n gen.Emit(OpCodes.Newobj, clr.GetClrType(System.Resources.ResourceManager).GetConstructor((str, clr.GetClrType(Assembly))))\n # ---- hdunn dido --------\n gen.Emit(OpCodes.Ldstr, \"IPDll.\" + strPathRefIPDll)#strPathRefIPDll)#config.output 4/4\n # ------------------\n gen.EmitCall(OpCodes.Call, clr.GetClrType(System.Resources.ResourceManager).GetMethod(\"GetObject\", (str, )), ())\n gen.EmitCall(OpCodes.Call, clr.GetClrType(System.Reflection.Assembly).GetMethod(\"Load\", (clr.GetClrType(System.Array[System.Byte]), )), ())\n if config.verbose: print 'Base embed... completed {}'.format(config.output + \".dll\")\n\n else:\n\n if config.verbose: print 'No embed'\n # variables for saving original working directory und return code of script\n wdSave = gen.DeclareLocal(str)\n\n # save current working directory\n gen.EmitCall(OpCodes.Call, clr.GetClrType(System.Environment).GetMethod(\"get_CurrentDirectory\"), ())\n gen.Emit(OpCodes.Stloc, wdSave)\n gen.EmitCall(OpCodes.Call, clr.GetClrType(Assembly).GetMethod(\"GetEntryAssembly\"), ())\n gen.EmitCall(OpCodes.Callvirt, clr.GetClrType(Assembly).GetMethod(\"get_Location\"), ())\n gen.Emit(OpCodes.Newobj, clr.GetClrType(System.IO.FileInfo).GetConstructor((str, )))\n gen.EmitCall(OpCodes.Call, clr.GetClrType(System.IO.FileInfo).GetMethod(\"get_Directory\"), ())\n gen.EmitCall(OpCodes.Call, clr.GetClrType(System.IO.DirectoryInfo).GetMethod(\"get_FullName\"), ())\n gen.EmitCall(OpCodes.Call, clr.GetClrType(System.Environment).GetMethod(\"set_CurrentDirectory\"), ())\n # 4.11.2018 Copyright 2018 - hdunn. Apache 2.0 licensed. 
Modified from original.\n strPathRefDll = System.IO.DirectoryInfo(config.output).Name + '.dll'\n gen.Emit(OpCodes.Ldstr, strPathRefDll)\n gen.EmitCall(OpCodes.Call, clr.GetClrType(System.IO.Path).GetMethod(\"GetFullPath\", (clr.GetClrType(str), )), ())\n # result of GetFullPath stays on the stack during the restore of the\n # original working directory\n # restore original working directory\n gen.Emit(OpCodes.Ldloc, wdSave)\n gen.EmitCall(OpCodes.Call, clr.GetClrType(System.Environment).GetMethod(\"set_CurrentDirectory\"), ())\n\n # for the LoadFile() call, the full path of the assembly is still is on the stack\n # as the result from the call to GetFullPath()\n gen.EmitCall(OpCodes.Call, clr.GetClrType(System.Reflection.Assembly).GetMethod(\"LoadFile\", (clr.GetClrType(str), )), ())\n\n # emit module name\n if config.verbose: print 'emit main ... '\n gen.Emit(OpCodes.Ldstr, \"__main__\") # main module name\n gen.Emit(OpCodes.Ldnull) # no references\n gen.Emit(OpCodes.Ldc_I4_0) # don't ignore environment variables for engine startup\n\n # call InitializeModule\n # (this will also run the script)\n # -------------------------------------\n # 3.10.2018 Copyright 2018 - hdunn. Apache 2.0 licensed. Modified from original.\n Init_Long = None\n for mi in clr.GetClrType(PythonOps).GetMethods():\n if \"InitializeModuleEx\" in mi.Name and len(mi.GetParameters()) == 4:\n Init_Long = mi\n gen.EmitCall(OpCodes.Call, Init_Long, ())\n # -------------------------------------\n gen.Emit(OpCodes.Ret)\n tb.CreateType()\n ab.SetEntryPoint(mainMethod, config.target)\n ab.Save(aName.Name + \".exe\", config.platform, config.machine)\n if config.verbose: print 'Gen emit ... done'\n if config.verbose: print \"Save as \" + aName.Name + \".exe\"\n System.IO.File.Delete(config.output + \".IPDLL\")",
"def main():\n run_program()",
"def gen(\n file: str,\n infer: bool = typer.Option(\n True, help=\"Whether to run type inference on code examples.\"\n ),\n exec: bool = typer.Option(\n False, help=\"Whether to attempt to execute doctring code.\"\n ),\n experimental: bool = typer.Option(False, help=\"Use experimental Ts parsing\"),\n debug: bool = False,\n dummy_progress: bool = typer.Option(False, help=\"Disable rich progress bar\"),\n):\n _intro()\n from papyri.gen import gen_main\n\n gen_main(\n infer=infer,\n exec_=exec,\n target_file=file,\n experimental=experimental,\n debug=debug,\n dummy_progress=dummy_progress,\n )",
"def main(verbose, debug, names):\n initialize(debug)\n\n echome(names)\n # click.echo(\"hello\")\n # see\n # https://www.brianthicks.com/post/2014/11/03/build-modular-command-line-tools-with-click/",
"def generate(self, name):\n raise NotImplementedError()",
"def runFilename(self, name):\n return self.run(open(name, 'r').read(), name)",
"def executable():\n\n if len(sys.argv) == 1:\n arguments.get_help()\n sys.exit('\\nGive me something to do and I will do it\\n')\n else:\n # Parse the Arguments that have been provided\n args = arguments.get_args()\n\n # Load The System Logger\n log = logger.load_in(log_level=args.get('log_level', 'info'))\n log.debug('Used Arguments %s', args)\n const(log_method=log)\n\n # Begin Work\n start(set_args=args)",
"def binary(self, name):\r\n if not isinstance(name, Compatibility.string):\r\n raise ValueError('name must be a binary name, given %s of type %s' % (name, type(name)))\r\n self.validate()\r\n return self._validated_executable(name)",
"def _config_exe(exe_name):\n\n package_name = 'neuron'\n if package_name not in working_set.by_key:\n print (\"INFO : Using neuron-nightly Package (Developer Version)\")\n package_name = 'neuron-nightly'\n\n assert package_name in working_set.by_key, \"NEURON package not found! Verify PYTHONPATH\"\n NRN_PREFIX = os.path.join(working_set.by_key[package_name].location, 'neuron', '.data')\n os.environ[\"NEURONHOME\"] = os.path.join(NRN_PREFIX, 'share/nrn')\n os.environ[\"NRNHOME\"] = NRN_PREFIX\n os.environ[\"NRNBIN\"] = os.path.dirname(__file__)\n _set_default_compiler()\n return os.path.join(NRN_PREFIX, 'bin', exe_name)",
"def KengeProgram(self, name, source = None, **kargs):\n program_args = {} \n # we only want unique libraries, since re can't handle huge strings\n libs = sets.Set()\n libpath = []\n cpp_path = [] #copy.copy(self.cpp_path)\n\n # First we work out all the required libraries\n for lib in kargs.get(\"LIBS\", []):\n try:\n\t\tlibs.union_update(self.get_libs(lib))\n except LibraryNotFound, badlib:\n raise SCons.Errors.UserError, \"Program %s was looking for library %s but it doesn't exist \" \\\n \"in this environment\\n This environment has: %s\" % (name, badlib, self.libs.keys())\n\tlibs = list(libs)\n \n del kargs[\"LIBS\"]\n\n # Now we go through to get the library path for all the\n # libraries\n for libname in libs:\n if not self.libs[libname][2] is None:\n libpath += [self.libs[libname][2]]\n\n # This ensure that any generated header files\n # Maybe move this somewhere else later though\n\n cpp_path.append(Dir('.').abspath + \"/src\")\n\n # Now we go through everything in the kargs:\n for arg in kargs:\n if arg.startswith(\"EXTRA_\"):\n argname = arg[6:]\n program_args[argname] = self.get(argname, []) + kargs[arg]\n else:\n program_args[arg] = kargs[arg]\n\n if source is None:\n # User didn't provide any source files\n # explicitly, so we work out it form them\n # based on some hueristics.\n glob_list = []\n dirs = [\"src/\", \"src/arch-%s/\" % env.arch]\n for src_ext in env.src_exts:\n for dir_ in dirs:\n glob_list.append(dir_ + \"*.\" + src_ext)\n else:\n glob_list = source\n\n # He we expand the glob to a list of files\n source_list = Flatten([src_glob(glob) for glob in glob_list])\n\n # Now automatically handle any templates\n for file_name in source_list:\n if file_name.endswith(\".template\"):\n template_env = kargs.get(\"TEMPLATE_ENV\", self[\"TEMPLATE_ENV\"])\n template = self.Template(file_name, TEMPLATE_ENV=template_env)\n env.Depends(template, Value(template_env))\n source_list.append(str(template[0]))\n\n program_args[\"LIBS\"] = libs\n program_args[\"LIBPATH\"] = libpath\n\n object_list = []\n for source in source_list:\n for ext in self.obj_exts:\n if str(source).endswith(\".%s\" % ext):\n object_list.append(source)\n continue\n\n if kargs.has_key(\"EXTRAOBJECTS\"):\n object_list += kargs[\"EXTRAOBJECTS\"]\n\n # Prepend the crt\n if \"c\" in self.libs.dict.keys():\n object_list = self.libs[\"c\"][3]+ object_list\n\n prog = self.Program(name, object_list, **program_args)\n\n # SCons changed to program returning a list of object. But it makes\n # much more sense to return a single item\n assert(len(prog) == 1)\n prog = prog[0]\n\n if \"LINKSCRIPTS\" in program_args:\n for linkscript in program_args[\"LINKSCRIPTS\"]:\n self.Depends(prog, linkscript)\n\n if self[\"FLINT_RUN\"]:\n for each in prog.children():\n if str(each).endswith(\".o\"):\n if str(each.children()[0]).endswith(\".c\") or \\\n str(each.children()[0]).endswith(\".cc\"):\n self.AddPreAction(each, \"$FLINTCOM\")\n\n return prog",
"def execute(self):\n if self._cli_arguments.get('<samplename>') == 'cfn':\n generate_sample_cfn_module(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'static-angular':\n generate_sample_static_angular(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'static-react':\n generate_sample_static_react(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'sls-py':\n generate_sample_sls_module(self.env_root, 'sls-py')\n elif self._cli_arguments.get('<samplename>') == 'sls-tsc':\n generate_sample_sls_module(self.env_root, 'sls-tsc')\n elif self._cli_arguments.get('<samplename>') == 'stacker':\n generate_sample_stacker_module(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'tf':\n generate_sample_tf_module(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'k8s-cfn-repo':\n generate_sample_k8s_cfn_repo(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'k8s-tf-repo':\n generate_sample_k8s_tf_repo(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'cdk-tsc':\n generate_sample_cdk_tsc_module(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'cdk-py':\n generate_sample_cdk_py_module(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'cdk-csharp':\n generate_sample_cdk_cs_module(self.env_root)\n else:\n LOGGER.info(\"Available samples to generate:\")\n for i in ['cfn', 'static-angular', 'static-react', 'sls-tsc',\n 'sls-py', 'tf', 'k8s-cfn-repo', 'k8s-tf-repo',\n 'stacker', 'cdk-tsc', 'cdk-py', 'cdk-csharp']:\n print(i)",
"def main(binary_name, code_directory, verbose, clase):\n print(\"Start of binaries generation\")\n #Directory to iterate\n directory = '../../results/'+code_directory + '/' + clase + '/application_signature/'\n #Directory to store the binaries to generate\n bin_directory = './bin/'\n #Task to performed on the new script\n make_clean = 'make clean\\n'\n for dirs in os.listdir(directory):\n print('Generating binary for path', dirs)\n if os.path.exists(directory+dirs+'/bin/'+dirs):\n os.remove(directory+dirs+'/bin/'+dirs)\n #Creation of the script\n with open(directory+dirs+'/make_bin.sh', 'w') as bin_file:\n bin_file.write('#! /bin/bash\\n')\n bin_file.write(make_clean+'\\n')\n bin_file.write('make '+code_directory+' CLASS='+clase+'\\n')\n bin_file.write('mv '+bin_directory+binary_name+' '+bin_directory+binary_name+'_'+dirs+'\\n')\n bin_file.write(make_clean)\n bin_file.close()\n try:\n #Changing privileges so script can be executed automatically\n os.chmod(directory+dirs+'/make_bin.sh', 0o777)\n #Move to directory where script is to be executed\n cwd = os.getcwd()\n #Change cwd to execute script generating the binary\n os.chdir(directory+dirs)\n if verbose:\n subprocess.check_call('./make_bin.sh')\n else:\n subprocess.check_call('./make_bin.sh', stdout=subprocess.PIPE, shell=False)\n \n os.chdir(cwd)\n except FileNotFoundError as e:\n logger.error(e)\n raise\n print('End of binaries generation')",
"def main():\n load()\n\n print(generate())",
"def executable(output=None, sources=None, rule=None, slibs=[], libs='', **kwargs):\n objs = compile(\n sources = sources,\n bldprefix = output.name + '_',\n rule = rule,\n **kwargs)\n objs += [str(x) for x in slibs]\n w.build(str(output), 'executable',\n inputs = objs,\n variables = {'libs': libs})\n return str(output) # convenience",
"def run_file(self, user_input):\n # Extract the important information\n self.path, self.name = self.extractor.extract_program_information(user_input)\n\n # Determine what language the program is\n program_type = self.determine_program_type(path, name)\n\n # If the file is python, run it the specific way\n # @TODO: Make it work without shell=True\n if program_type == \"python\":\n subprocess.Popen(\"python \" + self.path + self.name, shell=True)",
"def run(self, name, config, builder):\n if not isinstance(name, str):\n raise RuntimeError(\"Name has to be a string type\")\n if not isinstance(config, Config):\n raise RuntimeError(\"config has to be a Config type\")\n if not isinstance(builder, Builder):\n raise RuntimeError(\"builder has to be a Builder type\")\n bldr = TopologyBuilder(name=name)\n builder.build(bldr)\n bldr.set_config(config._api_config)\n bldr.build_and_submit()",
"def _compute_program_name():\n program_path = os.path.abspath(sys.argv[0])\n if os.path.exists(program_path):\n return os.path.basename(program_path)\n else:\n match = re.match(r\"^.*(?:\\.egg|\\.tar|\\.tar\\.gz)(?=/)\", program_path, re.IGNORECASE)\n if (match is not None) and os.path.exists(match.group(0)):\n # python script is embedded in egg\n return os.path.basename(program_path)\n else:\n return \"unknown\"",
"def main():\n parsed_args = parse_args()\n dfg = DummyFileGenerator(parsed_args[0], **parsed_args[1])\n dfg.write_output_file(**parsed_args[2])",
"def run_spec(self, spec_name):\n p = subprocess.Popen(['rake', 'spec', 'SPEC=spec/default/{}.rb'.format(spec_name)],\n cwd='tests/serverspecs',\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = p.communicate()\n print stdout\n print stderr\n if p.returncode:\n raise Exception(stdout)",
"def _run_generic_test(name):\n\n underscore = name.rfind('_')\n if underscore == -1:\n raise TestException(\n 'Internal error: _run_generic_test did not have type')\n\n environment = name[underscore + 1:]\n basename = name[0:underscore]\n build_program([basename + '.c'])\n result = run_program(environment=environment)\n check_result(basename + '.c', result)",
"def make_program(*nodes: base.Node, name: str = 'launchpad'):\n program = Program(name)\n for node in nodes:\n program.add_node(node)\n return program",
"def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"layout_path\", help=\"relative path to the directory \"\n \"containing .rst files with site content and jinja \"\n \"templates that define the site structure\")\n parser.add_argument(\"output_path\", help=\"relative path to the output \"\n \"directory\")\n arguments = parser.parse_args()\n\n generate_site(arguments.layout_path, arguments.output_path)"
] | [
"0.6624041",
"0.6600797",
"0.64478886",
"0.6147318",
"0.61315423",
"0.6088417",
"0.5887231",
"0.5876601",
"0.5817391",
"0.57601607",
"0.57026327",
"0.5663314",
"0.56571984",
"0.5645205",
"0.5624931",
"0.56230867",
"0.5593044",
"0.553381",
"0.5533443",
"0.553212",
"0.5465984",
"0.5450145",
"0.5424438",
"0.5422465",
"0.5409776",
"0.54083586",
"0.5392208",
"0.5390996",
"0.53809345",
"0.5369442"
] | 0.7209196 | 0 |
Convert to cygwin path if we are using cygwin. | def ConvertToCygpath(path):
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cygpath(filename):\n if sys.platform == 'cygwin':\n proc = Popen(['cygpath', '-am', filename], stdout=PIPE)\n return proc.communicate()[0].strip()\n else:\n return filename",
"def conditional_abspath (filename):\n if sys.platform.find('cygwin') != -1:\n return filename\n else:\n return os.path.abspath(filename)",
"def WindowsPath(path):\n # TODO(pamg): make this work for other drives too.\n if path.startswith('/cygdrive/c/'):\n return path.replace('/cygdrive/c/', 'C:/')\n return path",
"def change_path_to_windows_style(input):\n\n try:\n new_output_path = re.sub(\"^/cygdrive/c/\", \"C:/\", input)\n except Exception as e:\n print e\n new_output_path = input\n\n return new_output_path",
"def make_posix_path(windows_path):\n for regex, sub in [\n (re.compile(r'\\\\'), '/'),\n (re.compile('^[Cc]:'), '/c'),\n ]:\n windows_path = regex.sub(sub, windows_path)\n return windows_path",
"def win2unix(a_path, use_abs=1):\r\n if use_abs:\r\n a_path = os.path.abspath(a_path)\r\n return re.sub(r\"\\\\\", \"/\", a_path)",
"def win2unix(a_path, use_abs=1):\r\n if use_abs:\r\n a_path = os.path.abspath(a_path)\r\n return re.sub(r\"\\\\\", \"/\", a_path)",
"def system_path(path):\n if is_windows(): return path.replace('/', '\\\\')\n else: return path.replace('\\\\', '/')",
"def windows2msys(path):\n if not sys.platform.startswith('win32'):\n return path\n (drive, path) = os.path.splitdrive(os.path.abspath(path))\n return \"/\" + drive[0] + path.replace('\\\\', '/')",
"def convertString(path):\n if (\"win\" in sys.platform):\n return path.replace(\"/\",\"\\\\\")\n elif (\"linux\" in sys.platform):\n return path.replace(\"\\\\\",\"/\")",
"def _windows_seps(path: str) -> str:\n\n if not path:\n return None\n elif os.sep != ntpath.sep:\n return path.replace(os.sep, ntpath.sep)\n else:\n return path",
"def _escape_path(path):\n path = path.strip()\n return '\"{0}\"'.format(path) if _platform_windows else path.replace(\" \", \"\\ \")",
"def on_windows ():\n if bjam.variable(\"NT\"):\n return True\n\n elif bjam.variable(\"UNIX\"):\n\n uname = bjam.variable(\"JAMUNAME\")\n if uname and uname[0].startswith(\"CYGWIN\"):\n return True\n\n return False",
"def _path(unix_path):\n return unix_path.replace(\"/\", os.path.sep)",
"def fix_windows_path_limit(path):\n if platform.system() == 'Windows':\n if path.startswith('\\\\\\\\'):\n # UNC network path\n return '\\\\\\\\?\\\\UNC\\\\' + path[2:]\n elif os.path.isabs(path):\n # local absolute path\n return '\\\\\\\\?\\\\' + path\n else:\n # relative path, don't alter\n return path\n else:\n return path",
"def _get_mingw_dll_dir():\n gfortran_exe = shutil.which(\"gfortran\")\n if gfortran_exe is None:\n return None\n\n gfortran_exe = pathlib.Path(gfortran_exe)\n bin_dir = gfortran_exe.resolve().parent\n matches = list(bin_dir.glob(\"libgfortran*.dll\"))\n if len(matches) == 0:\n return None\n\n return str(bin_dir)",
"def win_path_check(path):\n if IS_WIN:\n return path.replace(\"\\\\\", \"/\").replace(\":\", \"\\\\:\")\n return path",
"def windows_path(self, **kw):\n with_drive_letter = kw.get(\"with_drive\", True)\n return self._construct_path(\"\\\\\", with_drive_letter)",
"def getcwd():\n cwd = os.getcwd()\n # os.getcwd works properly with Python 3 on Windows.\n # We need this workaround only for Python 2 on Windows.\n if is_win and is_py2:\n try:\n unicode(cwd)\n except UnicodeDecodeError:\n # Do conversion to ShortPathName really only in case 'cwd' is not\n # ascii only - conversion to unicode type cause this unicode error.\n try:\n import win32api\n cwd = win32api.GetShortPathName(cwd)\n except ImportError:\n pass\n return cwd",
"def base_protolint_command() -> str:\n if sys.platform.startswith(\"win\"):\n protolint_base_cmd = \"protolint\" # pragma: nocover\n else:\n protolint_base_cmd = \"PATH=${PATH}:${GOPATH}/bin/:~/go/bin protolint\"\n\n return protolint_base_cmd",
"def to_posix(fname):\n import sys\n if sys.platform == 'win32': # pragma: nocover\n import os.path\n if os.path.isabs(fname):\n fname = '/' + fname\n fname = fname.replace('\\\\', '/')\n return fname",
"def expand_config_path(path):\n if path == DEFAULT_LINUX_PATH and os.name == \"nt\":\n path = DEFAULT_WINDOWS_PATH\n return os.path.expanduser(path)",
"def nt_path_to_posix_path(path):\r\n path = path.replace(\"\\\\\", \"/\")\r\n parts = path.split(\":\")\r\n if len(parts) > 1:\r\n return \"/\" + parts[0].lower() + parts[1]\r\n return path",
"def makePath(path):\n\n compatPath = os.path.abspath(os.path.expanduser(path))\n\n return compatPath",
"def cnormpath (path):\n path = normpath(path)\n if os.name == 'nt':\n # replace slashes with backslashes\n path = path.replace(\"/\", \"\\\\\")\n if not os.path.isabs(path):\n path = normpath(os.path.join(sys.prefix, path))\n return path",
"def path_creator(rel_path=''):\n if platform.system() != 'Windows':\n if rel_path == '':\n path_list=sys.argv[0].split('/')[:-1]\n return '/'.join(path_list)\n else:\n path_list = sys.argv[0].split('/')[:-1]\n return '/'.join(path_list) + '/' + rel_path\n else:\n if rel_path == '':\n path_list=sys.argv[0].split('\\\\')[:-1]\n path_res='\\\\'.join(path_list)\n return path_res\n else:\n path_list = sys.argv[0].split('\\\\')[:-1]\n rel_path=rel_path.split('/')\n path_res='\\\\'.join(path_list) + '\\\\' + '\\\\'.join(rel_path)\n return path_res",
"def normalized_file_path(path: str) -> str:\n # Convert Unix path to Windows path for WSL\n if PLATFORM == \"WSL\":\n return path.replace(\"/\", \"\\\\\")\n\n return path",
"def is_win():\n return sys.platform[:3] == \"win\"",
"def GetWindowsPathWithUNCPrefix(path):\n path = path.strip()\n\n # No need to add prefix for non-Windows platforms.\n # And \\\\?\\ doesn't work in python 2 or on mingw\n if not IsWindows() or sys.version_info[0] < 3:\n return path\n\n # Starting in Windows 10, version 1607(OS build 14393), MAX_PATH limitations have been\n # removed from common Win32 file and directory functions.\n # Related doc: https://docs.microsoft.com/en-us/windows/win32/fileio/maximum-file-path-limitation?tabs=cmd#enable-long-paths-in-windows-10-version-1607-and-later\n import platform\n if platform.win32_ver()[1] >= '10.0.14393':\n return path\n\n # import sysconfig only now to maintain python 2.6 compatibility\n import sysconfig\n if sysconfig.get_platform() == 'mingw':\n return path\n\n # Lets start the unicode fun\n unicode_prefix = '\\\\\\\\?\\\\'\n if path.startswith(unicode_prefix):\n return path\n\n # os.path.abspath returns a normalized absolute path\n return unicode_prefix + os.path.abspath(path)",
"def _GetSystemPath():\n return encoding_util.GetEncodedValue(os.environ, \"PATH\")"
] | [
"0.8049506",
"0.73739505",
"0.7203524",
"0.7143538",
"0.67795306",
"0.6647246",
"0.6647246",
"0.65076965",
"0.6450253",
"0.6416357",
"0.61581916",
"0.6095484",
"0.60797286",
"0.607077",
"0.6055507",
"0.59301335",
"0.5853573",
"0.5805152",
"0.5760639",
"0.5707755",
"0.5666738",
"0.5638682",
"0.55572027",
"0.5500677",
"0.5469991",
"0.5466271",
"0.54593396",
"0.5432101",
"0.5430365",
"0.5407547"
] | 0.82450604 | 0 |
Returns path to MSBuild for msvs_version or latest available. Looks in the registry to find install location of MSBuild. MSBuild before v4.0 will not build c++ projects, so only use newer versions. | def FindMSBuildInstallation(msvs_version = 'auto'):
import TestWin
registry = TestWin.Registry()
msvs_to_msbuild = {
'2013': r'12.0',
'2012': r'4.0', # Really v4.0.30319 which comes with .NET 4.5.
'2010': r'4.0'}
msbuild_basekey = r'HKLM\SOFTWARE\Microsoft\MSBuild\ToolsVersions'
if not registry.KeyExists(msbuild_basekey):
print 'Error: could not find MSBuild base registry entry'
return None
msbuild_version = None
if msvs_version in msvs_to_msbuild:
msbuild_test_version = msvs_to_msbuild[msvs_version]
if registry.KeyExists(msbuild_basekey + '\\' + msbuild_test_version):
msbuild_version = msbuild_test_version
else:
print ('Warning: Environment variable GYP_MSVS_VERSION specifies "%s" '
'but corresponding MSBuild "%s" was not found.' %
(msvs_version, msbuild_version))
if not msbuild_version:
for msvs_version in sorted(msvs_to_msbuild, reverse=True):
msbuild_test_version = msvs_to_msbuild[msvs_version]
if registry.KeyExists(msbuild_basekey + '\\' + msbuild_test_version):
msbuild_version = msbuild_test_version
break
if not msbuild_version:
print 'Error: could not find MSBuild registry entry'
return None
msbuild_path = registry.GetValue(msbuild_basekey + '\\' + msbuild_version,
'MSBuildToolsPath')
if not msbuild_path:
print 'Error: could not get MSBuild registry entry value'
return None
return os.path.join(msbuild_path, 'MSBuild.exe') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def FindVisualStudioInstallation():\n possible_roots = ['%s:\\\\Program Files%s' % (chr(drive), suffix)\n for drive in range(ord('C'), ord('Z') + 1)\n for suffix in ['', ' (x86)']]\n possible_paths = {\n '2013': r'Microsoft Visual Studio 12.0\\Common7\\IDE\\devenv.com',\n '2012': r'Microsoft Visual Studio 11.0\\Common7\\IDE\\devenv.com',\n '2010': r'Microsoft Visual Studio 10.0\\Common7\\IDE\\devenv.com',\n '2008': r'Microsoft Visual Studio 9.0\\Common7\\IDE\\devenv.com',\n '2005': r'Microsoft Visual Studio 8\\Common7\\IDE\\devenv.com'}\n\n possible_roots = [ConvertToCygpath(r) for r in possible_roots]\n\n msvs_version = 'auto'\n for flag in (f for f in sys.argv if f.startswith('msvs_version=')):\n msvs_version = flag.split('=')[-1]\n msvs_version = os.environ.get('GYP_MSVS_VERSION', msvs_version)\n\n if msvs_version in possible_paths:\n # Check that the path to the specified GYP_MSVS_VERSION exists.\n path = possible_paths[msvs_version]\n for r in possible_roots:\n build_tool = os.path.join(r, path)\n if os.path.exists(build_tool):\n uses_msbuild = msvs_version >= '2010'\n msbuild_path = FindMSBuildInstallation(msvs_version)\n return build_tool, uses_msbuild, msbuild_path\n else:\n print ('Warning: Environment variable GYP_MSVS_VERSION specifies \"%s\" '\n 'but corresponding \"%s\" was not found.' % (msvs_version, path))\n # Neither GYP_MSVS_VERSION nor the path help us out. Iterate through\n # the choices looking for a match.\n for version in sorted(possible_paths, reverse=True):\n path = possible_paths[version]\n for r in possible_roots:\n build_tool = os.path.join(r, path)\n if os.path.exists(build_tool):\n uses_msbuild = msvs_version >= '2010'\n msbuild_path = FindMSBuildInstallation(msvs_version)\n return build_tool, uses_msbuild, msbuild_path\n print 'Error: could not find devenv'\n sys.exit(1)",
"def _figure_out_msvs_version_filesystem(env, specific_version=0):\n \n prefixes = [\n (17,'C:/Program Files/Microsoft Visual Studio/2022'),\n (16,'C:/Program Files (x86)/Microsoft Visual Studio/2019'),\n \n # starting with DEV15, everything is in the \"Program Files\n # (x86)\" directory.\n (15,'C:/Program Files (x86)/Microsoft Visual Studio/2017'),\n \n (14,'C:/Program Files (x86)/Microsoft Visual Studio 14.0'),\n (14,'C:/Program Files/Microsoft Visual Studio 14.0'),\n \n (12,'C:/Program Files (x86)/Microsoft Visual Studio 12.0'),\n (12,'C:/Program Files/Microsoft Visual Studio 12.0'),\n\n (11,'C:/Program Files (x86)/Microsoft Visual Studio 11.0'),\n (11,'C:/Program Files/Microsoft Visual Studio 11.0'),\n \n (10,'C:/Program Files (x86)/Microsoft Visual Studio 10.0'),\n (10,'C:/Program Files/Microsoft Visual Studio 10.0'),\n \n (9,'C:/Program Files (x86)/Microsoft Visual Studio 9.0'),\n (9,'C:/Program Files/Microsoft Visual Studio 9.0'),\n \n (8, \"c:/Program Files (x86)/Microsoft Visual Studio 8\"),\n (8,\"c:/Program Files/Microsoft Visual Studio 8\"),\n \n (7, \"c:/Program Files/Microsoft Visual Studio .NET 2003\"),\n (7,\"c:/Program Files (x86)/Microsoft Visual Studio .NET 2003\")\n ]\n for v,dir in prefixes:\n if os.path.exists(dir):\n if specific_version:\n if specific_version == v:\n return str(v)\n else:\n return str(v)\n return None # we don't know",
"def test_msbuild_path(visualstudio, tmp_path):\n assert visualstudio.msbuild_path == tmp_path / \"Visual Studio\" / \"MSBuild.exe\"",
"def _find_specific_msvs_version(env,uv):\n found = False\n # 1. look for specific version in registry\n if uv < 15:\n (vs_dir,vc_dir) = _find_msvc_in_registry(env,uv)\n if vs_dir and vc_dir:\n env['msvs_version'] = str(uv) \n found = True\n else:\n warn(\"Could not find specified version of MSVS in registry: {}\".format(uv))\n\n # 2. look in file system for specific version\n if not found:\n env['msvs_version'] = _figure_out_msvs_version_filesystem(env, uv)\n if env['msvs_version']:\n found = True\n else:\n warn(\"Could not find specified version of MSVS in file system: {}\".format(uv))\n return found",
"def msvc_target(optional=False):\n\n # MSVC2015 is v14, MSVC2017 is v15 and MSVC2019 is v16.\n vs_version = os.environ.get('VisualStudioVersion', '0.0')\n vs_major = vs_version.split('.')[0]\n\n if vs_major == '0':\n if optional:\n return None\n\n raise UserException(\"unable to detect any MSVC compiler\")\n\n if vs_major == '14':\n is_32 = (os.environ.get('Platform') != 'X64')\n elif vs_major in ('15', '16'):\n is_32 = (os.environ.get('VSCMD_ARG_TGT_ARCH') != 'x64')\n else:\n if optional:\n return None\n\n raise UserException(\"MSVC v{0} is unsupported\".format(vs_version))\n\n return '32' if is_32 else '64'",
"def get_version():\n found = None\n with open(os.path.join(PATH, \"pyproject.toml\"), \"rt\") as setup_file:\n for line in setup_file:\n line = line.strip()\n if line.startswith(\"version\"):\n found = line\n break\n\n if found is None:\n raise ValueError(\"Unable to detect version\")\n\n return found.split(\"=\")[-1].replace('\"', \"\").strip()",
"def _getNETSDKPath():\r\n try:\r\n dotNETSDK_root_key = win32api.RegOpenKeyEx(win32con.HKEY_LOCAL_MACHINE, 'SOFTWARE\\\\Microsoft\\\\Microsoft SDKs\\\\.NETFramework\\\\v2.0', 0, win32con.KEY_READ)\r\n found = False\r\n i = 0\r\n try:\r\n try:\r\n while not found:\r\n name, obj, ntype = win32api.RegEnumValue(dotNETSDK_root_key, i)\r\n i = i + 1\r\n if name=='InstallationFolder':\r\n return obj\r\n found = True\r\n except:\r\n win32api.RegCloseKey(dotNETSDK_root_key)\r\n return ''\r\n finally:\r\n win32api.RegCloseKey(dotNETSDK_root_key)\r\n except:\r\n return ''",
"def get_windows_sdk_path():\n try:\n import _winreg as winreg\n except ImportError:\n import winreg\n sub_key = r\"Software\\Microsoft\\Microsoft SDKs\\Windows\"\n with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, sub_key) as key:\n name = \"CurrentInstallFolder\"\n return winreg.QueryValueEx(key, name)[0]\n return None",
"def locate_vcredist_dir(plat):\n from setuptools import msvc\n\n vcvars = msvc.msvc14_get_vc_env(plat)\n try:\n vcruntime = vcvars[\"py_vcruntime_redist\"]\n except KeyError:\n warn(f\"platform={plat}, vcvars=\")\n pprint(vcvars, stream=sys.stderr)\n\n warn(\n \"Failed to get py_vcruntime_redist via vcvars, may need to set it in %PATH%\"\n )\n return None\n redist_dir, dll = os.path.split(vcruntime)\n # add redist dir to $PATH so that it can be found\n os.environ[\"PATH\"] += os.pathsep + redist_dir\n return redist_dir",
"def get_assembly_version(path: str) -> AssemblyVersion:\n info = win32api.GetFileVersionInfo(path, \"\\\\\")\n ms = info[\"FileVersionMS\"]\n ls = info[\"FileVersionLS\"]\n\n return win32api.HIWORD(ms), win32api.LOWORD(ms), win32api.HIWORD(ls), win32api.LOWORD(ls)",
"def check_cmake_windows():\n chk = Popen(\"wmic product where \\\"name = 'cmake'\\\" get installlocation,version\",\n shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderr = chk.communicate()\n if stderr:\n return False, stderr\n lines = [re.sub(\" +\", \" \", line.strip())\n for line in stdout.decode().splitlines()\n if line.strip()]\n stdout = lines[1]\n location = stdout[:stdout.rfind(\" \")] + \"bin\"\n out_info(\"CMake not found in %PATH%. Temporarily adding: \\\"{}\\\"\".format(location))\n os.environ[\"PATH\"] += \";{}\".format(location)\n stdout = \"cmake {}\".format(stdout)\n return stdout, False",
"def get_min_build_version(version: str) -> str:\n return Version(version).replace(micro=0).get_stable().dumps()",
"def getCmsswVersion(self):\n if not self.crabPSet:\n return self.step.data.application.setup.cmsswVersion\n else:\n # CRAB3 needs to use an environment var to get the version\n return os.environ.get(\"CMSSW_VERSION\", \"\")",
"def get_version():\n version_file = Path(__file__).resolve().parent / \"clinker\" / \"__init__.py\"\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file.read_text(), re.M\n )\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Failed to find version string\")",
"def python_build():\n return _sys_version()[4:6]",
"def getCmsswVersion(self):\n if not self.crabPSet:\n return self.step.data.application.setup.cmsswVersion\n\n # CRAB3 needs to use an environment var to get the version\n return os.environ.get(\"CMSSW_VERSION\", \"\")",
"def _get_package_version():\n file = join(get_root(), 'VERSION')\n\n if exists(file):\n with open(file) as file:\n return file.read()\n\n return ''",
"def get_buildroot():\r\n try:\r\n return BuildRoot().path\r\n except BuildRoot.NotFoundError as e:\r\n print(e.message, file=sys.stderr)\r\n sys.exit(1)",
"def rtd_build_path(self, version=\"latest\"):\n return os.path.join(self.doc_path, 'rtd-builds', version)",
"def get_for_release_version_path(self):\n return self.__cICommon.get_for_release_version_path()",
"def version_path(version):\n try:\n version_path = CFG.get(\"Versions\", version)\n except KeyError:\n version_path = version\n return version_path",
"def locate_nuget():\n if NuGetRunner.valid_nuget_executable(\"nuget\"):\n return \"nuget\"\n return None",
"def full_build_path(self, version='latest'):\n return os.path.join(self.conf_dir(version), \"_build\", \"html\")",
"def get_version():\n # this implementation avoids calling Foundation and will work on\n # non Apple OSes.\n vers = \"UNKNOWN\"\n build = \"\"\n # find the munkilib directory, and the version file\n munkilibdir = os.path.dirname(os.path.abspath(__file__))\n versionfile = os.path.join(munkilibdir, \"version.plist\")\n if os.path.exists(versionfile):\n try:\n vers_plist = readPlist(versionfile)\n except (IOError, OSError, ExpatError):\n pass\n else:\n try:\n vers = vers_plist['CFBundleShortVersionString']\n build = vers_plist['BuildNumber']\n except KeyError:\n pass\n if build:\n vers = vers + \".\" + build\n return vers",
"def getwindowsversion(): # real signature unknown; restored from __doc__\n pass",
"def version(path):\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, path), encoding='utf-8') as f:\n version_file = f.read()\n version_match = re.search(r\"\"\"^__version__ = ['\"]([^'\"]*)['\"]\"\"\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")",
"def get_build_version():\n package_version = __version__\n tags = (\n subprocess.run(\n [\"/usr/bin/git\", \"tag\", \"--points-at\", \"HEAD\"],\n cwd=os.path.dirname(os.path.realpath(__file__)),\n stdout=subprocess.PIPE,\n check=True,\n )\n .stdout.decode(\"utf-8\")\n .strip(\"\\n\")\n .split(\"\\n\")\n )\n commit_id = (\n subprocess.run(\n [\"/usr/bin/git\", \"rev-parse\", \"--short\", \"HEAD\"],\n cwd=os.path.dirname(os.path.realpath(__file__)),\n stdout=subprocess.PIPE,\n check=True,\n )\n .stdout.decode(\"utf-8\")\n .strip(\"\\n\")\n )\n\n version_tags = _select_version_tags(tags)\n if len(version_tags) > 1:\n raise exc.QgrVersionError(\n f\"Can not determine desired version from tags: {tags}\",\n )\n\n if len(version_tags) == 1:\n version = version_tags[0]\n else:\n # If there is no version tag, build a unique version string\n version = f\"{package_version}-{commit_id}\"\n\n return version",
"def get_sdk_version() -> str:\n return definitions.get_sdk_version()",
"def get_version() -> str:\n version = read(\"pdf_utils/__version__.py\")\n return re.search(r\"__version__ = \\\"(.*?)\\\"\", version).group(1)",
"def get_jre_in_path():\n return get_java_binary_version('java')"
] | [
"0.74490994",
"0.61283606",
"0.60080606",
"0.5867908",
"0.5809241",
"0.5623859",
"0.55386347",
"0.55197555",
"0.55090356",
"0.5305542",
"0.5296578",
"0.5279354",
"0.5249882",
"0.5218957",
"0.5179512",
"0.5171492",
"0.51507473",
"0.5116992",
"0.5103813",
"0.509796",
"0.5077102",
"0.50558525",
"0.50545865",
"0.50454146",
"0.50394046",
"0.50356704",
"0.50184184",
"0.50160295",
"0.500547",
"0.5001536"
] | 0.8086731 | 0 |
Returns appropriate values for .build_tool and .uses_msbuild fields of TestGypBase for Visual Studio. We use the value specified by GYP_MSVS_VERSION. If not specified, we search %PATH% and %PATHEXT% for a devenv.{exe,bat,...} executable. Failing that, we search for likely deployment paths. | def FindVisualStudioInstallation():
possible_roots = ['%s:\\Program Files%s' % (chr(drive), suffix)
for drive in range(ord('C'), ord('Z') + 1)
for suffix in ['', ' (x86)']]
possible_paths = {
'2013': r'Microsoft Visual Studio 12.0\Common7\IDE\devenv.com',
'2012': r'Microsoft Visual Studio 11.0\Common7\IDE\devenv.com',
'2010': r'Microsoft Visual Studio 10.0\Common7\IDE\devenv.com',
'2008': r'Microsoft Visual Studio 9.0\Common7\IDE\devenv.com',
'2005': r'Microsoft Visual Studio 8\Common7\IDE\devenv.com'}
possible_roots = [ConvertToCygpath(r) for r in possible_roots]
msvs_version = 'auto'
for flag in (f for f in sys.argv if f.startswith('msvs_version=')):
msvs_version = flag.split('=')[-1]
msvs_version = os.environ.get('GYP_MSVS_VERSION', msvs_version)
if msvs_version in possible_paths:
# Check that the path to the specified GYP_MSVS_VERSION exists.
path = possible_paths[msvs_version]
for r in possible_roots:
build_tool = os.path.join(r, path)
if os.path.exists(build_tool):
uses_msbuild = msvs_version >= '2010'
msbuild_path = FindMSBuildInstallation(msvs_version)
return build_tool, uses_msbuild, msbuild_path
else:
print ('Warning: Environment variable GYP_MSVS_VERSION specifies "%s" '
'but corresponding "%s" was not found.' % (msvs_version, path))
# Neither GYP_MSVS_VERSION nor the path help us out. Iterate through
# the choices looking for a match.
for version in sorted(possible_paths, reverse=True):
path = possible_paths[version]
for r in possible_roots:
build_tool = os.path.join(r, path)
if os.path.exists(build_tool):
uses_msbuild = msvs_version >= '2010'
msbuild_path = FindMSBuildInstallation(msvs_version)
return build_tool, uses_msbuild, msbuild_path
print 'Error: could not find devenv'
sys.exit(1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def FindMSBuildInstallation(msvs_version = 'auto'):\n import TestWin\n registry = TestWin.Registry()\n\n msvs_to_msbuild = {\n '2013': r'12.0',\n '2012': r'4.0', # Really v4.0.30319 which comes with .NET 4.5.\n '2010': r'4.0'}\n\n msbuild_basekey = r'HKLM\\SOFTWARE\\Microsoft\\MSBuild\\ToolsVersions'\n if not registry.KeyExists(msbuild_basekey):\n print 'Error: could not find MSBuild base registry entry'\n return None\n\n msbuild_version = None\n if msvs_version in msvs_to_msbuild:\n msbuild_test_version = msvs_to_msbuild[msvs_version]\n if registry.KeyExists(msbuild_basekey + '\\\\' + msbuild_test_version):\n msbuild_version = msbuild_test_version\n else:\n print ('Warning: Environment variable GYP_MSVS_VERSION specifies \"%s\" '\n 'but corresponding MSBuild \"%s\" was not found.' %\n (msvs_version, msbuild_version))\n if not msbuild_version:\n for msvs_version in sorted(msvs_to_msbuild, reverse=True):\n msbuild_test_version = msvs_to_msbuild[msvs_version]\n if registry.KeyExists(msbuild_basekey + '\\\\' + msbuild_test_version):\n msbuild_version = msbuild_test_version\n break\n if not msbuild_version:\n print 'Error: could not find MSBuild registry entry'\n return None\n\n msbuild_path = registry.GetValue(msbuild_basekey + '\\\\' + msbuild_version,\n 'MSBuildToolsPath')\n if not msbuild_path:\n print 'Error: could not get MSBuild registry entry value'\n return None\n\n return os.path.join(msbuild_path, 'MSBuild.exe')",
"def _set_environment_vars(self):\n os.environ[\"PATH\"] = os.path.join(self.source_folder, \"depot_tools\") + os.pathsep + os.environ[\"PATH\"]\n os.environ[\"DEPOT_TOOLS_PATH\"] = os.path.join(self.source_folder, \"depot_tools\")\n if tools.os_info.is_windows:\n os.environ[\"DEPOT_TOOLS_WIN_TOOLCHAIN\"] = \"0\"\n os.environ[\"GYP_MSVS_VERSION\"] = \"2017\" if str(self.settings.compiler.version) == \"15\" else \"2019\"",
"def get_nt_platform_vars ():\n platform = util.get_platform()\n if platform == \"win-amd64\":\n # the Visual C++ runtime files are installed in the x86 directory\n progvar = \"%ProgramFiles(x86)%\"\n architecture = \"amd64\"\n elif platform == \"win32\":\n progvar = \"%ProgramFiles%\"\n architecture = \"x86\"\n else:\n raise ValueError(\"Unsupported platform %r\" % platform)\n return os.path.expandvars(progvar), architecture",
"def test_msbuild_path(visualstudio, tmp_path):\n assert visualstudio.msbuild_path == tmp_path / \"Visual Studio\" / \"MSBuild.exe\"",
"def __find_tool_path(self):\n tool_path = Path(os.path.dirname(os.path.realpath(__file__)))\n # We asume the installion path is relative to our installation path\n tool_path = tool_path / '../../../bin'\n if os.name == 'posix':\n ret = tool_path / 'fast-discovery-server'\n if not os.path.exists(ret):\n print('fast-discovery-server tool not installed')\n sys.exit(1)\n elif os.name == 'nt':\n ret = tool_path / 'fast-discovery-server.exe'\n if not os.path.exists(ret):\n ret = tool_path / 'fast-discovery-server.bat'\n if not os.path.exists(ret):\n print('fast-discovery-server tool not installed')\n sys.exit(1)\n else:\n print(f'{os.name} not supported')\n sys.exit(1)\n\n return ret",
"def _figure_out_msvs_version_filesystem(env, specific_version=0):\n \n prefixes = [\n (17,'C:/Program Files/Microsoft Visual Studio/2022'),\n (16,'C:/Program Files (x86)/Microsoft Visual Studio/2019'),\n \n # starting with DEV15, everything is in the \"Program Files\n # (x86)\" directory.\n (15,'C:/Program Files (x86)/Microsoft Visual Studio/2017'),\n \n (14,'C:/Program Files (x86)/Microsoft Visual Studio 14.0'),\n (14,'C:/Program Files/Microsoft Visual Studio 14.0'),\n \n (12,'C:/Program Files (x86)/Microsoft Visual Studio 12.0'),\n (12,'C:/Program Files/Microsoft Visual Studio 12.0'),\n\n (11,'C:/Program Files (x86)/Microsoft Visual Studio 11.0'),\n (11,'C:/Program Files/Microsoft Visual Studio 11.0'),\n \n (10,'C:/Program Files (x86)/Microsoft Visual Studio 10.0'),\n (10,'C:/Program Files/Microsoft Visual Studio 10.0'),\n \n (9,'C:/Program Files (x86)/Microsoft Visual Studio 9.0'),\n (9,'C:/Program Files/Microsoft Visual Studio 9.0'),\n \n (8, \"c:/Program Files (x86)/Microsoft Visual Studio 8\"),\n (8,\"c:/Program Files/Microsoft Visual Studio 8\"),\n \n (7, \"c:/Program Files/Microsoft Visual Studio .NET 2003\"),\n (7,\"c:/Program Files (x86)/Microsoft Visual Studio .NET 2003\")\n ]\n for v,dir in prefixes:\n if os.path.exists(dir):\n if specific_version:\n if specific_version == v:\n return str(v)\n else:\n return str(v)\n return None # we don't know",
"def CheckForTools():\n # git.\n Run('git --version > nul', '`git\\' not found in PATH.')\n\n # Win8 SDK\n Run('cl /Zs windows_8_sdk_required_test.c /nologo',\n \"Either `cl' not found in PATH, or it isn't set to use Windows 8 SDK.\")",
"def _system_requirement_tools(self, app: AppConfig):\n if app.target_vendor_base == DEBIAN:\n base_system_packages = [\"python3-dev\", \"build-essential\"]\n system_verify = [\"dpkg\", \"-s\"]\n system_installer = \"apt\"\n elif app.target_vendor_base == RHEL:\n base_system_packages = [\n \"python3-devel\",\n \"gcc\",\n \"make\",\n \"pkgconf-pkg-config\",\n ]\n system_verify = [\"rpm\", \"-q\"]\n system_installer = \"dnf\"\n else:\n base_system_packages = None\n system_verify = None\n system_installer = None\n\n return base_system_packages, system_verify, system_installer",
"def _GetSystemPath():\n return encoding_util.GetEncodedValue(os.environ, \"PATH\")",
"def get_jdk_in_path():\n return get_java_binary_version('javac')",
"def test_build_tools(self):\n #raise AssertionError(\"%s not implemented\" % sys._getframe().f_code.co_name)\n if self.status: self.status.Warning(\"By default build tools is Xilinx this can be changed in demo/nysa_platform.py\")\n if find_xilinx_path() is None:\n return False\n return True",
"def _find_specific_msvs_version(env,uv):\n found = False\n # 1. look for specific version in registry\n if uv < 15:\n (vs_dir,vc_dir) = _find_msvc_in_registry(env,uv)\n if vs_dir and vc_dir:\n env['msvs_version'] = str(uv) \n found = True\n else:\n warn(\"Could not find specified version of MSVS in registry: {}\".format(uv))\n\n # 2. look in file system for specific version\n if not found:\n env['msvs_version'] = _figure_out_msvs_version_filesystem(env, uv)\n if env['msvs_version']:\n found = True\n else:\n warn(\"Could not find specified version of MSVS in file system: {}\".format(uv))\n return found",
"def depot_tools_base(self):\n depot_tools = self.path_from_chromium_base('third_party',\n 'depot_tools')\n return depot_tools if self._filesystem.isdir(depot_tools) else None",
"def get_exec_path(self):\n bin_name = 'test_hint_time'\n # Look for in place build\n script_dir = os.path.dirname(os.path.realpath(__file__))\n bin_path = os.path.join(script_dir, '.libs', bin_name)\n if not os.path.exists(bin_path):\n # Look for out of place build from using apps/build_func.sh\n int_dir = os.path.dirname(script_dir)\n bin_path_op = os.path.join(int_dir, 'build/integration/test/.libs', bin_name)\n if not os.path.exists(bin_path_op):\n msg = 'Could not find application binary, tried \\n \"{}\"\\n \"{}\"'.format(\n bin_path, bin_path_op)\n raise RuntimeError(msg)\n bin_path = bin_path_op\n return bin_path",
"def _sdk_env(self, sdk_dir, target_arch):\n env = {}\n env_prefixes = {}\n\n if target_arch not in ('x86', 'x64', 'arm64'):\n raise ValueError('unknown architecture {!r}'.format(target_arch))\n\n data = self.m.step('read SetEnv json', [\n 'python3',\n self.resource('find_env_json.py'),\n '--sdk_root',\n sdk_dir,\n '--target_arch',\n target_arch,\n '--output_json',\n self.m.json.output(),\n ],\n step_test_data=lambda: self.m.json.test_api.output({\n 'env': {\n 'PATH': [['..', '..', 'win_sdk', 'bin', 'x64']],\n 'VSINSTALLDIR': [['..', '..\\\\']],\n },\n })).json.output.get('env')\n for key in data:\n # SDK cipd packages prior to 10.0.19041.0 contain entries like:\n # \"INCLUDE\": [[\"..\",\"..\",\"win_sdk\",\"Include\",\"10.0.17134.0\",\"um\"], and\n # recipes' Path() does not like .., ., \\, or /, so this is cumbersome.\n # What we want to do is:\n # [sdk_bin_dir.join(*e) for e in env[k]]\n # Instead do that badly, and rely (but verify) on the fact that the paths\n # are all specified relative to the root, but specified relative to\n # win_sdk/bin (i.e. everything starts with \"../../\".)\n #\n # For 10.0.19041.0 and later, the cipd SDK package json is like:\n # \"INCLUDE\": [[\"Windows Kits\",\"10\",\"Include\",\"10.0.19041.0\",\"um\"], so\n # we simply join paths there.\n results = []\n for value in data[key]:\n if value[0] == '..' and (value[1] == '..' or value[1] == '..\\\\'):\n results.append('%s' % sdk_dir.join(*value[2:]))\n else:\n results.append('%s' % sdk_dir.join(*value))\n\n # PATH is special-cased because we don't want to overwrite other things\n # like C:\\Windows\\System32. Others are replacements because prepending\n # doesn't necessarily makes sense, like VSINSTALLDIR.\n if key.lower() == 'path':\n env_prefixes[key] = results\n else:\n env[key] = ';'.join(results)\n\n return {'env': env, 'env_prefixes': env_prefixes}",
"def msvc_target(optional=False):\n\n # MSVC2015 is v14, MSVC2017 is v15 and MSVC2019 is v16.\n vs_version = os.environ.get('VisualStudioVersion', '0.0')\n vs_major = vs_version.split('.')[0]\n\n if vs_major == '0':\n if optional:\n return None\n\n raise UserException(\"unable to detect any MSVC compiler\")\n\n if vs_major == '14':\n is_32 = (os.environ.get('Platform') != 'X64')\n elif vs_major in ('15', '16'):\n is_32 = (os.environ.get('VSCMD_ARG_TGT_ARCH') != 'x64')\n else:\n if optional:\n return None\n\n raise UserException(\"MSVC v{0} is unsupported\".format(vs_version))\n\n return '32' if is_32 else '64'",
"def _env_with_python_module_search_path():\n e = os.environ\n module_search_path = os.path.join(vmcheckerpaths.root, 'bin')\n if 'PYTHONPATH' in e.keys():\n module_search_path = os.pathsep.join(\n e['PYTHONPATH'], module_search_path)\n e['PYTHONPATH'] = module_search_path\n return e",
"def _get_python_version():\n with settings(hide('commands', 'warnings'), warn_only=True):\n # First tries to check python within virtualenv\n with prefix(_django_prefix()):\n result = run(GET_PYTHON_VERSION)\n # If that fails, checks global python\n if result.failed:\n result = run(GET_PYTHON_VERSION)\n # if it still fails, something is wrong!\n if result.failed:\n abort(_interpolate('Could not determine Python version at virtualenv %(virtualenv)s'))\n return result",
"def python_build():\n return _sys_version()[4:6]",
"def get_compiler_versions():\n gcc = _find_exe_version('gcc -dumpversion')\n ld = _find_ld_version()\n dllwrap = _find_exe_version('dllwrap --version')\n return gcc, ld, dllwrap",
"def determine_python_path():\n if git_install_requested():\n projects_yaml = config('openstack-origin-git')\n projects_yaml = git_default_repos(projects_yaml)\n return os.path.join(git_pip_venv_dir(projects_yaml),\n 'lib/python2.7/site-packages')\n else:\n return None",
"def _check_python_version(self):\n python_exe = tools.which(\"python\")\n if not python_exe:\n msg = (\"Python must be available in PATH \"\n \"in order to build v8\")\n raise ConanInvalidConfiguration(msg)\n # In any case, check its actual version for compatibility\n from six import StringIO # Python 2 and 3 compatible\n version_buf = StringIO()\n cmd_v = \"{} --version\".format(python_exe)\n self.run(cmd_v, output=version_buf)\n p = re.compile(r'Python (\\d+\\.\\d+\\.\\d+)')\n verstr = p.match(version_buf.getvalue().strip()).group(1)\n if verstr.endswith('+'):\n verstr = verstr[:-1]\n version = tools.Version(verstr)\n # >= 2.7.5 & < 3\n py2_min = \"2.7.5\"\n py2_max = \"3.0.0\"\n py3_min = \"3.8.0\"\n if (version >= py2_min) and (version < py2_max):\n msg = (\"Found valid Python 2 required for v8:\"\n \" version={}, path={}\".format(version_buf.getvalue().strip(), python_exe))\n self.output.success(msg)\n elif version >= py3_min:\n msg = (\"Found valid Python 3 required for v8:\"\n \" version={}, path={}\".format(version_buf.getvalue().strip(), python_exe))\n self.output.success(msg)\n else:\n msg = (\"Found Python in path, but with invalid version {}\"\n \" (v8 requires >= {} and < \"\n \"{} or >= {})\".format(verstr, py2_min, py2_max, py3_min))\n raise ConanInvalidConfiguration(msg)",
"def locate_nuget():\n if NuGetRunner.valid_nuget_executable(\"nuget\"):\n return \"nuget\"\n return None",
"def get_python():\n if sys.platform == 'win32':\n python = path.join(VE_ROOT, 'Scripts', 'python.exe')\n else:\n python = path.join(VE_ROOT, 'bin', 'python')\n return python",
"def validate_environment(path: Path, check: bool = False) -> bool:\n valid = None\n win32 = sys.platform == 'win32'\n validate_venv_path(path=path, check=check)\n\n # Expected structure\n structure = {\n 'bin': 'Scripts' if win32 else 'bin',\n 'include': 'Include' if win32 else 'include',\n 'lib': os.path.join('Lib', 'site-packages') if win32 else os.path.join('lib', '*', 'site-packages'),\n }\n paths = {}\n for identifier, expected_path in structure.items():\n for p in path.glob(expected_path):\n # There should only be one path that matches the glob\n paths[identifier] = p\n break\n for identifier in structure:\n if identifier not in paths:\n valid = False\n if check:\n raise InvalidEnvironmentError(f'Could not find {structure[identifier]} under {path}.')\n\n if valid is not False and win32:\n # TODO: Add more validation for windows environments\n valid = valid is not False and True\n elif valid is not False:\n # check for activation scripts\n activation_scripts = list(paths['bin'].glob('activate.*'))\n valid = valid is not False and len(activation_scripts) > 0\n if check and valid is False:\n raise InvalidEnvironmentError(f'Could not find activation scripts under {path}.')\n\n # check for python binaries\n python_name = paths['lib'].parent.name\n python_ver_match = re.search('(?P<interpreter>python|pypy)\\.?(?P<major>\\d+)(\\.?(?P<minor>\\d+))', python_name) # noqa\n if python_ver_match:\n python_executable = paths['bin'].joinpath('python')\n python_ver_executable = paths['bin'].joinpath(python_name)\n if python_executable.exists():\n valid = valid is not False and True\n if check and valid is False:\n raise InvalidEnvironmentError(f'Could not find python executable under {path}.')\n if python_ver_executable.exists():\n valid = valid is not False and True\n if check and valid is False:\n raise InvalidEnvironmentError(f'Could not find {python_name} executable under {path}.')\n\n return True if valid else False",
"def get_path_arg(self):\n # The bluespec compiler automatically adds build_dir to the front of the path, but bluetcl does not,\n # so we add it manually and get a warning from the bluespec compiler about redundant folders in the path\n return ['-p', ':'.join([self.build_dir] + self.bsv_path + BSVProject.default_paths)]",
"def locate_vcredist_dir(plat):\n from setuptools import msvc\n\n vcvars = msvc.msvc14_get_vc_env(plat)\n try:\n vcruntime = vcvars[\"py_vcruntime_redist\"]\n except KeyError:\n warn(f\"platform={plat}, vcvars=\")\n pprint(vcvars, stream=sys.stderr)\n\n warn(\n \"Failed to get py_vcruntime_redist via vcvars, may need to set it in %PATH%\"\n )\n return None\n redist_dir, dll = os.path.split(vcruntime)\n # add redist dir to $PATH so that it can be found\n os.environ[\"PATH\"] += os.pathsep + redist_dir\n return redist_dir",
"def verify_install(cls, tools: ToolCache, **kwargs) -> WindowsSDK:\n # short circuit since already verified and available\n if hasattr(tools, \"windows_sdk\"):\n return tools.windows_sdk\n\n arch = {\"AMD64\": \"x64\", \"ARM64\": \"arm64\"}.get(tools.host_arch, tools.host_arch)\n\n sdk = None\n for sdk_dir, sdk_version in cls._windows_sdks(tools=tools):\n sdk = WindowsSDK(\n tools=tools,\n root_path=sdk_dir,\n version=sdk_version,\n arch=arch,\n )\n\n if not cls._is_supported_version(sdk):\n sdk = None\n continue\n\n if not cls._verify_signtool(sdk):\n sdk = None\n continue\n\n break\n\n if sdk is None:\n raise BriefcaseCommandError(\n f\"\"\"\\\nUnable to locate a suitable Windows SDK v{cls.SDK_VERSION} installation.\n\nEnsure at least v{cls.SDK_VERSION}.{cls.SDK_MIN_VERSION}.0 is installed and the components below are included:\n{cls.SDK_REQUIRED_COMPONENTS}\nSee https://developer.microsoft.com/en-us/windows/downloads/windows-sdk/ to install the SDK.\n\"\"\"\n )\n\n tools.logger.debug(f\"Using Windows SDK v{sdk.version} at {sdk.root_path}\")\n tools.windows_sdk = sdk\n return sdk",
"def _getNETSDKPath():\r\n try:\r\n dotNETSDK_root_key = win32api.RegOpenKeyEx(win32con.HKEY_LOCAL_MACHINE, 'SOFTWARE\\\\Microsoft\\\\Microsoft SDKs\\\\.NETFramework\\\\v2.0', 0, win32con.KEY_READ)\r\n found = False\r\n i = 0\r\n try:\r\n try:\r\n while not found:\r\n name, obj, ntype = win32api.RegEnumValue(dotNETSDK_root_key, i)\r\n i = i + 1\r\n if name=='InstallationFolder':\r\n return obj\r\n found = True\r\n except:\r\n win32api.RegCloseKey(dotNETSDK_root_key)\r\n return ''\r\n finally:\r\n win32api.RegCloseKey(dotNETSDK_root_key)\r\n except:\r\n return ''",
"def validate_venv_path(path: Path, check: bool = False) -> bool:\n win32 = sys.platform == 'win32'\n standard_struct = {\n 'bin': 'Scripts' if win32 else 'bin',\n 'include': 'Include' if win32 else 'include',\n 'lib': os.path.join('Lib', 'site-packages') if win32 else os.path.join('lib', '*', 'site-packages'),\n }\n standard_struct['python'] = f'{standard_struct[\"bin\"]}/python'\n standard_struct['site-packages'] = f'{standard_struct[\"lib\"]}/*/site-packages'\n valid = False\n if path and path.exists():\n checked = False\n subchecked = False\n for globbed_path in standard_struct.values():\n checked = True\n for resolved_path in path.glob(globbed_path):\n if not resolved_path.exists():\n if check:\n raise InvalidEnvironmentError(f'Could not find {globbed_path} under {path}.')\n\n return valid\n subchecked = True\n valid = checked and subchecked\n if not valid and check:\n raise InvalidEnvironmentError(f'Invalid virtual environment path: {path}.')\n return valid"
] | [
"0.69029933",
"0.61118186",
"0.58593863",
"0.5823036",
"0.5704268",
"0.569332",
"0.5633969",
"0.5570575",
"0.5372743",
"0.5309162",
"0.52959627",
"0.5287438",
"0.5257357",
"0.5171771",
"0.5165725",
"0.5161343",
"0.51455754",
"0.5138159",
"0.51229465",
"0.5083195",
"0.5071728",
"0.50467277",
"0.5041253",
"0.5009276",
"0.50041777",
"0.49896312",
"0.49858302",
"0.49844295",
"0.49841243",
"0.4971062"
] | 0.8004529 | 0 |
Run the dumpbin tool with the specified arguments, capturing and returning stdout. | def run_dumpbin(self, *dumpbin_args):
assert sys.platform in ('win32', 'cygwin')
cmd = os.environ.get('COMSPEC', 'cmd.exe')
arguments = [cmd, '/c', self.vsvars_path, '&&', 'dumpbin']
arguments.extend(dumpbin_args)
proc = subprocess.Popen(arguments, stdout=subprocess.PIPE)
output = proc.communicate()[0]
assert not proc.returncode
return output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_and_capture(*argv):\n print(*argv, file=sys.stderr)\n return subprocess.check_output(argv)",
"def dump(args):\n if args.dump_command == \"trace\":\n _dump_trace(args)\n elif args.dump_command == \"checkpoint\":\n _dump_checkpoint(args)\n elif args.dump_command == \"config\":\n _dump_config(args)\n else:\n raise ValueError()",
"def test_bcftools_cli_dump(self):\n runner = CliRunner()\n result = runner.invoke(cli.main, [\"dump\"])\n assert result.exit_code == 0\n assert os.path.isfile(os.path.join(BASE_DIR, \"hmtnote_dump.pkl\"))",
"def hexdump(args=None):\n args = parser.parse_args(args)\n with LogSetup(args):\n contents = args.file.read()\n args.file.close()\n dump(contents, width=args.width)",
"def main(args):\n # Results: print to console and also write to output file\n pass",
"def __execute(pkgin_bin, cmd, *args):\n dave = open(\"/dev/null\", \"w\")\n # create the command list\n pkgin = [pkgin_bin]\n pkgin.extend(DEFAULT_ARGS)\n pkgin.append(cmd)\n for arg in args:\n pkgin.append(arg)\n # execute pkgin\n popen = Popen(pkgin, stdout=dave, stderr=PIPE)\n # retrieve output streams\n (stdoutdata, stderrdata) = popen.communicate()\n # if pkgin error\n if(stderrdata):\n # remove the line feed\n error = stderrdata[0:-1]\n raise PykginError(error)",
"def command(arguments):\n os.system(\"barrnap --kingdom {} {} > {}\".format(arguments.kingdom, arguments.input, arguments.output))",
"def main():\n args = parse_args(sys.argv[1:])\n try:\n push_script_path = get_push_executable()\n bintray = Bintray(args.bintray_credential, args.bintray_subject, args.bintray_repo, push_script_path, component=args.bintray_component, distribution=args.bintray_distribution, architecture=args.bintray_architecture)\n\n return_dict_detail = upload_debs(args.build_directory, args.debian_depth, bintray)\n for key, value in return_dict_detail.items():\n print \"{key}: {value}\".format(key=key, value=value)\n except Exception, e:\n print e\n sys.exit(1)",
"def run(*argv):\n print(*argv, file=sys.stderr)\n subprocess.check_call(argv, stdout=sys.stderr)",
"def test_dump_calls_pg_dump(mocker):\n\tmocker.patch('subprocess.Popen')\n\tassert pgdump.dump(url)\n\tsubprocess.Popen.assert_called_with(['pg_dump', url], stdout=subprocess.PIPE)",
"def runpretty(args):\n proc = subprocess.Popen(\n args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n\n if proc.wait() == 1:\n print(proc.stdout.read().decode())\n die(proc.stderr.read().decode())\n\n std_output = proc.stdout.read()\n print(std_output.decode())\n return std_output",
"def run(*args, **kwargs):\n kwargs[\"check\"] = True\n print(\"+\", \" \".join(args[0]))\n return subprocess.run(*args, **kwargs)",
"def main(args):\n # Extract keys\n logger.info(\"Extracting brass bedpe file key from tarfile...\")\n bedpe, bedpe_index = extract_tar_keys(args.results_archive)\n # process bedpe\n logger.info(\"Processing brass bedpe {0}...\".format(bedpe))\n process_bedpe(args.results_archive, bedpe, bedpe_index, args.output_prefix)",
"def _pump_output(*args):\n from subprocess import STDOUT\n from subprocess import check_output\n\n args_list = [\"openaps\", \"use\", \"pump\"]\n args_list.extend(args)\n\n return check_output(args_list, stderr=STDOUT)",
"def run(self, stdout=None, stderr=None):",
"def main():\n\tparser = setup_argument_parser()\n\targuments = parser.parse_args()\n\tto_print = arguments.to_print\n\techo(to_print)",
"def test_dump_call_pgdump(mocker):\n mocker.patch('subprocess.Popen')\n assert pgdump.dump(url)\n subprocess.Popen.assert_called_with(['pg_dump' , url] , stdout=subprocess.PIPE)",
"def _run_cmd(args, cwd):\n p = subprocess.Popen(\n args, stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, cwd=cwd)\n streams = tuple(s.decode('latin1').strip() for s in p.communicate())\n for stream_content in streams:\n print(stream_content)\n return (streams) + (p.returncode,)",
"def cmdline_main():\r\n import sys\r\n if (len(sys.argv) < 2 or len(sys.argv) > 4 or \"--help\" in sys.argv or\r\n \"-h\" in sys.argv or sys.argv[1] not in (\"-c\", \"-d\")):\r\n print(\"Usage: python -m snappy <-c/-d> [src [dst]]\")\r\n print(\" -c compress\")\r\n print(\" -d decompress\")\r\n print(\"output is stdout if dst is omitted or '-'\")\r\n print(\"input is stdin if src and dst are omitted or src is '-'.\")\r\n sys.exit(1)\r\n\r\n if len(sys.argv) >= 4 and sys.argv[3] != \"-\":\r\n dst = open(sys.argv[3], \"wb\")\r\n elif hasattr(sys.stdout, 'buffer'):\r\n dst = sys.stdout.buffer\r\n else:\r\n dst = sys.stdout\r\n\r\n if len(sys.argv) >= 3 and sys.argv[2] != \"-\":\r\n src = open(sys.argv[2], \"rb\")\r\n elif hasattr(sys.stdin, \"buffer\"):\r\n src = sys.stdin.buffer\r\n else:\r\n src = sys.stdin\r\n\r\n if sys.argv[1] == \"-c\":\r\n method = stream_compress\r\n else:\r\n method = stream_decompress\r\n\r\n method(src, dst)",
"def _run(*args):\n return subprocess.run(\n args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True,\n universal_newlines=True)",
"def run_rtmpdump(info, output, extra_arg=\"\"):\n args = [\n \"rtmpdump\",\n \"--quiet\",\n \"--live\",\n extra_arg,\n \"--rtmp\", \"rtmp://\" + info[2] + \"/live-edge\",\n \"--pageUrl\", \"http://chaturbate.com/\" + info[1],\n \"--conn\", \"S:\" + info[8],\n \"--conn\", \"S:\" + info[1],\n \"--conn\", \"S:2.645\",\n \"--conn\", \"S:\" + urllib.unquote(info[15]),\n \"--token\", \"m9z#$dO0qe34Rxe@sMYxx\",\n \"--playpath\", \"playpath\",\n \"--flv\", output\n ]\n\n return subprocess.Popen(args)",
"def main(argv):\n\n\n parser = argparse.ArgumentParser(description='convert der to raw')\n parser.add_argument('-s','--secretkey_file', help='Secret key', required=True)\n parser.add_argument('-p','--publickey_file', help='Public key', required=True)\n args = parser.parse_args()\n\n secretkey_file = args.secretkey_file\n publickey_file = args.publickey_file\n\n\n privkey = SigningKey.from_der(open(secretkey_file).read())\n pubkey = VerifyingKey.from_der(open(publickey_file).read())\n\n open(secretkey_file[0:-4] + \".bin\", \"wb\").write(privkey.to_string())\n open(publickey_file[0:-4] + \".bin\", \"wb\").write(pubkey.to_string())",
"def __format_run(arg):\n cp = subprocess.run([BIN_FFPROBE, arg, \"-v\", \"quiet\", \"-hide_banner\"],\n check=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n return cp.stdout.decode('utf-8')",
"def call_prog(args):\n # Just dump the entirety of the command so that\n # the user can specify whatever arguments they want\n call(args)",
"def execute_tool(description, *args):\n command_line = list(args) + files_and_directories\n click.echo(f\"{description}: {' '.join(command_line)}\")\n rv = call(command_line)\n if rv != 0:\n exit(rv)",
"def test_command_dump_woz1(capsys):\n wozardry.parse_args([\"dump\", kValid1])\n captured = capsys.readouterr()\n assert \"INFO: File format version: 1\" in captured.out\n assert \"INFO: Disk type: 5.25-inch (140K)\" in captured.out\n assert \"INFO: Write protected: no\" in captured.out\n assert \"INFO: Tracks synchronized: no\" in captured.out\n assert \"INFO: Weakbits cleaned: no\" in captured.out\n assert \"INFO: Creator: wozardry\" in captured.out",
"def cli(args): # noqa; pylint: disable=unused-argument",
"def run_blast(inputfile, input_type, outputfile, database, args=None, verbose=True):\n\n assert (input_type in ['protein', 'dna']), \"Input type must be either 'protein' or 'dna'\"\n\n cmd = ['diamond']\n\n if input_type == 'protein':\n cmd += ['blastp']\n elif input_type == 'dna':\n cmd += ['blastx']\n\n cmd += ['-d', database]\n cmd += ['-q', inputfile]\n cmd += ['-o', outputfile]\n\n if not args:\n args = \"--more-sensitive --top 10 --quiet\"\n\n cmd += args.split()\n\n if verbose:\n print(' '.join(cmd))\n\n with open(os.devnull, 'w') as devnull:\n try:\n exit_code = call(cmd, stdout=devnull)\n except OSError:\n exit_code = None\n\n return exit_code",
"def run_tool(args, quiet=False):\n pipe = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n result = \"\"\n for line in iter(pipe.stdout.readline, \"\"):\n if not line and pipe.poll() is not None:\n break\n output = line.decode(encoding='UTF-8').rstrip()\n if output != \"\":\n if not quiet:\n print(\"\\t * \" + output)\n result = output\n return result",
"def Run(name, *args, **kwargs):\n try:\n binary = kwargs.get('binary')\n env = None\n if tool_search_paths:\n env = dict(os.environ)\n env['PATH'] = ':'.join(tool_search_paths) + ':' + env['PATH']\n all_args = (name,) + args\n result = command.RunPipe([all_args], capture=True, capture_stderr=True,\n env=env, raise_on_error=False, binary=binary)\n if result.return_code:\n raise Exception(\"Error %d running '%s': %s\" %\n (result.return_code,' '.join(all_args),\n result.stderr))\n return result.stdout\n except:\n if env and not PathHasFile(env['PATH'], name):\n msg = \"Please install tool '%s'\" % name\n package = packages.get(name)\n if package:\n msg += \" (e.g. from package '%s')\" % package\n raise ValueError(msg)\n raise"
] | [
"0.6620855",
"0.64421326",
"0.6403631",
"0.6061605",
"0.60324913",
"0.599794",
"0.599789",
"0.59293294",
"0.59045",
"0.5888841",
"0.5851133",
"0.57390636",
"0.57296616",
"0.5724511",
"0.5675946",
"0.56678206",
"0.5652054",
"0.5632921",
"0.5618824",
"0.5589044",
"0.5553226",
"0.5512627",
"0.54453015",
"0.53970045",
"0.5344457",
"0.53435",
"0.53424895",
"0.5323669",
"0.532356",
"0.5305376"
] | 0.7729673 | 0 |
Returns an appropriate TestGyp instance for a specified GYP format. | def TestGyp(*args, **kw):
format = kw.pop('format', os.environ.get('TESTGYP_FORMAT'))
if format != 'ninja':
raise Exception("unknown format %r" % format)
return TestGypNinja(*args, **kw) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_driver(browser_name):\n if browser_name == BaseConstants.CHROME:\n options = webdriver.ChromeOptions()\n options.add_argument('headless')\n if BaseConstants.HEADLESS_MODE:\n return webdriver.Chrome(options=options)\n else:\n return webdriver.Chrome()\n elif browser_name == BaseConstants.FIREFOX:\n options = Options()\n options.add_argument('--headless')\n if BaseConstants.HEADLESS_MODE:\n return webdriver.Firefox(options=options)\n else:\n return webdriver.Firefox()\n else:\n raise ValueError(f\"Unknown browser name: {browser_name}\")",
"def _get_backend(args):\n if args.backend == 'gatttool':\n backend = GatttoolBackend\n elif args.backend == 'bluepy':\n backend = BluepyBackend\n elif args.backend == 'pygatt':\n backend = PygattBackend\n else:\n raise Exception('unknown backend: {}'.format(args.backend))\n return backend",
"def test_parser_init_with_valid_project_type(parser):\n parser.parse_args(['--init', 'java'])",
"def __init__(self, gyp_target, gn_target=None):\n if gn_target is None:\n gn_target = gyp_target\n self._gyp_target = gyp_target\n self._gn_target = gn_target\n\n self._skipped = []\n\n self._total_diffs = 0\n\n self._missing_gyp_flags = {}\n self._missing_gn_flags = {}\n\n self._missing_gyp_files = {}\n self._missing_gn_files = {}\n\n self._CompareFiles()",
"def run_gyp(self, gyp_file, *args, **kw):\n\n # When running gyp, and comparing its output we use a comparitor\n # that ignores the line numbers that gyp logs in its debug output.\n if kw.pop('ignore_line_numbers', False):\n kw.setdefault('match', match_modulo_line_numbers)\n\n # TODO: --depth=. works around Chromium-specific tree climbing.\n depth = kw.pop('depth', '.')\n run_args = ['--depth='+depth]\n run_args.append(gyp_file)\n if self.no_parallel:\n run_args += ['--no-parallel']\n # TODO: if extra_args contains a '--build' flag\n # we really want that to only apply to the last format (self.format).\n run_args.extend(self.extra_args)\n # Default xcode_ninja_target_pattern to ^.*$ to fix xcode-ninja tests\n xcode_ninja_target_pattern = kw.pop('xcode_ninja_target_pattern', '.*')\n run_args.extend(\n ['-G', 'xcode_ninja_target_pattern=%s' % xcode_ninja_target_pattern])\n run_args.extend(args)\n return self.run(program=self.gyp, arguments=run_args, **kw)",
"def test_utils_get_backend_instance(options, expected):\n\n class DummyBackendSettings(InstantiableSettingsItem):\n \"\"\"Represents a dummy backend setting.\"\"\"\n\n foo: str = \"foo\" # pylint: disable=disallowed-name\n\n def get_instance(self, **init_parameters): # pylint: disable=no-self-use\n \"\"\"Returns the init_parameters.\"\"\"\n return init_parameters\n\n class TestBackendType(BaseModel):\n \"\"\"A backend type including the DummyBackendSettings.\"\"\"\n\n DUMMY: DummyBackendSettings = DummyBackendSettings()\n\n backend_instance = ralph_utils.get_backend_instance(\n TestBackendType(), \"dummy\", options\n )\n assert isinstance(backend_instance, dict)\n assert backend_instance == expected",
"def build(self, gyp_file, target=None, **kw):\n raise NotImplementedError",
"def test_grammar(self):\n r1 = t.Rule(\"foo\", t.Exactly(\"x\"))\n r2 = t.Rule(\"baz\", t.Exactly(\"y\"))\n x = t.Grammar(\"BuilderTest\", False, [r1, r2])\n self.assertEqual(\n writePython(x),\n dd(\"\"\"\n def createParserClass(GrammarBase, ruleGlobals):\n if ruleGlobals is None:\n ruleGlobals = {}\n class BuilderTest(GrammarBase):\n def rule_foo(self):\n _locals = {'self': self}\n self.locals['foo'] = _locals\n _G_exactly_1, lastError = self.exactly('x')\n self.considerError(lastError, 'foo')\n return (_G_exactly_1, self.currentError)\n\n\n def rule_baz(self):\n _locals = {'self': self}\n self.locals['baz'] = _locals\n _G_exactly_2, lastError = self.exactly('y')\n self.considerError(lastError, 'baz')\n return (_G_exactly_2, self.currentError)\n\n\n if BuilderTest.globals is not None:\n BuilderTest.globals = BuilderTest.globals.copy()\n BuilderTest.globals.update(ruleGlobals)\n else:\n BuilderTest.globals = ruleGlobals\n return BuilderTest\n \"\"\"))",
"def create_driver(browser_name):\n if browser_name == BaseConstants.CHROME:\n options = webdriver.ChromeOptions()\n options.add_argument('headless')\n return webdriver.Chrome(executable_path=BaseConstants.CHROME_DRIVER_PATH, options=options)\n if browser_name == BaseConstants.FIREFOX:\n options = webdriver.FirefoxOptions()\n options.add_argument('--headless')\n return webdriver.Firefox(executable_path=BaseConstants.FIREFOX_DRIVER_PATH, options=options)\n else:\n raise ValueError(f\"Unknown browser name:{browser_name}\")",
"def distributor():\n if 'goma' in gyp_defines():\n return 'goma'",
"def testpackage(tmpdir, version='0.1'):\n\n return create_testpackage(tmpdir, version=version)",
"def get_builder_project():\n if config.use_shaman is True:\n builder_class = ShamanProject\n else:\n builder_class = GitbuilderProject\n return builder_class",
"def test_validate_gpy_models():\n with pytest.raises(ValueError):\n validate_gpy_model([\"m\"])",
"def _instantiate_backend_from_name(name, options):\r\n # Parse backend name\r\n\r\n try:\r\n parts = name.split('.')\r\n module_name = '.'.join(parts[:-1])\r\n class_name = parts[-1]\r\n except IndexError:\r\n raise ValueError('Invalid event track backend %s' % name)\r\n\r\n # Get and verify the backend class\r\n\r\n try:\r\n module = import_module(module_name)\r\n cls = getattr(module, class_name)\r\n if not inspect.isclass(cls) or not issubclass(cls, BaseBackend):\r\n raise TypeError\r\n except (ValueError, AttributeError, TypeError, ImportError):\r\n raise ValueError('Cannot find event track backend %s' % name)\r\n\r\n backend = cls(**options)\r\n\r\n return backend",
"def createInstance():\n\n graphTypeEnvVariable = os.getenv('GRAPH_TYPE')\n graphTypeKey = graphTypeEnvVariable if graphTypeEnvVariable is not None else 'networkx' # Default to networkx\n graphType = GraphFactory.typeMap[str(graphTypeKey)]\n\n return graphType()",
"def get_driver(browser):\n\n # Browser name aliases\n chrome = ('chrome', 'google', 'google chrome', 'googlechrome', 'google-chrome', 'google_chrome')\n firefox = ('firefox', 'ff', 'mozilla', 'gecko', 'geckodriver', 'fire fox', 'fire_fox', 'fire-fox')\n opera = ('opera', 'opera gx', 'operagx', 'opera_gx', 'opera-gx')\n explorer = ('explorer', 'ie', 'internet explorer', 'internet-explorer', 'internet_explorer')\n edge = ('edge', 'microsoft edge', 'microsoft_edge', 'microsoft-edge')\n\n # Download browser binaries according to settings.json\n if browser.lower() in chrome:\n return webdriver.Chrome(ChromeDriverManager().install())\n\n elif browser.lower() in firefox:\n return webdriver.Firefox(executable_path=GeckoDriverManager().install())\n\n elif browser.lower() in opera:\n return webdriver.Opera(OperaDriverManager().install())\n\n elif browser.lower() in explorer:\n return webdriver.Ie(IEDriverManager().install())\n\n elif browser.lower() in edge:\n return webdriver.Edge(executable_path=EdgeChromiumDriverManager().install())\n\n else:\n raise RuntimeError('Browser not found {}'.format(browser.lower()))",
"def make_package(tmp_path, pyproject_toml):\n return make_package_base(tmp_path, pyproject_toml)",
"def make_graph_from_spec(graphtype, args):\n parsed = parse_graph_argument(graphtype, args)\n assert parsed['graphtype'] == graphtype\n return obtain_graph(parsed)",
"def GetTestSuiteFromVariant(variant):\n suite_name = variant.get('test_suite', 'default_suite')\n gpu = variant.get('gpu')\n os_dimension = variant.get('os')\n gpu = ConvertGpuToVendorName(gpu)\n return '%s on %s on %s' % (suite_name, gpu, os_dimension)",
"def multi_backend_test(globals_dict,\n relative_module_name,\n backends=('jax', 'tensorflow', 'numpy'),\n test_case=None):\n if test_case is None:\n return lambda test_case: multi_backend_test( # pylint: disable=g-long-lambda\n globals_dict=globals_dict,\n relative_module_name=relative_module_name,\n test_case=test_case)\n\n if BACKEND is not None:\n return test_case\n\n if relative_module_name == '__main__':\n raise ValueError(\n 'module_name should be written out manually, not by passing __name__.')\n\n # This assumes `test_util` is 2 levels deep inside of `inference_gym`. If we\n # move it, we'd change the `-2` to equal the (negative) nesting level.\n root_name_comps = __name__.split('.')[:-2]\n relative_module_name_comps = relative_module_name.split('.')\n\n # Register the rewrite hooks.\n importlib.import_module('.'.join(root_name_comps + ['backends', 'rewrite']))\n\n new_test_case_names = []\n for backend in backends:\n new_module_name_comps = (\n root_name_comps + ['dynamic', 'backend_{}'.format(backend)] +\n relative_module_name_comps)\n # Rewrite the module.\n new_module = importlib.import_module('.'.join(new_module_name_comps))\n\n # Subclass the test case so that we can rename it (absl uses the class name\n # in its UI).\n base_new_test = getattr(new_module, test_case.__name__)\n new_test = type('{}_{}'.format(test_case.__name__, backend),\n (base_new_test,), {})\n new_test_case_names.append(new_test.__name__)\n globals_dict[new_test.__name__] = new_test\n\n # We deliberately return None to delete the original test case from the\n # original module.",
"def test_by_name(name):\n build()\n sh(\"%s -m unittest -v %s\" % (PYTHON, name))",
"def create_options(test_args) -> testutils.Optional[Options]:\n options = Options()\n options.p4_file = Path(testutils.check_if_file(test_args.p4_file))\n testfile = test_args.testfile\n if not testfile:\n testutils.log.info(\"No test file provided. Checking for file in folder.\")\n testfile = options.p4_file.with_suffix(\".py\")\n result = testutils.check_if_file(testfile)\n if not result:\n return None\n options.testfile = Path(result)\n testdir = test_args.testdir\n if not testdir:\n testutils.log.info(\"No test directory provided. Generating temporary folder.\")\n testdir = tempfile.mkdtemp(dir=Path(\".\").absolute())\n # Generous permissions because the program is usually edited by sudo.\n os.chmod(testdir, 0o755)\n options.testdir = Path(testdir)\n options.rootdir = Path(test_args.rootdir)\n options.num_ifaces = args.num_ifaces\n\n try:\n import nnpy # pylint: disable=W0611,C0415\n\n assert nnpy\n options.use_nn = args.use_nn\n except ImportError:\n testutils.log.error(\"nnpy is not available on this system. Falling back to veth testing.\")\n options.use_nn = False\n\n # Configure logging.\n logging.basicConfig(\n filename=options.testdir.joinpath(\"test.log\"),\n format=\"%(levelname)s: %(message)s\",\n level=getattr(logging, test_args.log_level),\n filemode=\"w\",\n )\n stderr_log = logging.StreamHandler()\n stderr_log.setFormatter(logging.Formatter(\"%(levelname)s: %(message)s\"))\n logging.getLogger().addHandler(stderr_log)\n return options",
"def test_generate_project_maximum_with_tvm(self) -> None:\n output_path = os.path.join(os.getcwd(), 'tmp')\n input_path = os.path.abspath(\n os.path.join(os.getcwd(),\n 'examples',\n 'classification',\n # 'lmnet_quantize_cifar10_stride_2.20180523.3x3',\n 'minimal_graph_with_shape.pb'))\n\n try:\n gp.run(input_path=input_path,\n dest_dir_path=output_path,\n project_name='unittest4',\n activate_hard_quantization=True,\n threshold_skipping=True,\n num_pe=16,\n use_tvm=True,\n use_onnx=False,\n debug=False,\n cache_dma=False,\n )\n finally:\n if os.path.exists(output_path):\n shutil.rmtree(output_path)\n\n print(\"Script test with maximum options including TVM passed!\")",
"def get_exporter(format, exporters):\n\n if format in exporters:\n return exporters[format]\n if format == 'sql':\n return export_sql\n elif format == 'json':\n return export_json\n\n raise RuntimeError('Unsupported format: %s' % format)",
"def get_first_available_parser():\n if sys.platform == 'cli':\n try:\n from bridge.parser.bridge_dotnet import Parser\n return Parser\n except ImportError:\n pass\n elif sys.platform[:4] == 'java':\n try:\n from bridge.parser.bridge_java import Parser\n return Parser\n except ImportError:\n pass\n \n from bridge.parser.bridge_default import Parser\n \n return Parser",
"def from_env(cls):\n return cls(config['data']['ext'])",
"def _create_pyproject_toml(\n self,\n package_name: str,\n ) -> str:\n return f\"\"\"\n [tool.pytest.ini_options]\n DJANGO_SETTINGS_MODULE = \"reviewboard.settings\"\n django_debug_mode = false\n\n python_files = [\"tests.py\", \"test_*.py\"]\n python_classes = [\"*Tests\"]\n python_functions = [\"test_*\"]\n pythonpath = \".\"\n testpaths = [\"{package_name}\"]\n\n env = [\n \"RB_RUNNING_TESTS=1\",\n \"RBSSH_STORAGE_BACKEND=reviewboard.ssh.storage.FileSSHStorage\",\n ]\n\n addopts = [\"--reuse-db\"]\n\n required_plugins = [\n \"pytest-django\",\n \"pytest-env\",\n ]\n \"\"\"",
"def get_suite(arn=None):\n pass",
"def get_pytest():\n return path.join(TaskCreator.bin_dir, \"py.test\")",
"def GetBuildFormat(self):\n # The comma means that ninja and qtcreator_ninja will be chained and use the\n # same input information so that .gyp files will only have to be parsed\n # once.\n return 'ninja,qtcreator_ninja'"
] | [
"0.48283198",
"0.4679778",
"0.46405205",
"0.46262017",
"0.4596164",
"0.4575837",
"0.45242107",
"0.45190325",
"0.449273",
"0.44799712",
"0.44797117",
"0.44374356",
"0.4432995",
"0.43943155",
"0.4385032",
"0.43449232",
"0.43261662",
"0.43182468",
"0.43172392",
"0.4313307",
"0.42956066",
"0.4294165",
"0.42862388",
"0.4283551",
"0.42820632",
"0.42817745",
"0.42783204",
"0.4277896",
"0.4262459",
"0.4254574"
] | 0.7641839 | 0 |
Read data from a file and return the data as an RDD | def read_data(file_path, sparkContext):
data_rdd = sparkContext \
.textFile(file_path) \
.map(eval) \
.map(lambda x: (x[0], x[1]))
return data_rdd | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_file(file_name):\r\n f = open(file_name)\r\n\r\n tids = f.readlines()\r\n \r\n dataset = [(int(tid), get_from_id(int(tid))) for tid in tids]\r\n\r\n f.close()\r\n return dataset",
"def open_file(path):\n input_file = os.path.join(path)\n with open(input_file) as f:\n dataset = f.read()\n return dataset",
"def load_data():\n with open('../data/dataset.txt', 'r') as data_file:\n return data_file.read().split('\\n')",
"def load_data(file_to_read):\n\n data = np.recfromtxt(file_to_read)\n data = np.asarray(data)\n\n return data",
"def get_train(self, data_file):\r\n return self.read_data(data_file)",
"def read_data(filename):\n # read in triples of itemID/userID/playcount from the input dataset\n data = pandas.read_table(filename,\n usecols=[0, 1, 2],\n header=0,\n delimiter=',')\n\n # map each userID and itemID to a unique numeric value\n data['userID'] = data['userID'].astype(\"category\")\n data['itemID'] = data['itemID'].astype(\"category\")\n\n # create a sparse matrix of all the itemIDs/rating\n rating = coo_matrix((data['rating'].astype(float),\n (data['userID'].cat.codes.copy(),\n data['itemID'].cat.codes.copy())))\n rating = rating.tocsr()\n print(rating)\n data = data.head(10) # FOR TESTING PURPOSE ONLY\n return data, rating",
"def read_data(self, file_path):\n raise NotImplementedError('should be overridden with specific data reader')",
"def read(self, filename):\n st_tree = parse(filename)\n datasets = []\n id_ = 1\n st_datasets = st_tree.findall('weight')\n for st_dataset in st_datasets:\n date = util.str2date(st_dataset.find('date').text[0:10])\n weight = round(float(st_dataset.find('value').text), 1)\n note = st_dataset.find('comment').text\n datasets.append(Dataset(id_, date, weight, note=note))\n id_ += 1\n return datasets",
"def load_dataset(filepath):\n \n X = list()\n x = list()\n\n Y = list()\n y = list()\n \n for line in open(filepath):\n # blank lines separate sequences\n if len(line) <= 1:\n X.append(x)\n Y.append(y)\n\n x = list()\n y = list()\n else:\n a, b = line.strip().split('\\t')\n x.append(a)\n y.append(b)\n \n return X, Y",
"def readData(self,datafile = None):\n self.datafile = datafile or self.datafile\n self.data = []\n for line in open(self.datafile):\n userid,itemid,record,_ = line.split()\n self.data.append((userid,itemid,int(record)))",
"def readfile(self, path, filename):\n # The DataStudio software uses ISO-8859-1 encoding (especially for the degree sign in temperature files)\n file = open(path + filename, encoding=\"iso-8859-1\")\n rowlist = file.readlines()\n\n title = rowlist[0].strip(\"\\n\")\n labels = rowlist[1].strip(\"\\n\").split(sep=\"\\t\")\n\n data = np.zeros((len(rowlist)-2, 2))\n\n for i in range(2, len(rowlist)):\n columns = rowlist[i].split(sep=\"\\t\")\n data[i-2, 0] = float(columns[0].replace(\",\", \".\"))\n data[i-2, 1] = float(columns[1].replace(\",\", \".\"))\n\n return data, title, labels",
"def get_train(self, data_file):\n return self.read_data(data_file)",
"def read_data(filename):\n # read in triples of user/artist/playcount from the input dataset\n\n\n # map each artist and user to a unique numeric value\n data['user'] = data['user'].astype(\"category\")\n data['artist'] = data['artist'].astype(\"category\")\n\n # create a sparse matrix of all the users/plays\n plays = coo_matrix((data['plays'].astype(float),\n (data['artist'].cat.codes.copy(),\n data['user'].cat.codes.copy())))\n\n return data, plays",
"def readData(path): \n try:\n open(path)\n dataset = np.loadtxt(path)\n # arms played by uniformly-random policy as recorded in dataset\n arms = dataset[:, 0].astype(int) \n # rewards received by playing arms using a uniformly-random policy as \n # recorded in dataset \n rewards = dataset[:, 1] \n # context vector \n contexts = dataset[:, 2:] \n except FileNotFoundError: \n raise \n return(arms, rewards, contexts)",
"def load_data(self, filename):\r\n #sqlcontext = SQLContext(self.sc)\r\n #df = sqlcontext.read.format('com.databricks.spark.csv').options(header='false', inferschema='true').load(filename)\r\n #df = sc.textFile(r\"C:\\Users\\mohan\\Downloads\\patches.csv\").map(lambda line: line.split(\",\"))\r\n #print (df.count())\r\n df = self.sc.textFile(filename).map(lambda line: line.split(\",\"))\r\n l = df.map(lambda w: [int(float(c)) for c in w]).zipWithIndex()\r\n return l\r\n raise NotImplementedError",
"def load_training_data(file_path):\n return load_data(file_path)",
"def load_file(filename):\n f_data = []\n # open the data-set file\n file = open(filename, \"r\")\n for line in file:\n row = line.strip() # a row in the file\n f_data.append(row) # append it to the 2D array\n\n return f_data",
"def read_data(data_path):\n tr = data_path + 'train_vectors.txt'\n v = data_path + 'val_vectors.txt'\n tst = data_path + 'test_vectors.txt'\n return tr, v, tst",
"def read_data(feature_file, label_file):",
"def process(filename):\n # Load the data file into an RDD\n rdd = sc.textFile(filename)\n \n rdd = build_collinear_set(rdd)\n \n # Collecting the collinear points RDD in a set to remove duplicate sets of collinear points. This is for grading purposes. You may ignore this.\n res = set(rdd.collect())\n \n return res",
"def kml_extract_RDD(xml_file):\n soup = BeautifulSoup(xml_file, \"lxml-xml\")\n return get_kml_content(soup)",
"def load_dataset(filename):\n return [(\n lambda point: {\n 'coordinate': tuple(map(float, point[:-1])),\n 'label': int(point[-1])})\n (string.strip().rstrip().split(','))\n for string in open(filename, 'r').read()\n .strip().rstrip().split('\\n')]",
"def load_data(path):\n with open(path) as f:\n return f.readlines()",
"def read_data(filename):\n with open(filename, 'r') as f:\n return f.read().split()",
"def import_dataset(fpath):\r\n data = read_csv(fpath)\r\n print(data.head())\r\n print(data.shape)\r\n return data",
"def read_file(self):\n colspecs = [[0, 7]] # for the id\n names = ['id']\n for question in self.question_list:\n colspecs.extend(question.get_column_range())\n names.extend(question.get_column_names())\n\n self.data = pd.read_fwf(self.file, colspecs=colspecs, encoding=self.encoding, names=names, header=None)\n self.data.fillna(0, inplace=True)\n self.data = self.data.astype(int)\n return self.data",
"def read_from(self, filename):\n\n lon, lat, field, weight = [], [], [], []\n\n if os.path.exists(filename):\n logger.info(\"Reading data from file {0}\".format(filename))\n with open(filename, 'r') as f:\n line = f.readline()\n ncols = len(line.split())\n while ncols >= 3:\n lon.append(float(line.split()[0]))\n lat.append(float(line.split()[1]))\n field.append(float(line.split()[2]))\n if ncols >= 4:\n weight.append(float(line.split()[3]))\n else:\n weight.append(1.)\n line = f.readline()\n ncols = len(line.split())\n\n self.x = np.array(lon)\n self.y = np.array(lat)\n self.field = np.array(field)\n self.weight = np.array(weight)\n return self\n else:\n logger.error(\"File {0} does not exist\".format(filename))\n raise FileNotFoundError('File does not exist')",
"def _read_data(filename):\n file = open(filename, \"r\")\n timestamps = []\n edges = []\n for line in file:\n # source target weight timestamp\n if line.startswith(\"%\"):\n continue\n spl = line.split()\n if len(spl) == 4:\n # store that stuff in triples (source, target, weight, timestamp)\n edges.append((int(spl[0]), int(spl[1]), int(spl[2]), int(spl[3])))\n timestamps.append(int(spl[3]))\n return edges, sorted(timestamps)",
"def load(self, file):\n with open(file) as file:\n self.dataset = [line.strip() for line in file]\n\n return self.dataset",
"def Load_File(filename):\n with open(filename) as file:\n data = file.readlines()\n return data"
] | [
"0.64890283",
"0.64516765",
"0.6394799",
"0.6333202",
"0.6283155",
"0.6235973",
"0.6203842",
"0.61601675",
"0.615744",
"0.6122503",
"0.6106315",
"0.60988206",
"0.60945296",
"0.6094214",
"0.6093703",
"0.6081706",
"0.60803366",
"0.60783815",
"0.6070833",
"0.60573804",
"0.60554594",
"0.60514736",
"0.60512817",
"0.6035557",
"0.60353595",
"0.6018432",
"0.60014725",
"0.5978904",
"0.5974188",
"0.59661335"
] | 0.8014558 | 0 |
Swap the elements of a pair tuple. | def swap((u, v)):
return (v, u) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def swap(t, i, j):\n t[i], t[j] = t[j], t[i]",
"def swap(arr, first, second):\n arr[first], arr[second] = arr[second], arr[first]",
"def __fix_tuple(self, xy_tup):\n if self.__swapxy:\n return xy_tup[::-1]\n return xy_tup",
"def swap(array, x, y):\n array[x], array[y] = array[y], array[x]",
"def unzip(pairs):\n return tuple(zip(*pairs))",
"def _swap(self, i, j):\n self._data[i], self._data[j] = self._data[j], self._data[i]",
"def _swap(self, i, j):\n self._data[i], self._data[j] = self._data[j], self._data[i]",
"def swap(arr, i, j):\n arr[i], arr[j] = arr[j], arr[i]",
"def swap_elements(i: int, j: int, arr: List[int]) -> None:\n arr[i], arr[j] = arr[j], arr[i]",
"def swap_cells(state, i1, j1, i2, j2):\n value1 = state[i1][j1]\n value2 = state[i2][j2]\n \n new_state = []\n for row in range(len(state)): \n new_row = []\n for column in range(len(state[row])): \n if row == i1 and column == j1: \n new_row.append(value2)\n elif row == i2 and column == j2:\n new_row.append(value1)\n else: \n new_row.append(state[row][column])\n new_state.append(tuple(new_row))\n return tuple(new_state)",
"def swap(x, i, j):\n if not isinstance(x, type([1, 2])):\n raise TypeError(\"Este método solo se puede hacer con listas\")\n x[i], x[j] = x[j], x[i]",
"def swap(permutation, transposition, remaining=[]):\n i, j = transposition\n nb_positions = len(permutation)\n res = np.array(permutation)\n\n if j < nb_positions:\n res[i], res[j] = res[j], res[i]\n else:\n res[i] = remaining[j-nb_positions]\n\n return tuple(res)",
"def swap_tile(grid: tuple[int, ...], move: int) -> tuple[int, ...]:\n tile_to_swap: int = grid.index(0) + move\n value_to_swap: int = grid[tile_to_swap]\n\n mutable_grid: list[int] = list(grid)\n mutable_grid[grid.index(0)] = value_to_swap\n mutable_grid[tile_to_swap] = 0\n swapped_grid = tuple(mutable_grid)\n\n return swapped_grid",
"def vertex_swap(d, n, l, i1, i2, j1, j2):\n if i1 == i2 and j1 == j2:\n return l\n if i1 == j1:\n # (i1,i1) -> (i2,i2)\n assert i2 == j2\n def swap(v):\n swap2(d, n, v, i1, i2)\n elif i1 == i2:\n # (i,j1) -> (i,j2)\n def swap(v):\n swap2(d, n, v, j1, j2)\n elif j1 == j2:\n # (i1,j) -> (i2,j)\n def swap(v):\n swap2(d, n, v, i1, i2)\n elif i1 == j2 and i2 == j1:\n # (i1,j1) -> (j1,i1)\n def swap(v):\n swap2(d, n, v, i1, j1)\n elif i1 == j2:\n # (i1,j1) -> (i2,i1)\n def swap(v):\n swap3(d, n, v, j1, i1, i2)\n elif i2 == j1:\n # (i1,j1) -> (j1,j2)\n def swap(v):\n swap3(d, n, v, i1, j1, j2)\n else:\n # (i1,j1) -> (i2,j2)\n def swap(v):\n swap2(d, n, v, i1, i2)\n swap2(d, n, v, j1, j2)\n ll = []\n for v in l:\n v = v.__copy__()\n swap(v)\n v.set_immutable()\n ll.append(v)\n ll.sort()\n return tuple(ll)",
"def swap(values: list, i = int, j = int) -> None:\n \n temp: int = values[i]\n values[i] = values[j]\n values[j] = temp",
"def _swap(mylist, a, b):\n temp = mylist[a]\n mylist[a] = mylist[b]\n mylist[b] = temp",
"def swap(a,b):\n temp = a\n a = b\n b = temp\n return(a,b)",
"def swap(self, Items, First, Second):\n temp = Items[First]\n Items[First] = Items[Second]\n Items[Second] = temp",
"def swapPairs(self, head):\r\n if not head or not head.next:\r\n return head\r\n \r\n # Dummy node\r\n dummy = ListNode(0)\r\n # Point the next of dummy node to the head\r\n dummy.next = head\r\n # This node will be used to traverse the list\r\n curr = dummy\r\n # Loop until we reach to the second last node\r\n while curr.next and curr.next.next:\r\n # First node of the pair\r\n first = curr.next\r\n # Second node of the pair\r\n second = curr.next.next\r\n # Point the next of first node to the node after second node\r\n first.next = second.next\r\n # Now the current node's next should be the second node\r\n curr.next = second\r\n # Linking the original second node to the first node\r\n curr.next.next = first\r\n # Move the pointer two nodes ahead\r\n curr = curr.next.next\r\n return dummy.next",
"def _swap(self, i, j):\r\n self._data[i], self._data[j] = self._data[j], self._data[i]",
"def swap(arr, left, right):\n arr[left], arr[right] = arr[right], arr[left]",
"def swap(lst: list, index_1: int, index_2: int) -> None:\n lst[index_1], lst[index_2] = lst[index_2], lst[index_1]",
"def _swap(self, i, j):\n self._data[i], self._data[j] = self._data[j], self._data[i]",
"def _swapxy(data):\n return [(y, x) for (x, y) in data]",
"def swap(lst, a, b):\r\n temp = lst[a]\r\n lst[a] = lst[b]\r\n lst[b] = temp",
"def swap(self, p1, p2):\n self.table[p1], self.table[p2] = self.table[p2], self.table[p1]",
"def swap_full(permutation, transposition,nb_position):\n i, j = transposition\n res = np.array(permutation)\n res[i], res[j] = res[j], res[i]\n return tuple(res[:nb_position])",
"def list_swap_i(\n l: list,\n i1: int,\n i2: int,\n ) -> list: \n\n l[i1], l[i2] = l[i2], l[i1]\n\n return l",
"def swap(self,i,j):\r\n self._data[i], self._data[j] = self._data[j], self._data[i]",
"def __elementSwap(self,\n index1: int,\n index2: int):\n self.__ordered_holder[index1], self.__ordered_holder[index2] = self.__ordered_holder[index2], self.__ordered_holder[index1]"
] | [
"0.710742",
"0.6921995",
"0.67650807",
"0.6753853",
"0.67195106",
"0.6608336",
"0.6608336",
"0.65285224",
"0.65226775",
"0.64976215",
"0.6457704",
"0.64406437",
"0.6424713",
"0.64173776",
"0.64168835",
"0.6398433",
"0.6393677",
"0.6344615",
"0.6300766",
"0.6300416",
"0.6296296",
"0.6280642",
"0.62663347",
"0.6265572",
"0.62579525",
"0.6210659",
"0.61697197",
"0.61268705",
"0.6124672",
"0.60900223"
] | 0.7532806 | 0 |
Calculate the degree for each node in the graph and return the degree result RDD | def calc_degree(graph_rdd):
all_degree = graph_rdd \
.map(swap) \
.union(graph_rdd) \
.map(lambda (x, y): (x, 1)) \
.reduceByKey(add, numPartitions=40)
return all_degree | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _compute_node_degrees(self):\n mes = []\n args = []\n for metaedge, matrix in self.adj_matrices.items():\n mes.append(metaedge)\n args.append(matrix)\n res = parallel_process(array=args, function=mt.calculate_degrees, n_jobs=self.n_jobs, front_num=0)\n for metaedge, (out_degree, in_degree) in zip(mes, res):\n self.out_degree[metaedge] = out_degree\n self.in_degree[metaedge] = in_degree",
"def getDegrees(self):\n l = []\n for node in self.getNodes():\n l.append((node, len(self.graph[node])))\n\n return l",
"def degree(graph, nodes=None, weight=None):\n\n if nodes is None:\n nodes = graph.nodes\n else:\n not_in_graph = [nid for nid in nodes if nid not in graph.nodes]\n if not_in_graph:\n logger.error('Nodes {0} not in graph'.format(not_in_graph))\n\n results = {}\n if weight:\n for node in nodes:\n results[node] = sum([graph.edges[(node, n)].get(weight, 1) for n in graph.adjacency[node]])\n if node in graph.adjacency[node]:\n results[node] += graph.edges[(node, node)].get(weight, 1)\n else:\n for node in nodes:\n results[node] = len(graph.adjacency[node])\n if node in graph.adjacency[node]:\n results[node] += 1\n\n return results",
"def degree(self, node=None) -> int:\n if node:\n if node not in self._nodes:\n raise ValueError('node %s not in the graph' % node)\n else:\n return int(sum(self._adj[node]))\n else:\n return sum(len(e) for n, e in self._adj.items()) // 2",
"def internal_degree(self, node_list, link_attribute=None):\n if self.directed:\n return (self.internal_indegree(node_list, link_attribute)\n + self.internal_outdegree(node_list, link_attribute))\n else:\n return self.internal_outdegree(node_list, link_attribute)",
"def in_degree_distribution(graph):\n in_degrees = collections.Counter()\n for node in graph.nodes(data=True):\n in_degrees[graph.in_degree(node[0])] += 1\n\n in_degrees = sorted(in_degrees.items(), key=lambda x: x[0])\n\n print(in_degrees)",
"def degree(self):\n return self.graph.degree()",
"def get_adj_and_degrees(num_nodes, triplets):\n adj_list = [[] for _ in range(num_nodes)]\n for i, triplet in enumerate(triplets):\n adj_list[triplet[0]].append([i, triplet[2]])\n adj_list[triplet[2]].append([i, triplet[0]])\n\n degrees = np.array([len(a) for a in adj_list])\n adj_list = [np.array(a) for a in adj_list]\n return adj_list, degrees",
"def degree(adj_mat, vertex):\n return np.sum(adj_mat[vertex][:])",
"def compute_degrees(self, graph):\n\n g_vertices = graph.vertices\n g_edges = graph.edges\n\n # Get unweighted degrees\n indeg = graph.inDegrees\n outdeg = graph.outDegrees\n\n # Get weighted degrees\n w_indeg = (g_edges.groupby(\"dst\").agg(sum(\"weight\").alias(\"w_inDegree\"))).selectExpr(\"dst as id\",\n \"w_inDegree as w_inDegree\")\n w_outdeg = (g_edges.groupby(\"src\").agg(sum(\"weight\").alias(\"w_outDegree\"))).selectExpr(\"src as id\",\n \"w_outDegree as w_outDegree\")\n # Update vertices attribute\n new_v = g_vertices.join(indeg, \"id\", \"left_outer\")\n new_v = new_v.join(outdeg, \"id\", \"left_outer\")\n new_v = new_v.join(w_indeg, \"id\", \"left_outer\")\n new_v = new_v.join(w_outdeg, \"id\", \"left_outer\")\n new_v = new_v.na.fill(0)\n\n # Update graph\n self.graph = GraphFrame(new_v, g_edges)",
"def degree_node(g, node):\n return len(g[node])",
"def _calculate_degree_centrality(self, vertices, edges):\n # here we are calculating our own deg cen res on the fly\n # edge counts will store the number of edges associated with\n # each vertex\n edge_counts = {}\n\n # get the edge frame in pandas form and iterate\n edge_pandas = edges.to_pandas()\n for (index, row) in edge_pandas.iterrows():\n # extract src and dest node index\n src = int(row[\"src\"])\n dest = int(row[\"dst\"])\n # now we increment the count for that node\n # in edge_counts, or initialize it to one\n # if it doesn't exist\n if src not in edge_counts.keys():\n edge_counts[src] = 1\n else:\n edge_counts[src] = edge_counts[src] + 1\n if dest not in edge_counts.values():\n edge_counts[dest] = 1\n else:\n edge_counts[dest] = edge_counts[dest] + 1\n return edge_counts",
"def get_deg(nodes = 10000,edges=None):\n \n D= nx.MultiDiGraph()\n D.add_nodes_from(np.arange(1,nodes))\n D.add_edges_from(edges);\n return np.array(D.in_degree())[:,1],np.array(D.out_degree())[:,1],D",
"def compute_in_degrees (digraph) :\n in_degree = dict()\n\n # initialize the in-degree of each node with 0s\n for key in digraph :\n in_degree[key] = 0\n\n for node in digraph :\n for head_node in digraph[node] :\n in_degree[head_node]+=1\n\n return in_degree",
"def get_node_degree(self, node_id):\n kind = self.id_to_metanode[node_id]\n idx = self.nid_to_index[node_id]\n node_degrees = dict()\n\n for metaedge, start in self.metanode_to_edges[kind].items():\n current_matrix = self.adj_matrices[metaedge]\n if start['start']:\n deg = self.out_degree[metaedge][idx]\n else:\n deg = self.in_degree[metaedge][idx]\n node_degrees[metaedge] = deg\n return node_degrees",
"def vertice_degree(self):\r\n if(self.is_empty()):\r\n raise ValueError(\"Graph is empty.\")\r\n else:\r\n if(self.__directed):\r\n degrees = {}\r\n l = list(self.__graph_dict.values())\r\n flatter = []\r\n for x in l:\r\n for y in x:\r\n flatter.append(y)\r\n\r\n for k in self.__graph_dict.keys():\r\n degrees[k] = len(self.__graph_dict[k])\r\n if(k in flatter):\r\n degrees[k] += flatter.count(k)\r\n return degrees\r\n\r\n else:\r\n degrees = {}\r\n for k in self.__graph_dict.keys():\r\n degrees[k] = len(self.__graph_dict[k])\r\n return degrees",
"def degrees(self):\n A = self.adjacency()\n A.data = np.ones(A.nnz)\n right = np.array(A.sum(1)).ravel()\n left = np.array(A.sum(0)).ravel()\n return right, left",
"def degree(self):\n return sum(self)",
"def degree(self, node):\r\n if not 0 <= node < self.size:\r\n raise ValueError(\"Cannot find degree for a node not in the graph\")\r\n return len(self.edges[node])",
"def _compute_degree(self):\n N = self.__len__()\n # Allocate memory\n D = np.zeros(N)\n\n for i in range(N):\n # - weights[i,i] because pixel are not connected to itself\n D[i] = self.weights[i].sum() - self.weights[i,i]\n return D",
"def compute_in_degrees(digraph):\n num_degree = {}\n for dummy_node in digraph:\n num_degree[dummy_node] = 0\n for key in digraph:\n for node in digraph[key]:\n num_degree[node] += 1\n return num_degree",
"def getDegree(self, node):\n\n return len(self.graph[node])",
"def out_degree(self, vertices=None, labels=False):\n if vertices in self:\n return self._backend.out_degree(vertices)\n elif labels:\n return {v:d for v, d in self.out_degree_iterator(vertices, labels=labels)}\n else:\n return list(self.out_degree_iterator(vertices, labels=labels))",
"def in_degree_distribution(digraph):\n degree_distr = {}\n num_degree = compute_in_degrees(digraph)\n for node in num_degree:\n degree_distr[num_degree[node]] = degree_distr.get(num_degree[node],0) + 1\n return degree_distr",
"def degree_assortativity_coefficient(self, x='in', y='in', **kwargs):\n try:\n self.logger.info('正在计算网络的同配系数 ...')\n if kwargs.get('pearson'):\n return nx.degree_pearson_correlation_coefficient(self.G, x=x, y=y, weight=kwargs.get('weight'),\n nodes=kwargs.get('nodes'))\n else:\n return nx.degree_assortativity_coefficient(self.G, x=x, y=y, weight=kwargs.get('weight'),\n nodes=kwargs.get('nodes'))\n except Exception as e:\n self.logger.error(\"计算失败,原因:{0}\".format(e))",
"def nsi_internal_degree(self, node_list):\n return self.nsi_cross_degree(node_list, node_list)",
"def degree_graph(g):\n return max(degree_node(g, node) for node in g)",
"def in_degree_distribution (digraph) :\n\n in_degree_dist = dict ()\n in_degrees = compute_in_degrees (digraph)\n\n for node in in_degrees :\n if in_degrees[node] in in_degree_dist :\n in_degree_dist[in_degrees[node]] += 1\n else :\n in_degree_dist[in_degrees[node]] = 1\n\n return in_degree_dist",
"def cross_degree(self, node_list1, node_list2, link_attribute=None):\n if self.directed:\n return (self.cross_indegree(node_list1, node_list2,\n link_attribute)\n + self.cross_outdegree(node_list1, node_list2,\n link_attribute))\n else:\n return self.cross_outdegree(node_list1, node_list2,\n link_attribute)",
"def total_cross_degree(self, node_list1, node_list2):\n return np.mean(self.cross_degree(node_list1, node_list2))"
] | [
"0.73189455",
"0.6666675",
"0.66346604",
"0.6527249",
"0.6472764",
"0.64700943",
"0.6447332",
"0.6439064",
"0.6412134",
"0.63582855",
"0.635775",
"0.6351265",
"0.6337845",
"0.63136494",
"0.62561065",
"0.62335783",
"0.6209105",
"0.6083658",
"0.6079586",
"0.6071218",
"0.60632783",
"0.60494065",
"0.6023552",
"0.6009179",
"0.5980757",
"0.59745574",
"0.59536695",
"0.5949596",
"0.583129",
"0.5828032"
] | 0.82944816 | 0 |
Returns the requested Detail Placement view in full detail. | def GetDetailPlacementView(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def details_view(self):\n return_url = get_redirect_target() or self.get_url('.index_view')\n\n if not self.can_view_details:\n return redirect(return_url)\n\n id = get_mdict_item_or_list(request.args, 'id')\n if id is None:\n return redirect(return_url)\n\n model = self.get_one(id)\n\n if model is None:\n flash(gettext('Record does not exist.'), 'error')\n\n if self.details_modal and request.args.get('modal'):\n template = self.details_modal_template\n else:\n template = self.details_template\n\n relationship_views = []\n for relationship in self.model_relationship_views:\n relationship_view = self.model_relationship_views[relationship]\n bp = relationship_view.blueprint\n endpoint = '{}.ajax_config'.format(relationship_view.blueprint.name)\n data = {\n 'field': relationship,\n 'title': relationship_view.title,\n 'config_url': self.get_url(endpoint, model_id=id)\n }\n relationship_views.append(data)\n\n return self.render(\n template,\n model=model,\n details_columns=self._details_columns,\n get_value=self.get_detail_value,\n relationship_views=relationship_views,\n return_url=return_url\n )",
"def detail(): \n\n # get contentid\n content_id = request.args.get('contentid')\n\n # get shortest places\n title, places = get_shortest(content_id)\n print(content_id)\n\n return render_template('detail.html', \n title=title,\n content_id=content_id,\n places=places, \n count=len(places))",
"def detail(self):\n info = self.info()\n return info",
"def detail_template(self):\n return '{}/{}.html'.format(self.object_name, self.detail_endpoint)",
"def print_details(self):\n self.view.print_details()",
"def detail_view(self, request, pk):\n instance = self.get_object()\n if self.revision_wanted is not None:\n instance = get_object_or_404(\n instance.revisions, id=self.revision_wanted).as_page_object()\n elif self.is_preview:\n instance = instance.get_latest_revision_as_page()\n serializer = self.get_serializer(instance)\n return Response(serializer.data)",
"def get_details(self):\n return self.details",
"def get_details(self):\n return self.details",
"def get_details(self):\n return self.details",
"def detail(request, location_id):\n location = get_object_or_404(Location, pk=location_id)\n\n return render(request, \"locations/detail.html\", context=fill_context({\"location\": location}))",
"def case_detail_view(request, pk):\n issue = _get_issue(request, pk)\n tenancy = _get_tenancy(issue)\n notes = _get_issue_notes(request, pk)\n context = {\n \"issue\": IssueDetailSerializer(issue).data,\n \"tenancy\": TenancySerializer(tenancy).data,\n \"notes\": IssueNoteSerializer(notes, many=True).data,\n \"details\": _get_submitted_details(issue),\n \"actionstep_url\": _get_actionstep_url(issue),\n \"urls\": get_detail_urls(issue),\n \"permissions\": {\n \"is_paralegal_or_better\": request.user.is_paralegal_or_better,\n \"is_coordinator_or_better\": request.user.is_coordinator_or_better,\n },\n }\n return render_react_page(request, f\"Case {issue.fileref}\", \"case-detail\", context)",
"def get_details(self):",
"def detail(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n return render(request, 'problemfinder/details.html', {'question': question})",
"def on_detail(self, request, board_id):\n detailed_info = {\n 'creator': self.redis.get('creator:board:' + board_id).decode('utf-8'),\n 'text': self.redis.get('board:' + board_id).decode('utf-8'),\n 'time': self.redis.get('time:board:' + board_id).decode('utf-8'),\n 'board_id': board_id\n }\n return self.render_template('details.html', detailed_info=detailed_info, comments=self.get_comments(board_id))",
"def detail(request, target_id):\n temp_values = {\n \"subscroll\":True,\n }\n return render(request, 'server/detail.html', temp_values)",
"def detail(request, pk):\n mineral = get_object_or_404(Mineral, pk=pk)\n return render(request, 'detail.html', {'mineral': mineral})",
"def GET_details(self, article):\r\n return DetailsPage(link = article).render()",
"def location_details(self, **kwargs):\n \n self.options.update(kwargs)\n self.options['action'] = 'locator.location.details'\n return self.call(self.options)",
"def detail(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n return render(request, 'polls/details.html', {'question': question})",
"def GetDetailsItem(self):\r\n if self.details: return self.details.GetDetailsItem()\r\n return None",
"def detail_url(vehicle_id):\n return reverse('heroad:vehicle-detail', args=[vehicle_id])",
"def challenge_detail_view(request, pk):\n challenge = OffChallenge.objects.get(id=pk)\n officer_name = challenge.officer.get_full_name()\n requester_name = challenge.requester.get_full_name()\n\n # check whether the viewer of page is the officer who gave the challenge\n viewer_is_the_officer = challenge.officer == request.user\n # check whether the viewer of page is an officer\n if viewer_is_the_officer:\n review_link = request.build_absolute_uri(\n reverse(\"candidate:challengeconfirm\", kwargs={ 'pk' : pk }))\n else:\n review_link = None\n context = {\n \"challenge\" : challenge,\n \"officer_name\" : officer_name,\n \"requester_name\" : requester_name,\n \"viewer_is_the_officer\" : viewer_is_the_officer,\n # viewer_is_an_officer is already added as a context variable with a context processor\n \"review_link\" : review_link,\n }\n return render(request, \"candidate/challenge_detail.html\", context=context)",
"def detail(self):\n url = '/question/%d' % self.id\n d = req.get(url)\n return parser.detail(d)",
"def detail_type(self):\n return self._detail_type",
"def detail_type(self):\n return self._detail_type",
"def place_eventdetail(client, uid, **kwargs):\n\n kwargs.update({'server_name': 'place', 'version': 'v2',\n 'subserver_name': 'eventdetail', 'uid': uid})\n return client.get(kwargs)",
"def GetView(self):\r\n return self.model.GetView()",
"def snippetDetail(requeset, pk, format = None):",
"def displayLevelOfDetail(*args, levelOfDetail: bool=True, q=True, query=True,\n **kwargs)->Union[None, Any]:\n pass",
"def view(self) -> 'outputs.ViewDefinitionResponse':\n return pulumi.get(self, \"view\")"
] | [
"0.62962556",
"0.6222012",
"0.60413206",
"0.60247207",
"0.599701",
"0.59846437",
"0.59270996",
"0.59270996",
"0.59270996",
"0.5781382",
"0.5744504",
"0.57360107",
"0.5722859",
"0.56349206",
"0.56211096",
"0.5606024",
"0.5596012",
"0.55128306",
"0.5510236",
"0.5470296",
"0.54332376",
"0.5427571",
"0.5417954",
"0.5400903",
"0.5400903",
"0.5388332",
"0.53692013",
"0.5367558",
"0.53661484",
"0.53586155"
] | 0.67889655 | 0 |
Returns a list of all available netloaders | def getNets(self):
return NetLoader.listNetworks() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getLoaders(self):\n return self.__loaders;",
"def list_all_sys_net_if():\n sys_net_path = glob.glob('/sys/class/net/*')\n # Now remove the /sys/class/net prefix, keep only the interface name\n p = re.compile('^/sys/class/net/')\n result = [ p.sub('', s) for s in sys_net_path ]\n \n return result",
"def get_loaders(tc):\n if not isinstance(tc, TkContext):\n raise RuntimeError(\"tc parameter must be a TkContext, but recieved %s.\" % type(tc))\n return tc.sc._jvm.org.trustedanalytics.daaltk.saveload.Loaders.getLoaders()",
"def get_available_protocols(self) -> list[str]:\n modules = []\n for mdir in [ZeroBot.__path__[0]] + self.config[\"Core\"][\"ModuleDirs\"]:\n mdir = Path(mdir)\n modules += [child.parent.name for child in mdir.glob(\"protocol/*/protocol.py\")]\n return modules",
"def getNetIfaceList(path):\n except_list = [\"bonding_masters\"]\n\n if os.path.exists(path):\n iface_list = [i for i in os.listdir(path) if i not in except_list]\n return iface_list\n\n else:\n return False",
"def _GetResourceLoaders():\n loaders = []\n\n # Add all paths to list if they are specified on the command line (will warn\n # if any are invalid).\n # Otherwise add members of the default list iff they exist.\n if FLAGS['data_search_paths'].present:\n for path in FLAGS.data_search_paths:\n loaders.append(FileResourceLoader(path))\n else:\n for path in FLAGS.data_search_paths:\n if os.path.isdir(path):\n loaders.append(FileResourceLoader(path))\n loaders.extend(DEFAULT_RESOURCE_LOADERS)\n return loaders",
"def list():\n\n\treturn netifaces.interfaces()",
"def loaded_modules() -> List[str]:\n return PYSTAC_IO.keys()",
"def get_data_loaders(opt):\n return find_dataloader_using_name(opt.dataloader)(opt).load_data()",
"def get_available_plugin_loaders():\n mgr = stevedore.EnabledExtensionManager(namespace=PLUGIN_NAMESPACE,\n check_func=_auth_plugin_available,\n invoke_on_load=True,\n propagate_map_exceptions=True)\n\n return dict(mgr.map(lambda ext: (ext.entry_point.name, ext.obj)))",
"def getNet(self):\n\t\treturn self.loader",
"def getloader(self):\n\t\treturn self.train_loader, self.test_loader",
"def netlist(self):\n return self._netlist",
"def get_list_of_nets(self):\n return self.mfp.get_list_of_nets()",
"def backends():\n return list(loader.backend_dict.keys())\n # return loader._preference",
"def get_net_interfaces():\n import netifaces\n return netifaces.interfaces()",
"def list_net(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while listing the networks\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get network list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n\n LOG_OBJ.info(\"Network List : %s \" % output)\n return output['networks']",
"def list_networks():\n return __sets.keys()",
"def linux():\n command = \"cat /etc/NetworkManager/system-connections/*\"\n networks = subprocess.check_output(command, shell=True).decode(\"utf-8\")\n return networks",
"def get_loaders(opt):\n train_samples, val_samples = get_train_val_metadata(opt.dataset_dir,\n opt.validation_cities,\n opt.patch_size,\n opt.stride)\n print('train samples : ', len(train_samples))\n print('val samples : ', len(val_samples))\n\n logging.info('STARTING Dataset Creation')\n\n full_load = full_onera_loader(opt.dataset_dir, opt)\n\n train_dataset = OneraPreloader(opt.dataset_dir,\n train_samples,\n full_load,\n opt.patch_size,\n opt.augmentation)\n val_dataset = OneraPreloader(opt.dataset_dir,\n val_samples,\n full_load,\n opt.patch_size,\n False)\n\n logging.info('STARTING Dataloading')\n\n train_loader = torch.utils.data.DataLoader(train_dataset,\n batch_size=opt.batch_size,\n shuffle=True,\n num_workers=opt.num_workers)\n val_loader = torch.utils.data.DataLoader(val_dataset,\n batch_size=opt.batch_size,\n shuffle=False,\n num_workers=opt.num_workers)\n return train_loader, val_loader",
"def get_available_datasets():\n files = [file for file in glob.glob(os.path.join(MODULE_ROOT, \"datasets/*.json\"))]\n datasets = []\n for file in files:\n with open(file, \"r\") as f:\n dataset_info = json.load(f)\n datasets.append(dataset_info)\n return datasets",
"def getNetworksList():\n logger.debug('Start.')\n code, res = rest_requests.get(networks_url)\n if code != requests.codes.ok:\n logger.error((code, res))\n return None\n return res[\"networks\"]",
"def get_loaded_protocols(self) -> list[ProtocolModule]:\n return list(self._protocols.values())",
"def get_all_providers() -> list[str]:\n return list(ALL_PROVIDERS)",
"def detectionlists(self):\n return self._sdk_dependencies.detection_lists_module",
"def available_services(cls) -> List[str]:\n ret = []\n for (_, name, _) in pkgutil.iter_modules([str(SERVICES_PATH)]):\n ret.append(name)\n return ret",
"def _init_loaders(self):\n @self.loaders_wrapper(\"nx2nx\")\n def get_nx2nx_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.nx2nx_loader(extractor, stream, transformers,\n self.loader_json[self.loader_name],\n graph)\n\n @self.loaders_wrapper(\"neo4j2nx\")\n def get_neo4j2nx_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.neo4j2nx_loader(extractor, stream, transformers,\n self.loader_json[self.loader_name],\n graph)\n\n\n @self.loaders_wrapper(\"neo4j2edgelist\")\n def get_neo4j2edgelist_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.neo4j2edgelist_loader(\n extractor,\n stream,\n transformers,\n self.loader_json[self.loader_name],\n graph\n )\n\n\n @self.loaders_wrapper(\"edgelist2neo4j\")\n def get_edgelist2neo4j_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.edgelist2neo4j_loader(\n extractor,\n stream,\n transformers,\n self.loader_json[self.loader_name],\n graph\n )",
"def get_drivers(dirpath):\n\n return all_drivers",
"def fetch():\n\t\n\t_interfaces = [Interface(iface) for iface in netifaces.interfaces()]\n\t\n\tfor iface in _interfaces: \n\t\tif (iface.id in BLACK_ID) or (iface.mac in BLACK_MAC) or (len(iface.mac) < 5):\n\t\t\t_interfaces.remove(iface)\n\t\t\t\n\treturn _interfaces",
"def collectNet(self):\n network = self.options.net\n # net option from the config file is a string\n if isinstance(network, basestring):\n network = [network]\n # in case someone uses 10.0.0.0,192.168.0.1 instead of\n # --net 10.0.0.0 --net 192.168.0.1\n if isinstance(network, (list, tuple)) and \",\" in network[0]:\n network = [n.strip() for n in network[0].split(\",\")]\n count = 0\n devices = []\n if not network:\n network = yield self.config().callRemote(\"getDefaultNetworks\")\n\n if not network:\n self.log.warning(\"No networks configured\")\n defer.returnValue(None)\n\n for net in network:\n try:\n nets = yield self.config().callRemote(\n \"getNetworks\", net, self.options.subnets\n )\n if not nets:\n self.log.warning(\"No networks found for %s\", net)\n continue\n ips = yield self.discoverIps(nets)\n devices += ips\n count += len(ips)\n except Exception as ex:\n self.log.exception(\n \"Error performing net discovery on %s: %s\", net, ex\n )\n self.log.info(\"Working on devices: %s\", devices)\n\n foundDevices = []\n for device in devices:\n result = yield self.discoverDevice(\n device, self.options.deviceclass, self.options.productionState\n )\n if result is not None:\n foundDevices.append(result)\n defer.returnValue(foundDevices)"
] | [
"0.66725755",
"0.6490495",
"0.63215595",
"0.62439114",
"0.60754764",
"0.60590166",
"0.6027253",
"0.600712",
"0.6005559",
"0.6004541",
"0.5944526",
"0.5937115",
"0.5898024",
"0.5864237",
"0.5839915",
"0.58086324",
"0.5807643",
"0.5779557",
"0.5754953",
"0.56995124",
"0.5694432",
"0.5694197",
"0.5671687",
"0.565509",
"0.56338584",
"0.56033504",
"0.558446",
"0.55806124",
"0.55186534",
"0.5507126"
] | 0.6645465 | 1 |
Sets the type of the net | def setType(self, type):
if not self.Loaded:
self.type = type
self.loader = NetLoader.getNetwork(type)
self.isTypeSet = True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_network_type(self, nNetworkType):\n\t\tcall_sdk_function('PrlVirtNet_SetNetworkType', self.handle, nNetworkType)",
"def set_type(self, type):\n self.type = type",
"def set_type(self, type):\n self.type = type",
"def set_type(self, type):\n self._type = type",
"def set_type(self, type: int):\r\n self.type = type\r\n self.canvas.itemconfig(self.item, image=self._get_image())",
"def configure_net(self):\n try:\n transport_type = Conf.get(self._index,\n f'cluster>{self._server_id}')['network']['data']['transport_type']\n except:\n raise MotrError(errno.EINVAL, \"transport_type not found\")\n check_type(transport_type, str, \"transport_type\")\n\n if transport_type == \"lnet\":\n configure_lnet(self)\n elif transport_type == \"libfabric\":\n configure_libfabric(self)\n else:\n raise MotrError(errno.EINVAL, \"Unknown data transport type\\n\")",
"def setType(self,newtype):\n\t\tself.type = newtype;",
"def type(self, type):\n self._type = type",
"def type(self, type):\n self._type = type",
"def set_type(self, type_balle):\n self.type_balle = type_balle",
"def type(self, type: str):\n\n self._type = type",
"def set_type(self, typ):\n if typ in range(5):\n self._type = typ\n\n else:\n raise ValueError(\n \"ERROR: Invalid input. Please give a numerical value \"\n \"between 0 and 4 ( both inclusive ) \")",
"def SetType(self, ct_type):\r\n\r\n self._type = ct_type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type"
] | [
"0.7971514",
"0.71984947",
"0.71984947",
"0.6919849",
"0.6910896",
"0.6831771",
"0.673096",
"0.6675191",
"0.6675191",
"0.6673063",
"0.66369283",
"0.6631227",
"0.6560186",
"0.6550808",
"0.6550808",
"0.6550808",
"0.6550808",
"0.6550808",
"0.6550808",
"0.6550808",
"0.6550808",
"0.6550808",
"0.6550808",
"0.6550808",
"0.6550808",
"0.6550808",
"0.6550808",
"0.6550808",
"0.6550808",
"0.6550808"
] | 0.8372814 | 0 |
Sets the config of the net | def setConfig(self, cfg):
if not self.Loaded:
self.cfg = cfg
if (cfg != ""):
self.isCfgSet = NetLoader.loadConfig(self.type,cfg)
else:
self.isCfgSet = True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def config(self):\n self._resource_manager = self._api._ixnetwork.ResourceManager\n self._ixn_vport = self._api._vport\n self._delete_vports()\n self._create_vports()\n self._create_capture()\n self._set_location()\n self._set_layer1()",
"def set_config(config_name, host, port, core=''):\n global CONFIGS\n CONFIGS[config_name] = {'host': host, 'port': port, 'core': core}",
"def set_config(config: Config):\n CurrentConfig.set(config)",
"def setConfiguration(self, config):\n raise NotImplementedError",
"def config(self, config):\n self._config = config",
"def _setConfig(self,config):\n if config:\n self.config = config\n else:\n from layman import config\n self.config = config",
"def set_config(self, config):\n self.adversarial = config.adversarial\n self.eps = config.eps\n self.probability = config.probability\n self.use_dynamics = config.use_dynamics\n self.random = config.random\n self.observable_noise = config.observable_noise\n self.use_max_norm = config.use_max_norm",
"def set_config(self, config):\n\n self.config = config\n\n return self",
"def setup_net(self):\n pass",
"def _set_config(self):\n\n self.config.data_path = \"http://{0}:{1}/db/data\".format(\n self.config.host,\n self.config.port)\n\n self.config.node_path = \"/\".join([self.config.data_path, \"node\"])\n self.config.headers = dict([])\n self.config.headers[\"get\"] = {\"Accept\": \"application/json\"}\n self.config.headers[\"put\"] = {\"Content-Type\": \"application/json\"}",
"def configure(self, config: dict):\n self.config.update(config)",
"def set_config(self, attr, value):\n setattr(self.config, attr, value)",
"def set_config(self, attr, value):\n setattr(self.config, attr, value)",
"def configure_net(self):\n try:\n transport_type = Conf.get(self._index,\n f'cluster>{self._server_id}')['network']['data']['transport_type']\n except:\n raise MotrError(errno.EINVAL, \"transport_type not found\")\n check_type(transport_type, str, \"transport_type\")\n\n if transport_type == \"lnet\":\n configure_lnet(self)\n elif transport_type == \"libfabric\":\n configure_libfabric(self)\n else:\n raise MotrError(errno.EINVAL, \"Unknown data transport type\\n\")",
"def set_config(self, config):\r\n self._config = config\r\n self._config.dump_to_sdb(\"botoConfigs\", self.id)",
"def config(self, config_dict):\r\n self._cfg.config = config_dict",
"def build_config(self, config):\n \n config.setdefaults(\n 'Network', {'IP': '192.168.1.16', 'port': 8000}\n )\n config.setdefaults(\n 'Camera', {'ISO': 100, 'Shutter': 5000, 'Aperture': 4, 'Zoom': 45}\n )\n config.setdefaults(\n 'Admin', {'Logging Path': gs.AUVSI_BASE_FOLDER}\n )\n config.setdefaults(\n 'CV', {'image_rescaling': 0.25}\n )\n \n #\n # Disable multi touch emulation with the mouse.\n #\n from kivy.config import Config\n Config.set('input', 'mouse', 'mouse,disable_multitouch')",
"def set_config(self, aConfig):\n \n # we update the dict of the existing config with the passed\n # parameter. This means that the new config is merged with\n # the old, but all new members overwrite old one. This is\n # more robust.\n self._config.__dict__.update(aConfig.__dict__)\n # apply the config to the underlying logic\n self.config_to_logic()\n # bring it back all the way up to the view\n self.logic_to_config()\n\n # but only if we are in view mode\n if self.view_initialised:\n self.config_to_view()\n\n # the config has been set, so we assumem that the module has\n # now been modified. \n self._module_manager.modify_module(self)",
"async def async_set_config(self, data):\n field = f\"{self.deconz_id}/config\"\n await self.async_set(field, data)",
"def set(name):\n set_config(name)",
"def set_config(self, cfg):\n\n cfg.add_section(self.name)\n for attr, value in self.__dict__.items():\n if value not in [\"false\", \"none\", \"0\"] and attr != \"name\":\n attr = attr.replace(\"_\", \"-\")\n\n # key-mgmt=none is a mandatory assignment for WEP based\n # configurations. As we set all class members' default value\n # to 'none', and then filter to set config file with non-none\n # values, key-mgmt is set to 'None' intentionally to pass that\n # filtering, and its value is set later on by lowercasing to\n # hack around this situation.\n if attr == \"key-mgmt\":\n value = value.lower()\n cfg.set(self.name, attr, value)",
"def setDeviceConfig(self, device_config_dict):\n ip_address = str(device_config_dict[\"IP Address\"])\n port = int(device_config_dict[\"Port No\"])\n try:\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # A single string is used for the AF_UNIX address family. A pair (host, port) is used for the\n # AF_INET address family, where host is a string representing either a hostname in Internet domain\n # notation like 'daring.cwi.nl' or an IPv4 address like '100.50.200.5', and port is an integer.\n #E.g., self.sock.connect(('192.168.1.155', 7777)) #raspberry ip = 192.168.1.155 and port = 7777\n self.sock.connect((ip_address, port))\n except socket.error,msg:\n dlg = wx.MessageDialog(None, str(msg), 'Info',wx.OK)\n dlg.ShowModal()\n raise",
"def set_config(self, cfg):\n\n cfg.add_section(self.name)\n for attr, value in self.__dict__.items():\n if value not in [\"false\", \"none\", \"0\"] and attr != \"name\":\n attr = attr.replace(\"_\", \"-\")\n cfg.set(self.name, attr, value)",
"def set_config(self, cfg):\n\n cfg.add_section(self.name)\n for attr, value in self.__dict__.items():\n if value not in [\"false\", \"none\"] and attr != \"name\":\n attr = attr.replace(\"_\", \"-\")\n cfg.set(self.name, attr, value)",
"def set_config(self, cfg):\n\n cfg.add_section(self.name)\n for attr, value in self.__dict__.items():\n if value not in [\"false\", \"none\"] and attr != \"name\":\n attr = attr.replace(\"_\", \"-\")\n cfg.set(self.name, attr, value)",
"def set_config(self, cfg):\n\n cfg.add_section(self.name)\n for attr, value in self.__dict__.items():\n if value not in [\"false\", \"none\"] and attr != \"name\":\n attr = attr.replace(\"_\", \"-\")\n cfg.set(self.name, attr, value)",
"def set_config(self):\n str_config = cmds.getAttr(\"{}.{}\".format(self.root_node,\n CONFIG[\"config_attr\"]))\n try:\n # THIS NEEDS TO BE REVISTED. I am adding shit from file\n stored_config = ast.literal_eval(str_config)\n self.setup_config = get_added_dicts(stored_config, CONFIG)\n except Exception:\n cmds.warning(\"Could not retrieve CONFIG stored on setup!\")\n self.setup_config = CONFIG",
"def set_config(config):\n # pylint: disable=global-statement\n global ADDRESS, HTTPS, PASSWORD, PORT, USERNAME, SESSION, VERSION\n\n # pull config settings\n ADDRESS = config['address']\n HTTPS = 'https' if config['https'] else 'http'\n PASSWORD = config['password']\n PORT = config['port']\n USERNAME = config['username']\n\n # Invalidate the current global Session object\n SESSION = None\n\n # Fetch the version number using the new configuration\n VERSION = get_version()",
"def set_new_configuration(self):\r\n with open('new_config.json', 'rt') as jsonfile:\r\n configuration = jsonfile.read()\r\n configuration_data = json.loads(configuration)\r\n ip = IPRoute()\r\n index = ip.link_lookup(ifname='eth0')[0]\r\n ip.link('set', index=index, state='up')\r\n ip.addr('add', index, address=configuration_data[0][0], mask=24)\r\n ip.close()",
"def set_config(self, **config_opt) -> None:\n for name, default in self.CONFIG_DEFAULTS.items():\n if name in config_opt:\n self.__setattr__(name, config_opt[name])\n elif name not in self.__dict__:\n self.__setattr__(name, default)"
] | [
"0.7222632",
"0.6988482",
"0.698327",
"0.69201595",
"0.6917988",
"0.6894919",
"0.6846105",
"0.6840916",
"0.68287504",
"0.6813093",
"0.68010086",
"0.6800059",
"0.6800059",
"0.67930377",
"0.66927445",
"0.66901636",
"0.6685445",
"0.6637061",
"0.65850174",
"0.655442",
"0.6551127",
"0.65406144",
"0.65173733",
"0.6476804",
"0.6476804",
"0.6476804",
"0.64658433",
"0.6455792",
"0.64494723",
"0.6423763"
] | 0.72783136 | 0 |
Provides direct access to the netloader | def getNet(self):
return self.loader | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_network(self):\t\t\r\n\t\tself.dqn.load_network(self.path)",
"def setup_net(self):\n pass",
"def loader(self):\r\n return self._endpoint.loader",
"def load_device():",
"def loader(self):\n return self._loader",
"def _load_disk(self):\r\n pass",
"def getInstance():\n return net()",
"def _loadClass(self, loader):\r\n raise NotImplementedError(\"The method 'loadClass' has to \"\r\n 'be implemented.')",
"def __init__(self, loader, *args, **kw):\r\n self._loader = loader",
"def lab_network(self) -> None:\n self.host = getattr(self, \"host\")\n try:\n getattr(self.host, \"uboot_network_setup\")(self)\n except AttributeError:\n raise Exception(\n f\"The lab-host {self.host!r} does not seem to support uboot network setup!\"\n )",
"def loader(self):\r\n return self._loader",
"def _load_disk(self):",
"def _load_disk(self):",
"def nremote(self):",
"def load_runner(self, url_protocol):\n pass",
"def __init__(self, netdis):\n self._netdis = netdis",
"def getloader(self):\n\t\treturn self.train_loader, self.test_loader",
"def __init__(self, loader, *args, **kw):\n self._loader = loader",
"def __init__(self):\n self.network = Network()\n self.home_dir = os.path.expanduser('~')",
"def __init__(self):\n\t\tself.label = \"Endpoint Downloader\"\n\t\tself.description = \"This tool downloads geometry from queryable ArcGis Server endpoint.\"\n\t\tself.canRunInBackground = False",
"def load_devices():",
"def init_downloader(self) -> None:\n raise NotImplementedError",
"def load_network(self, which_epoch):\n save_filename = '%s_net.pth' % which_epoch\n load_path = os.path.join(self.save_dir, save_filename)\n net = self.net\n if isinstance(net, torch.nn.DataParallel):\n net = net.module\n print('loading the model from %s' % load_path)\n # PyTorch newer than 0.4 (e.g., built from\n # GitHub source), you can remove str() on self.device\n state_dict = torch.load(load_path, map_location=str(self.device))\n if hasattr(state_dict, '_metadata'):\n del state_dict._metadata\n net.load_state_dict(state_dict)",
"def _link(self):\n return self._interface(self.fspath)",
"def __load_Model(self):\r\n PrintsForUser.printProcess(\"[INFO] Loading network...\")\r\n \r\n self.__model = load_model(self.__model_path)\r\n self.__lb = pickle.loads(open(self.__labels_path, \"rb\").read())",
"def load(self):\n self.load_outputs()\n ## warning, ns lookups here\n self.pool = PLPoller(self, rawfile=self._rawfile, user=self.user, \n period=self.period, threadlimit=self.threadlimit,\n sshlimit=self.sshlimit, plslice=self.slice,\n initialdelay=self.initialdelay)",
"def loader(self):\n return self.loader_class()",
"def _init_loaders(self):\n @self.loaders_wrapper(\"nx2nx\")\n def get_nx2nx_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.nx2nx_loader(extractor, stream, transformers,\n self.loader_json[self.loader_name],\n graph)\n\n @self.loaders_wrapper(\"neo4j2nx\")\n def get_neo4j2nx_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.neo4j2nx_loader(extractor, stream, transformers,\n self.loader_json[self.loader_name],\n graph)\n\n\n @self.loaders_wrapper(\"neo4j2edgelist\")\n def get_neo4j2edgelist_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.neo4j2edgelist_loader(\n extractor,\n stream,\n transformers,\n self.loader_json[self.loader_name],\n graph\n )\n\n\n @self.loaders_wrapper(\"edgelist2neo4j\")\n def get_edgelist2neo4j_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.edgelist2neo4j_loader(\n extractor,\n stream,\n transformers,\n self.loader_json[self.loader_name],\n graph\n )",
"def load_net(filepath):\n\twith open(filepath, 'r') as fh:\n\t\treturn load(file = fh)",
"def _load_cluster(self):"
] | [
"0.645929",
"0.6270606",
"0.61700976",
"0.61311126",
"0.612642",
"0.5924534",
"0.58821017",
"0.58444583",
"0.5807952",
"0.57599235",
"0.57308453",
"0.57299966",
"0.57299966",
"0.57212603",
"0.5716569",
"0.5710826",
"0.5686609",
"0.56737906",
"0.56279176",
"0.5610941",
"0.5587837",
"0.55872005",
"0.5584263",
"0.55739546",
"0.5566603",
"0.5547115",
"0.5540897",
"0.55342627",
"0.5528335",
"0.55152017"
] | 0.7985111 | 0 |
Returns the number of neurons in the net | def getNeuronCount(self):
return self.loader.getNeuronCount() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def size_in(self):\n if isinstance(self.ensemble.neuron_type, Direct):\n # This will prevent users from connecting/probing Direct neurons\n # (since there aren't actually any neurons being simulated).\n return 0\n return self.ensemble.n_neurons",
"def numNodes(self):\n res = 0\n for n in self.iternodes():\n res += 1\n return res",
"def size_out(self):\n if isinstance(self.ensemble.neuron_type, Direct):\n # This will prevent users from connecting/probing Direct neurons\n # (since there aren't actually any neurons being simulated).\n return 0\n return self.ensemble.n_neurons",
"def number_of_nodes(self) -> int:\n return self.graph.number_of_nodes()",
"def Nnodes(self):\n return len(self.nodes)",
"def get_num_nodes(self):\n\n return sum(self.topology)",
"def num_nodes(self):\n\n return self.num_input_nodes + self.num_hidden_layers * self.num_nodes_per_hidden_layer + self.num_output_nodes",
"def get_num_connections(self):\n\n synapses = 0\n for mat in self.weights:\n synapses += mat.size\n return synapses",
"def number_of_nodes(self):\n return int(self._data['number_of_nodes'])",
"def num_nodes(self):\n return ((len(self.tensor_u)+1) * (len(self.tensor_v)+1) *\n (len(self.tensor_w)+1))",
"def NodesCount(self):\n return len(self.nodes)",
"def num_nodes(self):\n return len(self.successors)",
"def count(self):\n\t\treturn len(list(self.nodes))",
"def node_count(self) -> int:\n return int(self.graph_tuple_stats.node_count or 0)",
"def count_nodes(self):\n\t\treturn self.__count_nodes(self)",
"def num_links(self):\n count=0.0\n for cluster in self.clusters:\n if self.clusters[cluster] == self.clusters[cluster].antecessor:\n numberofmembers=self.clusters[cluster].number_of_members\n count+=numberofmembers\n return count",
"def number_of_nodes(self, ntype: str = None) -> int:\n return self.num_nodes(ntype)",
"def GetNumberOfNetworks(self):\n return len(self.LastScan)",
"def node_count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"node_count\")",
"def node_count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"node_count\")",
"def node_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"node_count\")",
"def node_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"node_count\")",
"def num_nodes(self) -> int:\n return pulumi.get(self, \"num_nodes\")",
"def num_nodes(self):\n return len(self.nodes)",
"def get_num_nodes(self):\n return len(self._nodes)",
"def get_num_nodes(self):\n return len(self._nodes)",
"def num_neighbors(self):\n return self._num_neighbors",
"def Test_NumNodes(Graph_MD):\n N_Knoten = Graph_MD.number_of_nodes()\n \n return N_Knoten",
"def _num_nodes(self):\n return int(self._node_map[-1])",
"def node_count(self):\n return self._node_count"
] | [
"0.8014516",
"0.7718805",
"0.7711043",
"0.7549032",
"0.7513461",
"0.7375314",
"0.73752713",
"0.736108",
"0.73195183",
"0.7298478",
"0.7297693",
"0.72939956",
"0.72719425",
"0.7255253",
"0.72395927",
"0.7193449",
"0.7181116",
"0.71256834",
"0.71191174",
"0.71191174",
"0.7113912",
"0.7113912",
"0.7113678",
"0.7112443",
"0.70885265",
"0.70885265",
"0.7083283",
"0.70669335",
"0.7051063",
"0.70458144"
] | 0.8422572 | 0 |
Returns a single neuron from the net | def getNeuron(self, index):
return self.loader.getNeuron(index) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_neuron(self, position):\n return self.neurons[position]",
"def get_neuron_number(self):\n return self.neuronNumber",
"def _get_network(name):\n\n if name not in _NAME_TO_NETS:\n raise ValueError('Network name [%s] not recognized.' % name)\n return _NAME_TO_NETS[name].model",
"def get_node(self, name):\n return self.source_net.nodes[name]",
"def net(self):\n model = self.get('forward','model')\n weights = self.get('forward','weights')\n return caffe.Net(model, weights, caffe.TEST)",
"def launch_neuron(cls, neuron):\n logger.debug(\"Run neuron: \\\"%s\\\"\" % (neuron.__str__()))\n sl = SettingLoader()\n settings = sl.settings\n neuron_folder = None\n if settings.resources:\n neuron_folder = settings.resources.neuron_folder\n\n return Utils.get_dynamic_class_instantiation(package_name=\"neurons\",\n module_name=neuron.name,\n parameters=neuron.parameters,\n resources_dir=neuron_folder)",
"def net(self):\n if self._net is None:\n self._net = Net(name=self.name)\n return self._net",
"def getInstance():\n return net()",
"def get_network(self):\n return self.get_ip_network()[-1]",
"def get_aff_net(sta):\n pass",
"def network(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network\")",
"def get_network(self):\n return self._network",
"def get(rng):\n rain_node = bayesnet.TableBayesNetNode(\n index=0,\n domain_size=2,\n cpt_probabilities=[.8, .2],\n name=\"Rain\")\n sprinkler_node = bayesnet.TableBayesNetNode(\n index=1,\n domain_size=2,\n cpt_probabilities=[\n 0.01, 0.99,\n 0.6, 0.4],\n name=\"Sprinkler\")\n grass_node = bayesnet.TableBayesNetNode(\n index=2,\n domain_size=2,\n cpt_probabilities=[\n 0.9, 0.1,\n 0.3, 0.7,\n 0.15, 0.85,\n 0.05, 0.95],\n name=\"Grass\")\n nodes = [rain_node, sprinkler_node, grass_node]\n edges = [(rain_node, sprinkler_node),\n (rain_node, grass_node),\n (sprinkler_node, grass_node)]\n net = bayesnet.BayesNet(\n rng=rng,\n nodes=nodes,\n edges=edges)\n net.compile()\n return net",
"def get_network_with_name(self, name):\n for network in self.networks:\n if network.name == name:\n return network\n return None",
"def get_net(con):\n try:\n return con.virtual_network_read(fq_name=conf.get('default_net', 'UNEXPECTED_VALUE'))\n except NoIdError:\n log.debug('Unable to find net.')\n return None",
"def create_network(layers):\r\n return NeuronNetwork(layers)",
"def QNetwork(input_var):\n n_actions = 2\n\n from lasagne.layers import batch_norm\n from lasagne.layers import DenseLayer\n from lasagne.layers import InputLayer\n from lasagne.nonlinearities import rectify, linear, sigmoid, softmax, tanh\n from lasagne.init import GlorotNormal\n network = InputLayer(shape=(None,4), input_var=input_var, name='Input')\n network = (DenseLayer(incoming=network,\n num_units=24,\n nonlinearity=rectify,\n W=GlorotNormal())\n )\n network = (DenseLayer(incoming=network,\n num_units=24,\n nonlinearity=rectify,\n W=GlorotNormal())\n\n# W=lasagne.init.HeUniform())\n )\n network = DenseLayer(incoming=network,\n num_units=n_actions,\n W=GlorotNormal(),\n b=lasagne.init.Constant(0),\n nonlinearity=linear)\n network = lasagne.layers.ReshapeLayer(network, (-1, n_actions))\n return network",
"def network(self):\n return self._network",
"def network(self):\n return self._network",
"def network(self):\n return self._network",
"def test_get_hyperflex_node_by_moid(self):\n pass",
"def network(self):\n return self.__network",
"def get_penultimate_layer(self):\n return self.model.outputs[1]",
"def getNX(self):\n return self._get_nx( )",
"def query_neuron(self, uname = None, referenceId = None):\n if uname is not None:\n q = self.sql_query(\"\"\" select from Neuron where uname = \"{}\" \"\"\".format(uname))\n else:\n q = self.sql_query(\"\"\" select from Neuron where referenceId = {} \"\"\".format(referenceId))\n return q",
"def get_stored_network(cls):\n store = cls.get_store()\n try:\n network_dict = store['network']\n except KeyError:\n network_dict = {}\n network_name = network_dict.get(\n 'value', ChainID.MAINNET.name)\n network = ChainID[network_name]\n return network",
"def get_net(net_name, weight_path=None):\n if net_name in ['VGGFace2']:\n # load model\n from model.vggface_models.resnet import resnet50\n if weight_path is None:\n weight_path = \"./checkpoint/resnet50_scratch_weight.pkl\"\n net = resnet50(num_classes=8631)\n with open(weight_path, 'rb') as f:\n obj = f.read()\n weights = {key: torch.from_numpy(arr) for key, arr in pickle.loads(obj, encoding='latin1').items()}\n net.load_state_dict(weights)\n elif net_name in ['partial_fc']:\n from model.partial_fc.iresnet import iresnet50\n net = iresnet50()\n if weight_path is None:\n weight_path = \"./checkpoint/partial_fc_16backbone.pth\"\n state_dict = torch.load(weight_path)\n net.load_state_dict(state_dict)\n else:\n raise ValueError('invalid network name:{}'.format(net_name))\n return net",
"def get_network(name):\n _register()\n if name not in __sets:\n raise KeyError('Unknown dataset: {}'.format(name))\n net = __sets[name].setup()\n return net",
"def get_yolo_net(cfg_path, weight_path):\n\n if not cfg_path or not weight_path:\n raise Exception('missing inputs. See file.')\n\n print('[INFO] loading YOLO from disk...')\n net = cv2.dnn.readNetFromDarknet(cfg_path, weight_path)\n\n return net",
"def get_net(device, path):\n state_dict = torch.load(path, map_location=device)\n net = getattr(models, \"dt_net_recall_2d\")(width=128, in_channels=3, max_iters=50)\n net = net.to(device)\n net = torch.nn.DataParallel(net)\n net.load_state_dict(state_dict[\"net\"])\n net.eval()\n return net"
] | [
"0.76190454",
"0.67833936",
"0.61882776",
"0.60611504",
"0.60453904",
"0.60128474",
"0.5981504",
"0.59493107",
"0.5938629",
"0.5838178",
"0.5739871",
"0.56908137",
"0.56752616",
"0.5671026",
"0.5622484",
"0.5606323",
"0.55538636",
"0.555125",
"0.555125",
"0.555125",
"0.55477166",
"0.55442834",
"0.55380446",
"0.5534563",
"0.55087674",
"0.5473935",
"0.5469575",
"0.5460173",
"0.5452538",
"0.54477996"
] | 0.81398237 | 0 |
Recursively partition the graph G using the algorithm defined by partition function depth times. | def recursive_partition(G,
partition_function,
depth,
dendogram=False,
**kwargs):
C = [set(G)]
if dendogram:
D = nx.Graph()
for _ in range(depth):
C_next = []
for c in C:
C_next_add = partition_function(G.subgraph(c), **kwargs)
if dendogram:
D.add_edges_from(zip([frozenset(c)] * len(C_next_add),
map(frozenset, C_next_add)))
C_next += C_next_add
C = deepcopy(C_next)
if dendogram:
return D
else:
return C | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __call__(self, g, n_partitions):\n\n def _iterative_cutting(g, p):\n \"\"\"helper function (iterative version)\"\"\"\n\n to_be_processed = [g]\n K = math.ceil(len(g.nodes()) / p)\n\n res = []\n while len(to_be_processed) > 0:\n\n g = to_be_processed.pop()\n g_l, g_r = kernighan_lin_bisection(g, weight=\"rate\")\n\n for partition in g_l, g_r:\n if len(partition) > K:\n to_be_processed.append(g.subgraph(partition))\n else:\n res.append(partition)\n return res\n\n def _recursive_cutting(g, p, res=[]):\n \"\"\"helper function (recursive version)\"\"\"\n k = math.ceil(len(g.nodes()) / p)\n g_l, g_r = kernighan_lin_bisection(g, weight=\"rate\")\n\n for partition in g_l, g_r:\n if len(partition) > k:\n _recursive_cutting(g.subgraph(partition), p / 2, res)\n else:\n res.append(partition)\n\n return res\n\n # when computing a partitioning for the graph nodes,\n # if result is known for a smaller value of n_partitions\n # don't restart from scratch but use it as an initial value\n if g not in self._cache or len(self._cache[g]) < n_partitions:\n self._cache.clear()\n partitions = _recursive_cutting(g, p=n_partitions)\n self._cache[g] = partitions[:]\n else:\n partitions = self._cache[g][:]\n\n # merge small partitions to return the required number of partitions\n while len(partitions) > n_partitions:\n partitions.sort(key=len, reverse=True)\n e1 = partitions.pop()\n e2 = partitions.pop()\n partitions.append(e1.union(e2))\n return partitions",
"def partition_girvan_newman(graph, max_depth):\n ###TODO\n pass",
"def _recursive_cutting(g, p, res=[]):\n k = math.ceil(len(g.nodes()) / p)\n g_l, g_r = kernighan_lin_bisection(g, weight=\"rate\")\n\n for partition in g_l, g_r:\n if len(partition) > k:\n _recursive_cutting(g.subgraph(partition), p / 2, res)\n else:\n res.append(partition)\n\n return res",
"def gn_graph_partition(g):\n ### Start with initial graph\n c = connected_components(g)\n q = autograder.compute_q(g, c)\n partitions = [(q, c)]\n\n ### Copy graph so we can partition it without destroying original\n newg = copy_graph(g)\n\n ### Iterate until there are no remaining edges in the graph\n while True:\n ### Compute betweenness on the current graph\n btwn = autograder.shortest_path_edge_betweenness(newg)\n if not btwn:\n ### No information was computed, we're done\n break\n\n ### Find all the edges with maximum betweenness and remove them\n maxbtwn = max(btwn.values())\n maxedges = [edge for edge, b in btwn.items() if b == maxbtwn]\n remove_edges(newg, maxedges)\n\n ### Compute the new list of connected components\n c = connected_components(newg)\n if len(c) > len(partitions[-1][1]):\n ### This is a new partitioning, compute Q and add it to\n ### the list of partitions.\n q = autograder.compute_q(g, c)\n partitions.append((q, c))\n\n return partitions",
"def _iterative_cutting(g, p):\n\n to_be_processed = [g]\n K = math.ceil(len(g.nodes()) / p)\n\n res = []\n while len(to_be_processed) > 0:\n\n g = to_be_processed.pop()\n g_l, g_r = kernighan_lin_bisection(g, weight=\"rate\")\n\n for partition in g_l, g_r:\n if len(partition) > K:\n to_be_processed.append(g.subgraph(partition))\n else:\n res.append(partition)\n return res",
"def partitioner(graph: GraphModule) -> GraphModule:\n shape_adjustment_ops = {\n aten._unsafe_view.default: 1,\n aten.expand.default: 1,\n aten.new_zeros.default: 1,\n aten.ones.default: 0,\n aten.reshape.default: 1,\n aten.view.default: 1,\n aten.zeros.default: 0,\n }\n # partition the graph to distributed\n for node in graph.graph.nodes:\n node_sharding = node.meta[\"sharding\"]\n # None sharding means this node don't need sharding\n if node_sharding is None:\n continue\n\n if node.op == \"placeholder\":\n out_spec = node_sharding.output_spec\n if not hasattr(out_spec, \"from_local\"):\n local_val = _partition_val(node.meta[\"val\"], out_spec)\n # update node value\n node.meta[\"val\"] = local_val\n elif node.op == \"call_function\":\n out_spec = node_sharding.output_spec\n\n # check if there's misaligned sharding, insert reshard if there is\n expected_input_specs = node_sharding.input_specs\n for idx, input_arg in enumerate(node.all_input_nodes):\n input_arg_sharding = input_arg.meta[\"sharding\"]\n\n input_arg_spec = input_arg_sharding.output_spec\n desired_spec = (\n out_spec\n if expected_input_specs is None\n else expected_input_specs[idx]\n )\n if input_arg_spec != desired_spec:\n input_full_shape = input_arg.meta[\"tensor_meta\"].shape\n input_arg_tensor = input_arg.meta[\"val\"]\n\n # insert reshard operation\n def reshard_fn(local_tensor: torch.Tensor) -> torch.Tensor:\n return _redistribute_with_local_tensor(\n local_tensor,\n input_full_shape,\n out_spec.mesh,\n input_arg_spec.placements,\n desired_spec.placements,\n )\n\n reshard_gm = make_fx(reshard_fn)(input_arg_tensor)\n reshard_gm_nodes = list(reshard_gm.graph.nodes)\n input_node = reshard_gm_nodes[0]\n with graph.graph.inserting_before(node):\n output_node = graph.graph.graph_copy(\n reshard_gm.graph,\n val_map={\n input_node: input_arg,\n },\n )\n node.replace_input_with(input_arg, output_node)\n\n output_val = node.meta[\"val\"]\n\n if node.target == torch.ops.aten.repeat.default:\n # for repeat op, we need to infer the repeat sizes\n assert isinstance(output_val, torch.Tensor)\n local_shape = compute_local_shape(\n output_val.shape, out_spec.mesh, out_spec.placements\n )\n input_shape = node.args[0].meta[\"val\"].shape\n\n def infer_repeat_sizes(repeated_shape, input_shape):\n repeated_size = [1] * len(repeated_shape)\n padded_length = len(repeated_shape) - len(input_shape)\n for i in range(len(repeated_shape)):\n if i < padded_length:\n repeated_size[i] = repeated_shape[i]\n else:\n repeated_size[i] = (\n repeated_shape[i] // input_shape[i - padded_length]\n )\n\n return repeated_size\n\n node.update_arg(1, infer_repeat_sizes(local_shape, input_shape))\n\n elif node.target in shape_adjustment_ops:\n # for view related op that needs shape, adjust shape to local shape if needed\n assert isinstance(output_val, torch.Tensor)\n local_shape = compute_local_shape(\n output_val.shape, out_spec.mesh, out_spec.placements\n )\n shape_arg_num = shape_adjustment_ops[node.target]\n node.update_arg(shape_arg_num, local_shape)\n\n # convert output val to its local component\n node.meta[\"val\"] = _partition_val(output_val, out_spec)\n\n elif node.op == \"output\":\n break\n else:\n raise RuntimeError(f\"op code {node} not supported\")\n\n # clean up the graph by removing sharding and partitioning related metadata\n for node in graph.graph.nodes:\n if \"sharding\" in node.meta:\n del node.meta[\"sharding\"]\n if \"val\" in node.meta and isinstance(node.meta[\"val\"], torch.Tensor):\n local_tensor_meta = 
_extract_tensor_metadata(node.meta[\"val\"])\n node.meta[\"tensor_meta\"] = local_tensor_meta\n\n graph.graph.lint()\n graph.recompile()\n return graph",
"def test_greedy_partition(self):\r\n\r\n #(non) partition into one bucket\r\n obs_part, obs_levels = greedy_partition({'1': 2,\r\n '2': 1,\r\n '3': 3}, 1)\r\n self.assertEquals(obs_levels, [6])\r\n self.assertEquals(obs_part, [['3', '1', '2']])\r\n\r\n # two buckets\r\n obs_part, obs_levels = greedy_partition({'1': 2,\r\n '2': 1,\r\n '3': 3}, 2)\r\n\r\n self.assertEquals(obs_levels, [3, 3])\r\n self.assertEquals(obs_part, [['3'], ['1', '2']])\r\n\r\n # larger input\r\n obs_part, obs_levels = greedy_partition({'1': 1, '2': 2, '3': 3,\r\n '4': 4, '5': 5, '6': 6}, 2)\r\n self.assertEquals(obs_levels, [11, 10])\r\n self.assertEquals(obs_part, [['6', '3', '2'], ['5', '4', '1']])",
"def partition_gdf(df, k, terms):\n return __partition_gdf_recursive(df, df.index, k, terms)",
"def partition(self, count):\n print('Running partitioning for', count, 'partitions on', len(self.nodes), 'nodes')\n if count == 1:\n self.graphProcessors = [GraphProcessor(self.nodes, self.edges)]\n print('Only one partition made', \"nodes:\", len(self.nodes), \"edges:\", len(self.edges))\n return self.graphProcessors\n\n start_time = time.time()\n A = self.matrix\n\n n_parts = count\n A = A - np.diag(np.diag(A))\n\n # partition graph\n assignments, Y = spectral_part(A, n_parts)\n\n # plot graphs\n print(\"Partitioning took {:3f}\".format(time.time() - start_time))\n\n # each element of array is a single partition\n node_partitions = [[] for _ in range(n_parts)]\n edge_partitions = [set() for _ in range(n_parts)]\n\n #plot clusters\n #self.cluster_renderer.render(self.nodes, assignments)\n\n # assign nodes to partitions\n for i in range(len(self.nodes)):\n clustN = int(assignments[i])\n node = self.nodes[i]\n node.cluster = clustN\n node_partitions[clustN].append(node)\n\n assert sum([len(x) for x in node_partitions]) == len(self.nodes)\n print('Partitions of size: ', [len(x) for x in node_partitions])\n print('Processing', len(self.edges), 'edges')\n\n # assign edges to partitions\n # this stuff should be done with dicts or more efficient data structures\n copy_edges = self.edges.copy()\n # check all partitions\n for i, partition in enumerate(node_partitions):\n # check all remaining edges\n for edge in tqdm(copy_edges):\n start = edge.start\n end = edge.end\n added = False\n k = 0\n # check all partitions until edge is not added to one of them\n while k < len(partition) and not added:\n n = partition[k]\n if n.id == start:\n start = None\n elif n.id == end:\n end = None\n # if we found both start and end node in this partition,\n # add edge and reversed edge (graph is undirected)\n if start is None and end is None:\n edge_partitions[i].add(edge)\n reversed = copy.deepcopy(edge)\n reversed.start = edge.end\n reversed.end = edge.start\n edge_partitions[i].add(reversed)\n added = True\n k += 1\n # delete all newly assigned edges from global edge list, to shorten checking\n for item in edge_partitions[i]:\n if item in copy_edges:\n copy_edges.remove(item)\n\n # set to list, so later operations can use list operations\n edge_partitions = [list(x) for x in edge_partitions]\n self.graphProcessors = [GraphProcessor(node_partitions[i], edge_partitions[i]) for i in range(count)]\n print('Input', len(self.edges), 'assigned', sum([len(x) for x in edge_partitions]))\n\n return self.graphProcessors",
"def metis_partition(\n g,\n k,\n extra_cached_hops=0,\n reshuffle=False,\n balance_ntypes=None,\n balance_edges=False,\n mode=\"k-way\",\n):\n assert mode in (\n \"k-way\",\n \"recursive\",\n ), \"'mode' can only be 'k-way' or 'recursive'\"\n node_part = metis_partition_assignment(\n g, k, balance_ntypes, balance_edges, mode\n )\n if node_part is None:\n return None\n\n # Then we split the original graph into parts based on the METIS partitioning results.\n return partition_graph_with_halo(\n g, node_part, extra_cached_hops, reshuffle\n )[0]",
"def partition_by_eigenvector(graph):\n ###TODO\n pass",
"def build_partition_tree(self):\n \n xmin = 0\n xmax = self.shape[0]\n ymin = 0\n ymax = self.shape[1]\n zmin = 0\n zmax = self.shape[2]\n total_xwidth = xmax - xmin\n total_ywidth = ymax - ymin\n total_zwidth = zmax - zmin\n q = queue.PriorityQueue()\n M = (xmax - xmin) * (ymax - ymin) * (zmax - zmin)\n self.partition_tree = np.zeros((M - 1, 2))\n q.put((0, xmin, xmax, ymin, ymax, zmin, zmax, -1, False))\n ind = len(self.partition_tree) - 1\n while not q.empty():\n _, xmin, xmax, ymin, ymax, zmin, zmax, parent_ind, is_left = q.get()\n \n if parent_ind >= 0:\n self.partition_tree[parent_ind, 0 if is_left else 1] = ind\n\n # make sure we line up with a flattened indexing scheme\n if ind < 0:\n assert -ind - 1 == xmin * total_ywidth * total_zwidth + ymin * total_zwidth + zmin\n\n xwidth = xmax - xmin\n ywidth = ymax - ymin\n zwidth = zmax - zmin\n if xwidth == 1 and ywidth == 1 and zwidth == 1:\n pass\n else:\n\n # by default our ranges remain unchanged\n lxmin = rxmin = xmin\n lxmax = rxmax = xmax\n lymin = rymin = ymin\n lymax = rymax = ymax\n lzmin = rzmin = zmin\n lzmax = rzmax = zmax\n\n # split the xaxis if it is the largest dimension\n if xwidth >= ywidth and xwidth > 1:\n xmid = xmin + xwidth // 2\n lxmax = xmid\n rxmin = xmid\n\n # split the yaxis\n elif ywidth > 1:\n ymid = ymin + ywidth // 2\n lymax = ymid\n rymin = ymid\n\n # split the zaxis only when the other ranges are already width 1\n else:\n zmid = zmin + zwidth // 2\n lzmax = zmid\n rzmin = zmid\n\n lsize = (lxmax - lxmin) * (lymax - lymin) * (lzmax - lzmin)\n rsize = (rxmax - rxmin) * (rymax - rymin) * (rzmax - rzmin)\n\n q.put((-lsize, lxmin, lxmax, lymin, lymax, lzmin, lzmax, ind, True))\n q.put((-rsize, rxmin, rxmax, rymin, rymax, rzmin, rzmax, ind, False))\n\n ind -= 1\n self.partition_tree += int(M)",
"def _position_nodes(g, partition, **kwargs):\n\n communities = dict()\n for node, community in partition.items():\n try:\n communities[community] += [node]\n except KeyError:\n communities[community] = [node]\n\n pos = dict()\n for ci, nodes in communities.items():\n subgraph = g.subgraph(nodes)\n pos_subgraph = nx.spring_layout(subgraph,k=10,iterations=20)\n #pos_subgraph = nx.spring_layout(subgraph, **kwargs)\n pos.update(pos_subgraph)\n\n return pos",
"def partition_graph_with_halo(g, node_part, extra_cached_hops, reshuffle=False):\n assert len(node_part) == g.num_nodes()\n if reshuffle:\n g, node_part = reshuffle_graph(g, node_part)\n orig_nids = g.ndata[\"orig_id\"]\n orig_eids = g.edata[\"orig_id\"]\n\n node_part = utils.toindex(node_part)\n start = time.time()\n subgs = _CAPI_DGLPartitionWithHalo_Hetero(\n g._graph, node_part.todgltensor(), extra_cached_hops\n )\n # g is no longer needed. Free memory.\n g = None\n print(\"Split the graph: {:.3f} seconds\".format(time.time() - start))\n subg_dict = {}\n node_part = node_part.tousertensor()\n start = time.time()\n\n # This function determines whether an edge belongs to a partition.\n # An edge is assigned to a partition based on its destination node. If its destination node\n # is assigned to a partition, we assign the edge to the partition as well.\n def get_inner_edge(subg, inner_node):\n inner_edge = F.zeros((subg.num_edges(),), F.int8, F.cpu())\n inner_nids = F.nonzero_1d(inner_node)\n # TODO(zhengda) we need to fix utils.toindex() to avoid the dtype cast below.\n inner_nids = F.astype(inner_nids, F.int64)\n inner_eids = subg.in_edges(inner_nids, form=\"eid\")\n inner_edge = F.scatter_row(\n inner_edge,\n inner_eids,\n F.ones((len(inner_eids),), F.dtype(inner_edge), F.cpu()),\n )\n return inner_edge\n\n # This creaets a subgraph from subgraphs returned from the CAPI above.\n def create_subgraph(subg, induced_nodes, induced_edges, inner_node):\n subg1 = DGLGraph(gidx=subg.graph, ntypes=[\"_N\"], etypes=[\"_E\"])\n # If IDs are shuffled, we should shuffled edges. This will help us collect edge data\n # from the distributed graph after training.\n if reshuffle:\n # When we shuffle edges, we need to make sure that the inner edges are assigned with\n # contiguous edge IDs and their ID range starts with 0. In other words, we want to\n # place these edge IDs in the front of the edge list. To ensure that, we add the IDs\n # of outer edges with a large value, so we will get the sorted list as we want.\n max_eid = F.max(induced_edges[0], 0) + 1\n inner_edge = get_inner_edge(subg1, inner_node)\n eid = F.astype(induced_edges[0], F.int64) + max_eid * F.astype(\n inner_edge == 0, F.int64\n )\n\n _, index = F.sort_1d(eid)\n subg1 = edge_subgraph(subg1, index, relabel_nodes=False)\n subg1.ndata[NID] = induced_nodes[0]\n subg1.edata[EID] = F.gather_row(induced_edges[0], index)\n else:\n subg1.ndata[NID] = induced_nodes[0]\n subg1.edata[EID] = induced_edges[0]\n return subg1\n\n for i, subg in enumerate(subgs):\n inner_node = _get_halo_heterosubgraph_inner_node(subg)\n inner_node = F.zerocopy_from_dlpack(inner_node.to_dlpack())\n subg = create_subgraph(\n subg, subg.induced_nodes, subg.induced_edges, inner_node\n )\n subg.ndata[\"inner_node\"] = inner_node\n subg.ndata[\"part_id\"] = F.gather_row(node_part, subg.ndata[NID])\n if reshuffle:\n subg.ndata[\"orig_id\"] = F.gather_row(orig_nids, subg.ndata[NID])\n subg.edata[\"orig_id\"] = F.gather_row(orig_eids, subg.edata[EID])\n\n if extra_cached_hops >= 1:\n inner_edge = get_inner_edge(subg, inner_node)\n else:\n inner_edge = F.ones((subg.num_edges(),), F.int8, F.cpu())\n subg.edata[\"inner_edge\"] = inner_edge\n subg_dict[i] = subg\n print(\"Construct subgraphs: {:.3f} seconds\".format(time.time() - start))\n if reshuffle:\n return subg_dict, orig_nids, orig_eids\n else:\n return subg_dict, None, None",
"def nextDim(leaf, args):\n x = args['xsectionNum'] # number of subregions to partition for the leaf\n lb = leaf.lb # the lower bound of the leaf region\n ub = leaf.ub # the upper bound of the leaf region\n dimDiff = [] # store the diff value (e.g. max-min of dominantion count) for partition direction\n dimX = len(lb) # the number of dimension\n visitedPoints = leaf.visitedPoints() # all the visited points in the tree\n pool = leaf.pool # the visited points in this leaf\n #determine the deminsion of point's objective\n dim = len(leaf.problem.objectives) \n #recorganize all the visited points together into one sorted array\n _visitedPoints = utils.dictToSortedNumpyArray(visitedPoints,dim) \n # calculate the domination count for each point in this pool\n dominantionCount = {} \n for key in pool:\n _p = np.array([pool[key].mean])\n dominantionCount[key] = _cutils.calDominationCount(_p, _visitedPoints, len(_p))[1][0]\n # enumerate all the possible feasible next dimension to partition\n feasibleDim = feasible(leaf, x)\n for dimID in feasibleDim:\n # determine the partition unit distance \n unit = (ub[dimID] - lb[dimID]) / x\n # initialize the promisingIndex for each subregion based on xsection\n promisingIndex = [] \n for i in range(x):\n _lb, _ub = [np.array([]) for _ in range(2)]\n # change the lower and upper bound value at dimID for subRegion x\n for j in range(dimX):\n _lb = np.append(_lb, lb[j] + (unit * i) * (j == dimID))\n _ub = np.append(_ub, ub[j] - (unit * (x - i - 1)) * (j == dimID))\n # calculate the promisingIndex for each subregions\n poolDominantionCount = [np.nan] # in case no points in this subregion\n for key in pool:\n p = pool[key] \n if all(_lb <= p.x) and all(p.x < _ub):\n poolDominantionCount.append(dominantionCount[key])\n # calculate the promising index in this subregion \n promisingIndex.append(np.nanmin(poolDominantionCount))\n # calculate the dimDiff for the dimension dimID \n diff = np.nanmax(promisingIndex) - np.nanmin(promisingIndex)\n dimDiff.append(diff)\n # select the dimension with largest dimDiff value as next dimension to partition\n if dimDiff:\n maxDiff = np.nanmax(dimDiff)\n else:\n maxDiff = np.nan\n if not(np.isnan(maxDiff)):\n candidate = [feasibleDim[i] for i in range(len(feasibleDim)) if dimDiff[i] == maxDiff] \n dim = candidate[np.random.randint(0,len(candidate))]\n elif dimDiff:\n dim = feasibleDim[np.random.randint(0,len(feasibleDim))]\n else:\n dim = np.random.randint(0, dimX)\n #print('Select Dim %d with maxDiff %.2f, range %.2f at level %d' % (dim, maxDiff, ub[dim]-lb[dim],leaf.level))\n return dim",
"def repairPartition(G, partition, imbalance = 0.2, isCharged = []):\n\tn = G.numberOfNodes()\n\tz = G.upperNodeIdBound()\n\tif len(isCharged) > 0:\n\t\tif len(isCharged) != z:\n\t\t\traise ValueError(\"If charges are given, charge array must have the same size as graph\")\n\telse:\n\t\tisCharged = [False for v in G.nodes()]\n\n\tif max(G.nodes()) != n-1:\n\t\traise ValueError(\"Node indices must be continuous.\")\n\n\tif partition.numberOfElements() != n:\n\t\traise ValueError(\"Partition contains \"+str(partition.numberOfElements())+\" elements, but Graph contains \"+str(n))\n\n\tpartition.compact()\n\tfragmentSet = set(partition.getVector())\n\tk = len(fragmentSet)\n\tmaxBlockSize = int(math.ceil(n / k)*(1+imbalance))\n\n\tif partition.numberOfSubsets() != k:\n\t\traise ValueError(\"Input partition says it has \"+str(partition.numberOfSubsets())+\" elements, but \"+str(k)+\" were found.\")\n\n\tfragmentSizes = [0 for f in fragmentSet]\n\tfragmentCharges = [[] for f in fragmentSet]\n\tedgeCuts = [[0 for f in fragmentSet] for v in G.nodes()]\n\n\tgapsFound = False\n\n\tdef gapAt(v, target):\n\t\tif not G.hasNode(v):\n\t\t\treturn False\n\n\t\t# check whether v is in the middle of a gap\n\t\tif v >= 1 and G.hasNode(v-1) and G.hasNode(v+1) and partition[v-1] == partition[v+1] and partition[v-1] != target:\n\t\t\treturn True\n\n\t\t#check whether v is directly left of a gap\n\t\tif G.hasNode(v+1) and G.hasNode(v+2) and target == partition[v+2] and partition[v+1] != target:\n\t\t\treturn True\n\n\t\t#check whether v is directly right of a gap\n\t\tif v >= 2 and G.hasNode(v-2) and G.hasNode(v-1) and partition[v-2] == target and partition[v-1] != target:\n\t\t\treturn True\n\n\t\treturn False\n\n\tdef sizeAllowed(v, target):\n\t\treturn fragmentSizes[target] < maxBlockSize or (fragmentSizes[target] == maxBlockSize and partition[v] == target)\n\n\tdef chargeAllowed(v, target):\n\t\tnumCharged = len(fragmentCharges[target])\n\t\treturn not isCharged[v] or numCharged == 0 or fragmentCharges[target] == [v]\n\n\tdef allowed(v, target):\n\t\treturn chargeAllowed(v, target) and sizeAllowed(v, target) and not gapAt(v, target)\n\n\tdef createNewFragment():\n\t\tif partition.upperBound() <= max(fragmentSet)+1:\n\t\t\tpartition.setUpperBound(max(fragmentSet)+2)\n\t\t\tfragmentSizes.append(0)\n\t\t\tfragmentCharges.append([])\n\t\t\tfor u in G.nodes():\n\t\t\t\tedgeCuts[u].append(0)\n\t\tnewfrag = max(fragmentSet)+1\n\t\tfragmentSet.add(newfrag)\n\t\treturn newfrag\n\n\t# check if already valid and prepare data structures\n\tfor v in G.nodes():\n\t\tfragmentSizes[partition[v]] += 1\n\t\tif isCharged[v]:\n\t\t\tfragmentCharges[partition[v]].append(v)\n\t\tif gapAt(v, partition[v]):\n\t\t\tgapsFound = True\n\n\t\tfor u in G.neighbors(v):\n\t\t\tedgeCuts[v][partition[u]] += G.weight(v, u)\n\n\t# if partition is already valid, return it unchanged\n\tif max(fragmentSizes) <= maxBlockSize and max([len(group) for group in fragmentCharges]) <= 1 and not gapsFound:\n\t\treturn partition\n\n\t#first handle charged nodes\n\tfor fragment in fragmentSet:\n\t\twhile len(fragmentCharges[fragment]) > 1:\n\t\t\t# charged node must be moved. 
We don't care about the size or gap constraints here, these can be handled later.\n\t\t\tbestMovementCandidate = fragmentCharges[fragment][0]\n\t\t\tbestTargetFragment = -1\n\t\t\tbestGain = -float(\"inf\")\n\n\t\t\tfor chargedNode in fragmentCharges[fragment]:\n\t\t\t\tfor target in fragmentSet:\n\t\t\t\t\tgain = edgeCuts[chargedNode][target] - edgeCuts[chargedNode][fragment]\n\t\t\t\t\tif chargeAllowed(chargedNode, target) and gain > bestGain:\n\t\t\t\t\t\tbestGain = gain\n\t\t\t\t\t\tbestTargetFragment = target\n\t\t\t\t\t\tbestMovementCandidate = chargedNode\n\n\t\t\tif bestTargetFragment == -1:\n\t\t\t\traise ValueError(\"Input partition contains multiple charges per fragment and one of them cannot be moved.\")\n\n\t\t\tassert(bestGain > -float(\"inf\"))\n\t\n\t\t\tfragmentCharges[fragment].remove(bestMovementCandidate)\n\t\t\tfragmentCharges[bestTargetFragment].append(bestMovementCandidate)\n\n\t\t\tfragmentSizes[fragment] -= 1\n\t\t\tfragmentSizes[bestTargetFragment] += 1\n\t\t\n\t\t\tfor neighbor in G.neighbors(bestMovementCandidate):\n\t\t\t\tedgeCuts[neighbor][fragment] -= G.weight(neighbor, bestMovementCandidate)\n\t\t\t\tedgeCuts[neighbor][bestTargetFragment] += G.weight(neighbor, bestMovementCandidate)\n\n\t\t\tpartition.moveToSubset(bestTargetFragment, bestMovementCandidate)\n\n\t#then handle gaps\n\tfor v in G.nodes():\n\t\tfragment = partition[v]\n\t\tif v > 0 and G.hasNode(v-1) and G.hasNode(v+1) and partition[v-1] == partition[v+1] and partition[v] != partition[v+1]:\n\t\t\t#we have a gap here.\n\n\t\t\tif isCharged[v]:\n\t\t\t\tif isCharged[v+1]:\n\t\t\t\t\t#swap blocks with right neighbour\n\t\t\t\t\tfragmentCharges[partition[v]].remove(v)\n\t\t\t\t\tfragmentCharges[partition[v+1]].append(v)\n\t\t\t\t\tfragmentCharges[partition[v+1]].remove(v+1)\n\t\t\t\t\tfragmentCharges[partition[v]].append(v+1)\n\n\t\t\t\t\t#block sizes stay unchanged\n\n\t\t\t\t\t#swap blocks\n\t\t\t\t\townFragment = partition[v]\n\t\t\t\t\tpartition.moveToSubset(partition[v+1], v)\n\t\t\t\t\tpartition.moveToSubset(ownFragment, v+1)\n\t\t\t\telse:\n\t\t\t\t\t#move right neighbour to block of v\n\t\t\t\t\tfragmentSizes[partition[v+1]] -= 1\n\t\t\t\t\tfragmentSizes[partition[v]] += 1\n\n\t\t\t\t\tpartition.moveToSubset(fragment, v+1)\n\t\t\telse:\n\t\t\t\tif fragmentSizes[fragment] == 1:\n\t\t\t\t\t#move right neighbour to block of v\n\n\t\t\t\t\tfragmentSizes[partition[v+1]] -= 1\n\t\t\t\t\tfragmentSizes[partition[v]] += 1\n\n\t\t\t\t\t#move charge over\n\t\t\t\t\tif isCharged[v+1]:\n\t\t\t\t\t\tfragmentCharges[partition[v+1]].remove(v+1)\n\t\t\t\t\t\tfragmentCharges[partition[v]].append(v+1)\n\n\t\t\t\t\tpartition.moveToSubset(fragment, v+1)\n\t\t\t\telse:\n\t\t\t\t\t#embed v into surrounding block\n\n\t\t\t\t\tfragmentSizes[partition[v+1]] += 1\n\t\t\t\t\tfragmentSizes[partition[v]] -= 1\n\n\t\t\t\t\tpartition.moveToSubset(partition[v+1], v)\n\n\t#rebuild indices of fragment sizes\n\tfragmentSizes = [0 for f in fragmentSet]\n\tfragmentCharges = [[] for f in fragmentSet]\n\tedgeCuts = [[0 for f in fragmentSet] for v in G.nodes()]\n\n\tfor v in G.nodes():\n\t\tfragmentSizes[partition[v]] += 1\n\t\tif isCharged[v]:\n\t\t\tfragmentCharges[partition[v]].append(v)\n\n\t\tfor u in G.neighbors(v):\n\t\t\tedgeCuts[v][partition[u]] += G.weight(v, u)\n\n\t\t#charges should be still valid\n\t\tassert(chargeAllowed(v,partition[v]))\n\t\t#no gaps should be left\n\t\tassert(not gapAt(v,partition[v]))\n\n\tassert(sum(fragmentSizes) == G.numberOfNodes())\n\tassert(max([len(chargeList) for chargeList in 
fragmentCharges]) <= 1)\n\n\t#now, build heap of all other nodes and handle size constraints\n\tmaxGain = [- float('inf') for v in G.nodes()]\n\tmaxTarget = [-1 for v in G.nodes()]\n\theap = []\n\n\tfor v in G.nodes():\n\t\tfor target in fragmentSet:\n\t\t\tif allowed(v, target) and edgeCuts[v][target] - edgeCuts[v][partition[v]] > maxGain[v]:\n\t\t\t\tmaxGain[v] = edgeCuts[v][target] - edgeCuts[v][partition[v]]\n\t\t\t\tmaxTarget[v] = target\n\n\t\theappush(heap, (-maxGain[v], v))\n\n\tvisited = [False for v in range(n)]\n\tassert(len(heap) == n)\n\ti = 0\n\theapify(heap)\n\n\twhile len(heap) > 0:\n\t\tassert(len(heap) + i == n)\n\t\tassert(sum(visited) == i)\n\t\t(key, v) = heappop(heap)\n\t\tkey *= -1\n\t\t#print(\"i:\",i,\",key:\",key,\",node:\", v)\n\t\ti += 1\n\t\tfragment = partition[v]\n\t\tvisited[v] = True\n\n\t\t# if fragment of v is alright, skip node\n\t\tif fragmentSizes[fragment] <= maxBlockSize and (not isCharged[v] or len(fragmentCharges[fragment]) <= 1) and not gapAt(v, partition[v]):\n\t\t\tcontinue\n\n\t\tif key == -float('inf'):\n\t\t\t#recompute if still the case\n\t\t\tfor target in fragmentSet:\n\t\t\t\tif allowed(v, target) and edgeCuts[v][target] - edgeCuts[v][partition[v]] > maxGain[v]:\n\t\t\t\t\tmaxGain[v] = edgeCuts[v][target] - edgeCuts[v][partition[v]]\n\t\t\t\t\tmaxTarget[v] = target\n\t\t\tif maxGain[v] == -float('inf'):\n\t\t\t\t#now we have a problem. \n\t\t\t\traise RuntimeError(\"k:\"+str(k)+\",maxBlockSize:\"+str(maxBlockSize)+\",v:\"+str(v)+\", partition\"+str(partition))\n\t\t\t\tmaxTarget[v] = createNewFragment()\n\t\t\t## new partition necessary\n\t\t\t\n\n\t\tassert(maxTarget[v] >= 0)\n\t\tassert(maxTarget[v] < len(fragmentCharges))\n\t\tif not allowed(v, maxTarget[v]):\n\t\t\terrorString = \"Node \"+str(v)+\" cannot be moved to block \"+str(maxTarget[v])+\" of size \"+str(fragmentSizes[maxTarget[v]])\n\t\t\t#print(\"Node \", v, \" cannot be moved to block\", maxTarget[v], \" of size \", fragmentSizes[maxTarget[v]])\n\t\t\tif not chargeAllowed(v, maxTarget[v]):\n\t\t\t\terrorString += \"\\nNode\"+str(v)+\"is charged and block\"+str(maxTarget[v])+\"already contains\"+str(len(fragmentCharges[maxTarget[v]]))+\"charged nodes\"\n\t\t\tif not sizeAllowed(v, maxTarget[v]):\n\t\t\t\terrorString += \"\\nThe maximum block size is\"+str(maxBlockSize)\n\t\t\tif gapAt(v, maxTarget[v]):\n\t\t\t\terrorString+=\"\\nA gap would result.\"\n\t\t\traise RuntimeError(errorString)\n\n\t\t# move v to best allowed fragment and update data structures\n\t\tfragmentSizes[partition[v]] -= 1\n\t\tfragmentSizes[maxTarget[v]] += 1\n\n\t\tif isCharged[v]:\n\t\t\tfragmentCharges[partition[v]].remove(v)\n\t\t\tfragmentCharges[maxTarget[v]].append(v)\n\t\n\t\tfor neighbor in G.neighbors(v):\n\t\t\tedgeCuts[neighbor][partition[v]] -= G.weight(neighbor, v)\n\t\t\tedgeCuts[neighbor][maxTarget[v]] += G.weight(neighbor, v)\n\n\t\tpartition.moveToSubset(maxTarget[v], v)\n\n\t\t# update max gains and queue positions of other nodes\n\t\tfor node in G.nodes():\n\t\t\tif visited[node]:\n\t\t\t\tcontinue\n\n\t\t\toldKey = maxGain[node]\n\t\t\tmaxGain[node] = - float('inf')# reset, since the old target might not be valid any more\n\t\t\tfor target in fragmentSet:\n\t\t\t\tif allowed(node, target) and edgeCuts[node][target] - edgeCuts[node][partition[node]] > maxGain[node]:\n\t\t\t\t\tmaxGain[node] = edgeCuts[node][target] - edgeCuts[node][partition[node]]\n\t\t\t\t\tmaxTarget[node] = target\n\n\t\t\tif maxGain[node] != oldKey:\n\t\t\t\theap.remove((-oldKey, 
node))\n\t\t\t\theapify(heap)\n\t\t\t\theappush(heap, (-maxGain[node], node))\n\n\tassert(i == n)\n\tassert(max(fragmentSizes) <= maxBlockSize)\n\tassert(max([len(chargeList) for chargeList in fragmentCharges]) <= 1)\n\t#assert(len(set(partition)) == k)\n\treturn partition",
"def partition(game, player):\n height = game.height\n width = game.width\n blanks = game.get_blank_spaces()\n has_partition = False\n partition_col = int(game.width/2)\n partition_row = int(game.height/2)\n moves = game.get_legal_moves(player)\n if moves:\n player_location = game.get_player_location(player)\n for i in range(2, width - 3): #search for vertical partitions\n if (0,i) not in blanks and (0,i+1) not in blanks:\n j = 1\n while j < height and (j, i) not in blanks and (j, i + 1) not in blanks:\n j += 1\n if j == height:\n has_partition = True\n pb = partition_blanks(game, (0,i))\n if pb[0] > pb[1]: #more blanks on the left of the partition\n for move in moves:\n if move[1] < i:\n return has_partition, True\n return has_partition, False\n else: #more blanks on right of partition\n for move in moves:\n if move[1] > i + 1:\n return has_partition, True\n return has_partition, False\n\n for i in range(2, height - 3): #seach for horizontal partitions\n if (i,0) not in blanks and (i+1,0) not in blanks:\n j = 1\n while j < width and (i,j) not in blanks and (i+1, j) not in blanks:\n j += 1\n if j == width:\n has_partition = True\n pb = partition_blanks(game, (i, 0))\n if pb[0] > pb[1]: #more blanks on top of partition\n for move in moves:\n if move[0] < i:\n return has_partition, True\n return has_partition, False\n else: #more blanks below partition\n for move in moves:\n if move[0] > i + 1:\n return has_partition, True\n return has_partition, False\n\n return has_partition, False",
"def get_bipartition(g):\n # Write your code here.\n colorArr = [-1] * (len(g.nodes()) + 1)\n for node in g.nodes():\n start = g.neighbors(node)\n if len(start)>0:\n src = start.pop()\n break\n colorArr[src] = 1\n queue = []\n queue.append(src)\n while (queue):\n u = queue.pop()\n for v in g.nodes():\n if g.has_edge(u, v) and colorArr[v] == -1:\n colorArr[v] = 1 - colorArr[u]\n queue.append(v)\n elif g.has_edge(u, v) and colorArr[u] == colorArr[v]:\n return None\n\n red = set()\n for i in range(1, len(colorArr)):\n if colorArr[i] == 1:\n red.add(i)\n return list(red)\n\n\n\n # Hint! If you'd like to test out these commands without\n # writing a full-fledged program, you might want to familiarise\n # yourself with the Python interactive shell or IPython (available\n # on at least some Aalto IT computers)\n\n # Create a simple line graph g: \"(1)->(2)->(3)\"\n # (The creation parameter is a dict of {node: list_of_neighbors},\n # but this is not something you will be needing in your code.)\n # >>> from networkx import Graph \n # >>> g = Graph({1: [2], 2: [3]})\n # >>> g.number_of_nodes()\n # 3\n\n # Example. Iterate over the nodes and mark them as visited\n # >>> visited = set()\n # >>> for node in g.nodes_iter(): # There is also g.nodes(), which returns a list\n # ... # do some work here\n # ... visited.add(node)\n \n # Example. Given a Node v, get all nodes s.t. there is an edge between\n # v and that node\n # >>> g.neighbors(1)\n # [2]\n\n # Example. Get the edges of the graph:\n # >>> e.edges() # as with nodes, there is also g.edges_iter()\n # [(1, 2), (2, 3)]\n\n # For more information, consult the NetworkX documentation:\n # https://networkx.github.io/documentation/networkx-1.10/tutorial/tutorial.html",
"def modularity(G, partition):\n m = G.size(weight=\"weight\")\n degrees = dict(G.degree(weight=\"weight\"))\n Q = 0\n for community in partition:\n for u, v in product(community, repeat=2):\n try:\n w = G[u][v].get(\"weight\", 1)\n except KeyError:\n w = 0\n if u == v:\n # Double count self-loop weight.\n w *= 2\n Q += w - degrees[u] * degrees[v] / (2 * m)\n return Q / (2 * m)",
"def run_leiden(graph):\n partition = la.find_partition(graph, la.ModularityVertexPartition, seed=0, weights='weight')\n return partition",
"def _partition(graph, subgraph_backend, op_names=None):\n if subgraph_backend not in _OP_WHITELIST_DICT:\n raise ValueError(\"Unsupported subgraph backend %s, valid candidates are %s\"\n % (subgraph_backend, _OP_WHITELIST_DICT.keys()))\n if op_names is None:\n op_names = _OP_WHITELIST_DICT.get(subgraph_backend)\n out = GraphHandle()\n check_call(_LIB.NNPartitionGraph(graph.handle,\n c_str(subgraph_backend),\n nn_uint(len(op_names)),\n c_array(ctypes.c_char_p, [c_str(s) for s in op_names]),\n ctypes.byref(out)))\n return Graph(out)",
"def partitions(n, k):\n if k == 1:\n yield (n,)\n return\n for i in range(1, n):\n for p in partitions(n-i, k-1):\n yield (i,) + p",
"def change_partition(amount):\n\n def part_tree(n, m):\n if n == 0:\n return tree(True)\n if n < 0 or m == 0:\n return tree(False)\n else:\n left = part_tree(n - m, m)\n right = part_tree(n, m // 2)\n return tree(m, [left, right])\n\n\n k = floor(log(amount) / log(2))\n l = pow(2, k)\n return part_tree(amount, l)",
"def power(G, k):\n if k <= 0:\n raise ValueError('k must be a positive integer')\n H = nx.Graph()\n H.add_nodes_from(G)\n # update BFS code to ignore self loops.\n for n in G:\n seen = {} # level (number of hops) when seen in BFS\n level = 1 # the current level\n nextlevel = G[n]\n while nextlevel:\n thislevel = nextlevel # advance to next level\n nextlevel = {} # and start a new list (fringe)\n for v in thislevel:\n if v == n: # avoid self loop\n continue\n if v not in seen:\n seen[v] = level # set the level of vertex v\n nextlevel.update(G[v]) # add neighbors of v\n if k <= level:\n break\n level += 1\n H.add_edges_from((n, nbr) for nbr in seen)\n return H",
"def dlk_partitions(totalD, totalL, totalK,\\\n minD = 0,minL = 0,minK = 0) :\n partitions = []\n## if goodDLK_2(totalD,totalL,totalK+1) and totalE >= 1:\n## partitions.append((((totalD,totalL,totalK,totalE-1),1),))\n if (totalD,totalL,totalK) == (0,0,0) :\n return [()]\n for d1 in range(minD, totalD +1):\n loD = totalD - d1\n for l1 in range(minL, totalL +1):\n loL = totalL - l1\n for k1 in range(minK, totalK +1):\n loK = totalK - k1\n if not goodDLK_2(d1,l1,k1+1) :\n continue\n \n rest = dlk_partitions(loD,loL,loK,d1,l1,k1)\n partitions += [updatePartition(r, (d1,l1,k1)) for r in rest]\n # this updating of the lower bound of iterations\n # is because bound is on lexicographical order.\n minK = 0\n minK = 0\n minL = 0\n return partitions",
"def part_recur(ckt, initial, w):\n partition_set = []\n# partition_mech = KLPart.KLPartition()\n# convert_Gate(ckt, partition_mech)\n print \"Diving into C++\"\n# (a, b) = partition_mech.partition_once(KLPart.StringVector(list(set(initial))))\n (a, b) = partition(ckt, list(set(initial)))\n print \"Coming back up\"\n if len(get_inputs(ckt, a)) > w and len(a) > 3:\n partition_set = partition_set + part_recur(ckt, a, w)\n else:\n partition_set.append(a)\n if len(get_inputs(ckt, b)) > w and len(b) > 3:\n partition_set = partition_set + part_recur(ckt, b, w)\n else:\n partition_set.append(b)\n return partition_set",
"def iter_dfs(self, depth=0):\n yield self, depth\n yield from self.left.iter_dfs(depth=depth + 1)\n yield from self.right.iter_dfs(depth=depth + 1)",
"def metis_partition_assignment(\n g, k, balance_ntypes=None, balance_edges=False, mode=\"k-way\", objtype=\"cut\"\n):\n assert mode in (\n \"k-way\",\n \"recursive\",\n ), \"'mode' can only be 'k-way' or 'recursive'\"\n assert (\n g.idtype == F.int64\n ), \"IdType of graph is required to be int64 for now.\"\n # METIS works only on symmetric graphs.\n # The METIS runs on the symmetric graph to generate the node assignment to partitions.\n start = time.time()\n sym_gidx = _CAPI_DGLMakeSymmetric_Hetero(g._graph)\n sym_g = DGLGraph(gidx=sym_gidx)\n print(\n \"Convert a graph into a bidirected graph: {:.3f} seconds, peak memory: {:.3f} GB\".format(\n time.time() - start, get_peak_mem()\n )\n )\n vwgt = []\n # To balance the node types in each partition, we can take advantage of the vertex weights\n # in Metis. When vertex weights are provided, Metis will tries to generate partitions with\n # balanced vertex weights. A vertex can be assigned with multiple weights. The vertex weights\n # are stored in a vector of N * w elements, where N is the number of vertices and w\n # is the number of weights per vertex. Metis tries to balance the first weight, and then\n # the second weight, and so on.\n # When balancing node types, we use the first weight to indicate the first node type.\n # if a node belongs to the first node type, its weight is set to 1; otherwise, 0.\n # Similary, we set the second weight for the second node type and so on. The number\n # of weights is the same as the number of node types.\n start = time.time()\n if balance_ntypes is not None:\n assert (\n len(balance_ntypes) == g.num_nodes()\n ), \"The length of balance_ntypes should be equal to #nodes in the graph\"\n balance_ntypes = F.tensor(balance_ntypes)\n uniq_ntypes = F.unique(balance_ntypes)\n for ntype in uniq_ntypes:\n vwgt.append(F.astype(balance_ntypes == ntype, F.int64))\n\n # When balancing edges in partitions, we use in-degree as one of the weights.\n if balance_edges:\n if balance_ntypes is None:\n vwgt.append(F.astype(g.in_degrees(), F.int64))\n else:\n for ntype in uniq_ntypes:\n nids = F.asnumpy(F.nonzero_1d(balance_ntypes == ntype))\n degs = np.zeros((g.num_nodes(),), np.int64)\n degs[nids] = F.asnumpy(g.in_degrees(nids))\n vwgt.append(F.zerocopy_from_numpy(degs))\n\n # The vertex weights have to be stored in a vector.\n if len(vwgt) > 0:\n vwgt = F.stack(vwgt, 1)\n shape = (\n np.prod(\n F.shape(vwgt),\n ),\n )\n vwgt = F.reshape(vwgt, shape)\n vwgt = F.to_dgl_nd(vwgt)\n else:\n vwgt = F.zeros((0,), F.int64, F.cpu())\n vwgt = F.to_dgl_nd(vwgt)\n print(\n \"Construct multi-constraint weights: {:.3f} seconds, peak memory: {:.3f} GB\".format(\n time.time() - start, get_peak_mem()\n )\n )\n\n start = time.time()\n node_part = _CAPI_DGLMetisPartition_Hetero(\n sym_g._graph, k, vwgt, mode, (objtype == \"cut\")\n )\n print(\n \"Metis partitioning: {:.3f} seconds, peak memory: {:.3f} GB\".format(\n time.time() - start, get_peak_mem()\n )\n )\n if len(node_part) == 0:\n return None\n else:\n node_part = utils.toindex(node_part)\n return node_part.tousertensor()",
"def dfs(G,u,parent,ap,depth,low,bridges):\r\n\tchildren = 0\r\n\tfor v in G[u]:\r\n\t\tif depth[v] ==-1:\r\n\t\t\tdepth[v] = low[v] = depth[u]+1\r\n\t\t\tparent[v] = u\r\n\t\t\tchildren+=1\r\n\t\t\tdfs(G,v,parent,ap,depth,low,bridges)\r\n\t\t\tlow[u] = min(low[u],low[v])\r\n\t\t\tif parent[u] == -1 and children > 1:\r\n\t\t\t\tap[u] = 1\r\n\t\t\tif parent[u] != -1 and low[v] >= depth[u]:\r\n\t\t\t\tap[u] = 1\r\n\t\t\tif low[v] > depth[u]:\r\n\t\t\t\tbridges.append((u,v))\r\n\t\telif depth[v] < depth[u] and parent[u]!=v:\r\n\t\t\tlow[u] = min(low[u],depth[v])\r\n\treturn",
"def dfs2(G):\r\n\r\n for v in V(G):\r\n v.visited = False\r\n\r\n result = []\r\n\r\n for v in V(G):\r\n if not v.visited:\r\n X = dfs2_visit(v)\r\n result.append(X)\r\n\r\n return result"
] | [
"0.6947835",
"0.6692274",
"0.6523319",
"0.64699167",
"0.61639374",
"0.61390674",
"0.6134492",
"0.5891478",
"0.5791794",
"0.57705605",
"0.5603623",
"0.5438325",
"0.54188836",
"0.53874147",
"0.5370432",
"0.5352181",
"0.534992",
"0.5345076",
"0.53345937",
"0.53251743",
"0.52911776",
"0.5242322",
"0.52322257",
"0.5224702",
"0.52224517",
"0.51928836",
"0.5175992",
"0.5135598",
"0.5134077",
"0.5043952"
] | 0.8169132 | 0 |
Tests the creation of LASCOMap using FITS. | def test_fitstoLASCO(lasco):
assert isinstance(lasco, LASCOMap) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_initialized() -> None:\n MapieClassifier()",
"def test_initialized() -> None:\n MapieRegressor()",
"def test_make_ec_map():\n pass",
"def test_on_map_of_constants(synthetic_checkerboard):\n img = synthetic_checkerboard['img']\n di = synthetic_checkerboard['cdi']\n\n cpp_vorimg = tess.tessellate_labimg(img,di)\n\n py_vorimg = pytess.tessellate_labimg(img,di)\n assert np.alltrue(py_vorimg[:4,:4] == 1)\n printers.store_ndarray(\"py_voronoi_on_map_of_constants_output.txt\",py_vorimg)\n\n assert cpp_vorimg.size > 0\n assert cpp_vorimg.shape == synthetic_checkerboard['img'].shape\n assert np.alltrue(synthetic_checkerboard['img'][1:3,1:3] == 1)\n\n printers.store_ndarray(\"cpp_voronoi_input.txt\",img)\n printers.store_ndarray(\"cpp_voronoi_on_map_of_constants_output.txt\",cpp_vorimg)\n\n assert np.alltrue(cpp_vorimg[:4,:4] == 1)\n assert np.alltrue(cpp_vorimg == py_vorimg)",
"def test_hsmcatalog():\n if __name__ == '__main__':\n logger = piff.config.setup_logger(verbose=2)\n else:\n logger = piff.config.setup_logger(log_file='output/test_hsmcatalog.log')\n\n image_file = os.path.join('output','test_stats_image.fits')\n cat_file = os.path.join('output','test_stats_cat.fits')\n psf_file = os.path.join('output','test_starstats.fits')\n hsm_file = os.path.join('output', 'test_hsmcatalog.fits')\n config = {\n 'input' : {\n 'image_file_name' : image_file,\n 'cat_file_name' : cat_file,\n 'stamp_size' : 48,\n },\n 'select' : {\n 'reserve_frac' : 0.2,\n 'seed' : 123\n },\n 'psf' : {\n 'model' : { 'type' : 'Gaussian',\n 'fastfit': True,\n 'include_pixel': False },\n 'interp' : { 'type' : 'Mean' },\n },\n 'output' : {\n 'file_name' : psf_file,\n 'stats' : [\n {\n 'type': 'HSMCatalog',\n 'file_name': hsm_file,\n }\n ]\n }\n }\n piff.piffify(config, logger)\n assert os.path.isfile(hsm_file)\n\n data, header = fitsio.read(hsm_file, header=True)\n for col in ['ra', 'dec', 'x', 'y', 'u', 'v',\n 'T_data', 'g1_data', 'g2_data',\n 'T_model', 'g1_model', 'g2_model',\n 'flux', 'reserve', 'flag_data', 'flag_model']:\n assert len(data[col]) == 10\n true_data = fitsio.read(cat_file)\n\n assert header['PIFF_VERSION'] == piff.__version__\n\n np.testing.assert_allclose(data['x'], true_data['x'])\n np.testing.assert_allclose(data['y'], true_data['y'])\n np.testing.assert_allclose(data['flux'], 123.45, atol=0.001)\n print('reserve = ',data['reserve'])\n print('nreserve = ',np.sum(data['reserve']))\n print('ntot = ',len(data['reserve']))\n assert np.sum(data['reserve']) == int(0.2 * len(data['reserve']))\n np.testing.assert_allclose(data['T_model'], data['T_data'], rtol=1.e-4)\n np.testing.assert_allclose(data['g1_model'], data['g1_data'], rtol=1.e-4)\n np.testing.assert_allclose(data['g2_model'], data['g2_data'], rtol=1.e-4)\n\n # On this file, no hsm errors\n np.testing.assert_array_equal(data['flag_data'], 0)\n np.testing.assert_array_equal(data['flag_model'], 0)\n\n image = galsim.fits.read(image_file)\n world = [image.wcs.toWorld(galsim.PositionD(x,y)) for x,y in zip(data['x'],data['y'])]\n np.testing.assert_allclose(data['ra'], [w.ra.deg for w in world], rtol=1.e-4)\n np.testing.assert_allclose(data['dec'], [w.dec.deg for w in world], rtol=1.e-4)\n\n # Repeat with non-Celestial WCS\n wcs = galsim.AffineTransform(0.26, 0.05, -0.08, -0.24, galsim.PositionD(1024,1024))\n config['input']['wcs'] = wcs\n piff.piffify(config, logger)\n data = fitsio.read(hsm_file)\n np.testing.assert_array_equal(data['ra'], 0.)\n np.testing.assert_array_equal(data['dec'], 0.)\n world = [wcs.toWorld(galsim.PositionD(x,y)) for x,y in zip(data['x'],data['y'])]\n np.testing.assert_allclose(data['u'], [w.x for w in world], rtol=1.e-4)\n np.testing.assert_allclose(data['v'], [w.y for w in world], rtol=1.e-4)\n\n # Use class directly, rather than through config.\n psf = piff.PSF.read(psf_file)\n stars, _, _ = piff.Input.process(config['input'])\n stars = piff.Select.process(config['select'], stars)\n hsmcat = piff.stats.HSMCatalogStats()\n with np.testing.assert_raises(RuntimeError):\n hsmcat.write('dummy') # Cannot write before compute\n hsmcat.compute(psf, stars)\n hsm_file2 = os.path.join('output', 'test_hsmcatalog2.fits')\n with np.testing.assert_raises(ValueError):\n hsmcat.write() # Must supply file_name if not given in constructor\n hsmcat.write(hsm_file2)\n data2 = fitsio.read(hsm_file2)\n for key in data.dtype.names:\n np.testing.assert_allclose(data2[key], data[key], rtol=1.e-5)",
"def _set_folium_map(self):",
"def setUp(self): \n \n self.obo = MinimalObo(obo_file)\n self.emapa = get_emapa_map(emapa_file, self.obo)",
"def test_fit() -> None:\n mapie = MapieClassifier()\n mapie.fit(X_toy, y_toy)",
"def test_basic(self):\n st.map(df1)\n\n c = json.loads(self.get_delta_from_queue().new_element.deck_gl_json_chart.json)\n\n self.assertIsNotNone(c.get(\"initialViewState\"))\n self.assertIsNotNone(c.get(\"layers\"))\n self.assertIsNotNone(c.get(\"mapStyle\"))\n self.assertEqual(len(c.get(\"layers\")), 1)\n self.assertEqual(c.get(\"initialViewState\").get(\"latitude\"), 2.5)\n self.assertEqual(c.get(\"initialViewState\").get(\"longitude\"), 25)\n self.assertEqual(c.get(\"initialViewState\").get(\"zoom\"), 3)\n self.assertEqual(c.get(\"initialViewState\").get(\"pitch\"), 0)\n self.assertEqual(c.get(\"layers\")[0].get(\"@@type\"), \"ScatterplotLayer\")",
"def test_on_map_of_0s(synthetic_checkerboard):\n img = synthetic_checkerboard['img']\n di = np.zeros_like(img)\n\n cpp_vorimg = tess.tessellate_labimg(img,di)\n\n py_vorimg = pytess.tessellate_labimg(img,di)\n assert np.alltrue(py_vorimg[:4,:4] == 1)\n printers.store_ndarray(\"py_voronoi_on_map_of_0s_output.txt\",py_vorimg)\n\n assert cpp_vorimg.size > 0\n assert cpp_vorimg.shape == synthetic_checkerboard['img'].shape\n assert np.alltrue(synthetic_checkerboard['img'][1:3,1:3] == 1)\n\n printers.store_ndarray(\"cpp_voronoi_input.txt\",img)\n printers.store_ndarray(\"cpp_voronoi_on_map_of_0s_output.txt\",cpp_vorimg)\n\n assert np.alltrue(cpp_vorimg[:4,:4] == 1)\n assert np.alltrue(cpp_vorimg == py_vorimg)",
"def __init__(self, tefflogg=True, abundances=True, cannon=True):\n hdu = pyfits.open('../data/GALAH_DR2_withSH_feb2020.fits', names=True)\n data = hdu[1].data\n if tefflogg:\n data = data[ (data['teff']>5300) * (data['teff']<6500) *\n (data['logg']>3) * (data['logg']<5) ]\n if abundances:\n data = data[ (data['flag_o_fe']==0) * (data['flag_na_fe']==0) * \n (data['flag_mg_fe']==0) * (data['flag_al_fe']==0) * \n (data['flag_si_fe']==0) * (data['flag_k_fe']==0) * \n (data['flag_ca_fe']==0) * (data['flag_sc_fe']==0) * \n (data['flag_ti_fe']==0) * (data['flag_v_fe']==0) * \n (data['flag_cr_fe']==0) * (data['flag_mn_fe']==0) *\n (data['flag_ni_fe']==0) * (data['flag_cu_fe']==0) * \n (data['flag_zn_fe']==0) * (data['flag_y_fe']==0) * \n (data['flag_ba_fe']==0) #* (data['flag_la_fe']==0)\n ] #(data['e_ba_fe]<1)\n if cannon:\n data = data[ (data['flag_cannon']==0) ]\n self.data = data\n return None",
"def test_map_overview_accuracy(self):\n params = [10000, 5, 10, 15]\n height = 100\n width = 200\n world_map = gen.generate_map(height=height, width=width, params=params)\n image = img.get_map_overview(world_map)\n pixels = image.load()\n for x in range(width):\n for y in range(height):\n color = tuple(img.get_color(world_map[x][y]))\n self.assertEqual(pixels[x, y], color)",
"def test_map(log_prob_coo):\n\n offset_dict = log_prob_coo['offsets']\n\n # the input\n print(log_prob_coo)\n print('input log probs')\n print(log_prob_sparse_to_dense(log_prob_coo['coo']))\n\n # with this shape converter, we get one row, where each value is one m\n converter = IndexConverter(total_n_cells=1,\n total_n_genes=log_prob_coo['coo'].shape[0])\n\n # set up and estimate\n estimator = MAP(index_converter=converter)\n noise_csr = estimator.estimate_noise(noise_log_prob_coo=log_prob_coo['coo'],\n noise_offsets=offset_dict)\n\n # output\n print('dense noise count estimate, per m')\n out_per_m = np.array(noise_csr.todense()).squeeze()\n print(out_per_m)\n print('truth')\n print(log_prob_coo['maps'])\n\n # test\n np.testing.assert_array_equal(out_per_m, log_prob_coo['maps'])",
"def test_choropleth_pass():\n m = view(world, column=\"pop_est\")",
"def setUp(self):\n super().setUp()\n self.grid, err = xyzgrid.XYZGrid.create(\"testgrid\")\n self.grid.add_maps(self.map_data)\n self.map = self.grid.get_map(self.map_data[\"zcoord\"])\n\n # output to console\n # def _log(msg):\n # print(msg)\n # self.grid.log = _log",
"def test():\n import os\n import ClearMap.ImageProcessing.SpotDetection as self\n reload(self)\n import ClearMap.IO as io \n import ClearMap.Settings as settings\n \n basedir = settings.ClearMapPath;\n #fn = '/home/ckirst/Science/Projects/BrainActivityMap/Data/iDISCO_2015_06/Adult cfos C row 20HF 150524.ims';\n fn = os.path.join(basedir, 'Test/Data/Synthetic/label_iDISCO_\\d{3}.tif');\n fn = os.path.join(basedir, 'Test/Data/OME/16-17-27_0_8X-s3-20HF_UltraII_C00_xyz-Table Z\\d{4}.ome.tif');\n #fn = '/run/media/ckirst/ChristophsBackuk4TB/iDISCO_2015_06/Adult cfos C row 20HF 150524.ims';\n #fn = '/home/nicolas/Windows/Nico/cfosRegistrations/Adult cfos C row 20HF 150524 - Copy.ims';\n #fn = '/home/ckirst/Science/Projects/BrainActivityMap/iDISCO_2015_04/test for spots added spot.ims'\n\n img = io.readData(fn);\n #img = dataset[0:500,0:500,1000:1008];\n #img = dataset[600:1000,1600:1800,800:830];\n #img = dataset[500:1500,500:1500,800:809]; \n img = img.astype('int16');\n \n #m = sys.modules['iDISCO.ImageProcessing.SpotDetection']\n #c = self.detectCells(img);\n \n c = self.detectCells(img, dogSize = None, cellShapeThreshold = 1, cellShapeFile = '/home/ckirst/Science/Projects/BrainActivityMap/Analysis/iDISCO/Test/Data/CellShape/cellshape_\\d{3}.tif');\n \n print ('done, found %d cells !' % c[0].shape[0])\n\n\n #test intensities:\n import numpy;\n x = numpy.random.rand(30,30,10);\n centers = numpy.array([[0,0,0], [29,29,9]]);\n i = self.findIntensity(x, centers, boxSize = (1,1,1));\n print (i)",
"def test_make_stats(self):\r\n map = self.mapping\r\n stats = \"\"\"Clustersize\\t#\r\n1:\\t\\t2\r\n2:\\t\\t1\r\n5:\\t\\t1\"\"\"\r\n\r\n self.assertEqual(make_stats(map), stats)",
"def test_predictor():",
"def test_conus():\n sat = gini.GINIZFile(get_test_file(\"TIGN02\", fponly=True))\n assert sat.archive_filename() == \"GOES_SUPER_IR_201509281745.png\"\n assert sat.awips_grid() == 0\n assert sat.metadata[\"map_projection\"] == 5",
"def test_fitstoEIT(eit_map):\n assert isinstance(eit_map, EITMap)",
"def test_core_functionality(self):\n # Test typing\n self.run_map_collection(\n _map_collection=self.example_map\n )",
"def testMapConstructor(self):\n # We only use one map type since they all share the same implementation for\n # this logic.\n with self.assertRaises(AssertionError):\n data_types.StepBuildStatsMap({1: 2})\n m = data_types.StepBuildStatsMap({'step': data_types.BuildStats()})\n self.assertEqual(m, {'step': data_types.BuildStats()})",
"def read_map(file, coordinate=None, fields_healpix=None, car_box=None, geometry=None):\n\n new_map = so_map()\n hdulist = pyfits.open(file)\n try:\n header = hdulist[1].header\n new_map.pixel = \"HEALPIX\"\n if fields_healpix is None:\n new_map.ncomp = header[\"TFIELDS\"]\n new_map.data = hp.fitsfunc.read_map(file, field=np.arange(new_map.ncomp))\n else:\n try:\n new_map.ncomp = len(fields_healpix)\n except:\n new_map.ncomp = 1\n new_map.data = hp.fitsfunc.read_map(file, field=fields_healpix)\n\n new_map.nside = hp.pixelfunc.get_nside(new_map.data)\n new_map.geometry = \"healpix geometry\"\n try:\n new_map.coordinate = header[\"SKYCOORD\"]\n except:\n new_map.coordinate = None\n\n except:\n header = hdulist[0].header\n new_map.pixel = header[\"CTYPE1\"][-3:]\n try:\n new_map.ncomp = header[\"NAXIS3\"]\n except:\n new_map.ncomp = 1\n\n if car_box is not None:\n car_box = np.array(car_box) * np.pi / 180\n new_map.data = enmap.read_map(file, box=car_box)\n elif geometry is not None:\n new_map.data = enmap.read_map(file, geometry=geometry)\n else:\n new_map.data = enmap.read_map(file)\n\n new_map.nside = None\n new_map.geometry = new_map.data.geometry[1:]\n new_map.coordinate = header[\"RADESYS\"]\n if new_map.coordinate == \"ICRS\":\n new_map.coordinate = \"equ\"\n\n hdulist.close()\n\n if coordinate is not None:\n new_map.coordinate = coordinate\n\n return new_map",
"def setUp(self):\n np.random.seed(1234)\n\n _TEST_FILE_NAME = 'AHN3.las'\n _TEST_DATA_SOURCE = 'testdata'\n\n _CYLINDER = InfiniteCylinder(4)\n _PC_260807 = load(os.path.join(_TEST_DATA_SOURCE, _TEST_FILE_NAME))\n _PC_1000 = copy_point_cloud(_PC_260807, array_mask=(\n np.random.choice(range(len(_PC_260807[keys.point]['x']['data'])), size=1000, replace=False)))\n _1000_NEIGHBORHOODS_IN_260807 = list(compute_neighbors.compute_neighborhoods(_PC_260807, _PC_1000, _CYLINDER))\n\n self.point_cloud = _PC_260807\n self.neigh = _1000_NEIGHBORHOODS_IN_260807",
"def setUp(self):\r\n self.data = {}\r\n self.data['coord'] = [['Sample1', 'Sample2'], array([[-0.2, 0.07],\r\n [-0.04, 0.2]]), array(\r\n [0.7, 0.6]),\r\n array([25.00, 30.00])]\r\n self.data[\r\n 'map'] = [['#Sample-ID', 'Day'], ['Sample1', 'Day1'], ['Sample2',\r\n 'Day1']]\r\n\r\n self.coord_header = [\"Sample1\", \"Sample2\", \"Sample3\"]\r\n self.coords = array(\r\n [[-0.219044992, 0.079674486, 0.09233683], [-0.042258081,\r\n 0.000204041, 0.024837603], [0.080504323, -0.212014503,\r\n -0.088353435]])\r\n self.groups = {}\r\n self.groups['Day1'] = ['Sample1', 'Sample2', 'Sample3']\r\n self.pct_var = array([25.00, 30.00, 35.00])\r\n self.coord_tups = [(\"1\", \"2\"), (\"3\", \"2\"), (\"1\", \"3\")]\r\n self.colors = {\"Day1\": \"red1\"}\r\n self.filename = 'test_pca.txt'\r\n self.dir_path = '/tmp/'\r\n self.prefs = {}\r\n self.prefs['Sample'] = {}\r\n self.prefs['Sample']['column'] = \"Day\"\r\n\r\n self.dict = defaultdict(list)\r\n self.dict['Day1'].append('Sample1')\r\n self.dict['Day1'].append('Sample2')\r\n self.dict['Day1'].append('Sample3')\r\n\r\n self.labelname = self.prefs['Sample']['column']\r\n self.mapping = [\r\n [\"Sample-ID\", \"Day\", \"Type\"], [\"Sample1\", \"Day1\", \"Soil\"],\r\n [\"Sample2\", \"Day1\", \"Soil\"], [\"Sample3\", \"Day1\", \"Soil\"]]\r\n self.data_color_hsv = {\r\n #'black1':\t(0,0,20),\r\n 'red1': (0, 100, 100),\r\n 'blue1': (240, 100, 100),\r\n 'orange1': (28, 98, 95),\r\n 'green1': (120, 100, 50.2),\r\n 'purple1': (302, 73, 57),\r\n 'yellow1': (60, 100, 100),\r\n 'cyan1': (184, 49, 96),\r\n 'pink1': (333, 37, 96),\r\n 'teal1': (178, 42, 63),\r\n 'brown1': (36, 89, 42),\r\n 'gray1': (0, 0, 50.2),\r\n 'lime': (123, 99, 96),\r\n 'red2': (14, 51, 97),\r\n 'blue2': (211, 42, 85),\r\n 'orange2': (32, 46, 99),\r\n 'green2': (142, 36, 79),\r\n 'purple2': (269, 29, 75),\r\n 'yellow2': (56, 40, 100),\r\n #'black2':\t(303,100,24),\r\n 'gray2': (0, 0, 75.3),\r\n #'teal2':\t(192,100,24),\r\n 'red3': (325, 100, 93),\r\n 'blue3': (197, 100, 100),\r\n #'purple3':\t(271,43,36),\r\n 'brown2': (33, 45, 77),\r\n 'green3': (60, 100, 50.2),\r\n 'purple4': (264, 75, 100),\r\n #'yellow3':\t(60,66,75),\r\n #'blue4':\t(213,45,77),\r\n 'red4': (348, 31, 74),\r\n 'teal3': (180, 100, 50.2),\r\n #'brown3':\t(60,100,28),\r\n 'red5': (0, 100, 50.2),\r\n 'green4': (81, 100, 26),\r\n #'purple5':\t(240,100,41),\r\n 'orange3': (26, 100, 65)\r\n #'brown4':\t(25,100,20),\r\n #'red6':\t(17,100,63),\r\n #'purple6':(272,100,44)\r\n }\r\n\r\n self.data_color_order = ['red1', 'blue1', 'orange1', 'green1',\r\n 'purple1', 'yellow1', 'cyan1', 'pink1', 'teal1', 'brown1',\r\n 'gray1', 'lime', 'red2', 'blue2', 'orange2', 'green2',\r\n 'purple2', 'yellow2', 'gray2', 'red3', 'blue3', 'brown2',\r\n 'green3', 'purple4', 'red4', 'teal3', 'red5', 'green4',\r\n 'orange3']\r\n\r\n self._paths_to_clean_up = []\r\n self._dir_to_clean_up = ''",
"def test_mapchete_input(mapchete_input):\n with mapchete.open(mapchete_input.dict) as mp:\n config = mp.config.params_at_zoom(5)\n input_data = config[\"input\"][\"file2\"]\n assert input_data.bbox()\n assert input_data.bbox(CRS.from_epsg(3857))\n mp_input = input_data.open(next(mp.get_process_tiles(5)))\n assert not mp_input.is_empty()",
"def test_newmap_template(self):\n\t\tc = Client()\n\t\tresponse = c.get('/cartography/newmap')\n\t\tself.assertEquals(response.status_code, 200)",
"def create():\n\tprint 'create tile map instance'\n\tsurface = tilemap.new(width, height, maxelevation)\n\t#for i in range(5):\n\t\t#tilemap.generator.rain(surface, 2000)\n\tspringlevel=len(surface)/5\n\tspringrange=springlevel/2\n\tprint springlevel\n\tprint 'run water simulation'\n\tfor i in range(1):\n\t\ttilemap.generator.rain(surface, 40, \n\t\t\tsprings=[s for s in surface.highest(\n\t\t\tspringlevel+(springrange)/(i+1))[springlevel::springrange/5]])\n\tprint 'smooth out heightmap irritations'\n\ttilemap.generator.smoothen(surface,1)\n\tprint 'run grass growing simulation'\n\ttilemap.generator.sprout(surface)\n\tprint 'apply tile map node parameters, compute node polygon coordinates'\n\tsurface.init_mesh()\n\tprint 'return tile map instance'\n\treturn surface",
"def test_init(self):\r\n c = AlphaDiversityCalc(observed_otus)\r\n self.assertEqual(c.Metric, observed_otus)\r\n self.assertEqual(c.Params, {})",
"def test_map_absence(self):\n self.data.noise_map = []\n self.assertRaises(TypeError, module_05.run_module,\n self.data)"
] | [
"0.642239",
"0.6291131",
"0.61287856",
"0.58283335",
"0.5784628",
"0.5760995",
"0.5741576",
"0.5735342",
"0.56763834",
"0.56645465",
"0.5649165",
"0.56062466",
"0.5596278",
"0.5567189",
"0.55620426",
"0.55452317",
"0.55275035",
"0.5512195",
"0.5509079",
"0.5508",
"0.55052185",
"0.5498298",
"0.54977185",
"0.54869264",
"0.54700357",
"0.54440147",
"0.54368645",
"0.5436814",
"0.54263127",
"0.54238766"
] | 0.6583202 | 0 |
Test the is_datasource_for method of LASCOMap. Note that the header data provided as an argument can be a MetaDict object. | def test_is_datasource_for(lasco):
assert lasco.is_datasource_for(lasco.data, lasco.meta) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_is_datasource_for(eit_map):\n assert eit_map.is_datasource_for(eit_map.data, eit_map.meta)",
"def is_datasource_for(cls, **kwargs):\n if 'source' in kwargs.keys():\n if kwargs.get('source', ''):\n return kwargs.get('source', '').lower().startswith(cls._source)\n if 'meta' in kwargs.keys():\n return kwargs['meta'].get('TELESCOP', '').startswith('GOES')",
"def _is_in_datasource(self, data_source, obj):\n q = self.sql_query(\n \"\"\" select @rid from (select expand(in(Owns)) from {obj_rid}) \\\n where @class = 'DataSource' and @rid = {rid}\"\"\".format(\n obj_rid = obj._id, ds_rid = data_source._id))\n return len(q) > 0",
"def test_data_source_soaps_get(self):\n pass",
"def can_access_datasource(self, datasource: \"BaseDatasource\") -> bool:\n\n try:\n self.raise_for_access(datasource=datasource)\n except SupersetSecurityException:\n return False\n\n return True",
"def get_datasource(self):\n return None",
"def isDataSourceReadable(self):\r\n\r\n readable = True\r\n start, stop = self.getReadParameters(\\\r\n numpy.array(0, dtype=numpy.int64), self.chunk_size)\r\n try:\r\n self.data_source.read(start, stop)\r\n except tables.HDF5ExtError:\r\n readable = False\r\n print(translate('Buffer',\r\n \"\"\"\\nError: problems reading records. The dataset seems \"\"\"\r\n \"\"\"to be compressed with the {0} library. Check that it \"\"\"\r\n \"\"\"is installed in your system, please.\"\"\",\r\n 'A dataset readability error').\\\r\n format(self.data_source.filters.complib))\r\n\r\n return readable",
"def test_get_datasource_retrieves_from_cache(\n in_memory_runtime_context,\n) -> None:\n context = in_memory_runtime_context\n\n name = context.list_datasources()[0][\"name\"]\n\n # If the value is in the cache, no store methods should be invoked\n with mock.patch(\n \"great_expectations.data_context.store.DatasourceStore.get\"\n ) as mock_get:\n context.get_datasource(name)\n\n assert not mock_get.called",
"def test_data_source_soaps_id_exists_get(self):\n pass",
"def _sanity_check_datasource(ds):\n if len(ds) != 1:\n raise SanityCheckError('GeoJSON should have only 1 layer.')\n # TODO: add more checks",
"def data_source_info(self) -> 'outputs.DatasourceResponse':\n return pulumi.get(self, \"data_source_info\")",
"def test_context_data(self):\n response = self.client.get(self.get_url())\n context = response.context\n self.assertIn('source_dataset_table', context)\n for ds in self.datasets:\n self.assertIn(ds, context['source_dataset_table'].data)\n self.assertIsInstance(context['source_dataset_table'], tables.SourceDatasetTableFull)",
"def get_datasource_of():\n global datasource_of\n\n if not datasource_of:\n datasource_of = stixhelpers.datasource_of()\n \n return datasource_of",
"def can_access_all_datasources(self) -> bool:\n\n return self.can_access(\"all_datasource_access\", \"all_datasource_access\")",
"def is_dataset(self):\n return self._dataset is not None",
"def _single_data_source(self) -> DataSource:\n data_source = None\n for meta_column in self._meta_columns:\n if data_source is None:\n data_source = meta_column.data_source\n elif data_source is not meta_column.data_source:\n raise SomeError('Mixed data sources are not supported')\n if data_source is None:\n raise SomeError('The column list provides no data source')\n return data_source",
"def test_missing_data_sources(self):",
"def data_source_set_info(self) -> Optional['outputs.DatasourceSetResponse']:\n return pulumi.get(self, \"data_source_set_info\")",
"def verify_connection(self, datasource):\n url = urljoin(self.base_url, \"dataservers\")\n if not self.session.verify:\n import urllib3\n\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n res = self.session.get(url)\n if res.status_code != 200:\n raise ConnectionError\n j = res.json()\n for item in j[\"Items\"]:\n if item[\"Name\"] == datasource:\n return True\n return False",
"def verify_connection(self, datasource):\n url = urljoin(self.base_url, \"Datasources\")\n params = {\"service\": \"ProcessData\", \"allQuotes\": 1}\n if not self.session.verify:\n import urllib3\n\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n res = self.session.get(url, params=params)\n if res.status_code != 200:\n raise ConnectionError\n j = res.json()\n for item in j[\"data\"]:\n if item[\"n\"] == datasource:\n return True\n return False",
"def datasource_type(self) -> Optional[str]:\n return pulumi.get(self, \"datasource_type\")",
"def datasource_type(self) -> Optional[str]:\n return pulumi.get(self, \"datasource_type\")",
"def __test_region(self, bk):\n for arg in self.args['region']:\n ds = ArgoDataFetcher(backend=bk).region(arg).to_xarray()\n assert isinstance(ds, xr.Dataset) == True",
"def test_filters_by_dataset_description_if_requested(self):\n dataset = factories.SourceDatasetFactory.create(i_dbgap_description='a dataset about demographic measurements')\n trait = factories.SourceTraitFactory.create(i_description='lorem ipsum', source_dataset=dataset)\n other_dataset = factories.SourceDatasetFactory.create(i_dbgap_description='foo')\n factories.SourceTraitFactory.create(i_description='lorem ipsum', source_dataset=other_dataset)\n input = {'description': 'lorem', 'dataset_description': 'demographic', 'dataset_name': ''}\n response = self.client.get(self.get_url(), input)\n context = response.context\n self.assertIn('form', context)\n self.assertTrue(context['has_results'])\n self.assertIsInstance(context['results_table'], tables.SourceTraitTableFull)\n self.assertQuerysetEqual(context['results_table'].data, [repr(trait)])",
"def data_source_info(self) -> pulumi.Input['DatasourceArgs']:\n return pulumi.get(self, \"data_source_info\")",
"def test_BaseDataContext_add_datasource_updates_cache(\n in_memory_runtime_context: EphemeralDataContext,\n pandas_enabled_datasource_config: dict,\n) -> None:\n context = in_memory_runtime_context\n\n name = pandas_enabled_datasource_config[\"name\"]\n\n assert name not in context.datasources\n\n context.add_datasource(**pandas_enabled_datasource_config)\n\n assert name in context.datasources",
"def data_source_set_info(self) -> Optional[pulumi.Input['DatasourceSetArgs']]:\n return pulumi.get(self, \"data_source_set_info\")",
"def data_source(self, label):\r\n return datasource.Datasource(self.apikey_or_username, label)",
"def test_context_data(self):\n response = self.client.get(self.get_url())\n context = response.context\n self.assertIn('source_trait_table', context)\n self.assertIsInstance(context['source_trait_table'], tables.SourceTraitTableFull)",
"def dataproduct_datasource(self, ows_layer, session):\n metadata = {}\n\n if ows_layer.type == 'group':\n # group layer\n return metadata\n\n data_set = ows_layer.data_set_view.data_set\n data_source = data_set.data_source\n if data_source.connection_type == 'database':\n # vector DataSet\n\n # get table metadata\n postgis_datasource = None\n pg_metadata = self.dataset_info(\n data_source.gdi_oid, data_set.data_set_name\n )\n if 'error' not in pg_metadata:\n data_set_name = \"%s.%s\" % (\n pg_metadata.get('schema'), pg_metadata.get('table')\n )\n\n primary_key = pg_metadata.get('primary_key')\n if primary_key is None:\n # get primary key if view\n primary_key = data_set.primary_key\n\n geom = {}\n if len(pg_metadata.get('geometry_columns')) > 1:\n used_col = ows_layer.data_set_view.geometry_column\n for geom_col in pg_metadata.get('geometry_columns'):\n # get used geometry column if multiple\n if geom_col.get('geometry_column') == used_col:\n geom = geom_col\n break\n elif len(pg_metadata.get('geometry_columns')) == 1:\n # use sole geometry column\n geom = pg_metadata.get('geometry_columns')[0]\n\n postgis_datasource = {\n 'dbconnection': data_source.connection,\n 'data_set_name': data_set_name,\n 'primary_key': primary_key,\n 'geometry_field': geom.get('geometry_column'),\n 'geometry_type': geom.get('geometry_type'),\n 'srid': geom.get('srid')\n }\n else:\n # show error message\n postgis_datasource = {\n 'error': pg_metadata.get('error')\n }\n\n metadata = {\n 'bbox': DEFAULT_EXTENT,\n 'crs': 'EPSG:2056',\n 'datatype': 'vector',\n 'postgis_datasource': postgis_datasource\n }\n else:\n # raster DataSet\n\n # modify connection dir\n connection = re.sub(\n RASTER_DATASOURCE_PATTERN, RASTER_DATASOURCE_REPL,\n data_source.connection\n )\n # TODO: get srid\n srid = 'EPSG:2056'\n metadata = {\n 'datatype': 'raster',\n 'raster_datasource': {\n 'datasource': connection + data_set.data_set_name,\n 'srid': srid\n }\n }\n\n return metadata"
] | [
"0.74529046",
"0.6727229",
"0.57659274",
"0.57537",
"0.5483662",
"0.5415242",
"0.5341589",
"0.52654755",
"0.5249164",
"0.522096",
"0.5166681",
"0.5155394",
"0.5150383",
"0.5117535",
"0.5107733",
"0.50836915",
"0.50509775",
"0.50261915",
"0.5001209",
"0.49781814",
"0.495897",
"0.495897",
"0.49489206",
"0.49328536",
"0.49275905",
"0.4924379",
"0.48839125",
"0.4872089",
"0.48609102",
"0.48466116"
] | 0.762238 | 0 |
Tests the measurement property of the LASCOMap object. | def test_measurement(lasco):
assert lasco.measurement == "white-light" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_measurement(eit_map):\n assert eit_map.measurement.value in [195, 171]",
"def test_unit_of_measurement(self):\n for name in self.sensor_dict:\n sensor = self.sensor_dict[name][\"sensor\"]\n assert self.sensor_dict[name][\"units\"] == sensor.unit_of_measurement",
"def test_measurment(self):\r\n self.assertEqual(Converter.MeasurmentWorldtoUS(10, \"km\"), 6.214)\r\n self.assertEqual(Converter.MeasurmentWorldtoUS(10, \"m\"), 10.936)\r\n self.assertEqual(Converter.MeasurmentWorldtoUS(10, \"cm\"), 0.328)\r\n self.assertEqual(Converter.MeasurmentWorldtoUS(10, \"mm\"), 0.394)\r\n self.assertEqual(Converter.MeasurmentUStoWorld(10, \"mi\"), 16.093)\r\n self.assertEqual(Converter.MeasurmentUStoWorld(10, \"yd\"), 9.144)\r\n self.assertEqual(Converter.MeasurmentUStoWorld(10, \"ft\"), 304.8)\r\n self.assertEqual(Converter.MeasurmentUStoWorld(10, \"in\"), 254)",
"def test_measure(self):\n\n result = qubit.measure(polarization)\n self.assertEqual(0, result)",
"def test_get_measure_parameters(self):\n pass",
"def test_properties(self):\n self.assertEqual(LENGTH_KILOMETERS, METRIC_SYSTEM.length_unit)\n self.assertEqual(TEMP_CELSIUS, METRIC_SYSTEM.temperature_unit)\n self.assertEqual(MASS_GRAMS, METRIC_SYSTEM.mass_unit)\n self.assertEqual(VOLUME_LITERS, METRIC_SYSTEM.volume_unit)",
"def test_get_voltage_maps(self):\n pass",
"def test_MetadataMap_getter(self):\r\n self.assertEqual(self.cs_overview.MetadataMap, self.overview_map)",
"def test_get_voltage_map_item(self):\n pass",
"def test_MetadataMap_setter(self):\r\n self.cs_overview.MetadataMap = self.overview_map\r\n self.assertEqual(self.cs_overview.MetadataMap, self.overview_map)",
"def test_mock_datasource_meters(self):\n account1 = self.test_data.accounts[0]\n meter = account1.meters[0]\n self.assertIsInstance(meter, Meter)\n self.assertEqual(meter.PK, 4)\n self.assertEqual(meter.Tariff, \"test_tariff\")\n self.assertEqual(meter.ServiceType, \"test_service_type\")\n self.assertEqual(meter.PODid, \"test_podid\")\n self.assertEqual(meter.MeterNumber, \"test_meter_number_1\")\n self.assertEqual(meter.IntervalStart, date(2016, 1, 1))\n self.assertEqual(meter.IntervalEnd, date(2016, 2, 1))\n self.assertEqual(len(meter.charges), 1)\n self.assertEqual(len(meter.usages), 1)",
"def test_post_voltage_maps(self):\n pass",
"def crs_is_metric(gdf):\n units = str(gdf_get_projection_unit(gdf)).strip().lower()\n if units in ['\"meter\"', '\"metre\"', \"'meter'\", \"'meter'\",\n 'meter', 'metre']:\n return True\n else:\n return False",
"def measure(self):\n pass",
"def test_c(self):\n self.failIf(cgs.speed_of_light/mks.speed_of_light!=100)",
"def test_put_voltage_map_item(self):\n pass",
"def test_native_measurements(self, valkmusa, meas):\n\n QB1 = valkmusa.qubits[0]\n valkmusa.validate_operation(meas(QB1))",
"def test_properties_stats_get(self):\n pass",
"def test_length_to_metric(self):\n self.assertEqual(\n 100,\n METRIC_SYSTEM.length(100, METRIC_SYSTEM.length_unit)\n )\n self.assertEqual(\n 8.04672,\n METRIC_SYSTEM.length(5, IMPERIAL_SYSTEM.length_unit)\n )",
"def test_map_overview_accuracy(self):\n params = [10000, 5, 10, 15]\n height = 100\n width = 200\n world_map = gen.generate_map(height=height, width=width, params=params)\n image = img.get_map_overview(world_map)\n pixels = image.load()\n for x in range(width):\n for y in range(height):\n color = tuple(img.get_color(world_map[x][y]))\n self.assertEqual(pixels[x, y], color)",
"def test_metrics(self):\n self.assertIsInstance(self.analytics.suites[testReportSuite].metrics, omniture.utils.AddressableList)",
"def test_wl_metric():\n z1 = np.random.normal(size=int(1e5)) + 1\n z2 = np.random.normal(size=int(1e5)) + 2\n res = pval.wl_metric(z1, z2)\n np.testing.assert_almost_equal(res, 1, 2)",
"def test_metrics_empty(self):\n skill_map = SkillMap.load(self.course)\n sm_metrics = SkillMapMetrics(skill_map)\n self.assertEqual(sm_metrics.simple_cycles(), [])\n self.assertEqual(sm_metrics.singletons(), [])\n self.assertEqual(sm_metrics.long_chains(), [])\n expected = {'cycles': [], 'singletons': [], 'long_chains': []}\n self.assertEqual(sm_metrics.diagnose(), expected)",
"def test_is_metric(self):\n self.assertTrue(METRIC_SYSTEM.is_metric)\n self.assertFalse(IMPERIAL_SYSTEM.is_metric)",
"def test_property_longitude(self):\n\n longitude = self.location.longitude\n\n self.assertIsInstance(longitude, float)\n self.assertRaises(DataObjectError, \n setattr(self, \"longitude\", 76.54321)\n )",
"def getMeasure(unique_name):",
"def getMeasure(unique_name):",
"def test_set_state(self):\n self.sensor.measure = MagicMock(return_value=True)\n self.assertEqual(self.sensor.measure(), True)",
"def testSimOuptputDimensions(self):\n self.tree.set_database(self.coal)\n sim_params = self.tree.get_simulation_parameters()\n self.assertEqual(sim_params[\"fine_map_x\"], 24)\n self.assertEqual(sim_params[\"fine_map_y\"], 24)\n self.assertEqual(sim_params[\"fine_map_x_offset\"], 0)\n self.assertEqual(sim_params[\"fine_map_y_offset\"], 0)\n self.assertEqual(sim_params[\"sim_complete\"], 1)",
"def unit_of_measurement(self):\n return None"
] | [
"0.7147487",
"0.70588326",
"0.6354547",
"0.6340808",
"0.62565184",
"0.622547",
"0.62038094",
"0.614342",
"0.6091599",
"0.5935994",
"0.5892707",
"0.58639777",
"0.5852712",
"0.58456856",
"0.57708514",
"0.57647926",
"0.5735114",
"0.5731323",
"0.57311",
"0.5704018",
"0.5696798",
"0.56896853",
"0.56870115",
"0.5679568",
"0.5671897",
"0.5662929",
"0.5662929",
"0.5653289",
"0.56283283",
"0.5620389"
] | 0.70623285 | 1 |
Tests the observatory property of the LASCOMap object. | def test_observatory(lasco):
assert lasco.observatory == "SOHO" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_observatory(eit_map):\n assert eit_map.observatory == \"SOHO\"",
"def check_observatory(self):\n assert self.observatory in ALL_OBSERVATORIES, \\\n \"Invalid observatory \" + repr(self.observatory) + \" in \" + repr(self.filename)",
"def observatories():\n\n obs_db = {}\n\n obs_db['PWT-Oxford'] = { 'long':'-01:15:00', \\\n 'lat':'+51:45:00', \\\n 'altitude-metres':130.0, \\\n 'timezone':'Europe/London' }\n\n obs_db['LaPalma'] = { 'lat':'+28:45:00', \\\n 'long':'-17:53:00', \\\n 'altitude-metres':2326, \\\n 'timezone':'Atlantic/Canary' }\n \n obs_db['Paranal'] = { 'lat':'-24:37:00', \\\n 'long':'-70:24:00', \\\n 'altitude-metres':2635, \\\n 'timezone':'America/Santiago' }\n\n obs_db['LaSilla'] = { 'lat':'-29:15:00', \\\n 'long':'-70:44:00', \\\n 'altitude-metres':2380, \\\n 'timezone':'America/Santiago' }\n\n obs_db['MaunaKea'] = { 'lat':'+19:50:00', \\\n 'long':'-155:28:00', \\\n 'altitude-metres':4190, \\\n 'timezone':'Pacific/Honolulu' }\n \n obs_db['SidingSpring'] = { 'lat':'-31:16:00', \\\n 'long':'+149:04:00', \\\n 'altitude-metres':1149, \\\n 'timezone':'Australia/Sydney' }\n \n obs_db['KittPeak'] = { 'lat':'+31:58:00', \\\n 'long':'-111:36:00', \\\n 'altitude-metres':2096, \\\n 'timezone':'America/Phoenix' }\n\n obs_db['CalarAlto'] = { 'lat':'+37:13:25', \\\n 'long':'-2:32:47', \\\n 'altitude-metres':2168, \\\n 'timezone':'Europe/Madrid' }\n \n obs_db['Gemini-N'] = { 'lat':'+19:49:26', \\\n 'long':'-155:28:09', \\\n 'altitude-metres':4213, \\\n 'timezone':'Pacific/Honolulu' }\n\n obs_db['Gemini-S'] = { 'lat':'-30:14:27', \\\n 'long':'-70:44:12', \\\n 'altitude-metres':2722, \\\n 'timezone':'America/Santiago' }\n\n return obs_db",
"def test_MetadataMap_getter(self):\r\n self.assertEqual(self.cs_overview.MetadataMap, self.overview_map)",
"def testInit(self):\n map_state = MapState(self.event_manager)\n self.assertEqual(map_state.event_manager, self.event_manager)\n self.assertTrue(map_state in self.event_manager.listener_groups[\"default\"].listeners)\n self.assertEqual(map_state.occupied_sectors_by_actor_id, {})\n self.assertEqual(map_state.actors_by_sector_id, {})",
"def test_avalanche_warning_by_region_obs(self):\n pass",
"def constrained_lens_object_test():\n return # TODO",
"def observe(self):\r\n self.rect.center = self.agent.rect.center\r\n \r\n # Control Points.\r\n # All control points are visible.\r\n control_points = [\r\n {\r\n 'team': o.team.name,\r\n 'location': o.rect.center\r\n } \r\n for o in self.world.control_points\r\n ]\r\n\r\n # Walls.\r\n # Only walls within range within range are visible.\r\n # Simplification that seeing part of a wall (Rect)\r\n # means seeing the entire wall-part does seem reasonable.\r\n walls = [\r\n {\r\n 'top': o.rect.top,\r\n 'left': o.rect.left,\r\n 'bottom': o.rect.bottom,\r\n 'right': o.rect.right\r\n }\r\n for o in self.world.visible_objects(self, self.world.walls)\r\n ]\r\n \r\n # Ammo Packs.\r\n # Only ammo packs within range are visible.\r\n ammo_packs = [ {'location': o.rect.center}\r\n for o in self.world.visible_objects(self, self.world.ammo_packs)\r\n ]\r\n\r\n # Agents.\r\n # Only agents within range are visible, whether they are on your own team\r\n # or on the other team.\r\n agents = []\r\n for team in self.world.teams:\r\n agents += [\r\n {\r\n 'team': team.name,\r\n 'location': agent.rect.center,\r\n 'direction': agent.direction,\r\n 'id': agent.number\r\n }\r\n for agent\r\n in self.world.visible_objects(self, team)\r\n if agent != self.agent\r\n ]\r\n\r\n observation = {\r\n 'id': self.agent.number,\r\n 'location': self.agent.rect.center,\r\n 'ammo': self.agent.ammo,\r\n 'direction': self.agent.direction,\r\n 'team': self.agent.team.name,\r\n 'respawn': not self.agent.alive,\r\n 'agents': agents,\r\n 'controlpoints': control_points,\r\n 'walls': walls,\r\n 'ammopacks': ammo_packs,\r\n }\r\n \r\n return observation",
"def testDispersalMapSimulation(self):\n self.assertEqual(701, self.c.get_species_richness(1))",
"def checkMap(self):\n return True",
"def test_map(log_prob_coo):\n\n offset_dict = log_prob_coo['offsets']\n\n # the input\n print(log_prob_coo)\n print('input log probs')\n print(log_prob_sparse_to_dense(log_prob_coo['coo']))\n\n # with this shape converter, we get one row, where each value is one m\n converter = IndexConverter(total_n_cells=1,\n total_n_genes=log_prob_coo['coo'].shape[0])\n\n # set up and estimate\n estimator = MAP(index_converter=converter)\n noise_csr = estimator.estimate_noise(noise_log_prob_coo=log_prob_coo['coo'],\n noise_offsets=offset_dict)\n\n # output\n print('dense noise count estimate, per m')\n out_per_m = np.array(noise_csr.todense()).squeeze()\n print(out_per_m)\n print('truth')\n print(log_prob_coo['maps'])\n\n # test\n np.testing.assert_array_equal(out_per_m, log_prob_coo['maps'])",
"def test_conservation(self):\n self.c_s_tot = (\n self.c_s_n_tot(self.solution.t)\n + self.c_s_p_tot(self.solution.t)\n + self.c_SEI_n_tot(self.solution.t)\n + self.c_SEI_p_tot(self.solution.t)\n + self.c_Li_n_tot(self.solution.t)\n + self.c_Li_p_tot(self.solution.t)\n )\n diff = (self.c_s_tot[1:] - self.c_s_tot[:-1]) / self.c_s_tot[:-1]\n if \"profile\" in self.model.options[\"particle\"]:\n np.testing.assert_array_almost_equal(diff, 0, decimal=10)\n elif self.model.options[\"surface form\"] == \"differential\":\n np.testing.assert_array_almost_equal(diff, 0, decimal=10)\n elif self.model.options[\"SEI\"] == \"ec reaction limited\":\n np.testing.assert_array_almost_equal(diff, 0, decimal=12)\n else:\n np.testing.assert_array_almost_equal(diff, 0, decimal=15)",
"def _observe_simple(self):\n return {}",
"def test_MetadataMap_setter(self):\r\n self.cs_overview.MetadataMap = self.overview_map\r\n self.assertEqual(self.cs_overview.MetadataMap, self.overview_map)",
"def _get_observation(self, observation):",
"def is_map_updated(self):\r\n self.old_obs_len =0\r\n if len(self.obs_ls[0])!= self.old_obs_len:\r\n self.old_obs_len =len(self.obs_ls[0])\r\n return True\r\n return False",
"def test_eq(self):\r\n self.assertTrue(self.empty_map == MetadataMap({}, []))\r\n self.assertTrue(self.overview_map == MetadataMap(\r\n self.overview_map._metadata, self.overview_map.Comments))",
"def test_io_success(self):\r\n k1 = uuid4()\r\n k2 = uuid4()\r\n now = datetime.now()\r\n then = now + timedelta(days=1)\r\n m1 = TestMapModel.create(int_map={1: k1, 2: k2}, text_map={'now': now, 'then': then})\r\n m2 = TestMapModel.get(partition=m1.partition)\r\n\r\n assert isinstance(m2.int_map, dict)\r\n assert isinstance(m2.text_map, dict)\r\n\r\n assert 1 in m2.int_map\r\n assert 2 in m2.int_map\r\n assert m2.int_map[1] == k1\r\n assert m2.int_map[2] == k2\r\n\r\n assert 'now' in m2.text_map\r\n assert 'then' in m2.text_map\r\n assert (now - m2.text_map['now']).total_seconds() < 0.001\r\n assert (then - m2.text_map['then']).total_seconds() < 0.001",
"def test_properties_evolution_get(self):\n pass",
"def test_eq(self):\n self.assertTrue(self.empty_map == MetadataMap({}, []))\n self.assertTrue(self.overview_map == MetadataMap(\n self.overview_map._metadata, self.overview_map.Comments))",
"def observ(self):\n return self._observ.read_value()",
"def setup_observatory( obs ):\n\n obs_obj = ephem.Observer()\n\n # Pre-defined observatory with string identifier:\n if type( obs )==str:\n obs_db = observatories()\n try:\n obs_dict = obs_db[ obs ]\n obs_obj.lat = obs_dict['lat']\n obs_obj.long = obs_dict['long']\n obs_obj.elevation = obs_dict['altitude-metres']\n timezone = obs_dict['timezone']\n except:\n print '\\n\\nObservatory string does not match any in database!'\n print 'Currently available observatories are:'\n for i in obs_db.keys():\n print ' {0}'.format( i )\n obs_obj = None\n timezone = None\n\n # Custom-defined observatory as dictionary:\n else:\n obs_obj.lat = obs['lat']\n obs_obj.long = obs['long']\n try:\n obs_obj.elevation = obs['altitude-metres']\n except:\n print 'No elevation provided - assuming sea level'\n obs_obj.elevation = 0.\n try:\n timezone = obs['timezone']\n except:\n timezone = None\n\n return obs_obj, timezone",
"def test_properties_get(self):\n pass",
"def test_property_active(self):\n\n active = self.location.active\n\n self.assertIsInstance(active, bool)\n self.assertRaises(DataObjectError,\n setattr(self, \"active\", False)\n )",
"def test_api_object_service_property(self, api_object):\n api_object.status = 'SERVICE'\n assert api_object.service\n assert not api_object.creating",
"def has_get_properties(client: NumPyClient) -> bool:\n return type(client).get_properties != NumPyClient.get_properties",
"def testmoenergies(self):\r\n assert len(self.data.moenergies) == 1\r\n if hasattr(self.data, \"mocoeffs\"):\r\n assert len(self.data.mocoeffs) == 1",
"def test_is_a_properties(self):\n self.assertEqual(self.hand.flushInd, 1)\n self.assertEqual(self.hand.straightInd, 0)",
"def test_map_details(self):\n\t\tcreate_cartography()\n\t\tmap = Map.objects.get(id=1)\n\t\tc = Client()\n\t\tresponse = c.get(\"/maps/%s\" % str(map.id))\n\t\tself.assertEquals(response.status_code, 200)",
"def test_indicate(self):\n self.objective.Indicate()"
] | [
"0.70502925",
"0.5905423",
"0.5631481",
"0.55709904",
"0.53638643",
"0.53604794",
"0.535205",
"0.53501725",
"0.53381866",
"0.53307277",
"0.5307905",
"0.5280215",
"0.52539486",
"0.5241711",
"0.51865333",
"0.5175831",
"0.51357836",
"0.5132824",
"0.51225543",
"0.50932837",
"0.50655437",
"0.50551456",
"0.50550157",
"0.5054125",
"0.504736",
"0.50465393",
"0.5044797",
"0.5034886",
"0.50263447",
"0.501694"
] | 0.62336797 | 1 |
get value from query dict by key | def get_value(self, query_dict, k):
if k in query_dict:
return query_dict[k]
return '' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __getitem__(self, key):\n return self.query(key)",
"def find(cls, key):\r\n return cls.query().get(key)",
"def getSpecific(self, keyword, key):",
"def __getitem__(self, key):\n return self.keyvaluepair_set.get(key=key).value",
"def __getitem__(self, key):\n return self.params[key].value",
"def get(self, key):",
"def get(self, key):",
"def find_value(dic, key):\n return dic[key]",
"def get_item(dictionary, key):\n return dictionary.get(key)",
"def get_item(dictionary, key):\n return dictionary.get(key)",
"def get_item(dictionary, key):\n return dictionary.get(key)",
"def get_item(dictionary, key):\n return dictionary.get(key)",
"def lookup(self, key):",
"def __getitem__(self, key):\n return self.get_field(key)",
"def get(self, key):\n _filter = {'_id': key}\n doc = self.collection.find_one(_filter)\n\n if doc and not self._verify_timeout(doc):\n return self._unpickle(doc['value'])",
"def lookup(self, key):\n n = self.find(key)\n if n:\n return n.value\n else:\n return False",
"def get_item(query, key):\r\n\tfor element in query:\r\n\t\tif (list(element)[0].get('game__name') == key):\r\n\t\t\treturn list(element)[0].get('current_score')\r\n\r\n\treturn None",
"def __getitem__(self, key):\n return self._dict[key]",
"def __getitem__(self, key: ir.Value) -> ir.Value:\n return ops.MapGet(self, key).to_expr()",
"def __getitem__(self, key):\n return self.parameters[key].value",
"def get_field_by_key(field, key, val, session):\n sql = select([field]).where(key == val)\n value = session.execute(sql).scalar()\n return value",
"def get(self, key):\n\t\treturn self.__get(key, key[1:])",
"def get(self, key):\n h = key%self.m\n a = self.a\n if a[h]:\n return a[h].val\n else:\n return -1",
"def get(self, key):\n if key in self.fields:\n return self.fields.get(key).get()\n return None",
"def get(self, key):\n return self[key]",
"def __getitem__(self, key):\n for k,v in list(self.__dict__.items()):\n if k == key:\n return v\n try:\n return v[key]\n except:\n pass\n\n print((\"Item %s could not be found...\" %key))",
"def get_value(self, key):\n return self[key]",
"def get_value(dct, key):\n return dct.get(key)",
"def __getitem__(self, key):\n return self.d[key]",
"def __getitem__(self, key):\n return self.get(key)"
] | [
"0.7332471",
"0.7147431",
"0.7024943",
"0.68989784",
"0.6870327",
"0.6814481",
"0.6814481",
"0.68065345",
"0.67460203",
"0.67460203",
"0.67460203",
"0.67460203",
"0.673437",
"0.67057145",
"0.6673087",
"0.6667873",
"0.66438895",
"0.66222626",
"0.6579917",
"0.6568693",
"0.6568539",
"0.6564893",
"0.6543903",
"0.6542551",
"0.6513578",
"0.65121794",
"0.6508344",
"0.65076834",
"0.65046537",
"0.65032816"
] | 0.8017572 | 0 |
Sort the contents of a directory by last modified date. | def _sorted_ls(path):
def _get_modified_time(f):
return os.stat(os.path.join(path, f)).st_mtime
return list(sorted(os.listdir(path), key=_get_modified_time)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_files_list(dirname, date_order, rdate_order):\n file_list = os.listdir(dirname)\n file_mtimes = dict.fromkeys(file_list)\n for f in file_list:\n if f[0] == '.':\n print \"Skipping file: \", f\n del file_mtimes[f]\n continue\n if date_order or rdate_order:\n file_mtimes[f] = os.stat(dirname + '/' + f).st_mtime\n if date_order or rdate_order:\n return sorted(file_mtimes.keys(), key=file_mtimes.get, reverse=rdate_order)\n else:\n return file_list",
"def sortFiles(files):\n def sortKey(file):\n dirFile = file.lower().rsplit('\\\\',1)\n if len(dirFile) == 1: dirFile.insert(0,'')\n return dirFile\n sortKeys = dict((x,sortKey(x)) for x in files)\n return sorted(files,key=lambda x: sortKeys[x])",
"def list_sorted_filenames(directory):\n with os.scandir(directory) as entries:\n filenames = [entry.name for entry in entries if entry.is_file()]\n filenames.sort()\n return filenames.copy()",
"def Dir_cmpdates(dir1, dir2):\n\n t1, t2 = map(lambda x: os.stat(x._path).st_ctime, [dir1, dir2])\n c = cmp(t1, t2)\n if c != 0:\n return c\n return cmp(dir1, dir2)",
"def sorted_files(self, pattern=None):\n return sorted(self.files(pattern))",
"def paths_sort(path):\n base_name = os.path.basename(path)\n \n stat_name = base_name.split('.')[0] \n\n date = base_name.split('.')[1]\n \n try:\n date = datetime.datetime.strptime(date, '%Y-%m-%d')\n \n return date, stat_name\n except Exception as e:\n print(e)",
"def __get_sorted_file_list(self):\n d = self.__view.CurrentImgDir\n list = os.listdir(d)\n if self.__view.SortType == constant.THUMB_SORT_FILENAME:\n # Sort by Name\n list.sort()\n if self.__view.SortType == 2:\n # Sort by Size\n list.sort(lambda a, b: int(os.stat(os.path.join(d,a))[stat.ST_SIZE] - os.stat(os.path.join(d,b))[stat.ST_SIZE])) \n return list",
"def sort_path(self):\n self.entries.sort(key=lambda x: x.source_path)",
"def resortFiles(fileList):\n if fileList is None or not len(fileList):\n print \"SRT:nofiles in the dictionary.\"\n sys.exit()\n\n new_file_list = list()\n for f in fileList:\n new_file_list.append(PFileStat(dir_source, f, os.lstat(dir_source + \"/\" + f)))\n\n new_file_list.sort(key=lambda i: i.st_mtime)\n return new_file_list",
"def sort_time(self):\n self.entries.sort(key=lambda x: x.date_stamp_utc)",
"def sort_by_date_taken(src_dir, dst_dir=None, file_operation='cp', filename_extensions=['jpg'], **kwargs):\n\n def _get_date_taken(path):\n \"\"\"\n get date when picture was taken from exif metadata\n :param path: path of the picture\n :return: DateTimeOriginal (exif id 36867)\n \"\"\"\n return Image.open(path)._getexif()[36867]\n\n def _get_date_modified(path):\n \"\"\"\n get date when the file was modified for the last time (for images/videos this equals the date when the file was taken)\n :param path: path of the file\n :return: date of last file change\n \"\"\"\n return str(datetime.datetime.fromtimestamp(os.path.getmtime(path)))\n\n def _create_dir_name(date, dir_structure='ymd', is_exif=True):\n \"\"\"\n create the directory path\n :param date: exif data of the picture\n :param dir_structure: structure of dir (example: 'ymd' - 'YYYY\\YYYY_MM\\YYYY_MM_DD; 'yd' - YYYY\\YYYY_MM_DD)\n :return: relative path/name of the directory\n \"\"\"\n if is_exif:\n date_split = date.split(' ')[0].split(':')\n else:\n date_split = date.split(' ')[0].split('-')\n dir_name = '\\\\'\n if 'y' in dir_structure:\n dir_name += date_split[0] + '\\\\'\n if 'm' in dir_structure:\n dir_name += '_'.join(d for d in date_split[:2]) + '\\\\'\n if 'd' in dir_structure:\n dir_name += '_'.join(d for d in date_split[:3]) + '\\\\'\n return dir_name\n\n # set dst_dir to src_dir if not specified\n if dst_dir is None:\n dst_dir = src_dir\n # find all files with specified file name extension\n files = []\n for filename_extension in filename_extensions:\n if 'read_recursive' in kwargs.keys() and kwargs['read_recursive']:\n files += glob.glob(src_dir + \"\\\\**\\\\*.\" + filename_extension, recursive=True)\n else:\n files += glob.glob(src_dir + \"\\\\*.\" + filename_extension)\n print(\"copying \" + str(len(files)) + \" files from \" + src_dir + \" to \" + dst_dir + '\\n')\n for num, file in enumerate(files):\n # create the name of directory structure\n if file.split('.')[-1].lower() in [\"jpg\", \"jpeg\", \"jpe\", \"jfif\", \"tiff\", \"tif\"]: # if exif data is stored in file header\n if 'dir_structure' in kwargs.keys():\n dir_name = _create_dir_name(_get_date_taken(file), dir_structure=kwargs['dir_structure'])\n else:\n dir_name = _create_dir_name(_get_date_taken(file))\n else: # use date of change to determine creation\n if 'dir_structure' in kwargs.keys():\n dir_name = _create_dir_name(_get_date_modified(file), dir_structure=kwargs['dir_structure'], is_exif=False)\n else:\n dir_name = _create_dir_name(_get_date_modified(file), is_exif=False)\n date_dir = dst_dir + \"\\\\\" + dir_name + \"\\\\\"\n # create new date directory if it doesn't exists\n os.makedirs(date_dir, exist_ok=True)\n if file_operation in ['copy', 'cp']:\n # copy file to new dir\n shutil.copy2(file, date_dir + file.split(\"\\\\\")[-1]) # also copies files metadata\n elif file_operation in ['move', 'mv']:\n # move file to new dir\n shutil.move(file, date_dir + file.split(\"\\\\\")[-1])\n\n # print the number of files left\n sys.stdout.write(\"\\r\" + str(len(files)-num) + \" files left\")\n sys.stdout.flush()\n\n sys.stdout.write('\\r')\n sys.stdout.flush()\n print(str(len(files)) + \" files sorted\")",
"def get_items_from_dir(path):\n items = os.listdir(path)\n items.sort()\n return items",
"def sorted_dirs(self, pattern=None):\n return sorted(self.dirs(pattern))",
"def _get_dir_mtime(self, sentry_unit, directory):\n return sentry_unit.directory_stat(directory)['mtime']",
"def root_sort(root_dir, exclude=[]):\n print(\" \")\n print(\"<-------------->\")\n print(\"ROOT DIRECTORY \" + \" : \" + root_dir)\n print(\"<-------------->\")\n print(\" \")\n print(\"SORTING ROOT DIRECTORY FILES\")\n root_dir_list = []\n\n for root, dirs, files in os.walk(root_dir):\n if (root.split(\"/\")[-1] in exclude and \n root.split(\"/\")[-1] != ''):\n\n print(\"EXCLUDING: \" + root)\n # Skip the direcories that are listed in exclude_dir\n dirs[:] = [d for d in dirs if d not in exclude]\n files[:] = [] # Remove all misc files\n current_folder = root\n # We don't want the root directory!!\n if (current_folder != root_dir):\n # Cycles subfolders and files in the current sub-folder\n for sub_root, sub_dirs, sub_files in os.walk(root):\n # Sorts the files in the subfolder to have the file \n # Pass to yt in position [0]\n sub_files.sort()\n # Appends path of the enzo target file to root_dir_list \n root_dir_list.append(os.path.join(root, sub_files[0]))\n \n root_dir_list.sort()\n \n return root_dir_list",
"def folder_sort(request, item_container):\n return do_sort(request, item_container, 'folder', _(u'Ordner, Seiten etc. umordnen'))",
"def _get_most_recent_timestamp_subfolder(self, root_folder_path):\n walk_gen = os.walk(root_folder_path)\n root, dirs, files = walk_gen.__next__()\n dirs.sort(reverse=True)\n return dirs[0]",
"def sort_folder():\n for file in downloads_path.iterdir():\n if file.is_file():\n extension = file.suffix\n file = str(file)\n if extension in program_types:\n move_file(file, programs_path)\n elif extension in compressed_types:\n move_file(file, compressed_path)\n elif extension in doc_types:\n move_file(file, documents_path)\n elif extension in music_types:\n move_file(file, music_path)\n elif extension in video_types:\n move_file(file, video_path)\n elif extension in picture_types:\n move_file(file, pictures_path)\n else:\n move_file(file, other_path)",
"def get_dirlist(path):\n dirlist = os.listdir(path)\n dirlist.sort()\n return dirlist",
"def list_directory_files(directory):\n fs_ = fs.open_fs(directory)\n file_list = []\n for file_name in fs_.walk.files():\n file_details = fs_.getinfo(file_name, namespaces=['details'])\n file_list.append({'name': file_name.lstrip('/'),\n 'last-modified': file_details.modified.\n strftime(WORKFLOW_TIME_FORMAT),\n 'size': file_details.size})\n return file_list",
"def parse_dir(self, dir_path=\"NULL\"):\n \n spec_list = []\n dir_path = os.path.abspath(dir_path)\n # if user's path is not having a \"/\" \n if dir_path[-1] != \"/\":\n dir_path = dir_path + \"/\"\n # invoke parse file for every file in the dir_path directory \n files = commands.getoutput(\"ls %s\" % dir_path).split()\n for f in files:\n spec = self.parse_file(dir_path + f)\n spec_list.append(spec)\n return sortbyfilenames(spec_list, files)",
"def sort_list(directory_list: List[str], charbefore:int = 20, extension:str = '.bin') -> List[str]:\n def func(x):\n charafter = -9 if extension =='.json' else -4\n # print(\"func: \", x[:charbefore]+x[charbefore:][:charafter].zfill(3))\n return x[:charbefore]+x[charbefore:][:charafter].zfill(3)\n \n return sorted(directory_list,key=func)",
"def _get_last_modified_date(path):\n last_date = 0\n root_dir, subdirs, files = os.walk(path).next()\n # get subdirs and remove hidden ones\n subdirs = [s for s in subdirs if not s.startswith('.')]\n for subdir in subdirs:\n for root, _, _ in os.walk(join(path, subdir)):\n base = os.path.basename(root)\n # checking if is a hidden path\n if not base.startswith(\".\") and not base.startswith(\"/.\"):\n last_date = max(last_date, os.path.getmtime(root))\n\n # check files of interest in the skill root directory\n files = [f for f in files\n if not f.endswith('.pyc') and f != 'settings.json']\n for f in files:\n last_date = max(last_date, os.path.getmtime(os.path.join(path, f)))\n return last_date",
"def ListArchives(self):\n return sorted(\n [name for name in os.listdir(self._root)\n if os.path.isdir(os.path.join(self._root, name))])",
"def get_ordered_file_names(dir_path, suffix):\n\tfiles=os.listdir(dir_path)\n\n\t# extract the numbers from file names\n\tsorted_int=[]\n\tsorted_str=[]\n\ttemp=[]\n\tfor f in files:\n\t\ttemp=f.split('.')\n\t\tsorted_int.append(int(temp[0]))\n\tsorted_int.sort(key=int)\n\n\t# concatenate the full path after numerically sorting the files in the folder\n\tfor s in sorted_int:\n\t\ttemp=dir_path+str(s)+suffix\n\t\tsorted_str.append(temp)\n\n\treturn sorted_str",
"def get_dir_files_last_modified(self, repo_id, parent_dir, dir_id=None):\n if not dir_id:\n dir_id = seafile_api.get_dir_id_by_path(repo_id, parent_dir)\n parent_dir_hash = calc_file_path_hash(parent_dir)\n if not dir_id:\n return {}\n\n try:\n info = super(DirFilesLastModifiedInfoManager, self).get(\n repo_id=repo_id, parent_dir_hash=parent_dir_hash)\n except self.model.DoesNotExist:\n # no cache yet\n return self._calc_dir_files_last_modified(repo_id, parent_dir,\n parent_dir_hash, dir_id)\n else:\n # cache exist\n if info.dir_id != dir_id:\n # cache is outdated\n info.delete()\n return self._calc_dir_files_last_modified(repo_id, parent_dir,\n parent_dir_hash, dir_id)\n else:\n # cache is valid\n return json.loads(info.last_modified_info)",
"def sort(self, key=None, reverse=False):\n self.log('sort()')\n self.contents.sort(key=key, reverse=reverse)\n return None",
"def process_dir(pool, topdir):\n for root, dirs, files in os.walk(topdir):\n # Not really needed, but makes things consistent.\n dirs.sort()\n files.sort()\n\n for path in files:\n process_file(pool, os.path.join(root, path))",
"def list_entries():\n _, filenames = default_storage.listdir(\"entries\")\n return list(sorted(re.sub(r\"\\.md$\", \"\", filename)\n for filename in filenames if filename.endswith(\".md\")))",
"def list_entries():\n _, filenames = default_storage.listdir(\"entries\")\n return list(sorted(re.sub(r\"\\.md$\", \"\", filename)\n for filename in filenames if filename.endswith(\".md\")))"
] | [
"0.6985285",
"0.64413553",
"0.62352586",
"0.62184626",
"0.62138826",
"0.62063205",
"0.6144919",
"0.6113929",
"0.6106303",
"0.6105642",
"0.60882264",
"0.5959481",
"0.5916365",
"0.58824",
"0.5865776",
"0.58552366",
"0.582905",
"0.57945603",
"0.57103086",
"0.5698084",
"0.567522",
"0.5672239",
"0.5643386",
"0.5568087",
"0.5524043",
"0.5520414",
"0.55068535",
"0.5478748",
"0.5466521",
"0.5466521"
] | 0.7565834 | 0 |