Dataset columns:

    Column           Type       Lengths / values
    query            string     9 to 9.05k characters
    document         string     10 to 222k characters
    metadata         dict       -
    negatives        sequence   30 items
    negative_scores  sequence   30 items
    document_score   string     4 to 10 characters
    document_rank    string     2 distinct values
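Each row pairs a natural-language query with a relevant code document, 30 negative code snippets, their similarity scores, and the score and rank of the positive document. The following is a minimal sketch of how rows with this schema could be read, assuming the dataset is published on the Hugging Face Hub and loaded with the `datasets` library; the dataset identifier below is a placeholder, not the real name.

    from datasets import load_dataset  # Hugging Face `datasets` library

    # "org/code-retrieval-triplets" is a hypothetical identifier used for illustration only.
    ds = load_dataset("org/code-retrieval-triplets", split="train")

    row = ds[0]
    print(row["query"])                 # natural-language description used as the query
    print(row["document"])              # the positive (relevant) code snippet
    print(len(row["negatives"]))        # 30 hard-negative code snippets
    print(len(row["negative_scores"]))  # 30 scores aligned with `negatives`
    print(row["document_score"], row["document_rank"])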
Returns the first active subscription by id
def get_first_active_subscription(self):
    if self.has_active_subscription():
        return self.subscriptions.filter(active=True)[0]
    else:
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_one(cls, sid):\n return Subscription.query.get_or_404(sid)", "def get_by_id(self, _id):\n return Subscription(self.context, ResourcePathServiceOperation(\"getById\", [_id], self.resource_path))", "def getSubscription(uniq):\n return Subscription(Cuebot.getStub('subscription').Get(\n subscription_pb2.SubscriptionGetRequest(id=uniq), timeout=Cuebot.Timeout).subscription)", "def subscription(self, uuid):\r\n return subs.Subscription(self, uuid)", "def get_subscription(self):\n return self.request({\n 'path': '/' + UUID + '/subscription'})", "def get_subscription(\n connection, subscription_id, project_id, fields=None, error_msg=None\n):\n return connection.get(\n url=f'{connection.base_url}/api/subscriptions/{subscription_id}',\n params={'fields': fields},\n headers={'X-MSTR-ProjectID': project_id},\n )", "async def get_subscription(\r\n self, installed_app_id: str, subscription_id: str\r\n ) -> dict:\r\n return await self.get(\r\n API_SUBSCRIPTION.format(\r\n installed_app_id=installed_app_id, subscription_id=subscription_id\r\n )\r\n )", "def get_by_id(self, id):\n accts = [acct for acct in self.accounts if UUID(acct.uuid) == UUID(id)]\n assert len(accts) <= 1\n if len(accts) == 0:\n raise KeyError('account with id {} unknown'.format(id))\n elif len(accts) > 1:\n log.warning('multiple accounts with same UUID found', uuid=id)\n return accts[0]", "def findSubscription(name):\n return Subscription(Cuebot.getStub('subscription').Find(\n subscription_pb2.SubscriptionFindRequest(name=name), timeout=Cuebot.Timeout).subscription)", "def retrieve(self, subscription_id, **kwargs):\n\n return self._retrieve(subscription_id, **kwargs)", "def select_subscription(profile=None, sub_name_or_id=None):\n if profile is None:\n profile = subscription_profile()\n\n if sub_name_or_id is None:\n sub_name_or_id = _prompt_sub_id_selection(profile)\n\n profile.set_active_subscription(sub_name_or_id)\n return profile", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Subscription':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n\n __props__[\"accessed_at\"] = None\n __props__[\"auto_delete_on_idle\"] = None\n __props__[\"count_details\"] = None\n __props__[\"created_at\"] = None\n __props__[\"dead_lettering_on_filter_evaluation_exceptions\"] = None\n __props__[\"dead_lettering_on_message_expiration\"] = None\n __props__[\"default_message_time_to_live\"] = None\n __props__[\"enable_batched_operations\"] = None\n __props__[\"entity_availability_status\"] = None\n __props__[\"is_read_only\"] = None\n __props__[\"location\"] = None\n __props__[\"lock_duration\"] = None\n __props__[\"max_delivery_count\"] = None\n __props__[\"message_count\"] = None\n __props__[\"name\"] = None\n __props__[\"requires_session\"] = None\n __props__[\"status\"] = None\n __props__[\"type\"] = None\n __props__[\"updated_at\"] = None\n return Subscription(resource_name, opts=opts, __props__=__props__)", "def activate_subscription(**kwargs):\n sub, created = Subscription.objects.get_or_create(**kwargs)\n # check if it already existed and was deactivated\n if not created and not sub.active:\n sub.active = True\n sub.save()\n created = True\n return sub, created", "def get_account_by_id(self, id_):\n return next((account for account in self.accounts\n if account.id == id_), None)", "def retrieveAlcaSubscription():\n if GlobalValues._alcaSubscription == None:\n # This method will set subscription name from config\n 
alcaNewSelection()\n GlobalValues._alcaSubscription = \\\n _getSubscription(Workflow(spec = \"FileAlcaSkim\", \n owner = \"CMSTier0\",\n name = \"FileAlcaSkim\"),\n Fileset( name = GlobalValues._alcaSubName )\n ) \n \n return GlobalValues._alcaSubscription", "def retrieve_subscription(self,\n subscription_id,\n include=None):\n\n return super().new_api_call_builder.request(\n RequestBuilder().server('default')\n .path('/v2/subscriptions/{subscription_id}')\n .http_method(HttpMethodEnum.GET)\n .template_param(Parameter()\n .key('subscription_id')\n .value(subscription_id)\n .should_encode(True))\n .query_param(Parameter()\n .key('include')\n .value(include))\n .header_param(Parameter()\n .key('accept')\n .value('application/json'))\n .auth(Single('global'))\n ).response(\n ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n .convertor(ApiResponse.create)\n ).execute()", "async def get_subscription_id(user: discord.User, redis: RedisDB):\n return await redis.get(user.id)", "def first(self, filter_deleted=False):\n objects = self.matching_objects(filter_deleted=filter_deleted)\n\n if len(objects) > 0:\n value = objects[0]\n if self.session is not None:\n if hasattr(value, \"id\"):\n self.session.watch(value)\n return value\n else:\n return None", "def get_single_user(self, id):\n for user in self.users:\n if user['id'] == id:\n return user", "def _get_subscription(self):\n response = requests.get(\n 'https://www.googleapis.com/youtube/v3/subscriptions',\n params={\n 'part': 'snippet',\n 'mine': 'true',\n 'forChannelId': self.channel_id\n },\n headers=self.headers\n )\n if response.status_code == 200:\n return response.json()\n return {}", "def get_subscription_id(self):\n subscription_id = self.get_intermediate(\"subscription_id\", None)\n if not subscription_id:\n subscription_id = self.cmd.cli_ctx.data.get('subscription_id')\n if not subscription_id:\n subscription_id = Profile(cli_ctx=self.cmd.cli_ctx).get_subscription_id()\n self.cmd.cli_ctx.data['subscription_id'] = subscription_id\n self.set_intermediate(\"subscription_id\", subscription_id, overwrite_exists=True)\n return subscription_id", "def find_by_id(self, id_):\n return self.by_id.get(id_)", "async def subscribe(self, subscription: Subscription, reqid: int) -> SubStreamPrivate:\n # a simple request response API, unblocking.\n\n # Because subscribe is callable multiple times with the same subdata,\n # but this would trigger \"already subscribed\" error on kraken side\n\n chanpriv = private_subscribe(channel_name=subscription.name,\n loop=asyncio.get_running_loop())\n\n subdata = Subscribe(subscription=subscription, reqid=reqid)\n\n strdata = self.subscribe_schema.dumps(subdata)\n await self.connect(strdata)\n\n # retrieving all channel_ids for this subscription:\n\n self._streams[subdata] = SubStreamPrivate(channelprivate=chanpriv)\n\n # await subscription to be set before returning\n return await self._streams[subdata]\n # TODO : maybe context manager to cleanup the queue when we dont use it or unsubscribe ?", "def saas_subscription_id(self) -> Optional[str]:\n return pulumi.get(self, \"saas_subscription_id\")", "def get_by_id(cls, id):\n return cls.query().get(id)", "def get_subscription(self):\n if not hasattr(self, '_subscription'):\n self._subscription = self.admin.subscriptions.select_related('plan').get_overlapping(\n self.admin_id, DateRange(self.period, self.period_end, bounds='[]'))\n return self._subscription", "def subscribers(id):\n return core.query(schema.streamBySubscribers, 
id)", "def customer_get_one(user_id):\n return customer_get(user_id)", "def _InsertSubscription(self,\n id='python.gcal.test%40gmail.com'):\n print 'Subscribing to the calendar with ID: %s' % id\n calendar = gdata.calendar.data.CalendarEntry()\n calendar.id = atom.data.Id(text=id)\n returned_calendar = self.cal_client.InsertCalendarSubscription(calendar)\n return returned_calendar", "def get(subject_name, user_email):\n return Subscription.get_by_key_name(subject_name + ':' + user_email)" ]
[ "0.7555253", "0.73221976", "0.6466023", "0.632846", "0.6229105", "0.6084679", "0.6083515", "0.603069", "0.5999968", "0.59402347", "0.59136426", "0.5903363", "0.5893972", "0.5842891", "0.5826261", "0.5789582", "0.570909", "0.5650244", "0.56212664", "0.56127733", "0.561177", "0.55845445", "0.5578854", "0.5577836", "0.557054", "0.5550458", "0.55424833", "0.5525151", "0.5516395", "0.54880804" ]
0.7979451
0
Returns a queryset with all the newsletters that this contact has subscriptions in.
def get_newsletters(self):
    return SubscriptionNewsletter.objects.filter(contact=self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_subscriptions(self):\n return self.subscriptions.all()", "def get_all_subscriptions(cls, **kwargs):\n return Subscription.query.filter(**kwargs).all()", "def subscriptions(self):\r\n return subs.AccountSubscriptions(self)", "def getAllSubscriptions(self):\n return self.request(\n \"getAllSubscriptions\",\n )", "def subscriptions(self):\r\n return v3.Subscriptions(self)", "def get_subscriptions(self) -> Iterator[\"Subscription\"]:\n yield from self._subscriptions[self.id]", "def get_subscribed_to_newsletter(self, queryset=None):\n if queryset is None:\n queryset = super(RevolvUserProfileManager, self).get_queryset()\n subscribed_users = queryset.filter(\n subscribed_to_newsletter=True\n ).order_by('user__date_joined')\n return subscribed_users", "def get_subscriptions(self):\n url = '{}/v2/subscriptions'.format(self.url)\n r = requests.get(url, headers=self.headers_v2)\n return r.json()", "def subscriptions(self):\r\n return subs.Subscriptions(self)", "def GetSubscriptions(self):\n\n return self.__GetJson(\"/subscriptions\", True)", "def get_subscriptions_from_self(self):\n return self._roster.get_my_subscriptions()", "def get_subscriptions(self, use_threading=False):\r\n \r\n if self._subscriptions is None:\r\n if use_threading:\r\n self.load_subscriptions_threaded()\r\n else:\r\n self._subscriptions = []\r\n for page in range(self._subscription_pages):\r\n self._load_subscriptions(page=page+1)\r\n return self._subscriptions", "def get_subscribers(self) -> Iterator[Any]:\n for subscription in self._subscriptions[self.id]:\n yield subscription.subscriber", "def subscriptions(self):\n if not hasattr(self, '_subscriptions'):\n subscriptions_resource = self.resource.subscriptions\n self._subscriptions = Subscriptions(\n subscriptions_resource, self.client)\n return self._subscriptions", "def list(cls, **kwargs):\n response = Yola().list_subscriptions(**kwargs)\n return [cls(**sub) for sub in response['results']]", "def listSubscriptions() -> object:\n\n db = Db()\n return db.Subscriptions.objects().to_json()", "def get_user_subscriptions(self, use_threading=False):\r\n \r\n subs = self.get_subscriptions(use_threading)\r\n return list(filter(lambda obj: isinstance(obj, User), subs))", "def subscriptions(self):\n if self.__subscriptions_manager is None:\n self.__subscriptions_manager = SubscriptionsManager(\n \"/subscriptions\", self._client\n )\n return self.__subscriptions_manager", "def get_subscriptions_to_self(self):\n return self._roster.get_my_subscribers()\n return self._roster.get_my_subscribers()", "def subscribedQueries(self):\n return map(Query.get, self.subscriptions)", "def list_subscriptions(self):\n return {'abonnementen': self.customer.abonnementen}", "def getSubscriptions(self):\n\n address = self.getAddress()\n if address is None:\n return []\n else:\n return [\n \"shellies/announce\",\n \"{}/online\".format(address),\n \"{}/emeter/{}/energy\".format(address, self.getChannel()),\n \"{}/emeter/{}/returned_energy\".format(address, self.getChannel()),\n \"{}/emeter/{}/power\".format(address, self.getChannel()),\n \"{}/emeter/{}/reactive_power\".format(address, self.getChannel()),\n \"{}/emeter/{}/voltage\".format(address, self.getChannel()),\n \"{}/emeter/{}/total\".format(address, self.getChannel()),\n \"{}/emeter/{}/total_returned\".format(address, self.getChannel())\n ]", "def GetSubscriptionsFrom(self):\n\n return self.__GetJson(\"/subscriptions/from\", True)", "def getSubscriptionList(self):\r\n return self.feeds", "def _get_subscriptions(self, topic_arn):\n 
return self.conn.get_all_subscriptions_by_topic(topic_arn)['ListSubscriptionsByTopicResponse']['ListSubscriptionsByTopicResult']['Subscriptions']", "def get_subscriptions_with_expired_invoices(self):\n subscriptions = []\n for invoice in self.get_expired_invoices():\n for invoice_item in invoice.invoiceitem_set.all():\n if (\n invoice_item.subscription\n and invoice_item.subscription not in subscriptions\n ):\n subscriptions.append(invoice_item.subscription)\n return subscriptions", "def subscriptions(self) -> list[Subscription]:\n return [\n *chain.from_iterable(self._simple_subscriptions.values()),\n *self._wildcard_subscriptions,\n ]", "def get_all_subscriptions(self, next_token=None):\r\n params = {'ContentType' : 'JSON'}\r\n if next_token:\r\n params['NextToken'] = next_token\r\n response = self.make_request('ListSubscriptions', params, '/', 'GET')\r\n body = response.read()\r\n if response.status == 200:\r\n return json.loads(body)\r\n else:\r\n boto.log.error('%s %s' % (response.status, response.reason))\r\n boto.log.error('%s' % body)\r\n raise self.ResponseError(response.status, response.reason, body)", "def getsubscriptions(self):\n subs = {}\n for sub in self._subscriptions.values():\n subs[sub.ID] = sub.asTuple()\n return subs", "def get_queryset(self):\n return filter_subjects(Subject.objects.all(), self.request.user)" ]
[ "0.76563036", "0.72232264", "0.6919182", "0.66618294", "0.66575885", "0.66574", "0.6607653", "0.65924275", "0.6519385", "0.6455571", "0.6406239", "0.6384957", "0.6365558", "0.6332527", "0.62650865", "0.6246953", "0.6179742", "0.617221", "0.61497104", "0.6115057", "0.61131865", "0.60833186", "0.60271585", "0.5994337", "0.5961331", "0.58928484", "0.58773446", "0.585569", "0.5839737", "0.5828236" ]
0.83896947
0
Returns the last paid invoice for this contact if it exists. Returns None if they have none.
def get_last_paid_invoice(self):
    try:
        return self.invoice_set.filter(Q(paid=True) | Q(debited=True)).latest("id")
    except Exception:
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pending_invoice(self,org_id=None):\n if org_id is None:\n org_id = self.org_id\n return self.get('{}/orgs/{}/invoices/pending'.format(ApiVersion.A1.value,org_id))", "def get_latest_invoice(self) -> CreditorInvoice:\n\n LOGGER.info(\"Getting latest invoice from EON Romania\")\n\n session = http.create_session()\n\n response = session.get(self._login_page_url)\n if response.status_code != 200:\n raise self.Error(\"Login page is not functioning\")\n\n soup = bs4.BeautifulSoup(response.content, \"html.parser\")\n csrf_token_elem = soup.find(\"input\", {\"name\": \"_csrf_token\"})\n if not csrf_token_elem:\n raise self.Error(\"Could not extract CSRF token\")\n\n login_data = {\n \"_username\": self._email,\n \"_password\": self._password,\n \"_csrf_token\": csrf_token_elem.get(\"value\"),\n }\n\n if session.post(self._login_url, login_data).status_code != 200:\n raise self.AuthError()\n\n response = session.get(self._invoices_url)\n soup = bs4.BeautifulSoup(response.content, \"html.parser\")\n\n if not soup.select_one(self._selectors.sidebar):\n raise self.AuthError()\n\n invoice_date_elem = soup.select_one(self._selectors.invoice_date)\n if not invoice_date_elem:\n raise self.Error(\"Failed to get invoice date\")\n\n invoice_due_date_elem = soup.select_one(self._selectors.invoice_due_date)\n if not invoice_due_date_elem:\n raise self.Error(\"Failed to get invoice due date\")\n\n invoice_payment_status_elem = soup.select_one(\n self._selectors.invoice_payment_status\n )\n if not invoice_payment_status_elem:\n raise self.Error(\"Failed to get invoice payment status\")\n\n invoice_amount_elem = soup.select_one(self._selectors.invoice_amount)\n if not invoice_amount_elem:\n raise self.Error(\"Failed to get invoice amount\")\n\n invoice_date = invoice_date_elem.contents[-1]\n invoice_due_date = invoice_due_date_elem.contents[-1]\n invoice_payment_status = invoice_payment_status_elem.contents[-1]\n invoice_amount = invoice_amount_elem.contents[-1]\n\n invoice = CreditorInvoice(\n float(invoice_amount.replace(\",\", \".\")),\n Currency.RON,\n datetime.strptime(invoice_date, \"%d.%m.%Y\"),\n datetime.strptime(invoice_due_date, \"%d.%m.%Y\"),\n PaymentStatus.PAID_CONFIRMED\n if invoice_payment_status == \"0.00\"\n else PaymentStatus.UNPAID,\n )\n\n LOGGER.info(\"Found latest EON Romania invoice\", invoice=invoice)\n return invoice", "def get_latest_invoice(self) -> CreditorInvoice:\n\n LOGGER.info(\"Getting latest invoice from Electrica Romania\")\n\n session = http.create_session()\n\n login_data = {\n \"myelectrica_utilizator\": self._email,\n \"myelectrica_pass\": self._password,\n \"myelectrica_login_btn\": \"\",\n }\n\n if session.post(self._login_url, login_data).status_code != 200:\n raise self.AuthError()\n\n response = session.get(self._invoices_url)\n if response.status_code != 200:\n raise self.Error(\"Failed to get list of invoices\")\n\n soup = bs4.BeautifulSoup(response.content, \"html.parser\")\n if not soup.select_one(self._selectors.current_user):\n raise self.AuthError()\n\n invoice_date_elem = soup.select_one(self._selectors.invoice_date)\n if not invoice_date_elem:\n raise self.Error(\"Failed to get invoice date\")\n\n invoice_due_date_elem = soup.select_one(self._selectors.invoice_due_date)\n if not invoice_due_date_elem:\n raise self.Error(\"Failed to get invoice due date\")\n\n invoice_payment_status_elem = soup.select_one(\n self._selectors.invoice_payment_status\n )\n if not invoice_payment_status_elem:\n raise self.Error(\"Failed to get invoice payment status\")\n\n 
invoice_amount_elem = soup.select_one(self._selectors.invoice_amount)\n if not invoice_amount_elem:\n raise self.Error(\"Failed to get invoice amount\")\n\n invoice_date = int(invoice_date_elem.get(\"data-order\"))\n invoice_due_date = int(invoice_due_date_elem.get(\"data-order\"))\n invoice_payment_status = invoice_payment_status_elem.text\n invoice_amount = float(invoice_amount_elem.text.replace(\",\", \".\"))\n\n invoice = CreditorInvoice(\n invoice_amount,\n Currency.RON,\n datetime.fromtimestamp(invoice_date),\n datetime.fromtimestamp(invoice_due_date),\n PaymentStatus.PAID_CONFIRMED\n if invoice_payment_status == \"Incasata\"\n else PaymentStatus.UNPAID,\n )\n\n LOGGER.info(\"Found latest Electria Romania invoice\", invoice=invoice)\n return invoice", "def getInvoice(self):\n return self.base.get(\"invoice\", [])", "def generate_next_invoice_number(obj):\n queryset = obj.__class__.objects.filter(year=obj.year, company=obj.company)\n max = queryset.aggregate(Max('number')).values()[0]\n if max is None:\n max = 0\n return (max + 1)", "def get(self):\n user = get_authenticated_user()\n if not user.stripe_id:\n raise NotFound()\n\n return {\"fields\": get_invoice_fields(user)[0]}", "def receivables_account(self) -> Optional[Account]:\n if self.cached_receivables_account is None:\n row = AccountEntry.objects.filter(source_invoice=self).order_by(\"id\").first()\n if row is not None:\n assert isinstance(row, AccountEntry)\n self.cached_receivables_account = row.account\n return self.cached_receivables_account", "def get_company_affiliation(order):\n redemption = CouponRedemption.objects.filter(order=order).last()\n if redemption:\n return redemption.coupon_version.payment_version.company\n return None", "def fetch_invoice(self, invoice):\n fetched_invoice = self.client.get_invoice(invoice.bitpay_id)\n\n if (invoice.status != fetched_invoice['status']):\n possible_transitions = invoice.possible_transitions_to(fetched_invoice['status'])\n\n if (len(possible_transitions) > 0):\n trigger = possible_transitions[0]['trigger']\n method_to_call = getattr(invoice, trigger)\n method_to_call()\n\n self._update_invoice_bitpay_params(invoice, fetched_invoice)\n\n return(invoice)", "def get_last_blockchain_value(self):\n if len(self.__chain) < 1:\n return None\n return self.__chain[-1]", "def get_last_blockchain_value(self):\n if len(self.__chain) < 1:\n return None\n return self.__chain[-1]", "def get_last_blockchain_value(self):\n if len(self.__chain) < 1:\n return None\n return self.__chain[-1]", "def get_last_blockchain_value(self):\n if len(self.__chain) < 1:\n return None\n return self.__chain[-1]", "def get_last_blockchain_value(self):\n if len(self.__chain) < 1:\n return None\n return self.__chain[-1]", "def paid(self):\n return self.get('paid')", "def next_customer(self) -> Optional[str]:\n if len(self.priority_customer) > 0:\n return self.priority_customer.pop(0)\n elif len(self.normal_customer) > 0:\n return self.normal_customer.pop(0)\n else:\n return None", "def get_last_blockchainvalue(self):\n if len(self.__chain) < 1:\n return None\n return self.__chain[-1]", "def get_last(self):\n return self.get_block(len(self.chain)-1)", "def lookup(self, invoice_code):\n return self.exchange_rate_btc_today[0]", "def get_latest(self):\n if len(self.points) == 0:\n return None\n return self.points[-1]", "def get_invoice(self):\n\n # Check if unclosed invoice for the client exists\n old_inv = connection.Kinko.find_one({'cl': self.cl, 'tid': None,\n 'typ': TYPE_MAP[self.tab_type]})\n\n inv_num = None\n # 
If it does, update its values and update packages\n if old_inv:\n old_inv.dt = datetime.datetime.today()\n old_inv.range.lt = self.q_dict[\"cs.sd\"].get(\"$lt\", None)\n old_inv.save()\n\n inv_num = old_inv.num\n\n else:\n #kinko dict to be updated in Kinko Collection.\n kdict = {\n \"amt\": 0.0,\n \"cl\": unicode(self.cl),\n \"dt\": datetime.datetime.today(),\n \"typ\": TYPE_MAP[self.tab_type],\n \"range\": {\"lt\": self.q_dict[\"cs.sd\"].get(\"$lt\", None),\n \"gt\": self.q_dict[\"cs.sd\"].get(\"$gte\", None),\n }\n }\n\n k = Kinko(kdict)\n\n k_count = 1\n\n #the get num method of Kinko model generates the unique no for new kinko\n k[\"num\"] = self.get_knum(1)\n while connection.Kinko.collection.find({\"num\": k.num}).count() > 0:\n k[\"num\"] = self.get_knum(k_count+1)\n k_count += k_count\n\n connection.Kinko(k).save()\n\n inv_num = k['num']\n\n if inv_num:\n #after creating a new document in Kinko all packages are updated.\n connection.Package.collection.update(self.q_dict, {'$set': {'inv.num': inv_num}}, safe=True, multi=True)\n \n #Aggrigation of remitted amount for requested client\n non_invoiced = kinko_map_reduce(inv_num, TYPE_MAP[self.tab_type])\n\n if len(non_invoiced) == 0:\n return False\n else:\n inv = connection.Kinko.find_one({'num': inv_num})\n if inv:\n inv.amt = non_invoiced[0]['value']['amt']\n inv.save()\n return inv\n else:\n return False\n else:\n return False", "def get_billing_document(self):\n if self.rut:\n return self.rut\n elif self.billing_id_doc:\n return self.billing_id_doc\n else:\n return self.contact.id_document", "def get_last_blockchain_value():\n if len(blockchain) < 1:\n return None\n return blockchain[-1]", "def get_last_blockchain_value(self):\n # chekking if the blockchian is empty or not\n if len(self.__chain) < 1:\n return None\n return self.__chain[-1]", "def last_donation(self):\n return self.donations[-1] if self.donations else 0", "def last_potential(self) -> Optional[PotentialEnergy]:\n return self.last(energy_type=PotentialEnergy)", "def get_last_blockchain_value():\n if len(blockchain)< 1:\n return None\n return blockchain[-1]", "def last_donation(self):\n return self._donations[-1]", "def last_candle(self):\r\n if self.length() > 0:\r\n return self.candles[0]\r\n else:\r\n return None", "def upcoming(cls, customer_id):\n invoice = PaymentInvoice.upcoming(customer_id)\n\n return Invoice.parse_from_api(invoice)" ]
[ "0.58459026", "0.56620586", "0.5511519", "0.5396463", "0.5305116", "0.52948034", "0.5150207", "0.51453376", "0.5130346", "0.51226276", "0.51226276", "0.51226276", "0.51226276", "0.51226276", "0.51183546", "0.5111227", "0.5106159", "0.5092524", "0.50849295", "0.50741434", "0.50210327", "0.50144315", "0.50102985", "0.49982595", "0.4986775", "0.49830967", "0.49720398", "0.49630594", "0.49313265", "0.49220556" ]
0.82362366
0
Adds a product history for this contact on the ContactProductHistory table. This is used to keep record of how many times a Contact has been active or inactive, and when. Soon this will be improved.
def add_product_history(
    self,
    subscription,
    product,
    new_status,
    campaign=None,
    seller=None,
    override_date=None,
):
    # TODO: this method should be migrated to the Subscription model
    history_of_this_product = subscription.contactproducthistory_set.filter(product=product)
    if history_of_this_product.exists():
        latest_history_of_this_product = history_of_this_product.latest("id")
    else:
        latest_history_of_this_product = None
    if latest_history_of_this_product:
        if latest_history_of_this_product.status == new_status:
            # if this is the same event, we will do nothing
            pass
        else:
            # if this is a different event, then we will activate or deactivate accordingly
            ContactProductHistory.objects.create(
                contact=self,
                subscription=subscription,
                date=override_date or date.today(),
                product=product,
                status=new_status,
                seller=seller,
            )
    else:
        ContactProductHistory.objects.create(
            contact=self,
            subscription=subscription,
            date=override_date or date.today(),
            product=product,
            status=new_status,
            seller=seller,
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def append_history_record(self, history_record):\n self.history.append(history_record)\n self._increment_history_pointer()", "def insert(self, unhealthy_product, name, description, stores, url):\n self.database.query('''INSERT INTO History\n VALUES (NULL,\n NOW(),\n :unhealthy_product,\n :healthy_product,\n :description,\n :stores,\n :url)''',\n unhealthy_product=unhealthy_product,\n healthy_product=name,\n description=description,\n stores=stores,\n url=url)\n print(f'La substitution du produit \"{name}\" a été ajoutée à la table \\\nHistory !', file=open('print_log.txt', 'a'))", "def add_history(self):\n # add separator, if there already are history entries\n if self.parentApp.History != '':\n self.parentApp.History += (\n '\\n\\n--- --- --- --- --- --- --- --- --- --- --- ---\\n\\n'\n )\n\n # add the transaction to it\n self.parentApp.History += self.parentApp.tmpTransC.to_str()", "def history(request):\n user = request.user\n user = CustomUser.objects.get(email=user)\n\n get_user_history = History.objects.filter(user=user)\n products_list = []\n for item in get_user_history:\n chosen_product = item.chosen_product\n products_list.append(chosen_product)\n remplacement_product = item.remplacement_product\n products_list.append(remplacement_product)\n\n context = {\n \"user\": user,\n \"list_of_products\": products_list,\n }\n return render(request, \"products/history.html\", context)", "def record_history_on_commit(self,\n clocked: 'Clocked',\n changes: dict,\n session: orm.Session,\n timestamp: dt.datetime):\n new_tick = self._get_new_tick(clocked)\n\n new_clock = self.make_clock(timestamp, new_tick)\n attr = {'entity': clocked}\n\n for prop, cls in self.history_models.items():\n if prop in changes:\n value = changes[prop]\n\n self._cap_previous_history_row(clocked, new_clock, cls)\n\n # Add new history row\n hist = attr.copy()\n hist[prop.key] = value\n session.add(\n cls(\n vclock=new_clock.vclock,\n effective=new_clock.effective,\n **hist,\n ),\n )", "def add_history(self,date,what,note):\r\n note = '.'.join(note.split(','))\r\n self.history.append([date,what,note])", "def add(self, product):\n pass", "def add_product(self):\n self.owner.new_product(self.barcode, self.description, self.price, self._add_product_callback)", "def RecordHistory( self ):\n if not self.restoringHistory:\n record = self.activated_node\n if self.historyIndex < -1:\n try:\n del self.history[self.historyIndex+1:]\n except AttributeError, err:\n pass\n if (not self.history) or record != self.history[-1]:\n self.history.append( record )\n del self.history[:-200]\n self.historyIndex = -1", "def history(self, history):\n\n self._history = history", "def create_new_package_history(self, customer, package=None, start_date=None, price=None, original_price=None):\n\n if original_price or customer.next_package_original_price !=0:\n if original_price !=0:\n pass\n else:\n original_price = customer.invoice_product_original_price\n else:\n # sale_order_lines = customer.next_package_sales_order_id.order_line\n # original_price = 0.0\n # for sale_order_line in sale_order_lines:\n # discount = (sale_order_line.discount * sale_order_line.price_subtotal) / 100.0\n # original_price_sale_order_line = sale_order_line.price_subtotal + discount\n # original_price = original_price + original_price_sale_order_line\n original_price = customer.invoice_product_original_price\n\n if package:\n original_price = package.list_price\n package_history_obj = self.env['isp_crm_module.customer_package_history']\n # Create new Package history 
for next package\n new_package_history = package_history_obj.create({\n 'customer_id': customer.id,\n 'package_id': package.id if package else customer.next_package_id,\n 'start_date': start_date if start_date else customer.next_package_start_date,\n 'price': price if price else customer.next_package_price,\n 'original_price': original_price if original_price else customer.next_package_original_price,\n })\n return new_package_history", "def add_product(cls, product_name, price, quantity):\n Product.insert(product_name=product_name,\n product_price=price,\n product_quantity=quantity,\n date_updated=date.today()).on_conflict(\n conflict_target=[Product.product_name],\n preserve=[Product.product_price,\n Product.product_quantity,\n Product.date_updated]).execute()\n print(f'\\nProduct added successfully!')\n print(f'Product: {product_name} ' +\n f'Price: ${int(price) / 100:.2f} ' +\n f'Quantity: {quantity}\\n')", "def _push_history(self):\n self._history.append(self._state)", "def history(self, history):\n self._history = history", "def record_history(self,\n clocked: 'Clocked',\n session: orm.Session,\n timestamp: dt.datetime):\n new_tick = self._get_new_tick(clocked)\n\n is_strict_mode = session.info[STRICT_MODE_KEY]\n vclock_history = attributes.get_history(clocked, 'vclock')\n is_vclock_unchanged = (vclock_history.unchanged and\n new_tick == vclock_history.unchanged[0])\n\n new_clock = self.make_clock(timestamp, new_tick)\n attr = {'entity': clocked}\n\n for prop, cls in self.history_models.items():\n value = self._get_prop_value(clocked, prop)\n\n if value is not NOT_FOUND_SENTINEL:\n if is_strict_mode:\n assert not is_vclock_unchanged, \\\n 'flush() has triggered for a changed temporalized property outside of a clock tick'\n\n self._cap_previous_history_row(clocked, new_clock, cls)\n\n # Add new history row\n hist = attr.copy()\n hist[prop.key] = value\n session.add(\n cls(\n vclock=new_clock.vclock,\n effective=new_clock.effective,\n **hist,\n ),\n )", "def __add_current_fen_to_history(self):\n self.history = np.hstack((self.history, self.fen()))", "def add_product(\n self,\n product,\n address,\n copies=1,\n message=None,\n instructions=None,\n route_id=None,\n order=None,\n seller_id=None,\n override_date=None,\n label_contact=None,\n ):\n sp = SubscriptionProduct.objects.create(\n subscription=self,\n product=product,\n address=address,\n copies=copies,\n label_message=message or None,\n special_instructions=instructions or None,\n label_contact=label_contact,\n seller_id=seller_id,\n route_id=route_id,\n order=order,\n )\n self.contact.add_product_history(\n subscription=self,\n product=product,\n new_status=\"A\",\n campaign=self.campaign,\n seller=sp.seller,\n override_date=override_date,\n )\n return sp", "def insert(self, product):\n pass", "def onRegisterHistory(self):\n pass", "def write(cls, products, values, *args):\n IndexBacklog = Pool().get('elasticsearch.index_backlog')\n rv = super(Product, cls).write(products, values, *args)\n IndexBacklog.create_from_records(products)\n return rv", "def add_task_history(self, task_name):\n self._task_history.append(task_name)", "def add_product(product_dict):\n product = models.Product(**product_dict)\n app.session.merge(product)\n app.session.commit()", "def addProduct(self, *args):\n return _libsbml.Reaction_addProduct(self, *args)", "def append(self, new):\n new = HistoryItem(new)\n list.append(self, new)\n new.idx = len(self)", "def add(self, record):\n self._hist_records[record.uid] = record", "def add_product(self, product: 
Product):\n log.debug(\"Adding a new product\")\n product_parameters = product.to_db()\n try:\n with DBCursor(self.host) as cursor:\n cursor.execute(\"INSERT INTO items VALUES (?, ?, ?, ?, ?)\", (product_parameters['name'].lower(), product_parameters['units'], product_parameters['last_buy'], product_parameters['cost'], product_parameters['price']))\n except sqlite3.IntegrityError:\n log.critical(\"An integrity error was raised. Maybe a matching name or id.\")\n raise DatabaseIntegrityError(\"There's a matching name or id already stored.\")\n else:\n log.info(f\"{product.__repr__} was added successfully.\")", "def set_package_change_history(self, customer):\n today = datetime.today().strftime('%Y-%m-%d')\n package_history_obj = self.env['isp_crm_module.customer_package_history']\n customer_package_history_count = package_history_obj.search_count([\n ('customer_id', '=', customer.id),\n ])\n if customer_package_history_count > 0:\n # Get Last Package change request of the customer\n package_change_req = self.env['isp_crm_module.change_package'].search([\n ('customer_id', '=', customer.id),\n ],\n order='create_date desc',\n limit=1\n )\n tomorrow = date.today() + timedelta(days=1)\n active_date = 1\n # Check if any package change request has been made recently.\n if package_change_req:\n package_change_req_active_date = datetime.strptime(package_change_req.active_from, \"%Y-%m-%d\").date()\n active_date = package_change_req_active_date - tomorrow\n active_date = int(abs(active_date.days))\n if active_date == 0 or customer.current_package_id.id != customer.next_package_id.id:\n # Update Last Package's end date\n last_package_history_obj = package_history_obj.search([\n ('customer_id', '=', customer.id),\n # ('package_id', '=', customer.current_package_id.id),\n ],\n order='create_date desc',\n limit=1\n )\n last_package_history_obj.update({\n 'end_date' : today,\n })\n # Create new Package History\n if active_date == 0 and package_change_req:\n return self.create_new_package_history(customer=customer, package=package_change_req.to_package_id, start_date= str(datetime.strptime(package_change_req.active_from, \"%Y-%m-%d\").date()))\n else:\n return self.create_new_package_history(customer=customer)\n else:\n original_price = customer.invoice_product_original_price\n # Creates Package history if the current customer has no package history\n return self.create_new_package_history(\n customer = customer,\n package = customer.current_package_id,\n start_date = customer.current_package_start_date,\n price = customer.current_package_price,\n original_price = original_price,\n )", "def add_products_page(cls, logger=None):\n if logger is None:\n logger = cls._logger\n database_connection = DatabaseConnection(f\"products.csv\")\n table = database_connection.table\n\n product = Product() # container\n\n logger.log(\n \"Note: Please do not use this interface to update existing products or enter many products\"\n )\n\n # set product name\n while True:\n logger.log(\"Enter 0 to exit\")\n product_name = input(\"Product name: \")\n\n if product_name==\"0\":\n logger.log(\"Returning to portal\")\n return 200\n elif product_name=='':\n logger.log('Please choose a valid product name')\n elif len(table[(table[f'product_name']==product_name)]) > 0:\n logger.log(f\"{product_name} already exists, please enter a unique product name\")\n else:\n product.set_product_name(product_name) # to be saved\n break\n\n product.set_description(input(\"Description: \"))\n\n # set quantity\n while True:\n quantity = 
input(\"Quantity: \")\n try:\n int(quantity)\n except ValueError:\n quantity = \"\"\n if quantity==\"\":\n logger.log('Please choose a valid integer quantity')\n else:\n product.set_quantity(int(quantity)) # to be saved\n break\n\n # set price\n while True:\n price = input(\"Price: \")\n try:\n float(price)\n except ValueError:\n price = \"\"\n if price==\"\":\n logger.log('Please choose a valid USD price, eg. $9.99 without the $')\n else:\n product.set_price(round(float(price), 2)) # to be saved\n break\n\n # save\n while True:\n confirm = input(\"Type 'yes' to confirm your new product, \"\n \"Enter empty to exit without saving: \")\n if confirm == 'yes':\n last_id = table[f'product_id'].max()\n if pd.isna(last_id):\n last_id = 0\n df = pd.DataFrame.from_records([\n {'product_id': last_id + 1,\n 'product_name': product.get_product_name(),\n 'description': product.get_description(),\n 'quantity': product.get_quantity(),\n 'price': product.get_price(),\n }\n ])\n database_connection.append(df)\n logger.log(\"Product created!\")\n break\n\n return 200", "def add_product(self, name, energy_points):\n now = datetime.datetime.now()\n date = \"{}-{}-{}\".format(now.year, now.month, now.day)\n Product(productName=name, energyPoints=energy_points, date=date)", "def appendProcessingHistoryItem(context, item):\n projectDir = context.projectDir\n history = GenericMetadata._readEntriesForSection(projectDir, GenericMetadata.HISTORY_SECTION)\n try:\n idx = int(history['numsteps'])\n except KeyError:\n idx = 0\n idx += 1\n \n idxStr = str(idx)\n key = GenericMetadata.HISTORY_PROTO + idxStr\n GenericMetadata._writeEntriesToSection(projectDir, GenericMetadata.HISTORY_SECTION, [key, 'numsteps'], [item, idxStr])" ]
[ "0.57782525", "0.5642956", "0.56051624", "0.55821145", "0.5513348", "0.5411007", "0.53511876", "0.53429097", "0.53345805", "0.5299433", "0.5290899", "0.52849525", "0.5278879", "0.5265222", "0.52643013", "0.52299225", "0.5223089", "0.5205646", "0.51907194", "0.5183049", "0.5177131", "0.5157027", "0.5121939", "0.51110154", "0.50427526", "0.5041034", "0.5038937", "0.5027508", "0.50082767", "0.49892315" ]
0.77234083
0
Returns the amount of products in this subscription
def get_product_count(self):
    return self.products.count()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def product_count(self) -> int:\n return self._product_count", "def getNumProducts(self):\n return _libsbml.Reaction_getNumProducts(self)", "def _grand_total(self):\n count = 0\n for product in self.products:\n count += product.price\n return count", "def __len__(self):\n return sum(item['qty'] for item in self.cart.values())", "def __len__(self):\n return sum(item['quantity'] for item in self.cart.values())", "def __len__(self):\n \n return sum(item['quantity'] for item in self.cart.values())", "def total_purchase(self):\n\n total_amount = 0\n #grab all the item\n items = self.item_set.all()\n for item in items:\n total_amount += item.price\n return total_amount", "def products(self):\r\n return self._products", "def __len__(self):\n return sum(item['qty'] for item in self.basket.values()) # counts all the values of the key qty", "def price_count(self):\n return self.price_set.count()", "def getNumPurchased(self):\n return self.numberPurchased", "def sum_promos_per_product(self, product=None):\n if product is None:\n subprods = SubscriptionProduct.objects.filter(\n route=self, subscription__active=True, subscription__type='P').aggregate(Sum('copies'))\n else:\n subprods = SubscriptionProduct.objects.filter(\n route=self, product=product, subscription__active=True, subscription__type='P').aggregate(Sum('copies'))\n return subprods['copies__sum']", "def num_items(self):\n num_items = 0\n for line in self.lines.all():\n num_items += line.quantity\n return num_items", "def count(self, **query):\n # This may be optimised into one query in the future.\n result = 0\n for product_type, count in self._do_count_by_product(query):\n result += count\n\n return result", "def products(self):\n return self._products", "def product_summary(self):\n # products = self.products.filter(type='S') # TODO: explain the usage of this commented line or remove it\n from .utils import process_products\n\n subscription_products = SubscriptionProduct.objects.filter(subscription=self)\n dict_all_products = {}\n for sp in subscription_products:\n dict_all_products[str(sp.product.id)] = str(sp.copies)\n return process_products(dict_all_products)", "def get_total_price(self):\n i = self.get_copy_with_resolved_dependencies()\n total_price = Decimal(0)\n for product in i['products']:\n billed_price = Decimal(str(product.get('price', 0))) * Decimal(str(product.get('quantity')))\n total_price += billed_price\n return total_price", "def quantity(self):\n return db.session.query(\n db.func.sum(StockChange.amount)\n ).filter(\n StockChange.product_id == self.id\n ).scalar() or 0", "def total_qty(self):\n return sum(self.quantities)", "def __len__(self):\n return sum(item[\"quantity\"] for item in self.carro.values())", "def count_items(self):\n count = 0\n for o in self.order_lst:\n count += o.count()\n \n return count", "def total_quantity(self) -> int:\n total = 0\n for i in self.order_items:\n total += i.quantity\n return total", "def count_products(list_products):\n for each_item in ADD_PRODUCTS: #This iterates in the dictionary\n num_of_products = list_products.count(each_item) #This count each product\n if num_of_products > 0:\n price = ADD_PRODUCTS[each_item]\n print num_of_products, each_item + \"(s)\", \"a\", (\"Q%.2f c/u\") % price", "def _get_as_dict_count(self):\n counter = Counter()\n for product in self.products:\n counter[product.id] += 1\n return counter", "def show_available_products():\n available_product = {}\n\n if not collection_exist(DATABASE, PRODUCT_COLLECTION):\n return available_product\n\n with 
MongoDBConnection() as mongo:\n database = mongo.connection[DATABASE]\n\n available_product__count = 0\n for product in database[PRODUCT_COLLECTION].find({\"quantity_available\": {\"$ne\": '0'}}):\n available_product[product['product_id']] = \\\n {'description': product['description'],\n 'product_type': product['product_type'],\n 'quantity_available': product['quantity_available']}\n available_product__count += 1\n\n return available_product__count", "def volume_used(self):\n return sum(product.volume\n for product in self.placed_products)", "def total_amount(self):\n full_price = sum(item.price for item in self._products) if self._products else 0.0\n return full_price - self._get_discount()", "def nb_cart_items(self):\n return CartItem.objects.filter(cart=self).count()", "def get_num_items(self):\r\n return self.num_items", "def _count_subscriptions(self):\n for partner in self:\n subscriptions = self.env['subscription.subscription']\n count = subscriptions.sudo().search_count([('partner_id', '=', partner.id)])\n for child in partner.child_ids:\n count += subscriptions.sudo().search_count([('partner_id', '=', child.id)])\n partner.subscriptions_count = count" ]
[ "0.75261235", "0.7410046", "0.73862386", "0.69536", "0.6936234", "0.6911655", "0.68340504", "0.66541094", "0.6592634", "0.65747774", "0.64729214", "0.64634186", "0.64480746", "0.6446529", "0.64411473", "0.6425942", "0.6403196", "0.6384714", "0.63453466", "0.62762976", "0.62412167", "0.62311417", "0.61794764", "0.61716574", "0.6145223", "0.6143505", "0.6138947", "0.60880464", "0.606478", "0.6057014" ]
0.8000121
0
Simple function that shows a link to edit the current subscription under a list of products. It's used to reduce clutter in the admin panel, only showing a small amount of information.
def edit_products_field(self):
    text = '<table style="padding:5px;">'
    subscription_products = SubscriptionProduct.objects.filter(subscription=self)
    for sp in subscription_products:
        text += (
            '<tr style="padding:5px;"><td style="padding:5px;">{}</td><td style="padding:5px;">{} un.</td>'
            '<td style="padding:5px;">{}</td></tr>'.format(
                sp.product.name, sp.copies, sp.address
            )
        )
    text += "</table>"
    text += (
        "<a href='/admin/core/subscription/{}/' target='_blank'>Edit</a>".format(
            self.id
        )
    )
    return mark_safe(text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subscriber_detail(self):\n model_name = Subscriber._meta.object_name.lower()\n app_label = self._meta.app_label\n link = '/admin/%s/%s/' % (app_label, model_name)\n link += '?campaign__id=%d' % self.id\n display_link = _(\"<a href='%(link)s'>%(name)s</a>\") % \\\n {'link': link, 'name': _('details')}\n return display_link", "def display(auth_context):\n\n products = product_catalog.list_products()\n # Get promoted products recommended by the AutoML model.\n promos = product_catalog.get_promos()\n return render_template('product_catalog.html',\n products=products,\n promos=promos,\n auth_context=auth_context,\n bucket=product_catalog.BUCKET)", "def detail_url(product_id):\n return reverse('product:product-detail', args=[product_id])", "def show_products_html(self, ul=False, br=True):\n output = \"\"\n if ul:\n output += \"<ul>\"\n for sp in SubscriptionProduct.objects.filter(\n subscription=self, product__offerable=True).order_by('product_id'):\n count = self.products.filter(offerable=True).count()\n if ul:\n if sp.label_contact:\n output += \"<li>{} ({})</li>\".format(sp.product.name, sp.label_contact.name)\n else:\n output += \"<li>{}</li>\".format(sp.product.name)\n else:\n if sp.label_contact:\n output += \"{} ({})\".format(sp.product.name, sp.label_contact.name)\n else:\n output += \"{}\".format(sp.product.name)\n if count > 1:\n if br:\n output += \"<br>\"\n else:\n output += \"\\n\"\n if ul:\n output += \"</ul>\"\n return output", "def view_link(self, obj):\n if obj.cwr:\n url = reverse(\n 'admin:music_publisher_cwrexport_change', args=(obj.id,))\n url += '?preview=true'\n return mark_safe(\n '<a href=\"{}\" target=\"_blank\">View CWR</a>'.format(url))", "def view_link(self, obj):\n if obj.cwr:\n url = reverse(\n 'admin:music_publisher_ackimport_change', args=(obj.id,))\n url += '?preview=true'\n return mark_safe(\n '<a href=\"{}\" target=\"_blank\">View CWR</a>'.format(url))", "def edit_product(request, pk):\n\n products = get_object_or_404(Product, pk=pk)\n if request.method == 'POST':\n form = ProductPostForm(request.POST, instance=products)\n if form.is_valid():\n product = form.save()\n return redirect(product_details, product.pk)\n else:\n form = ProductPostForm(instance=products)\n return render(request, 'editproduct.html', {'form': form})", "def open_products_page(catalog_menu):\n catalog_menu.open_products_page()", "def products(request):\n\n return render(request, \"core/products.html\", {\n \"products\": Product.objects.all()\n })", "def product_detail(request, product_id):\n product = get_object_or_404(Product, pk=product_id)\n print(request.path)\n template = './product_detail.html'\n context = {\n 'product': product,\n }\n\n # products = Product.objects.all()\n\n return render(request, template, context)", "def product_detail(request, product_id):\n\n product = get_object_or_404(Product, pk=product_id)\n options = None\n\n if 'option' in request.GET:\n options = request.GET['option']\n options = list(Option.objects.filter(name__in=options))\n\n context = {\n 'product': product,\n 'options': options,\n }\n\n return render(request, 'products/product_detail.html', context)", "def product_view(request, product):\n product = Products.objects.get(product=product)\n\n context = {\n \"product\": product,\n }\n\n return render(request, \"products/product_detail.html\", context)", "def admin_update_preview():\n return user_management_handler(\"show_admin\", \"\", False)", "def __edit_product_menu(self, product: Optional[db.SwimPool] = None):\n log.debug(\"Displaying 
__edit_product_menu\")\n # Create an inline keyboard with a single skip button\n cancel = telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton(self.loc.get(\"menu_skip\"),\n callback_data=\"cmd_cancel\")]])\n # Ask for the product name until a valid product name is specified\n while True:\n # Ask the question to the user\n self.bot.send_message(self.chat.id, self.loc.get(\"ask_product_name\"))\n # Display the current name if you're editing an existing product\n if product:\n self.bot.send_message(self.chat.id, self.loc.get(\"edit_current_value\", value=escape(product.name)),\n reply_markup=cancel)\n # Wait for an answer\n name = self.__wait_for_regex(r\"(.*)\", cancellable=bool(product))\n # Ensure a product with that name doesn't already exist\n if (product and isinstance(name, CancelSignal)) or \\\n self.session.query(db.Product).filter_by(name=name, deleted=False).one_or_none() in [None, product]:\n # Exit the loop\n break\n self.bot.send_message(self.chat.id, self.loc.get(\"error_duplicate_name\"))\n # Ask for the product description\n self.bot.send_message(self.chat.id, self.loc.get(\"ask_product_description\"))\n # Display the current description if you're editing an existing product\n if product:\n self.bot.send_message(self.chat.id,\n self.loc.get(\"edit_current_value\", value=escape(product.description)),\n reply_markup=cancel)\n # Wait for an answer\n description = self.__wait_for_regex(r\"(.*)\", cancellable=bool(product))\n # Ask for the product price\n self.bot.send_message(self.chat.id,\n self.loc.get(\"ask_product_price\"))\n # Display the current name if you're editing an existing product\n if product:\n self.bot.send_message(self.chat.id,\n self.loc.get(\"edit_current_value\",\n value=(str(self.Price(product.price))\n if product.price is not None else 'Non in vendita')),\n reply_markup=cancel)\n # Wait for an answer\n price = self.__wait_for_regex(r\"([0-9]+(?:[.,][0-9]{1,2})?|[Xx])\",\n cancellable=True)\n # If the price is skipped\n if isinstance(price, CancelSignal):\n pass\n elif price.lower() == \"x\":\n price = None\n else:\n price = self.Price(price)\n # Ask for the product image\n self.bot.send_message(self.chat.id, self.loc.get(\"ask_product_image\"), reply_markup=cancel)\n # Wait for an answer\n photo_list = self.__wait_for_photo(cancellable=True)\n # If a new product is being added...\n if not product:\n # Create the db record for the product\n # noinspection PyTypeChecker\n product = db.Product(name=name,\n description=description,\n price=int(price) if price is not None else None,\n deleted=False)\n # Add the record to the database\n self.session.add(product)\n # If a product is being edited...\n else:\n # Edit the record with the new values\n product.name = name if not isinstance(name, CancelSignal) else product.name\n product.description = description if not isinstance(description, CancelSignal) else product.description\n product.price = int(price) if not isinstance(price, CancelSignal) else product.price\n # If a photo has been sent...\n if isinstance(photo_list, list):\n # Find the largest photo id\n largest_photo = photo_list[0]\n for photo in photo_list[1:]:\n if photo.width > largest_photo.width:\n largest_photo = photo\n # Get the file object associated with the photo\n photo_file = self.bot.get_file(largest_photo.file_id)\n # Notify the user that the bot is downloading the image and might be inactive for a while\n self.bot.send_message(self.chat.id, self.loc.get(\"downloading_image\"))\n self.bot.send_chat_action(self.chat.id, 
action=\"upload_photo\")\n # Set the image for that product\n product.set_image(photo_file)\n # Commit the session changes\n self.session.commit()\n # Notify the user\n self.bot.send_message(self.chat.id, self.loc.get(\"success_product_edited\"))", "def product_detail(request, product_id):\n product = get_object_or_404(Product, pk=product_id)\n\n context = {\n 'product': product,\n }\n\n return render(request, 'products/product_detail.html', context)", "def edit_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'Invalid Request: Only admin can edit products/services.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n if request.method == 'POST':\n form = ProductForm(request.POST, request.FILES, instance=product)\n if form.is_valid():\n form.save()\n messages.success(request, 'Update Successful!')\n return redirect(reverse('product_detail', args=[product.id]))\n else:\n messages.error(request, 'Update Failed. \\\n Please check that the details in the form are valid ')\n else:\n form = ProductForm(instance=product)\n messages.info(request, f'You are editing {product.name}')\n\n template = 'products/edit_product.html'\n context = {\n 'form': form,\n 'product': product,\n 'on_edit_product_page': True\n }\n\n return render(request, template, context)", "def show_admin_edit_users():\n return render_admin_page(\"admin-eu.html\")", "def edit_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n \n product = get_object_or_404(Product, pk=product_id)\n if request.method == 'POST':\n # This will get data from form and to update the product instance called above\n form = ProductForm(request.POST, request.FILES, instance=product)\n if form.is_valid():\n form.save()\n messages.success(request, 'Successfully updated product!')\n return redirect(reverse('product_detail', args=[product.id]))\n else:\n messages.error(request, 'Failed to update product. 
Please ensure the form is valid.')\n else:\n # populate the form with product instance\n form = ProductForm(instance=product)\n messages.info(request, f'You are editing {product.name}')\n\n template = 'products/edit_product.html'\n context = {\n 'form': form,\n 'product': product,\n }\n\n return render(request, template, context)", "def products(request):\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n products = Product.objects.all()\n template = \"auctionsmng/products.html\"\n\n context = {\n 'products': products\n }\n\n return render(request, template, context)", "def product_detail(request, product_id):\n # Search for product in Product Model using pk identifier obtained from project_id\n product = get_object_or_404(Product, pk=product_id)\n context = {\n 'product': product,\n }\n return render(request, 'products/product_detail.html', context)", "def __products_menu(self):\n log.debug(\"Displaying __products_menu\")\n # Get the products list from the db\n products = self.session.query(db.Product).filter_by(deleted=False).all()\n # Create a list of product names\n product_names = [product.name for product in products]\n # Insert at the start of the list the add product option, the remove product option and the Cancel option\n product_names.insert(0, self.loc.get(\"menu_all_cancel\"))\n product_names.insert(1, self.loc.get(\"menu_add_product\"))\n product_names.insert(2, self.loc.get(\"menu_delete_product\"))\n # Create a keyboard using the product names\n keyboard = [[telegram.KeyboardButton(product_name)] for product_name in product_names]\n # Send the previously created keyboard to the user (ensuring it can be clicked only 1 time)\n self.bot.send_message(self.chat.id, self.loc.get(\"conversation_admin_select_product\"),\n reply_markup=telegram.ReplyKeyboardMarkup(keyboard, one_time_keyboard=True))\n # Wait for a reply from the user\n selection = self.__wait_for_specific_message(product_names, cancellable=True)\n # If the user has selected the Cancel option...\n if isinstance(selection, CancelSignal):\n # Exit the menu\n return\n # If the user has selected the Add Product option...\n elif selection == self.loc.get(\"menu_add_product\"):\n # Open the add product menu\n self.__edit_product_menu()\n # If the user has selected the Remove Product option...\n elif selection == self.loc.get(\"menu_delete_product\"):\n # Open the delete product menu\n self.__delete_product_menu()\n # If the user has selected a product\n else:\n # Find the selected product\n product = self.session.query(db.Product).filter_by(name=selection, deleted=False).one()\n # Open the edit menu for that specific product\n self.__edit_product_menu(product=product)", "def edit_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n\n if request.method == \"POST\":\n product_form = EditProductForm(request.POST, request.FILES,\n instance=product)\n if product_form.is_valid:\n product = product_form.save()\n messages.success(request, f'You have successfully updated \\\n product {product}.')\n return redirect('products')\n else:\n messages.error(request, 'Failed to update product. 
\\\n Please ensure the form is valid.')\n\n product_form = EditProductForm(instance=product)\n\n # Get all the product images to display on the edit form\n product_images = product.images.all()\n\n messages.info(request, f'You are editing product: \\\n {product}')\n\n template = 'auctionsmng/edit_product.html'\n\n context = {\n 'product_form': product_form,\n 'product': product,\n 'images': product_images,\n }\n return render(request, template, context)", "def subscription_view(\r\n current_subscription_view=CurrentSubscriptionView.as_view(),\r\n billing_details_view=SubscriptionBillingDetailsView.as_view(),\r\n confirmation_view=SubscriptionConfirmationView.as_view(),\r\n):\r\n def dispatch(request, *args, **kwargs):\r\n cur_product_cls = request.user.billing_account.get_current_product_class()\r\n req_product_name = kwargs['product']\r\n try:\r\n req_product_cls = billing.loading.get_product(req_product_name)\r\n except ValueError:\r\n raise Http404\r\n if req_product_cls not in request.user.billing_account.get_visible_products():\r\n raise Http404\r\n if cur_product_cls == req_product_cls:\r\n return current_subscription_view(request, *args, **kwargs)\r\n elif (\r\n req_product_cls.get_requires_payment_details()\r\n and not request.user.billing_account.has_valid_billing_details()\r\n ):\r\n return billing_details_view(request, *args, **kwargs)\r\n elif (\r\n not req_product_cls.get_requires_payment_details()\r\n or request.user.billing_account.has_valid_billing_details()\r\n ):\r\n return confirmation_view(request, *args, **kwargs)\r\n else:\r\n raise RuntimeError('Error: null condition should never occur')\r\n return dispatch", "def search_products_as_admin_single_page(self, **kwargs):\n return slurp(\n 'search_products_as_admin',\n self.search_products_as_admin,\n 'ProductViewDetails',\n **kwargs\n )", "def products():\n\n\treturn render_template(\"products.html\")", "def edit_item(request, product_id):\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, you are not permitted to do that.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n if request.method == 'POST':\n form = ProductForm(request.POST, request.FILES, instance=product)\n if form.is_valid():\n form.save()\n messages.success(request, 'You have successfully updated store item!')\n return redirect(reverse('home'))\n else:\n messages.error(request, 'Failed to update item. 
Please check the form.')\n else:\n form = ProductForm(instance=product)\n messages.info(request, f'You are editing {product.name}')\n\n template = 'products/edit_item.html'\n context = {\n 'form': form,\n 'product': product,\n }\n\n return render(request, template, context)", "def _render_link(self, context, name, label, extra=''):\n product = Product.select(self.env, where={'name' : name})\n if product:\n product = product[0]\n href = context.href.products(product.prefix)\n if 'PRODUCT_VIEW' in context.perm(product.resource):\n return tag.a(label, class_='product', href=href + extra)\n elif 'PRODUCT_CREATE' in context.perm('product', name):\n return tag.a(label, class_='missing product', \n href=context.href('products', action='new'),\n rel='nofollow')\n return tag.a(label, class_='missing product')", "def view_product(cls, product_id):\n product = Product.get_by_id(product_id)\n print(f'Product ID: {product.product_id}')\n print(f'Product Name: {product.product_name}')\n print(f'Quantity: {product.product_quantity}')\n print(f'Price: ${product.product_price / 100:.2f}\\n')", "def edit_product(request, product_id):\n\n if not request.user.is_superuser:\n messages.error(request, 'This feature is for Admin only.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n if request.method == 'POST':\n form = ProductForm(request.POST, request.FILES, instance=product)\n if form.is_valid():\n form.save()\n messages.success(request, 'Successfully updated product.')\n return redirect(reverse('product_detail', args=[product.id]))\n else:\n messages.error(request,\n 'Failed to update, please ensure form is valid.')\n else:\n form = ProductForm(instance=product)\n messages.info(request, f'You are editing { product.name }')\n\n template = 'products/edit_product.html'\n context = {\n 'form': form,\n 'product': product,\n }\n\n return render(request, template, context)", "def edit_link(instance):\n\n try:\n content_type = ContentType.objects.get_for_model(instance.__class__)\n except AttributeError:\n raise ValueError('Passed value must be registered model instance')\n else:\n model_admin_change_link = 'admin:{app}_{model}_change'.format(\n app=content_type.app_label,\n model=content_type.model\n )\n return reverse(model_admin_change_link, args=(instance.id,))" ]
[ "0.6159798", "0.5867929", "0.5686048", "0.5674001", "0.5663777", "0.5611686", "0.5566049", "0.5555919", "0.5525453", "0.55074465", "0.5486064", "0.5476782", "0.54587424", "0.54584545", "0.5458185", "0.54244345", "0.53984183", "0.53634214", "0.53292894", "0.5325081", "0.5323458", "0.53226054", "0.53181833", "0.53165525", "0.5310126", "0.53070414", "0.5289439", "0.5289328", "0.52819735", "0.52315634" ]
0.78890985
0
Used to add products to the current subscription. It is encouraged to always use this method when you want to add a product to a subscription, so you always have control of what happens here. This also creates a product history with the current subscription, product, and date, with the type 'A' (Activation).
def add_product( self, product, address, copies=1, message=None, instructions=None, route_id=None, order=None, seller_id=None, override_date=None, label_contact=None, ): sp = SubscriptionProduct.objects.create( subscription=self, product=product, address=address, copies=copies, label_message=message or None, special_instructions=instructions or None, label_contact=label_contact, seller_id=seller_id, route_id=route_id, order=order, ) self.contact.add_product_history( subscription=self, product=product, new_status="A", campaign=self.campaign, seller=sp.seller, override_date=override_date, ) return sp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_product_history(\n self,\n subscription,\n product,\n new_status,\n campaign=None,\n seller=None,\n override_date=None,\n ):\n # TODO: this method should be migrated to the Subscription model\n\n history_of_this_product = subscription.contactproducthistory_set.filter(product=product)\n\n if history_of_this_product.exists():\n latest_history_of_this_product = history_of_this_product.latest(\"id\")\n else:\n latest_history_of_this_product = None\n\n if latest_history_of_this_product:\n if latest_history_of_this_product.status == new_status:\n # if this is the same event, we will do nothing\n pass\n else:\n # if this is a different event, then we will activate or deactivate accordingly\n ContactProductHistory.objects.create(\n contact=self,\n subscription=subscription,\n date=override_date or date.today(),\n product=product,\n status=new_status,\n seller=seller,\n )\n else:\n ContactProductHistory.objects.create(\n contact=self,\n subscription=subscription,\n date=override_date or date.today(),\n product=product,\n status=new_status,\n seller=seller,\n )", "def add(self, product):\n pass", "def add_product(self):\n self.owner.new_product(self.barcode, self.description, self.price, self._add_product_callback)", "def addProduct(self, *args):\n return _libsbml.Reaction_addProduct(self, *args)", "def add_product(self, product):\n return self._make_post_request(self._urls['products'],\n data=dict(name=product))", "def add_product(self, name, energy_points):\n now = datetime.datetime.now()\n date = \"{}-{}-{}\".format(now.year, now.month, now.day)\n Product(productName=name, energyPoints=energy_points, date=date)", "def insert(self, product):\n pass", "def add_product(cls, product_name, price, quantity):\n Product.insert(product_name=product_name,\n product_price=price,\n product_quantity=quantity,\n date_updated=date.today()).on_conflict(\n conflict_target=[Product.product_name],\n preserve=[Product.product_price,\n Product.product_quantity,\n Product.date_updated]).execute()\n print(f'\\nProduct added successfully!')\n print(f'Product: {product_name} ' +\n f'Price: ${int(price) / 100:.2f} ' +\n f'Quantity: {quantity}\\n')", "def add(self, product):\n product_id = str(product.id)\n self.wishlist[product_id] = {'price': str(product.price)}\n self.save()", "def add_product(self, product: Product):\n log.debug(\"Adding a new product\")\n product_parameters = product.to_db()\n try:\n with DBCursor(self.host) as cursor:\n cursor.execute(\"INSERT INTO items VALUES (?, ?, ?, ?, ?)\", (product_parameters['name'].lower(), product_parameters['units'], product_parameters['last_buy'], product_parameters['cost'], product_parameters['price']))\n except sqlite3.IntegrityError:\n log.critical(\"An integrity error was raised. 
Maybe a matching name or id.\")\n raise DatabaseIntegrityError(\"There's a matching name or id already stored.\")\n else:\n log.info(f\"{product.__repr__} was added successfully.\")", "def add_subscription(self):\n schema = schemas.load(schemas.Subscription, self.request)\n subscription = self.customer.add_subscription(**schema)\n self.request.db.flush()\n self.request.response.status_int = 201\n return {'abonnement': subscription}", "def addProduct(self, product):\n self._checkDeleted()\n product._checkDeleted()\n\n productPath = self.productSearch.productClient.product_path(\n project=self.productSearch.projectId, location=self.productSearch.location, product=product.productId)\n\n self.productSearch.productClient.add_product_to_product_set(name=self.productSetPath, product=productPath)", "def add_subscription(self, query, price):\n session = Session()\n sub = self.cart.add_subscription(query, price)\n offers = session.search(query, self.lat, self.lon, self.radius)\n list(sub.handle_offers(offers))\n sub.check_offers()\n self.config_updated()", "def add(self, product, qty):\n product_id = str(product.id)\n\n if product_id in self.basket:\n self.basket[product_id]['qty'] = qty\n else:\n self.basket[product_id] = {'price': str(product.price), 'qty': qty}\n\n self.save()", "def add_products(self, products):\n return [self.add_product(product) for product in set(products)]", "def createProduct(self):\n return _libsbml.Reaction_createProduct(self)", "def scan(self, product_code):\n self.order.add_product(product_code)", "def add_products():\n result = order_obj.add_products(request.forms) \n return result", "def add_to_cart(request, id):\n product = Product.objects.get(id=id)\n\n cart = request.session.get('cart', {})\n quantity = int(request.POST.get('quantity[]'))\n max_product_quantity = int(product.max_product_quantity)\n is_base_product = product.is_base_product\n is_data_product = product.is_data_product\n\n # ====================================================================================================\n # Products can either be BASE or DATA products. A data product CANNOT be bought without purchasing a\n # base product first. The same base product CANNOT be bought twice, however it can be upgraded to one\n # with a higher number of devices / longer subscription duration. Downgrades are possible but are not\n # handled by the system. Customers are referred to the Sales Team instead.\n # ====================================================================================================\n\n if max_product_quantity == 1:\n # ================================================================================================\n # Check if product with a single item quantity is already saved to subscription table in database,\n # as it can only be saved once per parent organisation account.\n # ================================================================================================\n subscription = Subscription.objects.select_related('product').filter(product=id)\n\n # If customer already has this product then do not add it to Cart, but tell customer why!\n # Set quantity to zero.\n if subscription:\n quantity = 0\n messages.add_message(request, messages.INFO,\n 'Product not added to Cart. 
This product is already subscribed to!')\n else:\n if is_base_product:\n # =================================================================================\n # If the Cart product is a base product, then see if a subscription for it already\n # exists.\n # =================================================================================\n try:\n subscription = Subscription.objects.select_related('product').get(product__is_base_product=True)\n if subscription:\n if subscription.product.number_of_devices > product.number_of_devices:\n # =====================================================================================\n # If customer already owns a subscription product with a higher number of devices then\n # do not add this product to the Cart. Set quantity to zero.\n # =====================================================================================\n quantity = 0\n messages.add_message(request, messages.INFO, 'Base product not added to Cart. You already have '\n 'a '\n + str(subscription.product.number_of_devices) +\n '-device subscription on your account! '\n 'Downgrades are only possible through our '\n 'Sales Department. Please contact them on '\n '0800 1234567.')\n else:\n # Otherwise put subscription in the Cart but indicate that product is an upgrade.\n if id not in cart:\n total_quantity = quantity\n devices_count = product.number_of_devices * total_quantity\n messages.add_message(request, messages.INFO, 'Base product upgrade to '\n + str(devices_count) +\n '-devices added to Cart. You currently '\n 'have a '\n + str(subscription.product.number_of_devices) +\n '-device subscription on your account.')\n except ObjectDoesNotExist:\n # if no subscription found, then add to the Cart and inform user.\n messages.error(request, \"Base subscription product added to Cart.\")\n else:\n if is_data_product:\n # =================================================================================\n # If the Cart product is a data product, then check if a subscription for a base\n # product already exists.\n # =================================================================================\n try:\n Subscription.objects.select_related('product').get(product__is_base_product=True)\n except ObjectDoesNotExist:\n # ========================================================================================\n # If no subscription found, then check if a base item already in the Cart. If not in cart\n # either then inform user they will also need to add a base product.\n # ========================================================================================\n for new_id, qty in cart.items():\n product = Product.objects.get(id=new_id)\n if not product.is_base_product:\n messages.error(request, \"Please add a base product to go with this data product. \"\n \"You have no base product registered on system.\")\n\n if id in cart:\n # If product is already in cart..\n if max_product_quantity == 1 and int(product.id) == int(id):\n # If a single item product with the same id already exists in the cart then do not update the quantity.\n messages.add_message(request, messages.INFO, 'Product is already in Cart. 
Only one item of '\n 'this product is allowed!')\n else:\n # Otherwise, UPDATE the quantity of the product in the Cart.\n cart[id] = int(cart[id]) + quantity\n else:\n # Otherwise ADD the product to the Cart along with the quantity selected.\n if quantity != 0:\n cart[id] = cart.get(id, quantity)\n\n request.session['cart'] = cart\n return redirect(reverse('all_products'))", "def add_product(product_dict):\n product = models.Product(**product_dict)\n app.session.merge(product)\n app.session.commit()", "def add_to_basket(self, item):\n self._products.append(item)", "def agregar_producto(self, producto):\n\n self.productos.append(producto)", "def product(self, product):\n self._product = product", "def product(self, product):\n\n self._product = product", "def product(self, product):\n\n self._product = product", "def add_imported(products):\n \n for product in products:\n add_product(product[\"product_name\"], product[\"product_quantity\"], product[\"product_price\"], product[\"date_updated\"])", "def add_product(data):\r\n\r\n with mysql.db_session() as session:\r\n new_product = Product(product_name=data.get('product_name'))\r\n session.add(new_product)\r\n\r\n return response.Response(message='{} successfully added'.format(data))", "def insert_products(self):\n logic = ProductLogic()\n \n try:\n # We create the list of product objects\n products = self.objects_factory.create_product_object_list()\n products = set(products)\n\n for product in products:\n logic.insert(product)\n except:\n print('Il y a eu un problème lors de la récupération des données, veuillez rééssayer')", "def add_new_product():\n name = request.args.get(\"name\")\n email = request.args.get(\"email\")\n description = request.args.get(\"description\")\n price = request.args.get(\"price\")\n recommended = request.args.get(\"recommended\", default=\"n\")\n funcs.add_product(name, price, description, recommended, email)\n return json.dumps({'success': True}), 200, {'ContentType': 'application/json'}", "def add_to_product_display(self, product):\n self.product_displays.add(product)" ]
[ "0.70489734", "0.68279564", "0.66193235", "0.6583986", "0.6343731", "0.6334137", "0.62367344", "0.61137897", "0.6008538", "0.5986317", "0.59798837", "0.59118116", "0.5888556", "0.58400524", "0.58222914", "0.5775853", "0.57492256", "0.5745395", "0.5666695", "0.56510544", "0.56387085", "0.55861443", "0.55749255", "0.5532304", "0.5532304", "0.5515676", "0.55109704", "0.5506918", "0.5504337", "0.55023617" ]
0.71196955
0
Used to remove products from the current subscription. It is encouraged to always use this method when you want to remove a product from a subscription, so you always have control of what happens here. This also creates a product history with the current subscription, product, and date, with the type 'D' (Deactivation).
def remove_product(self, product): try: sp = SubscriptionProduct.objects.get(subscription=self, product=product) sp.delete() except SubscriptionProduct.DoesNotExist: pass else: self.contact.add_product_history(self, product, "D")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeProduct(self, *args):\n return _libsbml.Reaction_removeProduct(self, *args)", "def remove_product(self, product_id):\n\n cur.execute(\"\"\"DELETE FROM catalogue WHERE productid = ? AND vendorname = ?\"\"\",\n (product_id, self.vendorname))", "def delete_product_from_gmc(self, products):\n service = self.gmc_flow()\n for product in products:\n if product.google_mcid:\n product_id = product.google_channel + ':' + product.google_content_language + ':' + product.google_target_country+ ':' + product.google_mcid\n request = service.products().delete(merchantId=product.google_merchant_center_id.name, productId=product_id)\n _logger.info('Product------- %s',product)\n try:\n result = request.execute()\n product.google_mcid = ''\n self.env.cr.commit()\n except errors.HttpError as e:\n error = simplejson.loads(e.content.decode('utf-8'))\n raise UserError(_(\"%s. when deleting %s\") % (error['error'].get('message'), product.name))", "def add_product_history(\n self,\n subscription,\n product,\n new_status,\n campaign=None,\n seller=None,\n override_date=None,\n ):\n # TODO: this method should be migrated to the Subscription model\n\n history_of_this_product = subscription.contactproducthistory_set.filter(product=product)\n\n if history_of_this_product.exists():\n latest_history_of_this_product = history_of_this_product.latest(\"id\")\n else:\n latest_history_of_this_product = None\n\n if latest_history_of_this_product:\n if latest_history_of_this_product.status == new_status:\n # if this is the same event, we will do nothing\n pass\n else:\n # if this is a different event, then we will activate or deactivate accordingly\n ContactProductHistory.objects.create(\n contact=self,\n subscription=subscription,\n date=override_date or date.today(),\n product=product,\n status=new_status,\n seller=seller,\n )\n else:\n ContactProductHistory.objects.create(\n contact=self,\n subscription=subscription,\n date=override_date or date.today(),\n product=product,\n status=new_status,\n seller=seller,\n )", "def remove(self, product):\n product_id = str(product.id)\n if product_id in self.cart:\n del self.cart[product_id]\n self.save()", "def add_remove(self, product):\n product_id = str(product.id)\n if product_id not in self.wishlist:\n self.wishlist[product_id] = {'price': str(product.price_in_dollars)}\n else:\n del self.wishlist[product_id]\n self.save()", "def remove(self, product):\n product_id = str(product.id)\n if product_id in self.wishlist:\n del self.wishlist[product_id]\n self.save()", "def delete(self, product):\n product_id = str(product)\n\n if product_id in self.basket:\n del self.basket[product_id]\n #print(product_id)\n self.save()", "def remove_product(product_id: str) -> None:\n with db_session() as session:\n session.query(Product).filter_by(id=product_id).delete()\n session.commit()", "def remove_from_cart(update, context):\n bot = context.bot\n query = update.callback_query\n\n chat_id = update.effective_chat.id\n user_id = update.effective_user.id\n\n # loads json received from callback_data into dictionary\n ids = json.loads(query.data)\n category_id = ids['category_id']\n product_id = ids['product_id']\n\n # user selected to remove all products from cart\n if product_id == -1:\n # delete user from cart\n del cart[chat_id][user_id]\n # when no other orders where made in this group chat also delete group from cart\n if not bool(cart[chat_id]):\n del cart[chat_id]\n message = \"All products removed from your cart!\"\n # user selected product\n else:\n # reduce the quantity when 
there is more than one order for this product\n if cart[chat_id][user_id][category_id][product_id] > 1:\n cart[chat_id][user_id][category_id][product_id] -= 1\n else:\n # delete product\n del cart[chat_id][user_id][category_id][product_id]\n # delete category when no other products where ordered in this category by user\n if not bool(cart[chat_id][user_id][category_id]):\n del cart[chat_id][user_id][category_id]\n # delete user when no other products where ordered by user\n if not bool(cart[chat_id][user_id]):\n del cart[chat_id][user_id]\n # delete group when no no other users ordered in group chat\n if not bool(cart[chat_id]):\n del cart[chat_id]\n message = \"Removed \" + menu[category_id]['products'][product_id]['name'] + \"from your cart. Your \" \\\n \"cart: \\n\" + \\\n str_user_cart(chat_id, user_id)['message']\n # InlineKeyboard back to start menu or the option to remove more\n keyboard = [[InlineKeyboardButton(\"remove more\", callback_data=str(THREE))],\n [InlineKeyboardButton(\"back to menu\", callback_data=str(ONE))]]\n # change last message send by bot\n bot.edit_message_text(chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text=message,\n reply_markup=InlineKeyboardMarkup(keyboard))\n # notify ConversationHandler of SEVENTH stage\n return SEVENTH", "def delete(self, product):\n product_id = str(product)\n if product_id in self.cart:\n del self.cart[product_id]\n self.save()", "def test_product_remove(self):\n\n flag = \"user\"\n api = \"product.product.remove\"\n current_page = 1\n search_info = json.dumps({\n 'id': 12,\n })\n print('start------------------------>remove')\n result = self.access_api(flag = flag, api = api, current_page = current_page, product_info = search_info)", "def destroy(self, request, *args, **kwargs):\n response = super(ProductViewSet, self).destroy(request, *args, **kwargs)\n response.data = {'message': 'Producto ha sido eliminado'}\n return response", "def delete(self, user, product):\n\n cart_product = CartProduct.delete(user, product)\n CartProductsView.delete(cart_product)", "def woo_unpublished(self):\n instance = self.woo_instance_id\n woo_common_log_obj = self.env[\"common.log.book.ept\"]\n common_log_line_obj = self.env[\"common.log.lines.ept\"]\n model_id = common_log_line_obj.get_model_id('woo.product.template.ept')\n woo_common_log_id = woo_common_log_obj.create(\n {\n 'type':'import',\n 'module':'woocommerce_ept',\n 'woo_instance_id':instance.id,\n 'active':True,\n })\n wcapi = instance.woo_connect()\n if self.woo_tmpl_id:\n data = {'status':'draft'}\n if instance.woo_version == 'v3':\n data = {'product':data}\n res = wcapi.put('products/%s' % (self.woo_tmpl_id), data)\n # res = wcapi.post('products/batch', {'update': [data]})\n if not isinstance(res, requests.models.Response):\n message = \"Unpublish Product \\nResponse is not in proper format :: %s\" % (res)\n common_log_line_obj.woo_product_export_log_line(message, model_id,\n woo_common_log_id, False)\n return True\n if res.status_code not in [200, 201]:\n common_log_line_obj.woo_product_export_log_line(res.content, model_id,\n woo_common_log_id, False)\n return True\n try:\n response = res.json()\n except Exception as e:\n message = \"Json Error : While Unpublish Product with id %s from WooCommerce \" \\\n \"for instance %s. 
\\n%s\" % (self.woo_tmpl_id, instance.name, e)\n common_log_line_obj.woo_product_export_log_line(message, model_id,\n woo_common_log_id, False)\n return False\n if instance.woo_version == 'v3':\n errors = response.get('errors', '')\n if errors:\n message = errors[0].get('message')\n common_log_line_obj.woo_product_export_log_line(message, model_id,\n woo_common_log_id, False)\n\n else:\n self.write({'website_published':False})\n else:\n if response.get('data', {}) and response.get('data', {}).get('status') not in [200,\n 201]:\n message = response.get('message')\n common_log_line_obj.woo_product_export_log_line(message, model_id,\n woo_common_log_id, False)\n\n else:\n self.write({'website_published':False})\n return True", "def test_03_product_delete(self):\n product = self.create_product()\n products = self.product_obj.search([])\n self.assertIn(product, products)\n product.unlink()\n self.assertNotIn(product.exists(), products)", "def unlink(self):\n if self.on_magento:\n sku = self.default_code\n magento = self.env['magento.backend'].search([('id', '!=', False)], limit=1, order='id DESC')\n product = Product(magento.web_url, magento.access_token, True)\n if product:\n # Delete magento product\n try:\n product.delete_magento_product(sku)\n product.delete_magento_product_all(sku)\n except Exception as e:\n 'Sh@dowWalker'\n result = super(MagentoSyncOdoo, self).unlink()\n return result", "def delete(self, product):\n product_id = str(product)\n\n\n if product_id in self.sepet:\n del self.sepet[product_id]\n print(product_id)\n self.session.modified=True", "def remove_product_from_cart(user_name, product_id, quantity, store_name):\n user_name = auth.get_username_from_hash(user_name)\n user_handler.remove_product(user_name, store_name, product_id, quantity)\n users.remove_from_cart(user_name, product_id, quantity, store_name)", "def delete_product(self, last_modified, product_id):\n delete_row = \"UPDATE products SET delete_status = TRUE, last_modified = '{}' WHERE product_id = '{}';\"\\\n .format(last_modified, product_id)\n self.cursor.execute(delete_row, (last_modified, product_id))\n self.connection.commit()", "def remove(self, prod1_name, prod2_name):\n try:\n self._purchased.remove({PROD1: prod1_name, PROD2: prod2_name},\n True\n )\n self._purchased.remove({PROD1: prod2_name, PROD2: prod1_name},\n True\n )\n print('remove: succeeded')\n return True\n except pyerrors.OperationFailure as ex:\n print(ex.value)\n except pyerrors.PyMongoError as ex:\n print(ex.value)\n print('remove: failed')\n return False", "def unlink(self):\n raise ValidationError(_(\"Products may not be deleted. Please archive them instead.\"))", "def remove_item(self, product):\r\n if product in self.items_in_cart:\r\n del self.items_in_cart[product]\r\n print(product + \" removed.\")\r\n else:\r\n print(product + \" is not in the cart.\")", "def add_to_cart(request, id):\n product = Product.objects.get(id=id)\n\n cart = request.session.get('cart', {})\n quantity = int(request.POST.get('quantity[]'))\n max_product_quantity = int(product.max_product_quantity)\n is_base_product = product.is_base_product\n is_data_product = product.is_data_product\n\n # ====================================================================================================\n # Products can either be BASE or DATA products. A data product CANNOT be bought without purchasing a\n # base product first. 
The same base product CANNOT be bought twice, however it can be upgraded to one\n # with a higher number of devices / longer subscription duration. Downgrades are possible but are not\n # handled by the system. Customers are referred to the Sales Team instead.\n # ====================================================================================================\n\n if max_product_quantity == 1:\n # ================================================================================================\n # Check if product with a single item quantity is already saved to subscription table in database,\n # as it can only be saved once per parent organisation account.\n # ================================================================================================\n subscription = Subscription.objects.select_related('product').filter(product=id)\n\n # If customer already has this product then do not add it to Cart, but tell customer why!\n # Set quantity to zero.\n if subscription:\n quantity = 0\n messages.add_message(request, messages.INFO,\n 'Product not added to Cart. This product is already subscribed to!')\n else:\n if is_base_product:\n # =================================================================================\n # If the Cart product is a base product, then see if a subscription for it already\n # exists.\n # =================================================================================\n try:\n subscription = Subscription.objects.select_related('product').get(product__is_base_product=True)\n if subscription:\n if subscription.product.number_of_devices > product.number_of_devices:\n # =====================================================================================\n # If customer already owns a subscription product with a higher number of devices then\n # do not add this product to the Cart. Set quantity to zero.\n # =====================================================================================\n quantity = 0\n messages.add_message(request, messages.INFO, 'Base product not added to Cart. You already have '\n 'a '\n + str(subscription.product.number_of_devices) +\n '-device subscription on your account! '\n 'Downgrades are only possible through our '\n 'Sales Department. Please contact them on '\n '0800 1234567.')\n else:\n # Otherwise put subscription in the Cart but indicate that product is an upgrade.\n if id not in cart:\n total_quantity = quantity\n devices_count = product.number_of_devices * total_quantity\n messages.add_message(request, messages.INFO, 'Base product upgrade to '\n + str(devices_count) +\n '-devices added to Cart. You currently '\n 'have a '\n + str(subscription.product.number_of_devices) +\n '-device subscription on your account.')\n except ObjectDoesNotExist:\n # if no subscription found, then add to the Cart and inform user.\n messages.error(request, \"Base subscription product added to Cart.\")\n else:\n if is_data_product:\n # =================================================================================\n # If the Cart product is a data product, then check if a subscription for a base\n # product already exists.\n # =================================================================================\n try:\n Subscription.objects.select_related('product').get(product__is_base_product=True)\n except ObjectDoesNotExist:\n # ========================================================================================\n # If no subscription found, then check if a base item already in the Cart. 
If not in cart\n # either then inform user they will also need to add a base product.\n # ========================================================================================\n for new_id, qty in cart.items():\n product = Product.objects.get(id=new_id)\n if not product.is_base_product:\n messages.error(request, \"Please add a base product to go with this data product. \"\n \"You have no base product registered on system.\")\n\n if id in cart:\n # If product is already in cart..\n if max_product_quantity == 1 and int(product.id) == int(id):\n # If a single item product with the same id already exists in the cart then do not update the quantity.\n messages.add_message(request, messages.INFO, 'Product is already in Cart. Only one item of '\n 'this product is allowed!')\n else:\n # Otherwise, UPDATE the quantity of the product in the Cart.\n cart[id] = int(cart[id]) + quantity\n else:\n # Otherwise ADD the product to the Cart along with the quantity selected.\n if quantity != 0:\n cart[id] = cart.get(id, quantity)\n\n request.session['cart'] = cart\n return redirect(reverse('all_products'))", "def test_delete_subscription(self):\n pass", "def remove_item(self, product):\n if product in self.items_in_cart:\n del self.items_in_cart[product]\n print (product + \" removed.\")\n else:\n print (product + \" is not in the cart.\")", "def clear_products(self):\n self.product_displays.clear()", "def delete_product(driver, login_action, open_products_page, products_page):\n products_page.delete_product()\n driver.refresh()", "def deleteProduct(request,productId):\n deleteObj = Collection()\n deleteObj.id=productId\n productBll.deleteProducts(deleteObj)\n return HttpResponseRedirect('/admin/product/list/')", "def remove_item(self, product):\n if product in self.items_in_cart:\n del self.items_in_cart[product]\n print product + \" removed.\"\n else:\n print product + \" is not in the cart.\"" ]
[ "0.6247508", "0.56590396", "0.55963165", "0.5595322", "0.5497862", "0.54530734", "0.54368997", "0.5419161", "0.52681804", "0.5264107", "0.52634674", "0.5261236", "0.52172333", "0.51957965", "0.5186334", "0.5185944", "0.5162035", "0.5155476", "0.5127499", "0.51182806", "0.5112094", "0.50988835", "0.50848985", "0.5071928", "0.50712293", "0.50559413", "0.5055001", "0.5034643", "0.5019725", "0.5002918" ]
0.7667853
0
Gets the billing name for the contact. If it doesn't have one, then the contact's name is returned. Used primarily in invoicing.
def get_billing_name(self): if self.billing_name: return self.billing_name else: return self.contact.name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def contact_name(self) -> str:\n return pulumi.get(self, \"contact_name\")", "def billing_contact(self):\n return self._billing_contact", "def billing_contact_user(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"billing_contact_user\")", "def customer_name(self):\n return self._customer_name", "def billing_contact_user(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"billing_contact_user\")", "def billing_contact_user(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"billing_contact_user\")", "def get_billing_phone(self):\n if self.billing_phone:\n return self.billing_phone\n else:\n return self.contact.phone", "def _get_contact_first_name(app):\n name = app.get(CONTACT_NAME_KEY)\n if name:\n return ' {}'.format(name.split(' ')[0])", "def billing_account(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"billing_account\")", "def billing_account(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"billing_account\")", "def billing_account(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"billing_account\")", "def billing_account(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"billing_account\")", "def billing_account(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"billing_account\")", "def get_billing_address(self):\n if self.billing_address:\n return self.billing_address.address_1\n else:\n subscription_products = SubscriptionProduct.objects.filter(\n subscription=self\n )\n addresses = [sp.address for sp in subscription_products if sp.address]\n if not addresses:\n if self.contact.email:\n return self.contact.email\n else:\n return None\n else:\n return addresses[0].address_1", "def get_name(self):\n return self.card_name", "def billing_account(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"billing_account\")", "def billing_account(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"billing_account\")", "def contact_full_name(self):\n first = self.contact_first_name\n last = self.contact_last_name\n if first and last:\n return f'{first} {last}'\n return first or last", "def GetName(self):\n if self.compound.preferred_name:\n return self.compound.preferred_name\n if self._name:\n return self._name\n return str(self.compound.FirstName())", "def name(self, cname: str)->str:\n return self.like(cname, mx=1)[0]['cname']", "def get_name(self, field_name='NAME'):\n return self.get_default(field_name)", "def company_name(self):\n if \"companyName\" in self._prop_dict:\n return self._prop_dict[\"companyName\"]\n else:\n return None", "def account_name(self):\n\n name1 = self.business_trade_name\n name2 = self.business_name\n\n if not name1 and not name2:\n return 'NAME MISSING - ' + self.license_number\n elif name1 and not name2:\n return name1\n elif name2 and not name1:\n return name2\n else:\n return name1 + ' (' + name2 + ')'", "def __getCompanyName(parsed: BeautifulSoup) -> str:\n\n # Company name container\n name_container = parsed.find('span', class_='companyName')\n\n # Extracting raw text elements\n name_raw_text = [s for s in name_container.children if isinstance(s, str)]\n\n # Getting name (first raw text instance)\n return name_raw_text[0].strip()", "def get_name(self, asset):\n return self.get_name_and_meta(asset)[0]", "def display_name(self):\n if self.email is None:\n if self.first_name is None and self.last_name is None:\n return \"\"\n\n if self.first_name is None and self.last_name is None:\n return self.email\n\n if self.last_name is None:\n return 
self.first_name\n\n if self.first_name is None:\n return self.last_name\n\n return \"{} {}\".format(self.first_name, self.last_name)", "def get_short_name(self) -> str:\n return self.first_name", "def get_full_name(self):\n return self.name + \" \" + self.email", "def get_name(self) -> str:\n\n return self.name_", "def get_first_name(self) -> str:\n return self.first_name" ]
[ "0.72753066", "0.6679072", "0.64184", "0.6281225", "0.6215214", "0.6215214", "0.61888754", "0.61753666", "0.61308825", "0.61308825", "0.61308825", "0.60813963", "0.60813963", "0.6053672", "0.59911233", "0.59021527", "0.59021527", "0.5865127", "0.5850656", "0.58192235", "0.58147585", "0.5777968", "0.57745045", "0.5744868", "0.5725946", "0.57055384", "0.5692839", "0.56853414", "0.56137115", "0.5605837" ]
0.86943054
0
Gets the billing phone for the contact. If it doesn't have one, then the contact's phone is returned. Used primarily in invoicing.
def get_billing_phone(self): if self.billing_phone: return self.billing_phone else: return self.contact.phone
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def billing_contact(self):\n return self._billing_contact", "def getPhone(self):\n return self.phone", "def phone(self):\n return self._phone", "def phone(self):\n return self._phone", "def phone(self):\n return self._phone", "def phone(self):\n\n return self._phone", "def phone(self) -> str:\n return pulumi.get(self, \"phone\")", "async def get_phone(self):\n\n e = await self.request.request(url='https://accountinformation.roblox.com/v1/phone', method='get')\n return e", "def phone_number(self):\n\n return self._phone_number", "def get_billing_address(self):\n if self.billing_address:\n return self.billing_address.address_1\n else:\n subscription_products = SubscriptionProduct.objects.filter(\n subscription=self\n )\n addresses = [sp.address for sp in subscription_products if sp.address]\n if not addresses:\n if self.contact.email:\n return self.contact.email\n else:\n return None\n else:\n return addresses[0].address_1", "def phone_number(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"phone_number\")", "def get_billing_name(self):\n if self.billing_name:\n return self.billing_name\n else:\n return self.contact.name", "def business_phone(self):\n return self._business_phone", "def personal_phone(self):\n return self._personal_phone", "def phone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"phone\")", "def phone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"phone\")", "def phone_number(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"phone_number\")", "def phone_number(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"phone_number\")", "def person_phone_number(self):\n return self._person_phone_number", "def phone(self):\n\n\t\telement = Element(driver=self.driver,\n\t\t explicit_wait_time=self.explicit_wait_time,\n\t\t locator=BillPayPageLocator.PHONE_INPUT)\n\t\treturn element.element_value", "def number(self):\n return str(self._phone)", "def mobile_phone(self):\n if \"mobilePhone\" in self._prop_dict:\n return self._prop_dict[\"mobilePhone\"]\n else:\n return None", "def mobile_phone(self):\n if \"mobilePhone\" in self._prop_dict:\n return self._prop_dict[\"mobilePhone\"]\n else:\n return None", "def billing_contact_user(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"billing_contact_user\")", "def telephone(self):\n return self._telephone", "def phone_number_detail(self):\n return self._phone_number_detail", "def get_billing_document(self):\n if self.rut:\n return self.rut\n elif self.billing_id_doc:\n return self.billing_id_doc\n else:\n return self.contact.id_document", "def billing_contact_user(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"billing_contact_user\")", "def billing_contact_user(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"billing_contact_user\")", "def get_phone(self, list_item):\n phone = list_item.find('span', {'class': 'biz-phone'})\n return phone.get_text().strip()" ]
[ "0.72117543", "0.69185233", "0.6816935", "0.6816935", "0.6816935", "0.68060094", "0.67518926", "0.6579222", "0.65608084", "0.6559647", "0.63917357", "0.6354833", "0.6352409", "0.63150704", "0.62963", "0.62963", "0.62078226", "0.62078226", "0.61214375", "0.6061901", "0.6044865", "0.60283124", "0.60283124", "0.5927084", "0.59253335", "0.5913294", "0.58795357", "0.58722645", "0.58722645", "0.5837429" ]
0.8716133
0
Gets the billing id_document for the contact. It chooses between rut, billing_id_doc and the contact's id_document, in that order. Used primarily in invoicing.
def get_billing_document(self): if self.rut: return self.rut elif self.billing_id_doc: return self.billing_id_doc else: return self.contact.id_document
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def contact_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"contact_id\")", "def contact_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"contact_id\")", "def _getcontact(id):\n contact = {}\n idwrapper = {}\n \n try:\n contact[\"name\"] = r.get(\"uid:\" + id + \":name\")\n contact[\"address\"] = r.get(\"uid:\" + id + \":address\")\n contact[\"phone\"] = r.get(\"uid:\" + id + \":phone\")\n contact[\"email\"] = r.get(\"uid:\" + id + \":email\")\n idwrapper[id] = contact\n\n return idwrapper\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise", "def doc_id(self):\n return self._id", "def get_document_id(file_name):\n logger.debug('Function Successful: % s',\n 'get_document_id: get_document_id successfully called from get_doc_attributes', extra=d)\n logger.info('Retrieving document ID...')\n\n doc,id,ending = file_name.split(\".\")\n\n logger.debug('Returning: %s',\n 'get_document_id: returning document_id', extra=d)\n logger.info('Document ID successfully retrieved')\n return id", "def get_doc_id(self):\n return self.__doc_id", "def _id(self, document):\n pass", "def get_id(doc = None, cursor = None):\n\tif cursor is None and doc is not None:\n\t\treturn doc['id']\n\telif doc is None and cursor is not None:\n\t\tallids = list()\n\t\tfor thisdoc in cursor:\n\t\t\tallids.append(thisdoc['id'])\n\t\treturn allids\n\telse:\n\t\tprint \"Supply any one argument only!\"", "def resourceDocumentId(self, resource: Resource) -> str:", "def doc_id(self):\n return self.properties.get('DocId', None)", "def billing_contact(self):\n return self._billing_contact", "def contact_id_for(self, identifier, default=None):\n lookup_key = \"phones\"\n if \"@\" in identifier:\n lookup_key = \"emails\"\n\n def matcher(item):\n \"\"\"Returns True iff the identifier matches\"\"\"\n hit = item.get(lookup_key)\n if not isinstance(hit, list):\n return hit == identifier\n return any([el for el in hit if el == identifier])\n\n candidates = [\n item.get(\"id\", default) for item in self.contact_details if matcher(item)\n ]\n if not candidates:\n return default\n return candidates[0]", "def get(self,id) -> Contact:\n data=ContactSet.query.get(id)\n if data:\n contact = Contact(data.id,data.name,data.birthdate,data.contact_type,data.description, data.phone)\n return contact\n return None", "def identifier(self):\n return self.contact.identifier", "def get_document_by_id(document_id):\n return Documents.query.filter_by(id=document_id).first()", "def get_customer_id(self, customer):\n\n\t\t# connect to the database\n\t\twith sqlite3.connect(\"bangazon_cli.db\") as bang:\n\t\t\tcursor = bang.cursor()\n\n\t\t\ttry: \n\t\t\t\t# select customer_id that matches the customer's phone number\n\t\t\t\tcursor.execute(\"SELECT customer_id FROM Customers c WHERE c.phone_number ='{}'\".format(customer.get_phone_number()))\n\n\t\t\t\t# return the data\n\t\t\t\tdata = cursor.fetchall()\n\t\t\t\t\n\t\t\t\tprint(\"Customer_id\", data[0][0])\n\t\t\t\treturn data[0][0]\n\t\t\t\t\n\n\t\t\texcept sqlite3.OperationalError:\n\t\t\t\tprint(\"NOPE.\")", "def get_customer_id(self, customer):\n\n\t\t# connect to the database\n\t\twith sqlite3.connect(\"bangazon_cli.db\") as bang:\n\t\t\tcursor = bang.cursor()\n\n\t\t\ttry: \n\t\t\t\t# select customer_id that matches the customer's phone number\n\t\t\t\tcursor.execute(\"SELECT customer_id FROM Customers c WHERE c.phone_number ='{}'\".format(customer.get_phone_number()))\n\n\t\t\t\t# return the data\n\t\t\t\tdata = 
cursor.fetchall()\n\t\t\t\t\n\t\t\t\tprint(\"Customer_id\", data[0][0])\n\t\t\t\treturn data[0][0]\n\t\t\t\t\n\n\t\t\texcept sqlite3.OperationalError:\n\t\t\t\tprint(\"NOPE.\")", "def test_document_by_id_endpoint(self):\n\n # Grab a document by the ID.\n federal_document = self.federal_client.document_by_id(\n document_id='2020-17469',\n fields='all'\n )\n\n # Make sure we have a response.\n self.assertIsNotNone(federal_document)\n\n # And the document ID matches.\n self.assertEqual(federal_document['document_number'], '2020-17469')", "def _get_billing_account_id():\n org_client = boto3.client(\"organizations\")\n response = org_client.describe_organization()\n return response[\"Organization\"][\"MasterAccountId\"]", "def identity_document_number(self) -> str:\n return self._identity_document_number", "def userDocumentId(self, id: str) -> str:", "def contact(self, contactid):\r\n return contacts.Contact(self, contactid)", "def getContactById(self, id):\n for contact in self.contacts:\n if contact.id == id:\n return contact\n if self.profile:\n if self.profile.id == id:\n return self.profile\n\n return None", "def get_request(self, request_id):\n doc_id = bson.objectid.ObjectId(request_id)\n coll = self._db.get_collection(COLLECTION_REQUEST)\n doc = coll.find_one(filter={\n '_id': doc_id\n })\n return doc", "def receipt_id(charge):\n # Fetch the receipt to generate a receipt ID\n receipt_url = charge.get(\"receipt_url\", None)\n assert receipt_url is not None\n fetch_invoice(receipt_url)\n charge.refresh()\n receipt_number = charge.get(\"receipt_number\")\n assert receipt_number is not None\n return receipt_number", "def get_document_number(self, txt_line, inv_type):\n number = 0\n if txt_line.invoice_id.type in ['in_invoice', 'in_refund']:\n if not txt_line.invoice_id.supplier_invoice_number:\n raise exceptions.except_orm(\n _('Invalid action !'),\n _(\"Unable to make txt file, because the bill has no\"\n \" reference number free!\"))\n else:\n number = self.get_number(\n txt_line.invoice_id.supplier_invoice_number.strip(),\n inv_type, 20)\n elif txt_line.invoice_id.number:\n number = self.get_number(\n txt_line.invoice_id.number.strip(), inv_type, 20)\n return number", "def getDocumentId(self): #$NON-NLS-1$\r", "def get_comment_id(self):\n return int(self.request.get('cid'))", "async def get_contact(dbcon: DBConnection, id: int) -> Any: # Use any because optional returns suck.\n q = \"\"\"select id, name, email, phone, active from contacts where id=%s\"\"\"\n q_args = (id,)\n row = await dbcon.fetch_row(q, q_args)\n contact = None\n if row:\n contact = object_models.Contact(*row)\n return contact", "def test_companies_company_id_data_bill_credit_notes_bill_credit_note_id_get(self):\n pass" ]
[ "0.5683501", "0.5683501", "0.56735724", "0.5603914", "0.5582864", "0.5566173", "0.55428797", "0.55278647", "0.55261034", "0.54602784", "0.5372517", "0.53609663", "0.5273999", "0.52701735", "0.52635944", "0.52564985", "0.52564985", "0.52452254", "0.5202236", "0.5181843", "0.51733804", "0.51675683", "0.5155141", "0.5134276", "0.5096852", "0.50947213", "0.5078534", "0.50679535", "0.5024494", "0.5008244" ]
0.78684276
0
Gets the billing address for the contact. If there is none set, then it will return the first address. It will return None in the case that there's no available address in any products of the subscription. Used primarily in invoicing.
def get_billing_address(self): if self.billing_address: return self.billing_address.address_1 else: subscription_products = SubscriptionProduct.objects.filter( subscription=self ) addresses = [sp.address for sp in subscription_products if sp.address] if not addresses: if self.contact.email: return self.contact.email else: return None else: return addresses[0].address_1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def billing_contact(self):\n return self._billing_contact", "def get_address(self):\n if self.get_entity: # needs an entity to work\n if self.building:\n address = self.get_entity.get_institutional_address()\n address.extend(self.building.get_postal_address())\n return address\n else:\n return self.get_entity.get_address()", "def get_billing_phone(self):\n if self.billing_phone:\n return self.billing_phone\n else:\n return self.contact.phone", "def billing_account(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"billing_account\")", "def billing_account(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"billing_account\")", "def get_address(self):\n if self.address:\n return self.address", "def getaccountaddress(self, account):\n return self.proxy.getaccountaddress(account)", "def get_billing_city(self):\n if self.billing_address and self.billing_address.city:\n return self.billing_address.city\n else:\n sub_prods = SubscriptionProduct.objects.filter(subscription=self)\n addresses = [sp.address for sp in sub_prods]\n if addresses:\n return addresses[0].city\n else:\n return \"\"", "def get_address(self):\n entity = self\n if entity.abstract_entity:\n entity = self.get_real_ancestor()\n if entity:\n address = entity.get_institutional_address()\n building = entity.get_building()\n if building:\n if entity.building_recapitulates_entity_name: \n address.extend(building.get_postal_address()[1:])\n else:\n address.extend(building.get_postal_address())\n return address", "def get_address(self, ):\n return self.get_parameter('address')", "def get_billing_name(self):\n if self.billing_name:\n return self.billing_name\n else:\n return self.contact.name", "def get_address(self):\n return self.address.line[0]+\", \"+self.address.city+\", \"+self.address.state+\", \"+self.address.country", "def billing_account(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"billing_account\")", "def billing_account(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"billing_account\")", "def billing_account(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"billing_account\")", "def address(self) -> Optional[str]:\n return pulumi.get(self, \"address\")", "def address(self) -> Optional[str]:\n return pulumi.get(self, \"address\")", "def mailing_address(self):\n if \"mailingAddress\" in self._prop_dict:\n if isinstance(self._prop_dict[\"mailingAddress\"], OneDriveObjectBase):\n return self._prop_dict[\"mailingAddress\"]\n else :\n self._prop_dict[\"mailingAddress\"] = PhysicalAddress(self._prop_dict[\"mailingAddress\"])\n return self._prop_dict[\"mailingAddress\"]\n\n return None", "def billing_account(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"billing_account\")", "def billing_account(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"billing_account\")", "def get_address(self, address=None):\n return self.__get_addr_grp('address', address)", "def paymentAddress(self):\n return self.selectedAccount.paymentAddress()", "def _get_address(self, soup):\n street, city, state, zipcode = None, None, None, None\n try:\n # property detail tag\n street = soup.find('div', class_='main-address').get_text().strip()\n # find address tag\n address = soup.find('div', class_='c-address')\n \n # pattern for the address in this website\n locality = address.find_all('span', class_='locality')\n city = locality[0].get_text().strip()\n if len(locality) > 1:\n city = locality[1].get_text().strip()\n state = address.find('span', class_='region').get_text().strip()\n 
zipcode = address.find('span', class_='postal-code').get_text().strip()\n return street, city, state, zipcode\n except:\n return street, city, state, zipcode", "def shipping_address(self):\n return Address(self._dict.get('shipping_address'))", "def mailing_address(self):\n registered_office = db.session.query(Office).filter(Office.business_id == self.id).\\\n filter(Office.office_type == 'registeredOffice').one_or_none()\n if registered_office:\n return registered_office.addresses.filter(Address.address_type == 'mailing')\n\n return db.session.query(Address).filter(Address.business_id == self.id). \\\n filter(Address.address_type == Address.MAILING)", "def get_address(self) -> Optional[str]:\n raise NotImplementedError()", "def address1(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"address1\")", "def billing_contact_user(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"billing_contact_user\")", "def address1(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"address1\")", "def address1(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"address1\")" ]
[ "0.6749646", "0.6549209", "0.64370364", "0.63437635", "0.63437635", "0.62060183", "0.60846496", "0.6025994", "0.5972726", "0.59543216", "0.5943322", "0.5896184", "0.5877524", "0.5877524", "0.5877524", "0.5830147", "0.5830147", "0.5825068", "0.5797176", "0.5797176", "0.57224166", "0.5696226", "0.56654984", "0.56633604", "0.5625486", "0.5603308", "0.5578943", "0.55745846", "0.55602175", "0.55602175" ]
0.8461856
0
Gets the billing state for the contact. If it doesn't have one, it will choose the state of the contact's first address. Used primarily in invoicing.
def get_billing_state(self): if self.billing_address and self.billing_address.state: return self.billing_address.state else: sub_prods = SubscriptionProduct.objects.filter(subscription=self) addresses = [sp.address for sp in sub_prods] if addresses: return addresses[0].state else: return ""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def billing_contact(self):\n return self._billing_contact", "def get_billing_name(self):\n if self.billing_name:\n return self.billing_name\n else:\n return self.contact.name", "def get_billing_phone(self):\n if self.billing_phone:\n return self.billing_phone\n else:\n return self.contact.phone", "async def getContactState(self):\n contact_state = await self.director.getItemVariableValue(\n self.item_id, \"ContactState\"\n )\n return bool(contact_state)", "def get_billing_address(self):\n if self.billing_address:\n return self.billing_address.address_1\n else:\n subscription_products = SubscriptionProduct.objects.filter(\n subscription=self\n )\n addresses = [sp.address for sp in subscription_products if sp.address]\n if not addresses:\n if self.contact.email:\n return self.contact.email\n else:\n return None\n else:\n return addresses[0].address_1", "def default_billing(self):\n return self._default_billing", "def billing(self):\n return self._billing", "def billing_info(self):\n return self._billing_info", "def contactstate(self):\n return self.chain.contactstate()", "def billing_info(self):\r\n return BillingInfo(self)", "def billing_account(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"billing_account\")", "def billing_account(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"billing_account\")", "def state(self):\n state_code = MISSING\n try:\n country_code = self.status.place[\"country_code\"]\n except TypeError:\n return MISSING\n place_type = self.status.place[\"place_type\"]\n if country_code == \"US\" and place_type == \"city\":\n full_name = self.status.place[\"full_name\"]\n state_code = full_name.split(\",\")[-1].strip().upper()\n state_code = state_code if state_code in valid_state_codes else MISSING\n else:\n pass\n return state_code", "def get_billing_city(self):\n if self.billing_address and self.billing_address.city:\n return self.billing_address.city\n else:\n sub_prods = SubscriptionProduct.objects.filter(subscription=self)\n addresses = [sp.address for sp in sub_prods]\n if addresses:\n return addresses[0].city\n else:\n return \"\"", "def billing_account(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"billing_account\")", "def billing_account(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"billing_account\")", "def billing_account(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"billing_account\")", "def billing_account(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"billing_account\")", "def billing_account(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"billing_account\")", "def get_active_contact(self):\n list_contact = Contact.objects.filter(phonebook__campaign=self.id,\n status=CONTACT_STATUS.ACTIVE).all()\n if not list_contact:\n return False\n return list_contact", "def billing_contact_user(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"billing_contact_user\")", "def billing_contact_user(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"billing_contact_user\")", "def billing_contact_user(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"billing_contact_user\")", "def get_state(self):\n\n if self.prePOZ_numbus is None:\n prePOZ = [0, 0]\n else:\n prePOZ = [len(self.prePOZ_bus_checkout_time_dict.keys()), self._find_first_checkout_time_in_prePOZ()]\n\n return self.state", "def state(self):\n return (\n self.chainso.data.get(\"confirmed_balance\")\n if self.chainso is not None\n else None\n )", "def get_device_state(self, 
device_name):\n device_info = self.data.get(device_name)\n return device_info.get('state') if device_info else None", "def get_state(self):\n return ONEUP_STATES[self.state][0]", "def state(self):\n state = self._resource.get('state', self.default_state)\n\n if state in State:\n return state\n else:\n return getattr(State, state)", "def get_billing_data_by_priority(self):\n result = {}\n product = self.get_first_product_by_priority()\n if product:\n sp = self.subscriptionproduct_set.filter(product=product).first()\n if sp.address:\n result = {\n \"route\": sp.route_id,\n \"order\": sp.order,\n \"address\": sp.address.address_1 or sp.subscription.contact.email,\n \"state\": sp.address.state,\n \"city\": sp.address.city,\n \"name\": self.get_billing_name(),\n }\n if not result:\n if getattr(settings, \"FORCE_DUMMY_MISSING_BILLING_DATA\", False):\n result = {}\n return result", "def state_string(self):\n return AddressStates._to_string(self.state)" ]
[ "0.6733393", "0.626571", "0.6247604", "0.6238033", "0.62253356", "0.60487026", "0.59813064", "0.59758586", "0.5958007", "0.59036", "0.5803874", "0.5803874", "0.5647028", "0.54867643", "0.54809517", "0.54809517", "0.5471374", "0.5471374", "0.5471374", "0.5344955", "0.52707523", "0.52707523", "0.5222834", "0.51821285", "0.51556325", "0.5135026", "0.5058686", "0.50102717", "0.50045097", "0.49932876" ]
0.79510605
0
Gets the billing city for the contact. If it doesn't have one, it will choose the contact's first address' city. Used primarily in invoicing.
def get_billing_city(self):
    if self.billing_address and self.billing_address.city:
        return self.billing_address.city
    else:
        sub_prods = SubscriptionProduct.objects.filter(subscription=self)
        addresses = [sp.address for sp in sub_prods]
        if addresses:
            return addresses[0].city
        else:
            return ""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def city(self):\n if \"city\" in self._prop_dict:\n return self._prop_dict[\"city\"]\n else:\n return None", "def city(self):\n if \"city\" in self._prop_dict:\n return self._prop_dict[\"city\"]\n else:\n return None", "def city(self):\n\n try:\n city = self.status.place[\"full_name\"].strip(r\",[A-Z ]\")\n except TypeError:\n city = None\n if not city:\n try:\n city = self.metadata.as_dict.get(\"user_city\").get(\"google_geocoding\")\n except (TypeError, AttributeError):\n city = None\n return city", "def city(self) -> Optional[str]:\n return pulumi.get(self, \"city\")", "def city(self) -> str:\n return pulumi.get(self, \"city\")", "def city(self):\n return self._city", "def city(self):\n return self._city", "def city(self):\n return self._city", "def city(self):\n return self._city", "def city(self):\n return self._city", "def city(self):\n # type: () -> string_types\n return self._city", "def city(self):\n\t\telement = Element(driver=self.driver,\n\t\t explicit_wait_time=self.explicit_wait_time,\n\t\t locator=BillPayPageLocator.CITY_INPUT)\n\t\treturn element.element_value", "def billing_contact(self):\n return self._billing_contact", "def get_billing_phone(self):\n if self.billing_phone:\n return self.billing_phone\n else:\n return self.contact.phone", "def get_billing_address(self):\n if self.billing_address:\n return self.billing_address.address_1\n else:\n subscription_products = SubscriptionProduct.objects.filter(\n subscription=self\n )\n addresses = [sp.address for sp in subscription_products if sp.address]\n if not addresses:\n if self.contact.email:\n return self.contact.email\n else:\n return None\n else:\n return addresses[0].address_1", "def GetCity():\n IPinfoRequest = requests.get('https://ipinfo.io/')\n IPinfo = IPinfoRequest.json()\n City = IPinfo['city']\n return(City)", "def get_place_details(self):\n self.google_api_url = 'https://maps.googleapis.com/maps/api/place/details/json?placeid={}&key={}'.format(self.place_id, api_key)\n self.r = requests.get(url=self.google_api_url)\n self.data = self.r.json()\n self.address_components = self.data['result']['address_components']\n\n for i in self.address_components:\n if i['types'][0] == 'locality':\n self.city = (i['long_name'])\n return (self.city)\n else:\n pass", "def city(self, instance):\r\n return instance.user.profile.city", "def extract_city(full_address):\n full_address = full_address.strip()\n last_comma_index = full_address.rindex(\",\")\n mid_comma_index = full_address.rindex(\",\", 0, last_comma_index)\n city = full_address[mid_comma_index + 1 : last_comma_index]\n city = city.strip()\n return city", "def get_datacenter_city(self, node):\n if self._datacenter_cache is None:\n self.populate_datacenter_cache()\n location = self._datacenter_cache[node.datacenter_id].location\n location = location.lower()\n location = location.split(\",\")[0]\n return location", "def get_city(self, name: str):\n key = name.lower()\n try:\n return self._cities[key]\n except KeyError:\n city = City(name=name, state=self)\n self._cities[key] = city\n return city", "def get_city(address):\n geolocator = Nominatim(user_agent=\"specify_your_app_name_here\")\n \n while True:\n try:\n location = geolocator.geocode(address)\n break\n except Exception:\n None\n \n city = citipy.nearest_city(location.latitude, location.longitude)\n return [city.city_name.title(), city.country_code.title()]", "def get_billing_name(self):\n if self.billing_name:\n return self.billing_name\n else:\n return self.contact.name", "def get_city_country(city, 
country, population=''):\n\tif population:\n\t\tcity_country = city.title() + ', ' + country.title() + \" - population \" + str(population)\n\telse:\n\t\tcity_country = city.title() + ', ' + country.title()\n\treturn city_country", "def get_city(self, territory_id: str = \"\"):", "def get_city(self, territory_id: str = \"\"):", "def get_city_country(city, country, population=''):\n if population:\n location = city + ' ' + country + ' ' + str(population)\n return location.title()\n\n else:\n location = city + ' ' + country\n return location.title()", "def get_city(doc = None, cursor = None):\n\tif cursor is None and doc is not None:\n\t\treturn doc['details']['city']\n\telif doc is None and cursor is not None:\n\t\tallcities = list()\n\t\tfor thisdoc in cursor:\n\t\t\tallcities.append(thisdoc['details']['city'])\n\t\treturn allcities\n\telse:\n\t\tprint \"Supply any one argument only!\"", "def GetCityFromAirportId(self, airprot_id):\n return self.airports.set_index('airport_id').loc[airprot_id]['city']", "def get_location(city, country, population=\"\"):\n\tdetails = city.title() + \", \" + country.title()\n\t#if population is specified i.e. not default value, append\n\tif population:\n\t\tdetails += \" - population \" + str(population)\n\t#in either case, return details\n\treturn details" ]
[ "0.70806533", "0.70806533", "0.7044645", "0.6975406", "0.69381595", "0.67983896", "0.67983896", "0.67983896", "0.67983896", "0.67983896", "0.6758192", "0.6743508", "0.6390767", "0.63457286", "0.62552303", "0.62327105", "0.6232633", "0.62152445", "0.6163769", "0.6126078", "0.6102532", "0.6039569", "0.6024781", "0.60043776", "0.597103", "0.597103", "0.5946353", "0.59427214", "0.5828676", "0.5824589" ]
0.82322896
0
Returns the first product by priority
def get_first_product_by_priority(self):
    products = self.products.filter(type="S").order_by("billing_priority")
    if products.exists():
        return products.first()
    else:
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_priority(self, item):\n try:\n return self.set[item][0]\n except KeyError:\n print(\"Can't get priority of non-existing item\")", "def get_product(self, code):\n candidates = list(filter(lambda c: c.code == code, self.list_products()))\n if not len(candidates):\n return None\n return candidates[0]", "def get_priority(self, elem):\n pos = self.pos[elem]\n return self.A[pos][1]", "def lookup_product(product_id,all_products):\n matching_products = [p for p in all_products if str(p[\"id\"]) == str(product_id)]\n if any(matching_products):\n return matching_products[0]\n else:\n return None", "def buy_one_get_one(products):\n if 'p1' in products and products['p1'] >= 2:\n return -20\n else:\n return 0", "def GetProductByType(self, sProductType):\n for p in self.Products.itervalues():\n if p.Type == sProductType:\n return p\n\n return None", "def get_product(self, identifier):\n # try to find an id corresponding to the code\n for p in self['products']:\n if identifier in p.get('ids', []):\n return p\n # if no product associated with the code found, return None\n return None", "def get_highest_priority(self):\n for i in self.query.index.values.tolist():\n if not int(self.query.loc[i,'in_%s'%self.program]):\n pick = self.query.loc[i]\n break\n return pick", "def best_promo(order):\n return max(promo(order) for promo in promos)", "def target_for_product(self, product):\n for target, products in self._products_by_target.items():\n if product in products:\n return target\n return None", "def first(self) -> Optional[T]:\n if len(self.entry_finder) == 0:\n return None\n for (_, _, (item,)) in self.priority_queue:\n if item is not None:\n return cast(T, item)\n return None", "def medium_priority(self):\n\n return self.filter(priority='2')", "def find_product(product_id, all_products):\n matching_products = [p for p in all_products if str(p[\"id\"]) == str(product_id)]\n matching_product = matching_products[0]\n return matching_product", "def recommend_next_product(self, prod_list):\n scores = defaultdict(float)\n for prod in prod_list:\n for item in self._purchased.find({PROD1: prod}):\n if not item[PROD2] in prod_list:\n scores[item[PROD2]] += math.log(item[TIMES])\n if len(scores) == 0:\n return None\n max_tuple = max(scores.items(), key = operator.itemgetter(1))\n return max_tuple[0]", "def getPriority(self):", "def get_product_by_id(pid: int) -> Optional[Product]:\n return get_market().get_product(pid)", "def low_priority(self):\n\n return self.filter(priority='3')", "def get_product_by_slug(self, slug):\n return self.get_products({ 'review_url': slug })[0]", "def _get_product(self):\n try:\n return self.activities[industry.MANUFACTURING].products[0].typeID\n except (KeyError, IndexError):\n return None", "def get_single_result(self, rows):\n if self.priority_sort_column:\n rows = rows.sort_values(by=self.priority_sort_column,\n ascending=self.priority_sort_ascending)\n return rows.iloc[0]", "def priority(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"priority\")", "def priority(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"priority\")", "def priority(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"priority\")", "def priority(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"priority\")", "def test_single_quant_priority(self):\n pick = self.quant_1.create_picking(self.picking_type_pick, priority=\"1\")\n # Check priority is 1 = 'Urgent'\n self.assertEqual(pick.priority, \"1\")", "def find_substitute(self):\n\n 
products_list = None\n\n while not products_list:\n self.get_targeted_category()\n\n db.connect()\n db.execute(\"\"\"\n SELECT product_id, nutriscore_id\n FROM Product_per_category\n INNER JOIN Product\n ON Product.id = product_id\n WHERE category_id = %s AND nutriscore_id < %s\n ORDER BY nutriscore_id\n \"\"\", (self.category_id, self.nutriscore,))\n products_list = db.fetch(True)\n db.disconnect()\n self.category_concordance += 1\n\n return products_list[0][0]", "def get_first_carton(source: Union[Source, int]):\n\n from astra.database.targetdb import Target, CartonToTarget, Carton\n\n catalogid = get_catalog_identifier(source)\n\n sq = (\n CartonToTarget.select(CartonToTarget.carton_pk)\n .join(Target)\n .where(Target.catalogid == catalogid)\n .order_by(CartonToTarget.priority.asc())\n .limit(1)\n .alias(\"first_carton\")\n )\n return Carton.select().join(sq, on=(sq.c.carton_pk == Carton.pk)).first()", "def greatest_product_one(self, key):\n return self.greatest_product(key)[0]", "def _get_priority(self):\n return self.__priority", "def _get_priority(self):\n return self.__priority" ]
[ "0.64809465", "0.64384234", "0.61902714", "0.6139843", "0.6105018", "0.59034294", "0.5887629", "0.58555615", "0.58348155", "0.5798752", "0.57809514", "0.57279134", "0.57052654", "0.5690262", "0.5687347", "0.56850964", "0.56675524", "0.5659207", "0.56436497", "0.5585878", "0.55803275", "0.55803275", "0.55803275", "0.55803275", "0.5576739", "0.5575248", "0.55737686", "0.5567481", "0.555976", "0.555976" ]
0.87777555
0
This will order products by their billing_priority attribute, and the billing data included in the first SubscriptionProduct that matches that priority will be returned in a dictionary. This is used to complete the billing information when invoices are created. Used primarily in invoicing.
def get_billing_data_by_priority(self):
    result = {}
    product = self.get_first_product_by_priority()
    if product:
        sp = self.subscriptionproduct_set.filter(product=product).first()
        if sp.address:
            result = {
                "route": sp.route_id,
                "order": sp.order,
                "address": sp.address.address_1 or sp.subscription.contact.email,
                "state": sp.address.state,
                "city": sp.address.city,
                "name": self.get_billing_name(),
            }
    if not result:
        if getattr(settings, "FORCE_DUMMY_MISSING_BILLING_DATA", False):
            result = {}
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_products_in_subscription(self, subscription):\n path = 'katello/api/v2/subscriptions/{}'.format(subscription.id)\n subscription_json = satellite_get_response(path)\n name_dict = dict(\n (\n prod_json['name'],\n satellite_json_to_entity(prod_json, nailgun.entities.Product)\n )\n for prod_json in subscription_json['provided_products']\n )\n return name_dict", "def get_first_product_by_priority(self):\n products = self.products.filter(type=\"S\").order_by(\"billing_priority\")\n if products.exists():\n return products.first()\n else:\n return None", "def product_summary(self):\n # products = self.products.filter(type='S') # TODO: explain the usage of this commented line or remove it\n from .utils import process_products\n\n subscription_products = SubscriptionProduct.objects.filter(subscription=self)\n dict_all_products = {}\n for sp in subscription_products:\n dict_all_products[str(sp.product.id)] = str(sp.copies)\n return process_products(dict_all_products)", "def _serialize_order_and_product_data(order_data:dict):\n\n placed_orders = []\n ordered_products = []\n\n for order in order_data:\n if order[\"financial_status\"] not in COMPLETE_ORDER_STATUSES:\n continue\n \n items = []\n products = []\n for item in order[\"line_items\"]:\n items.append(\n {\n \"ProductID\": item[\"id\"],\n \"SKU\": item[\"sku\"],\n \"ProductName\": item[\"title\"],\n \"Quantity\": item[\"quantity\"],\n \"ItemPrice\": item[\"name\"]\n }\n )\n\n products.append(\n {\n \"token\": PUBLIC_KEY,\n \"event\": \"Ordered Product\",\n \"customer_properties\": {\n \"$email\": order[\"customer\"][\"email\"],\n \"$first_name\": order[\"customer\"][\"first_name\"],\n \"$last_name\": order[\"customer\"][\"last_name\"]\n },\n \"properties\": {\n \"$event_id\": item[\"id\"],\n \"$value\": item[\"price\"],\n \"ProductID\": item[\"product_id\"],\n \"SKU\": item[\"sku\"],\n \"ProductName\": item[\"title\"],\n \"Quantity\": item[\"quantity\"]\n }\n }\n )\n \n ordered_products.append({\"order_id\":order[\"id\"], \"body\": products})\n\n placed_orders.append(\n {\n \"token\": PUBLIC_KEY,\n \"event\": \"Placed Order\",\n \"customer_properties\": {\n \"$email\": order[\"customer\"][\"email\"],\n \"$first_name\": order[\"customer\"][\"first_name\"],\n \"$last_name\": order[\"customer\"][\"last_name\"],\n \"$phone_number\": order[\"customer\"][\"phone\"],\n \"$address1\": order[\"customer\"][\"default_address\"][\"address1\"] if \"default_address\" in order[\"customer\"].keys() else None,\n \"$address2\": order[\"customer\"][\"default_address\"][\"address2\"] if \"default_address\" in order[\"customer\"].keys() else None,\n \"$city\": order[\"customer\"][\"default_address\"][\"city\"] if \"default_address\" in order[\"customer\"].keys() else None,\n \"$zip\": order[\"customer\"][\"default_address\"][\"zip\"] if \"default_address\" in order[\"customer\"].keys() else None,\n \"$region\": order[\"customer\"][\"default_address\"][\"province_code\"] if \"default_address\" in order[\"customer\"].keys() else None,\n \"$country\": order[\"customer\"][\"default_address\"][\"country_name\"] if \"default_address\" in order[\"customer\"].keys() else None,\n },\n \"properties\": {\n \"$event_id\": order[\"id\"],\n \"$value\": order[\"total_price\"],\n \"ItemNames\": [item[\"name\"] for item in order[\"line_items\"]],\n \"DiscountCode\": order[\"discount_codes\"],\n \"DiscountValue\": order[\"total_discounts\"],\n \"Items\": items,\n \"BillingAddress\": None if \"billing_address\" not in order.keys() else\n {\n \"FirstName\": 
order[\"billing_address\"][\"first_name\"],\n \"LastName\": order[\"billing_address\"][\"last_name\"],\n \"Company\": order[\"billing_address\"][\"company\"],\n \"Addaress1\": order[\"billing_address\"][\"address1\"],\n \"Address2\": order[\"billing_address\"][\"address2\"],\n \"City\": order[\"billing_address\"][\"city\"],\n \"Region\": order[\"billing_address\"][\"province\"],\n \"RegionCode\": order[\"billing_address\"][\"province_code\"],\n \"Country\": order[\"billing_address\"][\"country\"],\n \"CountryCode\": order[\"billing_address\"][\"country_code\"],\n \"Zip\": order[\"billing_address\"][\"zip\"],\n \"Phone\": order[\"billing_address\"][\"phone\"]\n },\n \"ShippingAddress\": None if \"shipping_address\" not in order.keys() else\n {\n \"FirstName\": order[\"shipping_address\"][\"first_name\"],\n \"LastName\": order[\"shipping_address\"][\"last_name\"],\n \"Company\": order[\"shipping_address\"][\"company\"],\n \"Addaress1\": order[\"shipping_address\"][\"address1\"],\n \"Address2\": order[\"shipping_address\"][\"address2\"],\n \"City\": order[\"shipping_address\"][\"city\"],\n \"Region\": order[\"shipping_address\"][\"province\"],\n \"RegionCode\": order[\"shipping_address\"][\"province_code\"],\n \"Country\": order[\"shipping_address\"][\"country\"],\n \"CountryCode\": order[\"shipping_address\"][\"country_code\"],\n \"Zip\": order[\"shipping_address\"][\"zip\"],\n \"Phone\": order[\"shipping_address\"][\"phone\"]\n }\n },\n \"time\": int(time.time())\n }\n )\n \n return placed_orders, ordered_products", "def _initialize_products(self, products: List) -> Dict[str, int]:\n\n product_request = urllib.request.Request(url=URL_PRODUCTS, headers={'User-Agent': URL_USER_AGENT})\n product_response = urllib.request.urlopen(product_request)\n all_products = json.load(product_response)\n\n product_details = {}\n\n for product in products:\n for cbpro_product in all_products:\n if cbpro_product[\"id\"] == product:\n quote_increment = float(cbpro_product[\"quote_increment\"])\n num_decimal_places = int(math.log10(1 / quote_increment))\n product_details[product] = num_decimal_places\n logging.debug(\n \"Retrieved quote increment for {}: {} = {} decimal places\".\n format(product, quote_increment, num_decimal_places))\n\n return product_details", "def populate_product_cache(products=BILLING_PRODUCTS):\r\n if not products:\r\n product_classes = []\r\n elif isinstance(products, basestring):\r\n # we have a module containing products\r\n product_classes = collect_products_from_modules(products)\r\n product_classes.sort(key=lambda x: x.base_price)\r\n elif all(isinstance(i, basestring) for i in products):\r\n # we have a list of products\r\n product_classes = [import_item(p) for p in products]\r\n elif len(products) == 2:\r\n base_module, classes = products\r\n product_classes = [from_x_import_y(base_module, cls) for cls in classes]\r\n else:\r\n raise ValueError(\"\"\"Invalid value for \"product\"\r\n If defined, products must be one of:\r\n a list of product classes\r\n a (base_module, [product_class]) tuple\r\n a module containing product classes\r\n \"\"\")\r\n return OrderedDict((pc.name, pc) for pc in product_classes)", "def get_products_dict(products):\n # lang = get_language()[:2]\n lang = ''\n products_dict = {}\n try:\n if products and products[0].get('source') == 'greedy':\n for product in products:\n key = product['name']\n products_dict[key] = products_dict.get(key, {})\n products_dict[key].setdefault('products', []).append(key)\n products_dict[key]['price'] = 
products_dict[key].get('price', 0) + product['net_price']\n else:\n product_objs = list(Product.objects.using('slave').in_bulk([p['product_id'] for p in products]).values())\n bundled_products = []\n for product in product_objs:\n for bundled_product in product.bundled.all():\n bundled_product.price = 0\n bundled_products.append(bundled_product)\n product_objs.extend(bundled_products)\n for product in product_objs:\n key = getattr(product.parent, 'name_%s' % lang)\n products_dict[key] = products_dict.get(key, {\n 'expire_in': product.expire_in,\n 'never_expire': product.never_expire\n })\n products_dict[key].setdefault('products', []).append(mark_safe(product.name))\n products_dict[key]['price'] = products_dict[key].get('price', 0) + product.price\n # Convert it to a format which is easy to handle in email templates\n products_dict = [{\n 'title': key,\n 'body': value,\n } for key, value in products_dict.items()]\n except (ValueError, KeyError, AttributeError):\n products_dict = list({'title': p['name'], 'body': {'expire_in': None, 'never_expire': None}} for p in products)\n\n return products_dict", "def process_product_data(cls, pricing_info):\n if 'prices' not in pricing_info or 'vat_bands' not in pricing_info:\n raise PricingException('Json data does not contain required '\n 'product and vat band information')\n\n product_prices = pricing_info['prices']\n vat_bands = pricing_info['vat_bands']\n\n processed_vat_bands = {}\n for vat_name, vat_rate in vat_bands.items():\n if vat_name in processed_vat_bands:\n raise PricingException('Vat names need to be unique. '\n f'Vat names duplicated {vat_name}')\n\n try:\n rate = float(vat_rate)\n except ValueError:\n raise PricingException('Vat rates need to be a decimal. '\n f'Vat rate: {vat_rate}')\n\n processed_vat_bands[vat_name] = {\n 'name': vat_name,\n 'rate': rate\n }\n\n processed_products = {}\n for product_price in product_prices:\n product_id = product_price['product_id']\n if product_id in processed_products:\n raise PricingException('Product ids need to be unique. '\n f'Product id duplicated {product_id}')\n\n associated_vat_band = product_price['vat_band']\n if associated_vat_band not in processed_vat_bands:\n raise PricingException('Vat band does not exist. '\n f'Vat band: {associated_vat_band} on '\n f'product id: {product_id}')\n\n try:\n price = int(product_price['price'])\n except ValueError:\n raise PricingException('Product prices need to be numbers. 
'\n f'Price: {price} on '\n f'product id: {product_id}')\n\n processed_products[product_id] = {\n 'product_id': product_id,\n 'price': price,\n 'vat_band': associated_vat_band\n }\n\n return processed_vat_bands, processed_products", "def default_get(self, cr, uid, fields, context=None):\n if context is None:\n context = {}\n\n exchang_obj = self.pool.get('exchange.order')\n res ={}\n exchang_ids = context.get('active_ids', [])\n if not exchang_ids:\n return res\n\n result = []\n for req in exchang_obj.browse(cr, uid, exchang_ids, context=context):\n for product in req.order_line:\n result.append(self.__create_products(product))\n res.update({'products_ids': result})\n if 'current_date' in fields:\n res.update({'current_date': time.strftime('%Y-%m-%d %H:%M:%S')})\n return res", "def get_customer_order(self, order_id):\n resp = self._request_json(\"/api/orders/fetch-detail\",\n # Not sure what this getPayments does\n params={\"order_id\": order_id,\n \"getPayments\": \"true\"})\n order = _fix_order_fields(resp[\"order\"])\n products_infos = {}\n for product_info in order.pop(\"producerproducts\"):\n product_info = _strip_mongodb_id(product_info)\n pid = product_info[\"id\"]\n del product_info[\"id\"]\n products_infos[pid] = product_info\n\n for product in order[\"products\"]:\n product = _strip_mongodb_id(product)\n product.update(products_infos[product[\"id\"]])\n\n return order", "def order_products(self, obj):\n table = \"\"\"<table id=\"result_list\">\n <thead>\n <tr>\n <th scope=\"col\">\n <div class=\"text\"><span>Product ID</span></div>\n <div class=\"clear\"></div>\n </th>\n <th scope=\"col\">\n <div class=\"text\"><span>Product Name</span></div>\n <div class=\"clear\"></div>\n </th>\n <th scope=\"col\">\n <div class=\"text\"><span>Quantity</span></div>\n <div class=\"clear\"></div>\n </th>\n <th scope=\"col\">\n <div class=\"text\"><span>Price</span></div>\n <div class=\"clear\"></div>\n </th>\n </tr>\n </thead>\n <tbody>\"\"\"\n for order_item in obj.order_items.all():\n table += f\"\"\"<tr>\n <td class=\"field-id\">{order_item.product.id}</td>\n <td class=\"field-name\">{order_item.product.name}</td>\n <td class=\"field-quantity\">{order_item.quantity}</td>\n <td class=\"field-price\">{order_item.price}</td>\n </tr>\"\"\"\n table += \"</tbody></table>\"\n return format_html(table)", "def get_billing_address(self):\n if self.billing_address:\n return self.billing_address.address_1\n else:\n subscription_products = SubscriptionProduct.objects.filter(\n subscription=self\n )\n addresses = [sp.address for sp in subscription_products if sp.address]\n if not addresses:\n if self.contact.email:\n return self.contact.email\n else:\n return None\n else:\n return addresses[0].address_1", "def get_recurring_orderitems(self):\n subscriptions = []\n for orderitem in self.order.orderitem_set.all():\n product = orderitem.product\n if product.is_subscription:\n self.log_extra(\"Found subscription product: %s\", product.slug)\n if product.subscriptionproduct.recurring:\n self.log_extra(\"Subscription is recurring: %s\", product.slug)\n subscriptions.append(orderitem)\n elif product.subscriptionproduct.trial_set.count() > 0:\n self.log_extra(\n \"Not recurring, but it has a trial: %s\", product.slug\n )\n subscriptions.append(orderitem)\n else:\n self.log_extra(\"Not a recurring product: %s \", product.slug)\n else:\n self.log_extra(\"Not a subscription product: %s\", product.slug)\n return subscriptions", "def show_available_products(*args):\n logger.info(f\"Preparing dict of available 
prodcuts...\")\n available_products = {}\n\n with MONGO:\n mdb = eval(Settings.connect_string)\n products = mdb[\"product\"]\n for doc in products.find():\n del doc[\"_id\"]\n if int(doc[\"quantity_available\"]) > 0:\n product_id = doc[\"product_id\"]\n del doc[\"product_id\"]\n available_products[product_id] = doc\n\n return available_products", "def payload_add_products(self, payload: dict, order: Order, language: str):\n order_lines: [OrderLine] = OrderLine.objects.filter(order=order.id)\n items: [dict] = []\n\n area = resolve_area(order)\n\n # Additional product orders doesn't have berth product\n if hasattr(order, \"product\") and order.product:\n product = order.product\n int_tax = int(order.tax_percentage)\n assert (\n int_tax == product.tax_percentage\n ) # make sure the tax is a whole number\n with override(language):\n lease = order.lease\n place = (\n lease.berth\n if hasattr(lease, \"berth\")\n else lease.place\n if hasattr(lease, \"place\") and lease.place\n else lease.section\n if hasattr(lease, \"section\") and lease.section\n else area\n )\n product_name = f\"{product.name}: {place}\"\n items.append(\n {\n \"id\": get_talpa_product_id(product.id, area),\n \"title\": product_name,\n \"price\": price_as_fractional_int(order.price),\n \"pretax_price\": price_as_fractional_int(order.pretax_price),\n \"tax\": int_tax,\n \"count\": 1,\n \"type\": 1,\n }\n )\n\n for order_line in order_lines:\n product: AdditionalProduct = order_line.product\n int_tax = int(product.tax_percentage)\n assert (\n int_tax == product.tax_percentage\n ) # make sure the tax is a whole number\n with override(language):\n product_name = product.name\n items.append(\n {\n \"id\": get_talpa_product_id(\n product.id,\n area,\n is_storage_on_ice=product.service\n == ProductServiceType.STORAGE_ON_ICE,\n ),\n \"title\": product_name,\n \"price\": price_as_fractional_int(order_line.price),\n \"pretax_price\": price_as_fractional_int(order_line.pretax_price),\n \"tax\": int_tax,\n \"count\": order_line.quantity,\n \"type\": 1,\n }\n )\n payload[\"amount\"] = price_as_fractional_int(order.total_price)\n payload[\"products\"] = items", "def purchase(self, first='', last='', street1='', street2='', city='', state='', postalcode='',\r\n country='', ccnum='', cardtype='', processor_reply_dump=''):\r\n if self.status == 'purchased':\r\n return\r\n self.status = 'purchased'\r\n self.purchase_time = datetime.now(pytz.utc)\r\n self.bill_to_first = first\r\n self.bill_to_last = last\r\n self.bill_to_city = city\r\n self.bill_to_state = state\r\n self.bill_to_country = country\r\n self.bill_to_postalcode = postalcode\r\n if settings.FEATURES['STORE_BILLING_INFO']:\r\n self.bill_to_street1 = street1\r\n self.bill_to_street2 = street2\r\n self.bill_to_ccnum = ccnum\r\n self.bill_to_cardtype = cardtype\r\n self.processor_reply_dump = processor_reply_dump\r\n\r\n # save these changes on the order, then we can tell when we are in an\r\n # inconsistent state\r\n self.save()\r\n # this should return all of the objects with the correct types of the\r\n # subclasses\r\n orderitems = OrderItem.objects.filter(order=self).select_subclasses()\r\n for item in orderitems:\r\n item.purchase_item()\r\n\r\n # send confirmation e-mail\r\n subject = _(\"Order Payment Confirmation\")\r\n message = render_to_string(\r\n 'emails/order_confirmation_email.txt',\r\n {\r\n 'order': self,\r\n 'order_items': orderitems,\r\n 'has_billing_info': settings.FEATURES['STORE_BILLING_INFO']\r\n }\r\n )\r\n try:\r\n from_address = microsite.get_value(\r\n 
'email_from_address',\r\n settings.DEFAULT_FROM_EMAIL\r\n )\r\n\r\n send_mail(subject, message,\r\n from_address, [self.user.email]) # pylint: disable=E1101\r\n except (smtplib.SMTPException, BotoServerError): # sadly need to handle diff. mail backends individually\r\n log.error('Failed sending confirmation e-mail for order %d', self.id) # pylint: disable=E1101\r", "def _compute_order_priorities(self, orders):\n order_priorities = {}\n times_last_created = {}\n\n if len(orders) == 0:\n return order_priorities\n\n time_last_order_created = orders[-1].time_created\n # We'll analyze all orders created in the\n # bodega_all.serializers.DEFAULT_ORDER_EXPIRATION_TIME_LIMIT window\n # before the last order that we're computing priorities for. In\n # practice that covers virtually all orders that hadn't expired. It's\n # not worth refactoring to move that constant somewhere else for this\n # hack, and since it's only an approximation anyway it's okay if the\n # values don't match up exactly.\n ordering_start_time = time_last_order_created + timedelta(hours=-24)\n all_orders = Order.objects.filter(\n time_created__gt=ordering_start_time,\n time_created__lte=time_last_order_created)\n for order in all_orders.order_by('time_created'):\n owner_sid = get_sid(order.owner)\n time_last_created = times_last_created.get(owner_sid, None)\n if time_last_created is None:\n # First order is not throttled.\n throttled_time_created = order.time_created\n else:\n # Hard-coded throttle amount since this is only a hack.\n # At about 200 jobs per release pipeline run, throttling them\n # at 4 minutes each will spread them evenly over ~13 hours.\n # Any orders placed by other individual users during those ~13\n # hours will be in the middle rather than the back of the\n # queue.\n throttled_time_created = max(\n time_last_created + timedelta(minutes=4),\n order.time_created)\n times_last_created[owner_sid] = throttled_time_created\n order_priorities[order.sid] = throttled_time_created.isoformat()\n\n return order_priorities", "def prepare_product_response(self, order_queue_line, product_data_queue_line):\n sync_category_and_tags = False\n if order_queue_line:\n data = product_data_queue_line\n product_queue_id = \"from Order\"\n sync_category_and_tags = True\n else:\n product_queue_id = product_data_queue_line.queue_id.id\n if product_data_queue_line.queue_id.created_by == \"webhook\":\n sync_category_and_tags = True\n data = json.loads(product_data_queue_line.woo_synced_data)\n return data, product_queue_id, sync_category_and_tags", "def __sort_orders_by_price(self):\n self.orders = sorted(self.orders, key=lambda o: o.price, reverse=True)", "def _format_product(self, product):\n formatted = {}\n variants = []\n formatted['id'] = product.get('id')\n formatted['name'] = product.get('name')\n formatted['in_stock'] = True if product.get('stock_status') == 'instock' else False\n # Variants (Includes prices here since variants can be different prices)\n for variant in product.get('variations', []):\n formatted_variant = self._format_variant(variant)\n variants.append(formatted_variant)\n formatted['variants'] = variants\n # Options\n if product.get('attributes'):\n options = [{attribute.get('name').lower(): attribute.get('options')} for attribute in product['attributes']]\n elif product.get('default_attributes'):\n options = [{attribute.get('name').lower(): [attribute.get('option')]} for attribute in product['default_attributes']]\n else:\n options = {}\n formatted['options'] = options\n return formatted", "def 
get_products(self) -> dict:\n\t\tproducts = dict()\n\n\t\tdb = Database()\n\t\tdb.create_connection(self._file_path)\n\t\trows = db.get_products()\n\t\tdb.close_connection()\n\n\t\tfor row in rows:\n\t\t\tif row[0] not in products:\n\t\t\t\ttry:\n\t\t\t\t\tproducts[row[0]] = Product(row[0], row[1], row[2], row[3]) # code, price, lastupdate, currency\n\t\t\t\texcept Exception as e: \n\t\t\t\t\t# IF the database was not correct parsed, the item will be discarted, \n\t\t\t\t\t# the event will be logged in the log file and the program will continue\n\t\t\t\t\tlogging.error(str(datetime.now())+': ' + e)\n\t\t\t\t\tcontinue\n\n\t\treturn products", "def get(self):\n\n bill = {\n 'product': {\n 'name': self.order.product.name,\n 'price': self.order.product.price\n },\n 'order_date_of_creation': self.order.date_of_creation,\n 'bill_date_of_creation': timezone.now(),\n 'discounts': [],\n 'total': self.order.product.price\n }\n\n return self.add_discount(bill)", "def _compute_order_priorities_stats(self, orders):\n order_prices = {}\n tab_limits = {}\n tab_demands = {}\n total_fulfilled_prices = Counter()\n valid_statuses = set([Order.STATUS_OPEN, Order.STATUS_FULFILLED])\n\n for order in orders:\n if order.status not in valid_statuses:\n bodega_value_error(\n log,\n ('Order %s status %s is not valid for computing '\n 'price-based priority') % (order, order.status))\n\n order_price = 0.0\n if not order.maintenance:\n # We currently assume that each user has a single tab,\n # but this may change in the future.\n if order.tab.sid not in tab_limits:\n tab_limits[order.tab.sid] = order.tab.limit\n\n if order.tab.sid not in tab_demands:\n tab_demands[order.tab.sid] = 0.0\n\n # Compute order price as a sum of its items' prices.\n item_prices = \\\n self.item_tools.get_prices_for_items(order.items.items())\n order_price = sum(item_prices.values())\n\n if order.status == Order.STATUS_FULFILLED:\n total_fulfilled_prices[order.tab.id] += order_price\n\n tab_demands[order.tab.sid] += order_price\n\n log.debug('Order %s has a price of %s' % (order, order_price))\n order_prices[order.sid] = order_price\n\n total_tab_limit = sum(tab_limits.values())\n\n # Generate a list of tab_demands / tab_limit to compute the median\n # demand\n tab_demand_per_limit = sorted(\n [tab_demands[key] / tab_limits[key]\n for key in tab_demands])\n\n if total_tab_limit < 0:\n bodega_value_error(\n log,\n 'Total tab limit is negative: %s' % total_tab_limit)\n elif total_tab_limit == 0:\n if orders:\n bodega_value_error(\n log,\n ('Total tab limit is 0 for non-empty list of orders. 
'\n 'This may be due to a race condition in between the time '\n 'we collect the tab ids and fetch their limits.'))\n median_demand = None\n else:\n median_demand = statistics.median(tab_demand_per_limit)\n\n order_priority_stats = {\n 'median_demand': median_demand,\n 'order_prices': order_prices,\n 'tab_limits': tab_limits,\n 'total_fulfilled_prices': dict(total_fulfilled_prices)\n }\n\n log.debug('Order priority stats: %s' % order_priority_stats)\n return order_priority_stats", "def billing_info(self):\r\n return BillingInfo(self)", "def available_products(self):\n # TODO - take into account bands, and what bands available in input products, etc\n return {k: self.__products__[k].description for k in self.__products__.keys()}", "def _sort_open_orders_by_price(self,\n open_orders,\n fulfilled_orders):\n priorities_stats = self._compute_order_priorities_stats(\n open_orders + fulfilled_orders)\n median_demand, order_prices, tab_limits, total_fulfilled_prices = \\\n (priorities_stats['median_demand'],\n priorities_stats['order_prices'],\n priorities_stats['tab_limits'],\n priorities_stats['total_fulfilled_prices'])\n\n # The get_priority function also does a write to the database to update\n # tab_based_priority field for each order. This is because we use that\n # as a cached field to show the user the order's last known priority.\n # This is a side-effect of the function\n def get_priority(open_order):\n \"\"\"Compute an open order's price-based priority.\n\n The floor and 20% fudge keep FIFO as a small component of priority\n instead of severely penalizing people who ordered early but want\n just a bit more than average demand.\n\n Maintenance orders are a special case and always priced at 0.0\n to be processed early.\n \"\"\"\n priority = 0.0\n if not open_order.maintenance:\n order_price = order_prices[open_order.sid]\n tab = open_order.tab\n owner_total_fulfilled_price = \\\n total_fulfilled_prices.get(tab.id, 0.0)\n tab_limit = tab_limits[tab.sid]\n priority = floor(\n ((order_price + owner_total_fulfilled_price) / tab_limit) /\n (1.2 * median_demand))\n\n open_order.tab_based_priority = priority\n open_order.save(update_fields=['tab_based_priority'])\n\n return priority\n\n order_priorities = {\n order.sid: get_priority(order) for order in open_orders\n }\n\n log.debug('Open order price-based priorities: %s' % order_priorities)\n\n sorted_open_orders = \\\n sorted(open_orders,\n key=lambda o: order_priorities[o.sid])\n\n return {\n 'sorted_open_orders': sorted_open_orders,\n 'open_order_priorities': order_priorities\n }", "def get_products(self, options = {}):\n\t\tproducts = Product.objects.all()\n\t\tif 'reference' in options:\n\t\t\tproduct_db = Product.objects.filter(reference = options['reference'])\n\t\t\tif len(product_db) == 1:\n\t\t\t\tproduct_db = product_db[0]\n\t\t\t\treturn [self.serialize(product)]\n\n\n\t\tif 'categories_id' in options:\n\t\t\tproducts = Product.objects.filter(categories__id__in = options['categories_id'])\n\t\t\n\t\tif 'locations' in options:\n\t\t\tproducts = products.filter(history__shipping_area__postal_code__in = options['locations'])\n\n\t\tif 'before_date' in options:\n\t\t\tproducts = products.filter(history__created__lte = options['before_date'])\n\n\t\tif 'after_date' in options:\n\t\t\tproducts = products.filter(history__created__gte = options['after_date'])\n\n\t\tproducts = products.filter(exists = True)\n\n\t\tproducts = self.serialize(products)\n\t\t# Getting locations of all products and \n\t\t[ p.update({'locations' : [s.postal_code 
for s in list(ShippingArea.objects.filter(history__product__id = p['id']))]}) for p in products]\n\n\t\treturn products", "def data(self):\n return dict({\"order\": super(TakeProfitOrderRequest, self).data})", "def sorted_recommended_products(self):\n return [\n r.recommendation\n for r in self.primary_recommendations.select_related(\"recommendation\").all()\n ]", "def test_get_rate_plan_by_product_and_rate_plan(self):\n pass" ]
[ "0.5509737", "0.5395463", "0.53618896", "0.52143073", "0.5201712", "0.51797724", "0.51070535", "0.5086419", "0.50125146", "0.4921268", "0.4906881", "0.48464125", "0.47775888", "0.47718132", "0.47433048", "0.47264567", "0.469503", "0.46914676", "0.4672938", "0.46599743", "0.4625627", "0.4590087", "0.45890555", "0.45858768", "0.4572392", "0.45716655", "0.4561613", "0.45361197", "0.45328885", "0.4527244" ]
0.78009915
0
Returns the amount discounted when the subscription is for more than one month. Requires the corresponding discount settings to be defined in the local settings.
def get_frequency_discount(self):
    if self.frequency == 3:
        return getattr(settings, "DISCOUNT_3_MONTHS", 0)
    elif self.frequency == 6:
        return getattr(settings, "DISCOUNT_6_MONTHS", 0)
    elif self.frequency == 12:
        return getattr(settings, "DISCOUNT_12_MONTHS", 0)
    else:
        return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monthly_benefit(self):\n \"\"\"Calculate weekly benefit of this company from this day\"\"\"\n total_purchase_price = 0\n total_selling_price = 0\n last_thirty_days = timezone.now() - timedelta(days=30)\n items = self.item_set.filter(status=\"sold\", updated_at__gte=last_thirty_days)\n for item in items:\n total_purchase_price += item.price\n total_selling_price += item.selling_price\n benefit = total_selling_price - total_purchase_price\n return benefit", "def free_cookie_discount():\n return DiscountPeriod.objects.get(id=settings.COOKIE_CORNER_FREE_COOKIE_DISCOUNT_PERIOD_ID)", "def monthly_payment(self):\n return self._quantize(self._monthly_payment)", "def discount_amount(self):\r\n customer = self.records.find_customers(str(self.__customer).strip())\r\n order_value = self.order_value\r\n discount = customer.get_discount(order_value)\r\n return discount", "def amount_to_pay_in_period(self):\n assert self.type == \"N\", _(\"Subscription must be normal to use this method\")\n period_start, period_end = self.get_current_period()\n price_per_day = (\n self.get_price_for_full_period() / (period_end - period_start).days\n )\n days_not_used = 30 * self.frequency - (date.today() - period_start).days\n return int(price_per_day * days_not_used)", "def exam_cookie_discount():\n return DiscountPeriod.objects.get(id=settings.COOKIE_CORNER_EXAM_COOKIE_DISCOUNT_PERIOD_ID)", "def monthly_sales(self):\n last_thirty_days = timezone.now() - timedelta(days=30)\n items = self.item_set.filter(status=\"sold\", updated_at__gte=last_thirty_days)\n total_sales = 0\n for item in items:\n total_sales += item.price\n return total_sales", "def discount(self, period):\n\t\treturn 1.0/compound(period)", "def amount_already_paid_in_period(self):\n assert self.type == \"N\", _(\"Subscription must be normal to use this method\")\n period_start, period_end = self.get_current_period()\n price_per_day = (\n self.get_price_for_full_period() / (period_end - period_start).days\n )\n days_already_used = (date.today() - period_start).days\n amount = int(price_per_day * days_already_used)\n if amount > self.get_price_for_full_period():\n amount = self.get_price_for_full_period()\n if amount < 0:\n amount = 0\n return amount", "def get_monthly_amount(sub):\n price = min(60, sub.price_paid) / Decimal('1.2') * Decimal('0.7')\n nb_month = max([(sub.date_over - sub.date_created).days//30, 1])\n return (price / nb_month).quantize(Decimal('1.00'))", "def compute_amount_discounted(promotion, amount):\n if promotion.promo_type == '1': # % off\n amount_discounted = promotion.promo_amount * amount / Decimal(100)\n amount_discounted = Decimal(str(round(amount_discounted, 2)))\n elif promotion.promo_type == '2': # $ off\n if promotion.promo_amount < amount:\n amount_discounted = promotion.promo_amount\n else:\n amount_discounted = amount\n elif promotion.promo_type == '3': # fixed $ cost\n if promotion.promo_amount < amount:\n amount_discounted = amount - promotion.promo_amount\n else:\n # If you have a fixed cost promo of $20, but your items \n # only cost $10, you don't save.\n amount_discounted = 0\n LOG.debug('compute discount: amount_discounted = %s' % amount_discounted)\n return amount_discounted", "def discounted(self):\n return self._discounted", "def _get_discount(self):\n\n # For every 2 PENS, one free discount\n number_of_pens = len([x for x in self._products if x.code == 'PEN'])\n discount = 5.0 * int(number_of_pens / 2)\n\n # If there are more than 3 T-Shirts in the basket, 5 EUR of discount in every of them (25%)\n 
number_of_tshirts = len([x for x in self._products if x.code == 'TSHIRT'])\n if number_of_tshirts >= 3:\n discount += 5.0 * number_of_tshirts\n\n return discount", "def discount_amount(self):\n return self._discount_amount", "def remaining_months_purchased(self) -> int:\n if not self.last_expiration:\n return 0\n start_date = _today()\n end_date = self.last_expiration\n return (end_date.year - start_date.year) * 12 + (\n end_date.month - start_date.month\n )", "def _prorata_rate(self, days_used, days_in_month):\n return (100 * days_used // days_in_month) / 100.0", "def get_spend_by_campaign_this_month(self, account_id):\n try:\n account = DependentAccount.objects.get(id=account_id)\n except DependentAccount.DoesNotExist:\n return\n\n client = get_client()\n client.client_customer_id = account.dependent_account_id\n\n report_downloader = client.GetReportDownloader(version=settings.API_VERSION)\n\n campaign_report_selector = {\n 'fields': ['Cost', 'CampaignId', 'CampaignStatus', 'CampaignName', 'Labels', 'Impressions'],\n 'predicates': [\n {\n 'field': 'Cost',\n 'operator': 'GREATER_THAN',\n 'values': '0'\n }\n ]\n }\n\n campaign_report_query = {\n 'reportName': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'dateRangeType': 'THIS_MONTH',\n 'reportType': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'downloadFormat': 'CSV',\n 'selector': campaign_report_selector\n }\n\n in_use_ids = []\n\n campaign_report = Reporting.parse_report_csv_new(report_downloader.DownloadReportAsString(campaign_report_query))\n for campaign_row in campaign_report:\n print(campaign_row)\n campaign_id = campaign_row['campaign_id']\n in_use_ids.append(campaign_row['campaign_id'])\n campaign, created = Campaign.objects.get_or_create(campaign_id=campaign_id, account=account)\n # Update campaign name\n campaign.campaign_name = campaign_row['campaign']\n # This is the cost for this month\n cost = int(campaign_row['cost']) / 1000000\n campaign.campaign_cost = cost\n campaign.save()\n print('Campaign: ' + str(campaign) + ' now has a spend this month of $' + str(campaign.campaign_cost))\n\n today = datetime.datetime.today()\n\n if today.day != 1:\n yesterday = datetime.datetime.now() - datetime.timedelta(1)\n first_day_of_month = datetime.datetime(yesterday.year, yesterday.month, 1)\n\n campaign_report_selector = {\n 'fields': ['Cost', 'CampaignId', 'CampaignStatus', 'CampaignName', 'Labels', 'Impressions'],\n 'predicates': [\n {\n 'field': 'Cost',\n 'operator': 'GREATER_THAN',\n 'values': '0'\n }\n ]\n }\n\n campaign_report_query = {\n 'reportName': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'dateRangeType': 'CUSTOM_DATE',\n 'reportType': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'downloadFormat': 'CSV',\n 'selector': campaign_report_selector\n }\n\n start_date = first_day_of_month\n end_date = yesterday\n\n campaign_report_selector['dateRange'] = {\n 'min': start_date.strftime('%Y%m%d'),\n 'max': end_date.strftime('%Y%m%d')\n }\n\n campaign_yest_report = Reporting.parse_report_csv_new(\n report_downloader.DownloadReportAsString(campaign_report_query))\n for campaign_row in campaign_yest_report:\n campaign_id = campaign_row['campaign_id']\n campaign, created = Campaign.objects.get_or_create(campaign_id=campaign_id, account=account)\n campaign.campaign_name = campaign_row['campaign']\n # This is the cost for this month until yesterday\n spend_until_yesterday = int(campaign_row['cost']) / 1000000\n campaign.spend_until_yesterday = spend_until_yesterday\n campaign.save()\n print(\n 'Campaign: ' + str(campaign) + ' has spend until yesterday of $' + 
str(campaign.spend_until_yesterday))\n\n return 'get_spend_by_campaign_this_month'", "def get_total_discount(self):\n total_discount = 0.00\n\n for promotion in self.pricing_rules:\n discount = promotion.get_discount(self.order)\n total_discount += discount\n\n return total_discount", "def get_subscription_days(self):\n if self.recurring_unit == self.DAY:\n return self.recurring_period\n elif self.recurring_unit == self.WEEK:\n return self.recurring_period * self.WEEKDAYS\n elif self.recurring_unit == self.MONTH:\n return self.recurring_period * self.MONTHDAYS\n elif self.recurring_unit == self.YEAR:\n return self.recurring_period * self.YEARDAYS\n else:\n return 0", "def bulk_item_promo(order: Order):\n discount = 0\n for item in order.cart:\n if item.quantity >= 20:\n discount += item.total() * 0.1\n return discount", "def fidelity_promo(order: Order) -> float: # <3>\n return order.total() * 0.05 if order.customer.fidelity >= 1000 else 0", "def actual_monthly(self):\n actual_monthly = numpy.pmt(\n self.loan_at / 12,\n self.total_months,\n -self.finance_value,\n self.loan_at_end\n )\n logger.info('numpy.pmt({},{},{},{})={}'.format(\n self.loan_at / 12,\n self.total_months,\n -self.finance_value,\n self.loan_at_end,\n actual_monthly\n ))\n return round(actual_monthly, 2)", "def amount_due(self):\n queryset = self.supplyorderitem_set.filter(delivery_date__isnull=False).aggregate(\n amount_due=Sum(F('unit_price')*F('quantity_ordered'))\n )\n return queryset['amount_due'] or 0", "def redeem(self, instance, customer, save=True):\n start = timezone.now().date()\n end = start + relativedelta(months=self.duration)\n discount = Discount(instance=instance,\n coupon=self,\n start=start,\n end=end,\n customer=customer)\n discount.full_clean()\n if save:\n discount.save()\n return discount", "def get_NextMonthsBalance(self):\n balance = (self.principal * math.exp(self.interestRate * (1/12))) - self.actualMonthlyPayment\n if balance <= 0:\n return 0\n return balance", "def __calculate_monthly_interest(self):\n return self.__percentage_interest / 12", "def get_discount(self, price):\r\n pass", "def get_quote_discount(self):\n return self.quoteitem_set.all().annotate(\n total_quote_price=F('price') * F('quantity')).annotate(\n calculate_discount=(F('total_quote_price') * F('discount') / 100)).aggregate(\n Sum('calculate_discount'))['calculate_discount__sum']", "def discount(self, cart):", "def monthly_fee(self):\n total_salary = self.base_salary\n if self.is_salary_allowances is True:\n fixed_allowances = self.summarize( self.fixed_allowances )\n non_fixed_allowances = self.summarize( self.non_fixed_allowances )\n total_salary = total_salary + non_fixed_allowances + fixed_allowances\n #end if\n\n company_old_age_insurance = 0\n individual_old_age_insurance = 0\n if self.old_age_insurance_status is True:\n company_old_age_insurance = \\\n self._company_old_age_insurance(total_salary)\n\n individual_old_age_insurance = \\\n self._individual_old_age_insurance(total_salary)\n #end if\n\n company_pension_insurance = 0\n individual_pension_insurance = 0\n if self.pension_insurance_status is True:\n company_pension_insurance = \\\n self._company_pension_insurance(total_salary)\n\n individual_pension_insurance = \\\n self._individual_pension_insurance(total_salary)\n #end if\n\n company_health_insurance = 0\n individual_health_insurance = 0\n if self.health_insurance_status is True:\n company_health_insurance = \\\n self._company_health_insurance(total_salary)\n\n individual_health_insurance = \\\n 
self._individual_health_insurance(total_salary)\n #end if\n\n death_insurance = 0\n if self.death_insurance_status is True:\n death_insurance = self._death_insurance(total_salary)\n #end if\n\n accident_insurance = 0\n if self.accident_insurance_status is True:\n accident_insurance = \\\n self._accident_insurance(total_salary, \\\n self.industry_risk_rate)\n #end if\n\n monthly = {\n \"old_age_insurance\" : {\n \"company\" : company_old_age_insurance,\n \"individual\" : individual_old_age_insurance,\n },\n \"pension_insurance\" : {\n \"company\" : company_pension_insurance,\n \"individual\" : individual_pension_insurance,\n },\n \"health_insurance\" : {\n \"company\" : company_health_insurance,\n \"individual\" : individual_health_insurance,\n },\n \"death_insurance\" : death_insurance,\n \"accident_insurance\" : accident_insurance\n }\n return monthly" ]
[ "0.62877953", "0.61699164", "0.6105525", "0.60971224", "0.60436356", "0.5954913", "0.59418446", "0.5904847", "0.5889601", "0.5864517", "0.58445734", "0.5741317", "0.57061625", "0.5664976", "0.5655065", "0.5608308", "0.5596242", "0.55690736", "0.550031", "0.53721046", "0.53706414", "0.5366821", "0.5330099", "0.5327908", "0.5313097", "0.52734476", "0.5272344", "0.52708584", "0.52596074", "0.52393943" ]
0.7160672
0
Returns an integer representing the first weekday (based on isoweekday) among the products this subscription has.
def get_first_day_of_the_week(self):
    if SubscriptionProduct.objects.filter(
        subscription=self, product__weekday=1
    ).exists():
        return 1
    elif SubscriptionProduct.objects.filter(
        subscription=self, product__weekday=2
    ).exists():
        return 2
    elif SubscriptionProduct.objects.filter(
        subscription=self, product__weekday=3
    ).exists():
        return 3
    elif SubscriptionProduct.objects.filter(
        subscription=self, product__weekday=4
    ).exists():
        return 4
    elif SubscriptionProduct.objects.filter(
        subscription=self, product__weekday=5
    ).exists():
        return 5
    else:
        return 6
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def weekday(self):\n\n return func.extract('dow', self.start_date) + 1", "def locale_first_weekday():\n\tfirst_weekday = 6 #by default settle on monday\n\n\ttry:\n\t\tprocess = os.popen(\"locale first_weekday week-1stday\")\n\t\tweek_offset, week_start = process.read().split('\\n')[:2]\n\t\tprocess.close()\n\t\tweek_start = datetime.date(*time.strptime(week_start, \"%Y%m%d\")[:3])\n\t\tweek_offset = datetime.timedelta(int(week_offset) - 1)\n\t\tbeginning = week_start + week_offset\n\t\tfirst_weekday = int(beginning.strftime(\"%w\"))\n\texcept:\n\t\tprint \"WARNING - Failed to get first weekday from locale\"\n\n\treturn first_weekday", "def get_weekday():\n result = datetime.today().weekday() + 1\n return result", "def first_week_day(self) -> int:\n return self._data['week_data']['first_day']", "def get_weekday(self):\n weekdays = dict(PRODUCT_WEEKDAYS)\n return weekdays.get(self.weekday, \"N/A\")", "def weekday(self):\n return (self.toordinal() + 6) % 7", "def get_weekday(self):\n originDate = Date(1900, 1, 1)\n return WEEKDAYS[originDate.days_since(self) % 7]", "def weekday(self):\n return 0", "def weekday(self):\n return 0", "def weekday(self) -> int:\n return WD_EN.index(self.time.day.lower())", "def day_of_week_for_start_day(self):\n import calendar\n\n day = self.idfobjects[\"RUNPERIOD\"][0][\"Day_of_Week_for_Start_Day\"]\n\n if day.lower() == \"sunday\":\n return calendar.SUNDAY\n elif day.lower() == \"monday\":\n return calendar.MONDAY\n elif day.lower() == \"tuesday\":\n return calendar.TUESDAY\n elif day.lower() == \"wednesday\":\n return calendar.WEDNESDAY\n elif day.lower() == \"thursday\":\n return calendar.THURSDAY\n elif day.lower() == \"friday\":\n return calendar.FRIDAY\n elif day.lower() == \"saturday\":\n return calendar.SATURDAY\n else:\n return 0", "def weekday(day):\n return (day % 7) - 1", "def _FirstSunday(self, dtz): # pylint: disable-msg=C0103,R0201\n return dtz + datetime.timedelta(days=(6-dtz.weekday()))", "def first_day_of_week(self):\n return self.__first_day_of_week", "def day_of_week(self):\n # 1 Jan 0001 was Monday according to the proleptic Gregorian calendar.\n # So, 1 Jan 0001 has ordinal 1, and the weekday is 0.\n return (self._ordinals - 1) % 7", "def day_of_week():\n return calendar.day_name[datetime.date.today().weekday()]", "def get_trip_day_weekday(self):\n\n days = {\"Lundi\":'1', \"Mardi\":'2', \"Mercredi\":'3', \"Jeudi\":'4', \"Vendredi\":'5', \"Samedi\":'6', \"Dimanche\":'7'}\n\n return int(days[dict(self.TRIP_DAY_SELECTIONS)[self.trip_day]])", "def WEEKDAY(\n serial_number: func_xltypes.XlNumber,\n return_type: func_xltypes.XlNumber = None\n) -> func_xltypes.XlNumber:\n\n date = utils.number_to_datetime(int(serial_number))\n\n if return_type is None:\n # Numbers 1 (Sunday) through 7 (Saturday)\n weekDays = (2, 3, 4, 5, 6, 7, 1)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 1:\n # Numbers 1 (Sunday) through 7 (Saturday)\n weekDays = (2, 3, 4, 5, 6, 7, 1)\n return weekDays[date.weekday()]\n\n # weekday() is 0 based, starting on a Monday\n elif int(return_type) == 2:\n # Numbers 1 (Monday) through 7 (Sunday)\n weekDays = (1, 2, 3, 4, 5, 6, 7)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 3:\n # Numbers 0 (Monday) through 6 (Sunday)\n weekDays = (0, 1, 2, 3, 4, 5, 6)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 11:\n # Numbers 1 (Monday) through 7 (Sunday)\n weekDays = (1, 2, 3, 4, 5, 6, 7)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 12:\n # Numbers 1 (Tuesday) through 7 
(Monday)\n weekDays = (7, 1, 2, 3, 4, 5, 6)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 13:\n # Numbers 1 (Wednesday) through 7 (Tuesday)\n weekDays = (6, 7, 1, 2, 3, 4, 5)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 14:\n # Numbers 1 (Thursday) through 7 (Wednesday)\n weekDays = (5, 6, 7, 1, 2, 3, 4)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 15:\n # Numbers 1 (Friday) through 7 (Thursday)\n weekDays = (4, 5, 6, 7, 1, 2, 3)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 16:\n # Numbers 1 (Saturday) through 7 (Friday)\n weekDays = (3, 4, 5, 6, 7, 1, 2)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 17:\n # Numbers 1 (Sunday) through 7 (Saturday)\n weekDays = (2, 3, 4, 5, 6, 7, 1)\n return weekDays[date.weekday()]\n\n else:\n raise xlerrors.NumExcelError(\n f\"return_type needs to be omitted or one of 1, 2, 3, 11, 12, 13,\\\n 14, 15, 16 or 17. You supplied {return_type}\")", "def gen_weeklyFrequency(self):\n\n if len(self.fields) == 0:\n return None\n\n if self.validator.validate(self.fields) == False:\n return None\n\n weeklyFrequency = 0\n dayFields = ['day1','day2','day3','day4','day5','day6','day7']\n for dayField in dayFields:\n if dayField in self.fields:\n if self.fields[dayField] == True:\n weeklyFrequency += 1\n\n return weeklyFrequency", "def week_start_on_monday(weekday):\n return (weekday - 1 + 6) % 7 + 1", "def weekday(self):\n if self.month is not None and self.day is not None:\n return self.todate().weekday()\n else:\n return None", "def get_weekday_number(date):\n return date.strftime('%w')", "def get_current_day_week_number(week_delta=0):\n return (datetime.today() + timedelta(weeks=week_delta)).isocalendar()[1]", "def wkday_on_first(yr, mon): # returns day of week of first of month of the given year (1/1/2016)\r\n TotalDays = 0\r\n for x in range(1754, yr):\r\n YearNum = yeardays(x)\r\n TotalDays += YearNum\r\n for x in range(1, mon):\r\n MonNum = monthdays(yr, x)\r\n TotalDays += MonNum\r\n WhatDayNum = TotalDays % 7\r\n WhatDay = [\"Tues\", \"Wedn\", \"Thu\", \"Fri\", \"Sat\", \"Mon\"]\r\n return WhatDay[WhatDayNum]", "def get_today_week_number(self):\n\n today = date.today()\n iso_result = today.isocalendar()\n return iso_result[1]", "def day_of_week(self) -> str:\n return self.elements[4]", "def day_of_week(self) -> pulumi.Input[Union[str, 'WeekDay']]:\n return pulumi.get(self, \"day_of_week\")", "def day_of_week(self) -> pulumi.Input[Union[str, 'WeekDay']]:\n return pulumi.get(self, \"day_of_week\")", "def weekday(self, dt):\n days = {\n 0: self.MONDAY,\n 1: self.TUESDAY,\n 2: self.WEDNESDAY,\n 3: self.THURSDAY,\n 4: self.FRIDAY,\n 5: self.SATURDAY,\n 6: self.SUNDAY\n }\n return days.get(dt.weekday())", "def dayofweek(self) -> Index:\n warnings.warn(\n \"`dayofweek` will return int32 index instead of int 64 index in 4.0.0.\",\n FutureWarning,\n )\n return Index(self.to_series().dt.dayofweek)" ]
[ "0.7634603", "0.72620153", "0.71937007", "0.7100606", "0.70940375", "0.70748067", "0.70052755", "0.696908", "0.696908", "0.686782", "0.68471444", "0.68352985", "0.6781707", "0.6761986", "0.67520535", "0.6745715", "0.6545274", "0.6521297", "0.65017974", "0.64915496", "0.642778", "0.6398245", "0.6383246", "0.6364287", "0.63282543", "0.6304229", "0.62422764", "0.62422764", "0.62358934", "0.6190761" ]
0.83789855
0
Returns invoiceitems for each product
def get_invoiceitems(self):
    from invoicing.models import InvoiceItem

    invoiceitem_list = []
    # First we get all the product invoiceitems
    for product in self.products:
        # TODO: SOLVE BUNDLED PRODUCTS!
        item = InvoiceItem()
        # Get the copies for this product, when used on with_copies
        item.copies = product[1]
        # Add the amount of frequency if necessary
        frequency_extra = (
            _(" {} months".format(self.frequency)) if self.frequency > 1 else ""
        )
        item.description = product[0].name + frequency_extra
        item.price = product[0].price * self.frequency
        item.amount = item.price * item.copies
        item.product = product[0]
        item.subscription = self
        # TODO: Service from, service to
        invoiceitem_list.append(item)
    # Next, we append all discount invoiceitems
    for discount in self.get_discounts():
        discount_item = InvoiceItem()
        # Add the amount of frequency if necessary
        frequency_extra = (
            _(" {} months".format(self.frequency)) if self.frequency > 1 else ""
        )
        discount_item.description = discount["description"] + frequency_extra
        discount_item.amount = discount["amount"] * self.frequency
        discount_item.type_dr = discount["type_dr"]
        discount_item.type = discount["type"]
        discount_item.subscription = self
        invoiceitem_list.append(discount_item)
    return invoiceitem_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def invoiceitems(self):\r\n return InvoiceItems(self)", "def return_items(self):\n cur = self.cursor\n cur.execute(f\"SELECT * FROM {self.product_name}\")\n products = cur.fetchall()\n return products", "def get_products(self):\n page = 1\n out = []\n while True:\n resp = self.get_session().Product.find(limit=10,page=page)\n if not len(resp):\n return\n yield resp\n page += 1", "def products(self):\r\n return self._products", "def items(self):\n return zip(self.products, self.yields)", "def fill_item_list(self):\n return_list = []\n with Transaction().start(DBNAME, 1):\n self.productlist = self.Product.search([('description', '=', 'Stock'), ('type', '=', 'goods')])\n for i in self.productlist:\n return_list.append(i.template.name)\n return return_list", "def get_all_products(self):\n\t\tpass", "def __iter__(self):\n product_ids = self.basket.keys()\n products = Product.products.filter(id__in=product_ids)\n basket = self.basket.copy()\n\n for product in products:\n basket[str(product.id)]['product'] = product\n\n for item in basket.values():\n item['price'] = Decimal(item['price'])\n item['total_price'] = item['price'] * item['qty']\n yield item", "def invoice_items(self,org_id=None,query={}):\n if org_id is None:\n org_id = self.org_id\n query_end_date = datetime.strptime(query['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n # Given a 'query_end_date' to find the invoice containing the\n # line items for that date we need to find the invoice which \n # has 'endDate' equal to the end of the month of the `query_end_date`\n query_first_next_month = query_end_date + relativedelta(months=+1) - relativedelta(days=(query_end_date.day-1))\n target_invoices = []\n invoices = self.invoices(org_id)\n if self.verbose:\n print('Searching invoices org_id={}'.format(org_id))\n print('query={} query_end_date={}'.format(query,query_end_date))\n print('Result keys: {}'.format( invoices['content'].keys() ))\n print('Total result count: {}'.format( invoices['content']['totalCount'] ))\n for invoice in invoices['content']['results']:\n #pprint.pprint(invoice)\n end_date = datetime.strptime(invoice['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n if self.verbose: \n print('invoice({})[\\'endDate\\']={} end_date={}'.format(invoice['id'],invoice['endDate'],end_date))\n if end_date == query_first_next_month:\n target_invoices.append(invoice)\n \n if self.verbose: \n print('Target invoices: {}'.format(target_invoices))\n \n\n target_line_items = []\n for invoice in target_invoices:\n invoice_details = self.invoices(org_id,invoice['id']) \n print('invoice_details: {}'.format(invoice_details))\n for item in invoice_details['content']['lineItems']:\n end_date = datetime.strptime(item['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n if end_date == query_end_date:\n target_line_items.append(item)\n if self.verbose:\n print('target_line_items: {}'.format(target_line_items)) \n return target_line_items", "def invoices(self):\r\n return inv.Invoices(self)", "def test_get_invoice_items(self):\n invoice = Invoice(self.client, 123456)\n items = invoice.items\n\n self.assertEqual(len(items), 1)\n item = items[0]\n\n self.assertEqual(item.label, \"Linode 2048 - Example\")\n self.assertEqual(item.type, \"hourly\")\n self.assertEqual(item.amount, 9.51)\n self.assertEqual(item.quantity, 317)\n self.assertEqual(item.unit_price, \"0.03\")\n self.assertEqual(item.from_date, datetime(year=2014, month=12, day=19, hour=0, minute=27, second=2))\n self.assertEqual(item.to_date, datetime(year=2015, month=1, day=1, hour=4, minute=59, second=59))", "def 
test_get_invoice_items(self):\n invoice = Invoice(self.client, 123456)\n items = invoice.items\n\n self.assertEqual(len(items), 1)\n item = items[0]\n\n self.assertEqual(item.label, \"Linode 2048 - Example\")\n self.assertEqual(item.type, \"hourly\")\n self.assertEqual(item.amount, 9.51)\n self.assertEqual(item.quantity, 317)\n self.assertEqual(item.unit_price, \"0.03\")\n self.assertEqual(\n item.from_date,\n datetime(year=2014, month=12, day=19, hour=0, minute=27, second=2),\n )\n self.assertEqual(\n item.to_date,\n datetime(year=2015, month=1, day=1, hour=4, minute=59, second=59),\n )", "def __iter__(self):\n product_ids = self.cart.keys()\n # get the product objects and add them to the cart\n products = Product.objects.filter(id__in=product_ids)\n\n cart = self.cart.copy()\n for product in products:\n cart[str(product.id)]['product'] = product\n\n for item in cart.values():\n item['price'] = Decimal(item['price'])\n if item['duration']!=None:\n item['total_price'] = Decimal(item['price']) * item['quantity'] * Decimal(item['duration'])\n else:\n item['total_price'] = Decimal(item['price']) * item['quantity']\n yield item", "def get_all_products():\n data = order_obj.get_all_products()\n return data", "def __iter__(self): \n item_ids = self.cart.keys()\n\n # getting product objects and adding them to the cart\n items = Item.objects.filter(id__in=item_ids)\n for item in items:\n self.cart[str(item.id)]['item'] = item\n # iterating over the cart items and convert the item prices back to the decimal adding a total price attribute to each item\n for item in self.cart.values():\n item['price'] = Decimal(item['price'])\n item['total_price'] = item['price'] * item['quantity']\n yield item", "def __iter__(self):\n #gets product data keys e.g price, quantity\n product_ids = self.cart.keys()\n\n #checks if the product exist in the database by filtering by product_ids\n products = Product.objects.filter(id__in=product_ids)\n cart = self.cart.copy()\n\n #loop through the products 1 by 1 and re-assigns them to the product.id in the cart\n for product in products:\n cart[str(product.id)][\"product\"] = product\n\n # get price and quatity of items and mutiplies price by quantity to get total price of items\n for item in cart.values():\n item[\"price\"] = Decimal(item[\"price\"])\n item[\"total_price\"] = item[\"price\"] * item[\"qty\"]\n yield item", "def create_invoices(self, cr, uid, ids, context=None):\n invoice_list = []\n po_obj = self.pool.get('purchase.order')\n inv_line_obj = self.pool.get('account.invoice.line')\n inv_obj = self.pool.get('account.invoice')\n addr_obj = self.pool.get('res.partner')\n journal_obj = self.pool.get('account.journal')\n if context is None:\n context = {}\n\n for purchase_adv_obj in self.browse(cr, uid, ids, context=context):\n for purchase_order in po_obj.browse(cr, uid, context.get('active_ids', []), context=context):\n inv_line_ids = []\n invoice_ids = []\n val = inv_line_obj.product_id_change(cr, uid, [], purchase_adv_obj.product_id.id,\n uom_id=False, partner_id=purchase_order.partner_id.id, fposition_id=purchase_order.fiscal_position.id)\n line_id = inv_line_obj.create(cr, uid, {\n 'name': val['value']['name'],\n 'account_id': val['value']['account_id'],\n 'price_unit': purchase_adv_obj.amount,\n 'quantity': purchase_adv_obj.qtty,\n 'discount': False,\n 'uos_id': val['value']['uos_id'],\n 'product_id': purchase_adv_obj.product_id.id,\n 'invoice_line_tax_id': [(6, 0, val['value']['invoice_line_tax_id'])],\n })\n inv_line_ids.append(line_id)\n addr = 
addr_obj.address_get(cr, uid, [purchase_order.partner_id.id], ['invoice'])\n journal_ids = journal_obj.search(cr, uid, [('type', '=', 'purchase')])\n context.update({'type':'in_invoice','journal_type':'purchase'})\n inv_vals = {\n 'name': purchase_order.partner_ref or purchase_order.name,\n 'origin': purchase_order.name,\n 'type': 'in_invoice',\n 'reference': False,\n 'account_id': purchase_order.partner_id.property_account_payable.id,\n 'journal_id':journal_ids and journal_ids[0] or False,\n 'partner_id': purchase_order.partner_id.id,\n 'address_invoice_id': addr['invoice'],\n 'invoice_line': [(6, 0, inv_line_ids)],\n 'currency_id': purchase_order.pricelist_id.currency_id.id,\n 'comment': '',\n 'payment_term': purchase_order.payment_term_id and purchase_order.payment_term_id.id or False,\n 'fiscal_position': purchase_order.fiscal_position.id or purchase_order.partner_id.property_account_position.id,\n 'prepaid': True\n }\n\n inv_id = inv_obj.create(cr, uid, inv_vals, context=context)\n inv_obj.button_reset_taxes(cr, uid, [inv_id], context=context)\n for invoice in purchase_order.invoice_ids:\n invoice_ids.append(invoice.id)\n invoice_ids.append(inv_id)\n po_obj.write(cr, uid, purchase_order.id, {'invoice_ids': [(6, 0, invoice_ids)]})\n invoice_list.append(inv_id)\n\n if purchase_order.invoice_method in ('picking','order'):\n self.pool.get('purchase.order.line').create(cr, uid, {\n 'order_id': purchase_order.id,\n 'name': val['value']['name'],\n 'date_planned':purchase_order.date_order,\n 'price_unit': -purchase_adv_obj.amount,\n 'product_uom_qty': purchase_adv_obj.qtty,\n 'product_uos': val['value']['uos_id'],\n 'product_uom': val['value']['uos_id'],\n 'product_id': purchase_adv_obj.product_id.id,\n 'adavance_product':True,\n 'discount': False,\n 'taxes_id': [(6, 0, val['value']['invoice_line_tax_id'])],\n }, context=context)\n\n\n context.update({'invoice_id':invoice_list})\n return {\n 'name': 'Open Invoice',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'purchase.open.invoice',\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n 'context': context\n }", "def __iter__(self):\n return self._products.__iter__()", "def products(self):\n return self._products", "def products(self):\n response = requests.get(self._url(self._PRODUCTS_PATH), headers=self._headers)\n return response.json()", "def products(self):\r\n return products.Products(self)", "def __iter__(self):\n conta_ids = self.cart.keys()\n # get the conta objects and add them to the cart\n contas = Conta.objects.filter(id__in=conta_ids)\n for conta in contas:\n self.cart[str(conta.id)]['conta'] = conta\n\n for item in self.cart.values():\n item['conta'] = item['conta']\n item['valor'] = item['valor']\n item['d_c'] = item['d_c']\n item['codigo_historico'] = item['codigo_historico']\n item['historico'] = item['historico']\n yield item", "def __iter__(self):\n ids_productos = self.carro.keys()\n #obtiene los objetos producto y los agrega al carro\n productos = Producto.objects.filter(id__in=ids_productos)\n for producto in productos:\n self.carro[str(producto.id)]['producto'] = producto\n\n for item in self.carro.values():\n item['precio']=Decimal(item['precio'])\n item['precio_total'] = item['precio']*item['cantidad']\n yield item", "def get_products(self):\n return [item.code for item in self._products]", "def order_products(self, obj):\n table = \"\"\"<table id=\"result_list\">\n <thead>\n <tr>\n <th scope=\"col\">\n <div class=\"text\"><span>Product ID</span></div>\n <div class=\"clear\"></div>\n </th>\n <th 
scope=\"col\">\n <div class=\"text\"><span>Product Name</span></div>\n <div class=\"clear\"></div>\n </th>\n <th scope=\"col\">\n <div class=\"text\"><span>Quantity</span></div>\n <div class=\"clear\"></div>\n </th>\n <th scope=\"col\">\n <div class=\"text\"><span>Price</span></div>\n <div class=\"clear\"></div>\n </th>\n </tr>\n </thead>\n <tbody>\"\"\"\n for order_item in obj.order_items.all():\n table += f\"\"\"<tr>\n <td class=\"field-id\">{order_item.product.id}</td>\n <td class=\"field-name\">{order_item.product.name}</td>\n <td class=\"field-quantity\">{order_item.quantity}</td>\n <td class=\"field-price\">{order_item.price}</td>\n </tr>\"\"\"\n table += \"</tbody></table>\"\n return format_html(table)", "def invoices(self):\r\n return Invoices(self)", "def merge_purchase_invoice(self):\r\n active_id = self.env['purchase.order'].browse(self.env['purchase.order']._context.get('active_ids'))\r\n journal_id = self.env['account.journal'].search([('type', '=', 'purchase')]) \r\n active_id_count = 0\r\n active_count = 0\r\n exist_vendor = []; invoice = [];exist_vendors = [];ctx = ();invoice_id = []\r\n for rec in active_id : \r\n po_reference = self.env['account.invoice'].search([('origin', 'like', rec.name)])\r\n active_count = len(active_id)\r\n if rec.picking_count >= 1 and rec.picking_count != rec.invoice_count:\r\n len_name = [] \r\n for inv in po_reference: \r\n len_name = inv.origin.split(\":\") \r\n if rec.name in len_name:\r\n if po_reference.state == 'draft':\r\n for record in po_reference.invoice_line_ids:\r\n print (record.line_id)\r\n for res in rec.order_line:\r\n if res.id == record.line_id: \r\n record.write({'quantity':res.qty_received})\r\n res.write({'qty_invoiced':record.quantity})\r\n \r\n else:\r\n \r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received \r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 
'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':datetime.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n else:\r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':date.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n invoices = []\r\n invoice_counts = 0\r\n for record in invoice:\r\n invoice_id = self.env['account.invoice'].create(record)\r\n invoices.append(invoice_id.id)\r\n invoice_counts = len(invoices)\r\n if active_id_count == 1:\r\n if invoice_counts == 1:\r\n form_view = self.env.ref('purchase.view_invoice_supplier_purchase_form').id\r\n tree_view = self.env.ref('account.invoice_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'res_id':invoices[0],\r\n 'views_id':False,\r\n 'views':[(form_view , 'form'), (tree_view , 'tree')],\r\n 'domain':[('id', 'in', invoices)],\r\n 'target': 'current',\r\n } \r\n else: \r\n form_view = self.env.ref('account.invoice_supplier_form').id\r\n tree_view = self.env.ref('account.invoice_supplier_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'views_id':True,\r\n 'views':[(tree_view , 'tree'), (form_view , 'form')],\r\n 'domain':[('id', 'in', 
invoices)],\r\n 'target': 'current',\r\n }", "def fetch_all_products():\n products = []\n client = ProductsClient()\n for product in client.get_products():\n products.append(Product(\n base_currency=product[0],\n quote_currency=product[1],\n ))\n return products", "def get_products(self):\n con = dbcon()\n cur = con.cursor()\n cur.execute(\"SELECT * FROM products;\")\n res = cur.fetchall()\n if res:\n prdcts=[]\n for prodct_item in res:\n picked_prdct = {\n 'product_id':prodct_item[0],\n 'product_name':prodct_item[1],\n 'price':prodct_item[2],\n 'quantity':prodct_item[3]\n }\n prdcts.append(picked_prdct)\n return jsonify({\"Products\": prdcts}), 200\n return jsonify({\"message\":\"No products in store\"})", "def obtener_productos():\n\n # Se crea la lista de objetos Producto()\n productos = [\n Producto(\"Caja chica\", 5, 100.0),\n Producto(\"Caja mediana\", 3, 185.0),\n Producto(\"Caja grande\", 1, 299.0)\n ]\n\n return productos" ]
[ "0.70017594", "0.66855896", "0.65207624", "0.6497684", "0.64442295", "0.64148694", "0.6395297", "0.63928103", "0.63620645", "0.63393635", "0.63169175", "0.63153607", "0.6256051", "0.6250157", "0.62497675", "0.62283736", "0.6215448", "0.61937636", "0.61726075", "0.6098233", "0.6056957", "0.596227", "0.5955847", "0.59072196", "0.5888191", "0.5885584", "0.5879179", "0.5869921", "0.5843931", "0.5821389" ]
0.7768776
0
Takes each product for this subscription and returns a list with the copies for each.
def product_summary(self):
    # products = self.products.filter(type='S')
    # TODO: explain the usage of this commented line or remove it
    from .utils import process_products

    subscription_products = SubscriptionProduct.objects.filter(subscription=self)
    dict_all_products = {}
    for sp in subscription_products:
        dict_all_products[str(sp.product.id)] = str(sp.copies)
    return process_products(dict_all_products)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ListProducts(self):\n return copy.deepcopy(self._products)", "def sum_copies_per_product(self, product=None, new=False):\n if product is None:\n subprods = SubscriptionProduct.objects.filter(route=self, subscription__active=True)\n else:\n subprods = SubscriptionProduct.objects.filter(route=self, product=product, subscription__active=True)\n if new:\n subprods = subprods.filter(subscription__start_date__gte=date.today() - timedelta(7))\n subprods = subprods.aggregate(Sum('copies'))\n return subprods['copies__sum']", "def products(self):\r\n return self._products", "def products(self):\n return list(Product.select())", "def products(self):\n return self._products", "def get_all_products(self):\n\t\tpass", "def get_products(self, adi):\r\n obj = None\r\n if self.from_copy:\r\n with open(self.products_copy.format(adi), encoding='utf-8') as f:\r\n obj = json.load(f)\r\n return obj\r\n else:\r\n return self.rf.get_products(self.urls[adi])", "def sum_promos_per_product(self, product=None):\n if product is None:\n subprods = SubscriptionProduct.objects.filter(\n route=self, subscription__active=True, subscription__type='P').aggregate(Sum('copies'))\n else:\n subprods = SubscriptionProduct.objects.filter(\n route=self, product=product, subscription__active=True, subscription__type='P').aggregate(Sum('copies'))\n return subprods['copies__sum']", "def source_products(self, uuid):\n return self._backend.source_products(uuid)", "def __iter__(self):\n return self._products.__iter__()", "def get_substitutable_products(self):\n\n while True:\n clean_terminal()\n products = self.database_manager.get_substitutable_products()\n\n if not products:\n cprint('Aucun resultat.', 'red')\n return\n\n print('Choisir un produit :')\n\n range_param = self.print_products_line(products)\n\n reply = self.ask_with_input('Choisir un numéro'\n ' (tapez \"quit\" pour quitter) : ',\n range_param, ('quit',))\n\n if reply == 'quit':\n break\n\n product_number = int(reply)\n product_number -= 1\n product = products[product_number]\n\n operateur_result = []\n self.database_manager.fill_list_with_product_and_substitutes(\n product.get('id'), operateur_result)\n # print product and his substitutes in the terminal\n self.printer(operateur_result)\n\n self.ask_with_input('Ok ? 
(y) ', -1, ('y',))", "def fetch_all_products():\n products = []\n client = ProductsClient()\n for product in client.get_products():\n products.append(Product(\n base_currency=product[0],\n quote_currency=product[1],\n ))\n return products", "def products(self):\n response = requests.get(self._url(self._PRODUCTS_PATH), headers=self._headers)\n return response.json()", "def products(self):\r\n return products.Products(self)", "def add_products(self, products):\n return [self.add_product(product) for product in set(products)]", "def get_all_products():\n data = order_obj.get_all_products()\n return data", "def getListOfProducts(self, *args):\n return _libsbml.Reaction_getListOfProducts(self, *args)", "def list_products(self):\n return self._make_get_request(self._urls['products'])", "def get(self):\n copies = db.session.query(models.Copy)\n return [copy.serialize() for copy in copies], 200", "def get_product_list_async(self, observations):\n\n # getting the obsid list\n if type(observations) == Row:\n observations = observations[\"obsid\"]\n if np.isscalar(observations):\n observations = [observations]\n if type(observations) == Table:\n observations = observations['obsid']\n\n service = 'Mast.Caom.Products'\n params = {'obsid': ','.join(observations)}\n\n return self.service_request_async(service, params)", "def list_all_products():\n all_products = []\n max_pages = 1000\n page_num = 1\n while page_num < max_pages:\n\n param = {\n 'per_page': 100,\n 'page': page_num,\n }\n rs_api = woo_request_helper().get_details(wc_endpoint='products', params=param)\n\n if rs_api:\n page_num += 1\n all_products.extend(rs_api)\n else:\n break\n\n return all_products", "def get_products(self):\n return [item.code for item in self._products]", "def derived_products(self, uuid):\n return self._backend.derived_products(uuid)", "def get_products(self):\n page = 1\n out = []\n while True:\n resp = self.get_session().Product.find(limit=10,page=page)\n if not len(resp):\n return\n yield resp\n page += 1", "def get_products(self):\n formatted_products = []\n resp = woo_api\n for product in resp:\n formatted = self._format_product(product)\n formatted_products.append(formatted)\n return formatted_products", "def _product(self, args):\n pools = map(tuple, args) #within original version args defined as *args\n result = [[]]\n for pool in pools:\n result = [x + [y] for x in result for y in pool]\n return result", "def _get_products_in_subscription(self, subscription):\n path = 'katello/api/v2/subscriptions/{}'.format(subscription.id)\n subscription_json = satellite_get_response(path)\n name_dict = dict(\n (\n prod_json['name'],\n satellite_json_to_entity(prod_json, nailgun.entities.Product)\n )\n for prod_json in subscription_json['provided_products']\n )\n return name_dict", "def get_queryset(self):\n # distinct is needed to prevent multiple instances of product in resultset if multiple subscriptions are present\n return self.model.objects.filter(subscription__owner=self.request.user).distinct()", "def items(self):\n return zip(self.products, self.yields)", "def get_products(self, query_args={}):\n endpoint = '/v3/educator/products'\n result = self.request(endpoint, query_args)\n\n products = []\n for data in result.response:\n # Dynamically load product instance.\n class_name = data.type.capitalize()\n product = Product.instance(class_name, data)\n products.append(product)\n\n return products" ]
[ "0.66389644", "0.6372359", "0.6192775", "0.6183691", "0.6012321", "0.6008919", "0.5960865", "0.59557086", "0.591539", "0.58433527", "0.5830497", "0.5779049", "0.5690014", "0.5670402", "0.56616014", "0.5647406", "0.5637686", "0.56343627", "0.56197554", "0.55755746", "0.5539333", "0.55335295", "0.5523736", "0.5517175", "0.5490318", "0.5488143", "0.5462505", "0.54570335", "0.5433788", "0.5424379" ]
0.67118526
0
Returns two values, one for the start and one for the end of the period that's going to be paid on this subscription.
def get_current_period(self):
    if not self.next_billing:
        return None
    assert self.type == "N", _("Subscription must be normal to use this method")
    start = self.next_billing - relativedelta(months=self.frequency)
    end = self.next_billing
    return start, end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def amount_to_pay_in_period(self):\n assert self.type == \"N\", _(\"Subscription must be normal to use this method\")\n period_start, period_end = self.get_current_period()\n price_per_day = (\n self.get_price_for_full_period() / (period_end - period_start).days\n )\n days_not_used = 30 * self.frequency - (date.today() - period_start).days\n return int(price_per_day * days_not_used)", "def billing_period_start(self): # ISO8601 or timestamp\n return self._safe_value(VAR_BILLINGPERIODSTART, int)", "def paid(self):\n try:\n start_date = self.member_membership.start_date\n except AttributeError:\n # not available\n start_date = None\n try:\n paid_date = self.member_membership.membership_paid\n except AttributeError:\n # not available\n paid_date = None\n if start_date:\n T = current.T\n PAID = T(\"paid\")\n OVERDUE = T(\"overdue\")\n LAPSED = T(\"expired\")\n lapsed = datetime.timedelta(days=183) # 6 months\n\n now = current.request.utcnow.date()\n now_month = now.month\n start_month = start_date.month\n if now_month > start_month:\n due = datetime.date(now.year, start_month, start_date.day)\n elif now_month == start_month:\n now_day = now.day\n start_day = start_date.day\n if now_day > start_day:\n due = datetime.date(now.year, start_month, start_day)\n else:\n due = datetime.date((now.year - 1), start_month, start_date.day)\n\n if not paid_date:\n # Never paid\n if (now - due) > lapsed:\n return LAPSED\n else:\n return OVERDUE\n\n if paid_date > due:\n return PAID\n elif (due - paid_date) > lapsed:\n return LAPSED\n else:\n return OVERDUE\n\n return current.messages.NONE", "def period(self):\n return self.__period", "def _getTimePeriod(self):\n if isinstance(self.period, tuple):\n period = self.soapCustomDateRange % \\\n (self.soapCustomDate % (self.period[1].day,\n self.period[1].month,\n self.period[1].year),\n self.soapCustomDate % (self.period[0].day,\n self.period[0].month,\n self.period[0].year))\n else:\n period = self.soapPredefinedTime % self.period\n self.logger.debug(\"period = %s\", period)\n return period", "def get_chart_period(self,req):\n now=int(DATE())\n period=INT(req.period) # allow for it having been a string\n if period>9999: # assume it is a month\n if period<(now//100): # a valid complete previous month\n prior=True# this is a previous month\n else:\n period=now//100 # default to current month\n prior=False\n start=period*100+1\n end=self.nextperiod(period)*100+1\n else: # assume it is a year\n if period and (period<(now//10000)): # a prior year\n prior=True# this is a previous year\n else:\n##\n# period=now//100 # default to current month\n# prior=False\n# start=period*100+1\n# end=self.nextperiod(period)*100+1\n##\n period=now//10000 # default to current year\n prior=False\n start=period*10000+101\n end=self.nextperiod(period)*10000+101\n return period,start,end,prior", "def getPeriod(self):\n return StripePeriod(self.base.get(\"period\", []))", "def get_period_range(self, period, start, end, inclusive_start=True, inclusive_end=True):\n if not isinstance(start, datetime.datetime):\n start = self.get_date_from_string(start, '%Y-%m-%d')\n if not isinstance(end, datetime.datetime):\n end = self.get_date_from_string(end, '%Y-%m-%d')\n\n if period == 'month':\n get_period = self.get_current_month_range\n get_next_period = self.get_next_month\n get_previous_period = self.get_previous_month\n if period == 'week':\n get_period = self.get_current_week_range\n get_next_period = self.get_next_week\n get_previous_period = self.get_previous_week\n\n #####################\n # 
inclusive_start means that the result set will include the whole period\n # containing the start date. Likewise for inclusive_end.\n #\n # If you are, say, reporting on a 'last completed month' or something,\n # but your report date (and end date) is mid-month or something, then setting 'inclusive_end'\n # to False will insure that the report ends with the month prior to the\n # end date.\n #\n # If you're doing projections starting with the month following the one\n # you're in, setting inclusive_start to False will insure that the first\n # period in the range is the one *after* the period you're in now.\n #######################\n if not inclusive_start:\n start = get_next_period(start)[0]\n if not inclusive_end:\n end = get_previous_period(end)[1]\n\n returnvals = []\n\n\n firstper = get_period(start)\n returnvals.append(firstper)\n per = firstper\n while per[1] < end:\n # goes as long as the *end* of the period is < our end date.\n # the intent is that if end is 2010-10-04, the last period will be\n # (2010-10-01, 2010-10-31)\n per = get_next_period(per[1])\n returnvals.append(per)\n\n return returnvals", "def __get_period(self):\n return self.__period", "def getBeginEnd(self):\n if (self.dr_type == choices.DATE_RANGE_TYPE_FIXED):\n return self.begin, self.end\n\n elif (self.dr_type == choices.DATE_RANGE_TYPE_VARIABLE):\n end = datetime.now()\n\n if (self.unit == choices.TIME_UNIT_DAY):\n begin = end - relativedelta(days=self.quantity)\n\n elif (self.unit == choices.TIME_UNIT_WEEK):\n begin = end - relativedelta(weeks=self.quantity)\n\n elif (self.unit == choices.TIME_UNIT_MONTH):\n begin = end - relativedelta(months=self.quantity)\n\n elif (self.unit == choices.TIME_UNIT_YEAR):\n begin = end - relativedelta(years=self.quantity)\n\n else:\n # This case should not happen\n raise Exception(\"A DateRange object's 'unit' must be a numeric\"\n \" value in: {units}.\".format(units=\", \".join([\n \"{const} ({name})\".format(const=unit, name=unit_name)\n for unit, unit_name in choices.TIME_UNIT\n if unit is not None]))\n )\n\n return begin, end\n\n else:\n # This case should not happen\n raise Exception(\"A DateRange object's 'dr_type' must be one of:\"\n \" {const_fixed} (fixed range) or {const_dynamic}\"\n \" (dynamic range).\".format(\n const_fixed=choices.DATE_RANGE_TYPE_FIXED,\n const_dynamic=choices.DATE_RANGE_TYPE_VARIABLE\n ))", "def period(self) -> int:", "def get_interval(self):\n return self._period", "def current_period(self):\n return self._current_period", "def get_subscription(self):\n if not hasattr(self, '_subscription'):\n self._subscription = self.admin.subscriptions.select_related('plan').get_overlapping(\n self.admin_id, DateRange(self.period, self.period_end, bounds='[]'))\n return self._subscription", "def get_end(self):\n return self.start + timedelta(minutes=self.duration)", "def get_first_period(start_record, end_record, start_period, end_period):\n start_record, end_record, start_period, end_period = to_datetime(start_record, end_record, start_period, end_period)\n pspan = end_period - start_period\n delta_year = relativedelta(years=1)\n # what is the first day of year of the start of the period that fits the record?\n start_rec_year = start_record.year\n d = datetime(start_rec_year, start_period.month, start_period.day)\n if d < start_record:\n d = d + delta_year\n delta_years = start_period.year - d.year\n e = end_period + relativedelta(years=-delta_years)\n return (d, e)", "def paid(self):\n return self.get('paid')", "def get_end(self):\n return self._start + 
self._duration", "def calculate_provision_start_end(trades, instrument, portfolio_swap,\n start_date, end_date, warehousing_type='Daily'):\n \n start_provision = GetProvision(instrument, portfolio_swap, start_date)\n LOGGER.debug(\"Start provision '%s': %s\", instrument.Name(), start_provision)\n \n end_provision = 0.0\n today = acm.Time.DateToday()\n\n if today == end_date and not hist_valuation():\n for trade in trades.AsList():\n funding_instrument = trade.Portfolio().AdditionalInfo().PS_FundingIns()\n if funding_instrument != portfolio_swap:\n continue # Trade doesn't belong to the processed portfolio swap.\n end_provision += calculate(trade)\n else:\n LOGGER.debug(\"Historical valuation. Using PSwap to retrieve provision: '%s'\", portfolio_swap.Name())\n end_provision = GetProvision(instrument, portfolio_swap, end_date)\n \n LOGGER.debug(\"End provision '%s': %s\", instrument.Name(), end_provision)\n \n provision = end_provision - start_provision\n return provision", "def billing_period(self) -> Optional[str]:\n return pulumi.get(self, \"billing_period\")", "def amount_already_paid_in_period(self):\n assert self.type == \"N\", _(\"Subscription must be normal to use this method\")\n period_start, period_end = self.get_current_period()\n price_per_day = (\n self.get_price_for_full_period() / (period_end - period_start).days\n )\n days_already_used = (date.today() - period_start).days\n amount = int(price_per_day * days_already_used)\n if amount > self.get_price_for_full_period():\n amount = self.get_price_for_full_period()\n if amount < 0:\n amount = 0\n return amount", "def get_payoffs(self):\n raise NotImplementedError", "def _get_prorata_interval_rate(self, cr, uid, change_date, context=None):\n month_days = calendar.monthrange(change_date.year,\n change_date.month)[1]\n start_date = add_months(change_date, 1)\n end_date = start_date.replace(day=month_days)\n used_days = month_days - change_date.day\n ptx = self._prorata_rate(used_days, month_days)\n\n return start_date, end_date, ptx", "def renewal_period(self) -> Optional[float]:\n return pulumi.get(self, \"renewal_period\")", "def period_dates(period):\n end = date.today() - timedelta(days=1) # yesterday\n\n if period == LAST_7_DAYS:\n start = end - timedelta(days=7)\n elif period == LAST_30_DAYS:\n start = end - timedelta(days=30)\n elif period == LAST_90_DAYS:\n start = end - timedelta(days=90)\n elif ALL_TIME:\n start = settings.GA_START_DATE\n\n return start, end", "def quantaHandledByPeriod(self, period):\n\n start_datetime = self.start_time.to_python_datetime()\n end_datetime = self.end_time.to_python_datetime()\n\n total_quanta = 0\n\n # Iterate through the quanta of the period, while the starting_quanta is less\n # than the ending quanta\n\n quanta_start_time = period.start_time\n while quanta_start_time < period.end_time:\n quanta_end_time = quanta_start_time + timedelta(minutes=granularity_in_minutes())\n\n if start_datetime <= quanta_start_time < end_datetime:\n if start_datetime < quanta_end_time <= end_datetime:\n total_quanta = total_quanta + 1\n\n quanta_start_time = quanta_start_time + timedelta(minutes=granularity_in_minutes())\n\n return total_quanta", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")" ]
[ "0.6252857", "0.61863184", "0.6160721", "0.61561406", "0.60880554", "0.6087459", "0.6063793", "0.6032833", "0.6019768", "0.59712064", "0.59394836", "0.5904312", "0.57624906", "0.57588446", "0.5736884", "0.5725289", "0.56277066", "0.56106377", "0.5581673", "0.55783534", "0.5558268", "0.55368704", "0.5530863", "0.5527938", "0.552673", "0.55186164", "0.5500867", "0.5500867", "0.5500867", "0.5500867" ]
0.7761805
0
Divides the price of one period by the number of days (frequency) to get the price for one day of this subscription. Then multiplies the value of this single day by the number of days that have passed since the start of the period, giving as a result the amount that the customer has already paid. This is useful to add that amount as a discount for the next subscription when selling a new subscription to the customer, in case the new subscription price is greater than the old one.
def amount_already_paid_in_period(self):
    assert self.type == "N", _("Subscription must be normal to use this method")
    period_start, period_end = self.get_current_period()
    price_per_day = (
        self.get_price_for_full_period() / (period_end - period_start).days
    )
    days_already_used = (date.today() - period_start).days
    amount = int(price_per_day * days_already_used)
    if amount > self.get_price_for_full_period():
        amount = self.get_price_for_full_period()
    if amount < 0:
        amount = 0
    return amount
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def amount_to_pay_in_period(self):\n assert self.type == \"N\", _(\"Subscription must be normal to use this method\")\n period_start, period_end = self.get_current_period()\n price_per_day = (\n self.get_price_for_full_period() / (period_end - period_start).days\n )\n days_not_used = 30 * self.frequency - (date.today() - period_start).days\n return int(price_per_day * days_not_used)", "def get_price_for_full_period(self):\n from .utils import calc_price_from_products\n\n summary_of_products = self.product_summary()\n frequency = self.frequency\n price = calc_price_from_products(summary_of_products, frequency)\n return price", "def get_subscription_days(self):\n if self.recurring_unit == self.DAY:\n return self.recurring_period\n elif self.recurring_unit == self.WEEK:\n return self.recurring_period * self.WEEKDAYS\n elif self.recurring_unit == self.MONTH:\n return self.recurring_period * self.MONTHDAYS\n elif self.recurring_unit == self.YEAR:\n return self.recurring_period * self.YEARDAYS\n else:\n return 0", "def check_price(self):\n return self.day*self.price", "def discount(self, period):\n\t\treturn 1.0/compound(period)", "def calc_price_for_period(prev_price):\n result = []\n for i in range(1, N+1):\n price = prev_price + calc_price_delta(prev_price, i)\n prev_price = price\n result.append(price)\n return result", "def calc_total_price(price_per_day, date_from, date_to):\n date_from = datetime.strptime(date_from, '%Y-%m-%d')\n date_to = datetime.strptime(date_to, '%Y-%m-%d')\n n_days = date_to - date_from\n n_days = n_days.days + 1\n return price_per_day * n_days", "def compute_consumption_per_day(self):\n start = datetime.now()\n tz = pytz.timezone(self.env.user.tz)\n dt = pytz.utc.localize(start).astimezone(tz)\n schedule = self.env['ir.config_parameter'].get_param('calc_orderpoint.schedule', '0 6').split()\n run = False\n for begin, end in zip(schedule[::2], schedule[1::2]):\n if dt.hour >= int(begin) and dt.hour < int(end):\n run = True\n break\n if run:\n now = fields.Datetime.now()\n location_ids = eval(self.env['ir.config_parameter'].get_param('calc_orderpoint.location_ids', '[]'))\n limit = timedelta(minutes=float(self.env['ir.config_parameter'].get_param('calc_orderpoint.time_limit', '4')))\n _logger.warn('Starting compute_consumption_per_day.')\n products = self.env['product.template'].search(\n [\n '|',\n ('product_variant_ids.sale_ok', '=', True),\n ('sale_ok', '=', True),\n ('last_sales_count', '=', False),\n '|',\n ('earliest_sales_count', '=', False),\n ('earliest_sales_count', '<', now)\n ],\n limit=int(self.env['ir.config_parameter'].get_param(\n 'calc_orderpoint.product_limit', '30')))\n if not products:\n products = self.env['product.template'].search(\n [\n '|',\n ('product_variant_ids.sale_ok', '=', True),\n ('sale_ok', '=', True),\n '|',\n ('earliest_sales_count', '=', False),\n ('earliest_sales_count', '<', now)\n ],\n order='last_sales_count asc',\n limit=int(self.env['ir.config_parameter'].get_param(\n 'calc_orderpoint.product_limit', '30')))\n _logger.warn('Computing compute_consumption_per_day for the following products: %s' % products)\n for product in products:\n try:\n product._consumption_per_day()\n product.write({\n 'last_sales_count': fields.Datetime.now(),\n 'earliest_sales_count': False,\n })\n if (datetime.now() - start) > limit:\n break\n except:\n tb = traceback.format_exc()\n tomorrow = fields.Datetime.to_string(fields.Datetime.from_string(fields.Datetime.now()) + timedelta(1))\n subject = 'compute_consumption_per_day failed to compute %s 
(%s)' % (product.display_name, product.id)\n body = 'Earliest recompute attempt set to %s.\\n\\n%s' % (tomorrow, tb)\n _logger.warn('%s. %s' % (subject, body))\n product.earliest_sales_count = tomorrow\n product.message_post(body=body.replace('\\n', '<br/>'), subject=subject, type='notification')\n \n _logger.warn('Finished compute_consumption_per_day.')", "def get_current_period(self):\n if not self.next_billing:\n return None\n assert self.type == \"N\", _(\"Subscription must be normal to use this method\")\n start = self.next_billing - relativedelta(months=self.frequency)\n end = self.next_billing\n return start, end", "def calculate_next_payment(frequency, payment_date, payment_month):\r\n\tif frequency == 1 or frequency == 4: # weekly or four-weekly\r\n\t\tnext_payment = payment_date + timedelta(weeks=frequency)\r\n\telif frequency == 2: # monthly\r\n\t\tnext_payment = payment_date.replace(month=payment_month + 1)\r\n\telse:\r\n\t\tnext_payment = date(1, 1, 1)\r\n\r\n\tprint(\"Frequency : \" + str(frequency)) # testing\r\n\tprint(\"Payment date: \" + str(payment_date)) # testing\r\n\tprint(\"Next payment: \" + str(next_payment)) # testing\r\n\r\n\treturn next_payment", "def update_period(self):\n return 0.1", "def calcul_next(self):\n if not self.valide: # si c'est plus en fonction pas besoin de calcul complemnetaire\n return None\n initial = self.date\n if self.periodicite == 'u':\n return None\n finale = None\n if self.periodicite == 'j':\n finale = initial + datetime.timedelta(days=self.intervalle)\n if self.periodicite == 's':\n finale = initial + datetime.timedelta(weeks=self.intervalle)\n if self.periodicite == 'm':\n finale = initial + relativedelta(months=self.intervalle)\n if self.periodicite == 'a':\n finale = initial + relativedelta(years=self.intervalle)\n # on verifie que la date limite n'est pas dépasséee\n if self.date_limite is not None and finale > self.date_limite:\n finale = None\n return finale", "def __simulate_one_day__(self):\n self.compute()\n self.days.append(next(self.index))", "def period(self, value: int, /) -> None:", "def calculate_time_weighted_rate(self, origin_currency, amount, target_currency, date_invested=datetime.datetime.today()):\n rates = self.get_rates(date_from=date_invested, source_currency=origin_currency, exchanged_currency=target_currency)\n rates_values = []\n amount_values = []\n period_values = []\n amount = Decimal(amount)\n day_amount = amount # first day\n log.debug('Orig amount ' + str(day_amount))\n for rate in rates:\n log.debug('Comenzando con amount val ' + str(day_amount))\n rate_value = Decimal(rate.get('rate_value'))\n log.debug('Rate val ' + str(rate_value))\n rates_values.append(rate_value)\n day_value = Decimal(amount) * rate_value\n log.debug('Day val ' + str(day_value))\n amount_values.append(day_value)\n period_value = (day_value - day_amount) / day_amount\n log.debug('Period val ' + str(period_value))\n log.debug('Period val +1: =' + str(period_value + 1))\n day_amount = Decimal(day_value) # next day, the value is the past day\n period_values.append(period_value + 1)\n twr = reduce((lambda x, y: x * y), period_values) - 1\n log.debug('result con -1: =' + str(twr))\n # considerando un periodo en vez de todos los días\n last_value = Decimal(rates[-1].get('rate_value')) * amount\n twr_one_period = (last_value - amount) / amount\n log.debug('Resultado suponiendo un periodo en vez de cada dia: ' + str(twr_one_period))\n return str(twr)", "def period_payment(yearly_payments_percentage, client_cost_reduction,\n 
days_with_payments, days_for_discount_rate):\n\n yearly_payments_percentage = Fraction(str(yearly_payments_percentage))\n client_cost_reduction = Fraction(str(client_cost_reduction))\n\n if days_with_payments == 0:\n payments = Fraction(0)\n else:\n payments = Fraction(days_with_payments, days_for_discount_rate)\n return (yearly_payments_percentage * client_cost_reduction * payments)", "def get_next_day(self):\n pass", "def getNextDate(self, currentDate, startDate, repeat):\n\t\tif repeat.lower() == 'quarterly':\n\t\t\tupdatedDate = currentDate + relativedelta(months=3)\n\t\t\tupdatedDate = self.checkValidDate(updatedDate, startDate)\n\t\telif repeat.lower() == 'monthly':\n\t\t\tupdatedDate = currentDate + relativedelta(months=1)\n\t\t\tupdatedDate = self.checkValidDate(updatedDate, startDate)\n\t\telif repeat.lower() == 'weekly':\n\t\t\tupdatedDate = currentDate + relativedelta(weeks=1)\n\t\telif repeat.lower() == 'daily':\n\t\t\tupdatedDate = currentDate + relativedelta(days=1)\n\t\telse:\n\t\t\tupdatedDate = currentDate\n\t\treturn updatedDate", "def delta_freq(da_or_freq, da_or_freq1):\n return to_timedelta(da_or_freq1) / to_timedelta(da_or_freq)", "def increment_daily_total(self, unique, property_id=None, value=1):\n key = (self.user_name, self.bucket_name, \"daily_event\", self.shard)\n property_id = property_id or _32_BYTE_FILLER\n column_id = \"\".join([\n self.id,\n property_id[0:16],\n pack_day(),\n property_id[16:32]])\n increment_counter(key, column_id=column_id, value=value)\n if unique:\n key = (\n self.user_name, \n self.bucket_name, \n \"daily_unique_event\", \n self.shard)\n increment_counter(key, column_id=column_id)", "def get_base_price(self):\n\n price = randint(5, 9)\n\n now = datetime.now()\n weekday = now.weekday()\n hour = now.hour\n\n if weekday < 5 and 7 < hour < 12:\n price = price + 4\n\n return price", "def period(self) -> int:", "def daily_price():\n for item in data:\n if valid_date(item):\n yield data[item]['daily_value']", "def new_day(self):\n self.previous_days.append(self.energy_debt)\n self.energy_debt = defaultdict(lambda: 0.0)\n\n #TODO: add the settelement mechanism here", "def fee(self, prices, fee):\n return self.volume(prices) * fee.value / Config.FEE_TOKEN_PRICE", "def redeem(self, instance, customer, save=True):\n start = timezone.now().date()\n end = start + relativedelta(months=self.duration)\n discount = Discount(instance=instance,\n coupon=self,\n start=start,\n end=end,\n customer=customer)\n discount.full_clean()\n if save:\n discount.save()\n return discount", "def returns_to_prices(returns: pd.Series, start_price: float) -> pd.Series:\n return returns.add(1).cumprod().mul(start_price)", "def discount(ir, period):\n\treturn ir.discount(period)", "def next(self, dt):\n self.x = self.x + \\\n (self.rate-0.5*self.vola*self.vola)*dt + \\\n sqrt(dt)*self.vola*np.random.normal()\n return exp(self.x)", "def price(self, value):\n self.price_ = max(value, 0)\n\n if self.price_ == 0:\n self.mark_as_paid()" ]
[ "0.6303654", "0.6007813", "0.55935836", "0.5568597", "0.5533616", "0.5408784", "0.53791", "0.5367118", "0.5322472", "0.52189183", "0.51695794", "0.51471466", "0.5137264", "0.503475", "0.50263155", "0.50206846", "0.5014889", "0.5012917", "0.49996305", "0.4996481", "0.49523568", "0.49300805", "0.49031958", "0.4899079", "0.48854068", "0.48672548", "0.48479313", "0.48469487", "0.48354998", "0.483071" ]
0.6241355
1
Divides the price of one period by the number of days (frequency) to get the price for one day of this subscription. Then multiplies the value of this single day by the number of days that have passed since the start of the period, giving as a result the amount that the customer has yet to pay.
def amount_to_pay_in_period(self):
    assert self.type == "N", _("Subscription must be normal to use this method")
    period_start, period_end = self.get_current_period()
    price_per_day = (
        self.get_price_for_full_period() / (period_end - period_start).days
    )
    days_not_used = 30 * self.frequency - (date.today() - period_start).days
    return int(price_per_day * days_not_used)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_price_for_full_period(self):\n from .utils import calc_price_from_products\n\n summary_of_products = self.product_summary()\n frequency = self.frequency\n price = calc_price_from_products(summary_of_products, frequency)\n return price", "def amount_already_paid_in_period(self):\n assert self.type == \"N\", _(\"Subscription must be normal to use this method\")\n period_start, period_end = self.get_current_period()\n price_per_day = (\n self.get_price_for_full_period() / (period_end - period_start).days\n )\n days_already_used = (date.today() - period_start).days\n amount = int(price_per_day * days_already_used)\n if amount > self.get_price_for_full_period():\n amount = self.get_price_for_full_period()\n if amount < 0:\n amount = 0\n return amount", "def check_price(self):\n return self.day*self.price", "def get_subscription_days(self):\n if self.recurring_unit == self.DAY:\n return self.recurring_period\n elif self.recurring_unit == self.WEEK:\n return self.recurring_period * self.WEEKDAYS\n elif self.recurring_unit == self.MONTH:\n return self.recurring_period * self.MONTHDAYS\n elif self.recurring_unit == self.YEAR:\n return self.recurring_period * self.YEARDAYS\n else:\n return 0", "def compute_consumption_per_day(self):\n start = datetime.now()\n tz = pytz.timezone(self.env.user.tz)\n dt = pytz.utc.localize(start).astimezone(tz)\n schedule = self.env['ir.config_parameter'].get_param('calc_orderpoint.schedule', '0 6').split()\n run = False\n for begin, end in zip(schedule[::2], schedule[1::2]):\n if dt.hour >= int(begin) and dt.hour < int(end):\n run = True\n break\n if run:\n now = fields.Datetime.now()\n location_ids = eval(self.env['ir.config_parameter'].get_param('calc_orderpoint.location_ids', '[]'))\n limit = timedelta(minutes=float(self.env['ir.config_parameter'].get_param('calc_orderpoint.time_limit', '4')))\n _logger.warn('Starting compute_consumption_per_day.')\n products = self.env['product.template'].search(\n [\n '|',\n ('product_variant_ids.sale_ok', '=', True),\n ('sale_ok', '=', True),\n ('last_sales_count', '=', False),\n '|',\n ('earliest_sales_count', '=', False),\n ('earliest_sales_count', '<', now)\n ],\n limit=int(self.env['ir.config_parameter'].get_param(\n 'calc_orderpoint.product_limit', '30')))\n if not products:\n products = self.env['product.template'].search(\n [\n '|',\n ('product_variant_ids.sale_ok', '=', True),\n ('sale_ok', '=', True),\n '|',\n ('earliest_sales_count', '=', False),\n ('earliest_sales_count', '<', now)\n ],\n order='last_sales_count asc',\n limit=int(self.env['ir.config_parameter'].get_param(\n 'calc_orderpoint.product_limit', '30')))\n _logger.warn('Computing compute_consumption_per_day for the following products: %s' % products)\n for product in products:\n try:\n product._consumption_per_day()\n product.write({\n 'last_sales_count': fields.Datetime.now(),\n 'earliest_sales_count': False,\n })\n if (datetime.now() - start) > limit:\n break\n except:\n tb = traceback.format_exc()\n tomorrow = fields.Datetime.to_string(fields.Datetime.from_string(fields.Datetime.now()) + timedelta(1))\n subject = 'compute_consumption_per_day failed to compute %s (%s)' % (product.display_name, product.id)\n body = 'Earliest recompute attempt set to %s.\\n\\n%s' % (tomorrow, tb)\n _logger.warn('%s. 
%s' % (subject, body))\n product.earliest_sales_count = tomorrow\n product.message_post(body=body.replace('\\n', '<br/>'), subject=subject, type='notification')\n \n _logger.warn('Finished compute_consumption_per_day.')", "def calc_price_for_period(prev_price):\n result = []\n for i in range(1, N+1):\n price = prev_price + calc_price_delta(prev_price, i)\n prev_price = price\n result.append(price)\n return result", "def calc_total_price(price_per_day, date_from, date_to):\n date_from = datetime.strptime(date_from, '%Y-%m-%d')\n date_to = datetime.strptime(date_to, '%Y-%m-%d')\n n_days = date_to - date_from\n n_days = n_days.days + 1\n return price_per_day * n_days", "def discount(self, period):\n\t\treturn 1.0/compound(period)", "def get_current_period(self):\n if not self.next_billing:\n return None\n assert self.type == \"N\", _(\"Subscription must be normal to use this method\")\n start = self.next_billing - relativedelta(months=self.frequency)\n end = self.next_billing\n return start, end", "def update_period(self):\n return 0.1", "def calculate_time_weighted_rate(self, origin_currency, amount, target_currency, date_invested=datetime.datetime.today()):\n rates = self.get_rates(date_from=date_invested, source_currency=origin_currency, exchanged_currency=target_currency)\n rates_values = []\n amount_values = []\n period_values = []\n amount = Decimal(amount)\n day_amount = amount # first day\n log.debug('Orig amount ' + str(day_amount))\n for rate in rates:\n log.debug('Comenzando con amount val ' + str(day_amount))\n rate_value = Decimal(rate.get('rate_value'))\n log.debug('Rate val ' + str(rate_value))\n rates_values.append(rate_value)\n day_value = Decimal(amount) * rate_value\n log.debug('Day val ' + str(day_value))\n amount_values.append(day_value)\n period_value = (day_value - day_amount) / day_amount\n log.debug('Period val ' + str(period_value))\n log.debug('Period val +1: =' + str(period_value + 1))\n day_amount = Decimal(day_value) # next day, the value is the past day\n period_values.append(period_value + 1)\n twr = reduce((lambda x, y: x * y), period_values) - 1\n log.debug('result con -1: =' + str(twr))\n # considerando un periodo en vez de todos los días\n last_value = Decimal(rates[-1].get('rate_value')) * amount\n twr_one_period = (last_value - amount) / amount\n log.debug('Resultado suponiendo un periodo en vez de cada dia: ' + str(twr_one_period))\n return str(twr)", "def period(self, value: int, /) -> None:", "def period(self) -> int:", "def calculate_next_payment(frequency, payment_date, payment_month):\r\n\tif frequency == 1 or frequency == 4: # weekly or four-weekly\r\n\t\tnext_payment = payment_date + timedelta(weeks=frequency)\r\n\telif frequency == 2: # monthly\r\n\t\tnext_payment = payment_date.replace(month=payment_month + 1)\r\n\telse:\r\n\t\tnext_payment = date(1, 1, 1)\r\n\r\n\tprint(\"Frequency : \" + str(frequency)) # testing\r\n\tprint(\"Payment date: \" + str(payment_date)) # testing\r\n\tprint(\"Next payment: \" + str(next_payment)) # testing\r\n\r\n\treturn next_payment", "def get_base_price(self):\n\n price = randint(5, 9)\n\n now = datetime.now()\n weekday = now.weekday()\n hour = now.hour\n\n if weekday < 5 and 7 < hour < 12:\n price = price + 4\n\n return price", "def calcul_next(self):\n if not self.valide: # si c'est plus en fonction pas besoin de calcul complemnetaire\n return None\n initial = self.date\n if self.periodicite == 'u':\n return None\n finale = None\n if self.periodicite == 'j':\n finale = initial + 
datetime.timedelta(days=self.intervalle)\n if self.periodicite == 's':\n finale = initial + datetime.timedelta(weeks=self.intervalle)\n if self.periodicite == 'm':\n finale = initial + relativedelta(months=self.intervalle)\n if self.periodicite == 'a':\n finale = initial + relativedelta(years=self.intervalle)\n # on verifie que la date limite n'est pas dépasséee\n if self.date_limite is not None and finale > self.date_limite:\n finale = None\n return finale", "def daily_price():\n for item in data:\n if valid_date(item):\n yield data[item]['daily_value']", "def __simulate_one_day__(self):\n self.compute()\n self.days.append(next(self.index))", "def fee(self, prices, fee):\n return self.volume(prices) * fee.value / Config.FEE_TOKEN_PRICE", "def initial_price(self) -> float:\n return self.__initial_price", "def get_current_price(self, tickers: Union[Ticker, Sequence[Ticker]],\n frequency: Frequency = None) -> Union[float, QFSeries]:\n frequency = frequency or self.fixed_data_provider_frequency or Frequency.MIN_1\n\n if frequency <= Frequency.DAILY:\n raise ValueError(\"The Intraday Data Handler can be used only with the Intraday Frequency\")\n\n tickers, was_single_ticker_provided = convert_to_list(tickers, Ticker)\n # if an empty tickers list was supplied then return an empty result\n if not tickers:\n return QFSeries()\n\n current_datetime = self.timer.now()\n\n # Check if the current time is at the market open, if so - take the Open price of the time range, starting\n # at current datetime\n if current_datetime + MarketOpenEvent.trigger_time() == current_datetime:\n time_range_start = current_datetime\n field = PriceField.Open\n else:\n time_range_start = current_datetime - frequency.time_delta()\n field = PriceField.Close\n\n prices_data_array = self.data_provider.get_price(tickers,\n field,\n time_range_start,\n time_range_start + frequency.time_delta(),\n frequency)\n try:\n # Below, the loc[time_range_start] is used instead of iloc[0], in order to return the price exactly from the\n # time_range_start, and not from the range between time_range_start and time_range_start +\n # frequency.time_delta()\n prices_series = prices_data_array.loc[time_range_start]\n except KeyError:\n prices_series = QFSeries(index=tickers)\n\n prices_series.name = \"Current asset prices\"\n\n prices_series = cast_series(prices_series, QFSeries)\n if was_single_ticker_provided:\n return prices_series[0]\n else:\n return prices_series", "def returns_to_prices(returns: pd.Series, start_price: float) -> pd.Series:\n return returns.add(1).cumprod().mul(start_price)", "def period_payment(yearly_payments_percentage, client_cost_reduction,\n days_with_payments, days_for_discount_rate):\n\n yearly_payments_percentage = Fraction(str(yearly_payments_percentage))\n client_cost_reduction = Fraction(str(client_cost_reduction))\n\n if days_with_payments == 0:\n payments = Fraction(0)\n else:\n payments = Fraction(days_with_payments, days_for_discount_rate)\n return (yearly_payments_percentage * client_cost_reduction * payments)", "def delta_freq(da_or_freq, da_or_freq1):\n return to_timedelta(da_or_freq1) / to_timedelta(da_or_freq)", "def running_total(date_list):\n return sum(d.price for d in date_list)", "def compute(self, days=1):\n raise NotImplementedError", "def price_generator(self, start, end, periods):\r\n tickers = [self.SelectedTicker]\r\n tick_yahoo = YahooFinancials(tickers)\r\n data = tick_yahoo.get_historical_price_data(start, \r\n end, \r\n periods)\r\n \r\n df = pd.DataFrame({\r\n a: {x['formatted_date']: 
x['adjclose'] for x in data[a]['prices']} for a in tickers})\r\n \r\n self.prices = df.dropna()\r\n self.returns = self.prices.pct_change().dropna()\r\n try:\r\n self.div_yield = tick_yahoo.get_dividend_yield()\r\n #print(self.div_yield[self.SelectedTicker])\r\n if self.div_yield[self.SelectedTicker] == None:\r\n self.div_yield = 0.00\r\n else:\r\n self.div_yield = self.div_yield[self.SelectedTicker]\r\n except:\r\n print(\"no dividend yield\")", "def get_price():\n return uniform(1.0, 350.0)", "def value_ret_calendar_period(self, year: int, month: int = None) -> float:\n if month is None:\n period = str(year)\n else:\n period = '-'.join([str(year), str(month).zfill(2)])\n rtn = self.tsdf.copy().pct_change()\n rtn = rtn.loc[period] + 1\n return float(rtn.apply(np.cumprod, axis='index').iloc[-1] - 1)", "def get_next_day(self):\n pass" ]
[ "0.63601166", "0.6031539", "0.57353836", "0.57038665", "0.5697343", "0.5652752", "0.562607", "0.56000257", "0.54350585", "0.53622705", "0.5336522", "0.5317782", "0.53067946", "0.5294332", "0.52442366", "0.5220777", "0.5146846", "0.51338613", "0.509506", "0.5093615", "0.5076544", "0.5072984", "0.50638354", "0.5030509", "0.5004216", "0.50016475", "0.4999862", "0.49906117", "0.498633", "0.49778438" ]
0.66041255
0
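
The method above prorates the full-period price by day and charges only for the days that remain, assuming 30 days per frequency unit. A minimal standalone sketch of that arithmetic, with made-up prices and dates that are purely illustrative and not taken from the model:

from datetime import date, timedelta

def amount_to_pay(full_price, period_start, period_end, frequency, today=None):
    """Prorated charge for the rest of the current period (illustration only)."""
    today = today or date.today()
    price_per_day = full_price / (period_end - period_start).days
    days_not_used = 30 * frequency - (today - period_start).days
    return int(price_per_day * days_not_used)

# A monthly (frequency=1) subscription priced at 300, checked 10 days into the period:
start = date(2024, 1, 1)
end = start + timedelta(days=30)
print(amount_to_pay(300, start, end, 1, today=date(2024, 1, 11)))  # -> 200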
Shows an asterisk depending on which weekdays the subscription has products in. This is used in logistics.
def render_weekdays(self):
    products = self.products.filter(type="S")
    response = "<table><tr>"
    if products.filter(weekday=1).exists():
        response += "<td>*</td>"
    else:
        response += "<td></td>"
    if products.filter(weekday=2).exists():
        response += "<td>*</td>"
    else:
        response += "<td></td>"
    if products.filter(weekday=3).exists():
        response += "<td>*</td>"
    else:
        response += "<td></td>"
    if products.filter(weekday=4).exists():
        response += "<td>*</td>"
    else:
        response += "<td></td>"
    if products.filter(weekday=5).exists():
        response += "<td>*</td>"
    else:
        response += "<td></td>"
    response += "</tr></table>"
    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def info():\n\n\t info = \"This package determines the day of the week.\"\n\t print(info)", "def weekly():", "def get_weekday(self):\n weekdays = dict(PRODUCT_WEEKDAYS)\n return weekdays.get(self.weekday, \"N/A\")", "def day_of_week(self):\n day_of_week_names = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',\n 'Friday', 'Saturday', 'Sunday']\n diff = self.diff(Date(1, 1, 1970)) + 3\n while diff < 0:\n diff += 7\n print(day_of_week_names[diff % 7])", "def get_first_day_of_the_week(self):\n if SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=1\n ).exists():\n return 1\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=2\n ).exists():\n return 2\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=3\n ).exists():\n return 3\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=4\n ).exists():\n return 4\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=5\n ).exists():\n return 5\n else:\n return 6", "def get_this_week_label(self):\n return gettext_lazy('This week')", "def get_weekday_song():\n current_day = day_of_week()\n print(f\"WEEKDAY:{current_day}\")\n if (current_day == \"Monday\"):\n return random.choice([ \\\n \"Monday - Imagine Dragons\", \\\n \"Monday Morning - Quinn XCII\", \\\n \"Monday Mornin' Missin' You - Blake Shelton\", \\\n \"Monday Morning - Fleetwood Mac\", \\\n \"Monday, Monday\", \\\n \"Every Day is a Monday\"])\n elif (current_day == \"Tuesday\"):\n return random.choice([ \\\n \"Tuesdays\", \\\n \"Tuesday (feat Drake)\", \\\n \"Tuesday's Gone\", \\\n \"Tuesday I'll Be Gone\", \\\n \"Taco Tuesday - Migos\", \\\n \"Taco Tuesday - Lil Jon\", \\\n \"Tuesday Afternoon\"])\n elif (current_day == \"Wednesday\"):\n return random.choice([ \\\n \"Wednesday Morning - Macklemore\", \\\n \"Wednesday Night Interlude - Drake\", \\\n \"Wednesday Morning, 3AM\"])\n elif (current_day == \"Thursday\"):\n return random.choice([ \\\n \"Thursday - The Weeknd\", \\\n \"Thursday - Jess Glyne\", \\\n \"(Thursday) Here's Why I Did Not Go to Work Today\", \\\n \"Like a Summer Thursday\", \\\n \"Sweet Thursday\"])\n elif (current_day == \"Friday\"):\n return random.choice([ \\\n \"Friday Night - Eric Paslay \", \\\n \"Last Friday Night\", \\\n \"Finally Friday - George Jones\", \\\n \"Friday Rascall Flatts\", \\\n \"I Gotta Feeling\", \\\n \"Friday Night in Dixie\", \\\n \"Fridays Child\", \\\n \"Hymn for the Weekend\", \\\n \"Friday Night Fish Fry\", \\\n \"Friday Night - Lady A\", \\\n \"Hello Friday - Flo Rida\"])\n elif (current_day == \"Saturday\"):\n return random.choice([ \\\n \"Louisiana Saturday Night\", \\\n \"American Saturday Night\", \\\n \"Small Town Saturday Night\", \\\n \"Satuday Night's Alright\", \\\n \"Saturday in the Park\", \\\n \"Saturday - Twenty One Pilots\", \\\n \"Saturday Nights - Khalid\", \\\n \"Saturday Sun - Vance Joy\"])\n elif (current_day == \"Sunday\"):\n return random.choice([ \\\n \"Sunday Candy\", \\\n \"Sunday Morning - Parmalee\", \\\n \"Sunday Morning - Maroon 5\", \\\n \"Sunday Best\", \\\n \"Sunday\", \\\n \"Closed on Sunday\", \\\n \"Raining on Sunday\", \\\n \"A Month of Sundays\", \\\n \"That's What I Love About Sunday\", \\\n \"Sunday Drive\", \\\n \"Another Sunday in the South - Miranda Lambert\", \\\n \"Sunday - Sia\", \\\n \"Sunday Morning - Maroon 5\", \\\n \"A Month of Sundays - Don Henly\", \\\n \"Lazing on a Sunday Afternoon - Queen\", \\\n \"Sunday Morning Coming Down\", \\\n \"Blue Sunday - The 
Doors\", \\\n \"A Sunday Kind of Love - Etta James\"])", "def weekly_report(request):\n if TimeCheck().is_ready():\n # get the list of items for the email\n # this will include all active items with an expiration date\n # that occurs within the next 31 days\n exclude_date = dt.now() + datetime.timedelta(days=31)\n items = StockItem.objects\\\n .filter(active=True)\\\n .exclude(date_of_expiration__gt=exclude_date)\\\n .order_by('date_of_expiration')\n response = send_weekly_report(items)\n return HttpResponse(response.content)\n else:\n return HttpResponse('It is too soon to send another email.')", "def week(update: Update, _: CallbackContext) -> None:\n running_total, average_dose_per_day = return_weekly_figure()\n text = \\\n (\n \"\\n📅 *Rolling 7 Day Stats*\\n\" \n + \"\\n\\t\\t\\t📈 Rolling 7 Day Doses - \" + str('{:,}'.format(running_total))\n + \"\\n\\t\\t\\t💉 Average Daily Doses - \" + str('{:,}'.format(average_dose_per_day)) \n )\n update.message.reply_markdown(text)\n logger.info(\"Getting week update for \" + str(update.message.chat_id))", "def today():\n this_cal = Kalendar()\n to_display = \"TODAY:<BR><BR>\"\n\n elements = this_cal.get_all_day_elements(datetime.datetime.now())\n for element in elements:\n for key, values in element.items():\n to_display += key + \":<BR>\"\n for val in values:\n to_display += \"&nbsp;&nbsp;&nbsp;&nbsp;\" + val + \"<BR>\"\n\n return to_display", "def weekday(self, *args, **kwargs): # real signature unknown\r\n pass", "def weekdays(self):\n return self._get('weekdays')", "def add_sundayInfo(self, cost):\n\t\tself.sunday_cost = cost", "def generate_frapp_broadcastinfos(schedule):\n\n # Don't translate!\n weekdays = ('Montag', 'Dienstag', 'Mittwoch', 'Donnerstag', 'Freitag', 'Samstag', 'Sonntag')\n\n broadcasttime = schedule.tstart.strftime('%H:%M') + ' - ' + schedule.tend.strftime('%H:%M') + ' Uhr;'\n broadcastinfos = ''\n\n if schedule.rrule_id == 1: # Once\n broadcastinfos = 'Am ' + weekdays[schedule.byweekday] + ', ' + schedule.dstart.strftime('%d.%m.%Y') + ', ' + broadcasttime\n if schedule.rrule_id == 2: # Daily\n broadcastinfos = 'täglich, ' + broadcasttime\n if schedule.rrule_id == 3: # Business days\n broadcastinfos = 'werktags, ' + broadcasttime\n if schedule.rrule_id == 4: # Weekly\n broadcastinfos = 'Jeden ' + weekdays[schedule.byweekday] + ', ' + broadcasttime\n if schedule.rrule_id == 5: # Bi-weekly\n print(\"Not supported by FRAPP yet\")\n if schedule.rrule_id == 6: # Every four weeks\n print(\"Not supported by FRAPP yet\")\n if schedule.rrule_id == 7: # Even calendar weeks\n broadcastinfos = 'Jeden geraden ' + weekdays[schedule.byweekday] + ', ' + broadcasttime\n if schedule.rrule_id == 8: # Odd calendar weeks\n broadcastinfos = 'Jeden ungeraden ' + weekdays[schedule.byweekday] + ', ' + broadcasttime\n if schedule.rrule_id == 9: # Every 1st week\n broadcastinfos = 'Jeden 1. ' + weekdays[schedule.byweekday] + ', ' + broadcasttime\n if schedule.rrule_id == 10: # Every 2nd week\n broadcastinfos = 'Jeden 2. ' + weekdays[schedule.byweekday] + ', ' + broadcasttime\n if schedule.rrule_id == 11: # Every 3rd week\n broadcastinfos = 'Jeden 3. ' + weekdays[schedule.byweekday] + ', ' + broadcasttime\n if schedule.rrule_id == 12: # Every 4th week\n broadcastinfos = 'Jeden 4. ' + weekdays[schedule.byweekday] + ', ' + broadcasttime\n if schedule.rrule_id == 13: # Every 5th week\n broadcastinfos = 'Jeden 5. 
' + weekdays[schedule.byweekday] + ', ' + broadcasttime\n\n return broadcastinfos", "def main():\n print(day_of_week(datetime.now()))\n print(day_of_week(datetime(2019, 7, 4)))\n print(day_of_week(datetime(2013, 12, 25)))\n print(day_of_week(datetime(2000, 1, 1)))", "def help(self, update, context):\n help_message = textwrap.dedent(\"\"\"\n 1. /subscribe - To subscribe to sixes scored in IPL to avail 60% off swiggy coupon (SWIGGY6)\n 2. /snooze - To snooze the notifications for sixes scored for the day.\n 3. /removeSnooze - To resume the notifications for the day.\n 4. /unsubscribe - To unsubscribe to the sixes scored notifications.\n 5. /swiggyOffer - To know more about the ongoing swiggy offer.\n \"\"\")\n self.bot.send_message(chat_id=update.message.chat_id, text=help_message, parse_mode='markdown')", "def weekday(self):\n return 0", "def weekday(self):\n return 0", "def week_number(self, bot, update):\n bot.send_message(update.message.chat_id,\n text='Сейчас *{}* учебная неделя.'.format(self.week()),\n parse_mode='Markdown')", "def weekday_name(day_of_week):\n i = 0\n weekdays = [\"Sunday\", \"Monday\", \"Tuesday\",\n \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"]\n\n while i < len(weekdays):\n if i + 1 == day_of_week:\n print(weekdays[i])\n i = i + 1", "def menu_python_daily(self, event=None):\n self.link('http://www.pythonware.com/daily/')", "def news_for_week(self):\n\n raise NotImplementedError", "def print_weekly_forecast(update, context):\n city = context.user_data['city']\n provider = context.user_data['provider']\n data = context.bot_data['forecast_data']\n\n for d in data:\n if d['city'] == city and d['provider'] == provider:\n forecast = d['forecast']['week']\n\n message = f\"Прогноз погоды на неделю ({datetime.date.today().strftime('%A, %e %B')} - {(datetime.date.today() + datetime.timedelta(days=6)).strftime('%A, %e %B')}):\\n\"\n\n for f in forecast:\n date = datetime.datetime.strptime(f['date'], '%Y-%m-%d')\n message += f\"\"\"\n*{datetime.datetime.strftime(date,'%A')}*:\nМин.: {f['min_temp']}. 
Макс.: {f['max_temp']} \n{f['description']} {f['emoji']}\n\"\"\"\n context.bot.send_message(chat_id=update.effective_chat.id, text=message, parse_mode='markdown')", "def banner(self):\n\t\trundays = 0\n\t\tsqr = self.sqc.cursor()\n\t\tsqr.execute(\"SELECT value FROM sord WHERE name = 'gdays'\")\n\t\tfor value in sqr.fetchall():\n\t\t\trundays = value[0]\n\t\tthismsg = \"\\r\\n\"+self.cntransi(self.ESC+\"32mSaga Of The Red Dragon\"+self.ESC+\"0m\")+\"\\r\\n\"\n\t\tthismsg += self.cntransi(self.ESC+\"32m\"+self.ESC+\"1m\"+self.config.host)+\"\\r\\n\"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.cntransi(self.ESC+\"32mCompiled June 25, 2009: Version \"+self.ESC+\"1m\"+self.ESC+\"37m\"+self.config.version+self.ESC+\"0m\")+\"\\r\\n\"\n\t\tthismsg += self.cntransi(self.ESC+\"22m\"+self.ESC+\"32m(c) pre-2009 by Someone Else\\r\\n\\r\\n\"+self.ESC+\"0m\")+\"\\r\\n\"\n\t\tthismsg += self.cntransi(self.ESC+\"32m\"+self.ESC+\"1m\"+self.ESC+\"37mREGISTERED TO \"+self.ESC+\"0m\"+self.ESC+\"1m\"+self.ESC+\"34m\"+self.config.admin+self.ESC+\"0m\")+\"\\r\\n\\r\\n\"\n\t\tthismsg += self.cntransi(self.ESC+\"32mThe current game has been running for \"+self.ESC+\"1m\"+str(rundays)+self.ESC+\"22m game days.\\r\\n\"+self.ESC+\"0m\")+\"\\r\\n\"\n\t\tthismsg += self.cntransi(self.ESC+\"32mPlayers are deleted after \"+self.ESC+\"1m\"+str(self.config.delinactive)+self.ESC+\"22m real days of inactivity.\"+self.ESC+\"0m\")+\"\\r\\n\"\n\t\tthismsg += self.cntransi(self.ESC+\"32mPlayers are enjoying \"+self.ESC+\"1m\"+str(self.config.ffight)+self.ESC+\"22m forest fights per day.\"+self.ESC+\"0m\")+\"\\r\\n\"\n\t\tthismsg += self.cntransi(self.ESC+\"32mPlayers are enjoying \"+self.ESC+\"1m\"+str(self.config.pfight)+self.ESC+\"22m player fights per day.\"+self.ESC+\"0m\")+\"\\r\\n\"\n\t\tthismsg += self.cntransi(self.ESC+\"32mPlayers are enjoying \"+self.ESC+\"1m\"+str(self.config.bankinterest)+\"%\"+self.ESC+\"22m interest at the bank per day.\"+self.ESC+\"0m\")+\"\\r\\n\"\n\t\tthismsg += self.cntransi(self.ESC+\"32mThe current game day is \"+self.ESC+\"1m\"+str(self.config.daylength)+self.ESC+\"22m real hours long.\\r\\n\"+self.ESC+\"0m\")+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"32m (\"+self.ESC+\"1mE\"+self.ESC+\"22m)nter the realm of the Dragon\"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"32m (\"+self.ESC+\"1mL\"+self.ESC+\"22m)ist Warriors\"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"32m (\"+self.ESC+\"1mI\"+self.ESC+\"22m)nstructions\"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"32m (\"+self.ESC+\"1mQ\"+self.ESC+\"22m)uit the game server\\r\\n\"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"32m Your choice, warrior? [\"+self.ESC+\"1mE\"+self.ESC+\"22m]: \"+self.ESC+\"0m\"+self.ESC+\"0m \"\n\t\tsqr.close()\n\t\treturn thismsg", "def reporting_week(self):\n\n print(\"Week Numbers:\")\n print(self.time_stamp)\n print(self.time_stamp_iso)\n print(\"Current = {}\".format(self.current_week()))\n print(\"Reporting = {}\".format(self.current_week() - 1))", "def weekday_name(day_of_week):\n\n weekday_names = [\n 'Sunday', \n 'Monday', \n 'Tuesday', \n 'Wednesday', \n 'Thursday', \n 'Friday', \n 'Saturday']\n \n if day_of_week < 1 or day_of_week > 7:\n return 'None! 
Sowwy.'\n\n if day_of_week == 1:\n print(weekday_names[0])\n if day_of_week == 2:\n print(weekday_names[1])\n if day_of_week == 3:\n print(weekday_names[2])\n if day_of_week == 4:\n print(weekday_names[3])\n if day_of_week == 5:\n print(weekday_names[4])\n if day_of_week == 6:\n print(weekday_names[5]) \n if day_of_week == 7:\n print(weekday_names[6])", "def day_of_the_week(arg):", "async def daily(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send_help(ctx.command)", "def every_day():\n logger.info('[ EVERY_DAY ] [ %s ]' % str(datetime.now().time()))", "def has_wednesday(self):\n return self.products.filter(type=\"S\", weekday=3).exists()" ]
[ "0.5607956", "0.5562305", "0.5316422", "0.52950746", "0.52692235", "0.513908", "0.512714", "0.5123601", "0.51166713", "0.5115938", "0.50918335", "0.506186", "0.50614196", "0.5032122", "0.49700102", "0.49646038", "0.49457192", "0.49457192", "0.49444526", "0.49432766", "0.49299335", "0.4926258", "0.4913931", "0.48902434", "0.48753503", "0.48722166", "0.48454452", "0.4804891", "0.47913095", "0.47896886" ]
0.6195554
0
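
The five copy-pasted if/else branches in the method above differ only in the weekday code being checked. One possible refactor of the same method — assuming weekday codes 1–5 for Monday–Friday, as the original uses — produces the same markup from a single query and a loop:

def render_weekdays(self):
    # One query for all weekday codes instead of five .exists() calls.
    weekdays = set(
        self.products.filter(type="S").values_list("weekday", flat=True)
    )
    cells = "".join(
        "<td>*</td>" if day in weekdays else "<td></td>" for day in range(1, 6)
    )
    return "<table><tr>{}</tr></table>".format(cells)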
Returns true if the subscription has a Tuesday product.
def has_tuesday(self):
    return self.products.filter(type="S", weekday=2).exists()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_wednesday(self):\n return self.products.filter(type=\"S\", weekday=3).exists()", "def has_weekend(self):\n return self.products.filter(type=\"S\", weekday=10).exists()", "def has_thursday(self):\n return self.products.filter(type=\"S\", weekday=4).exists()", "def has_monday(self):\n return self.products.filter(type=\"S\", weekday=1).exists()", "def has_friday(self):\n return self.products.filter(type=\"S\", weekday=5).exists()", "def is_weekend() -> bool:\n return datetime.today().weekday() > 3", "def checker(self, product):\n for item in self.instock:\n if item == product:\n return True\n return False", "async def is_in_stock(cls, product: Product, session: ClientSession) -> bool:\n if not cls.is_expected_fqdn(product.fqdn):\n raise UnexpectedFQDN\n resp = await session.request(\n method=\"GET\", url=product.url, raise_for_status=True\n )\n try:\n return cls._is_in_stock(await resp.text())\n except:\n raise # re-raise exception to caller", "def is_salary(self):\n if self.wage_type == \"weekly_salary\":\n return True\n return False", "def _want_subscription() -> bool:\n prompt = (\n 'Would you be willing, once your first certificate is successfully issued, '\n 'to share your email address with the Electronic Frontier Foundation, a '\n \"founding partner of the Let's Encrypt project and the non-profit organization \"\n \"that develops Certbot? We'd like to send you email about our work encrypting \"\n \"the web, EFF news, campaigns, and ways to support digital freedom. \")\n return display_util.yesno(prompt, default=False)", "def checker(product):\n for item in INSTOCK:\n if item == product:\n return True\n return False", "def _is_active_subscription(self, topic: str) -> bool:\n return topic in self._simple_subscriptions or any(\n other.topic == topic for other in self._wildcard_subscriptions\n )", "def products_made(self, product) -> bool:\n return self.product_idx(product) is not None", "def get_first_day_of_the_week(self):\n if SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=1\n ).exists():\n return 1\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=2\n ).exists():\n return 2\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=3\n ).exists():\n return 3\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=4\n ).exists():\n return 4\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=5\n ).exists():\n return 5\n else:\n return 6", "def sleep_in(weekday, vacation):\r\n if not weekday or vacation:\r\n return True\r\n else:\r\n return False", "def sleep_in(weekday, vacation):\r\n if not weekday or vacation:\r\n return True\r\n return False", "def _subscribed(self, account_id):\n sql = \"\"\"SELECT 1 FROM hive_subscriptions\n WHERE community_id = :community_id\n AND account_id = :account_id\"\"\"\n return bool(DB.query_one(\n sql, community_id=self.community_id, account_id=account_id))", "def is_subscriber(self):\n try:\n return self.get_subscription().get('@type') != 'free'\n except Exception:\n # If can't retrieve, assume not paired and not a subscriber yet\n return False", "def has_subscribers(cls, topic):\n\t\tif (cls.all().filter('topic_hash =', utils.sha1_hash(topic))\n\t\t\t\t.filter('subscription_state =', cls.STATE_VERIFIED).get() is not None):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def isOnSaleSoon(ticket):\n return ticket.start_time > timezone.now()", "def get_subscription(self, article: BeautifulSoup):\n if 
self.parsing_template.subscription and article.select_one(self.parsing_template.subscription):\n return True\n return False", "def test_tuesday(self):\n date = datetime.date(1982, 5, 4)\n self.assertEqual(date.isoweekday(), 2)\n start_date, end_date = get_weekspan(date)\n self.assertEqual(start_date.isoweekday(), 1)\n self.assertEqual(end_date.isoweekday(), 7)\n self.assertTrue(start_date.toordinal() <= date.toordinal() <= end_date.toordinal())", "def isProduct(*args):\n return _libsbml.SBO_isProduct(*args)", "def verifysubscriptioninhomedevicestatus(sub):\n try:\n if \"Subscription Active\" in sub:\n print \" Hi chetan You have Active subscription\"\n else:\n print \" your subscription is not active \"\n except Exception as er:\n print(\"not able to get subscription details\")\n return False", "def is_subscribed(self) -> bool:\n return bool(self._subscriptions)", "def available(self) -> bool:\n return self._product and self._product.online", "def is_satisfied(self, item: Product) -> bool:\n return self.satisfied(item)", "def test_product_is_installed(self):\n try:\n result = self.installer.is_product_installed(PROJECT_NAME)\n except AttributeError:\n result = self.installer.isProductInstalled(PROJECT_NAME)\n self.assertTrue(result)", "def is_purchase(event_as_json):\n event = json.loads(event_as_json)\n if event['event_type'] == 'purchase':\n return True\n return False", "def is_product_saved(self):\n\n db.execute(\"SELECT product_id FROM Substitute WHERE product_id = %s\",\n (self.product.id,))\n product = db.fetch()\n if product:\n return True\n else:\n return False" ]
[ "0.79033303", "0.7433568", "0.7363184", "0.70281404", "0.66000545", "0.5900313", "0.5836925", "0.56731385", "0.55946356", "0.5543228", "0.55162776", "0.5497049", "0.5488144", "0.547877", "0.54727805", "0.5456891", "0.543672", "0.54349643", "0.54203504", "0.54126585", "0.5407693", "0.5402045", "0.5327861", "0.5321667", "0.5314829", "0.53023267", "0.52930224", "0.52376753", "0.5237095", "0.52280223" ]
0.83221126
0
Returns true if the subscription has a Wednesday product.
def has_wednesday(self):
    return self.products.filter(type="S", weekday=3).exists()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_thursday(self):\n return self.products.filter(type=\"S\", weekday=4).exists()", "def has_weekend(self):\n return self.products.filter(type=\"S\", weekday=10).exists()", "def has_tuesday(self):\n return self.products.filter(type=\"S\", weekday=2).exists()", "def has_friday(self):\n return self.products.filter(type=\"S\", weekday=5).exists()", "def has_monday(self):\n return self.products.filter(type=\"S\", weekday=1).exists()", "def is_weekend() -> bool:\n return datetime.today().weekday() > 3", "def test_wednesday(self):\n date = datetime.date(1988, 5, 4)\n self.assertEqual(date.isoweekday(), 3)\n start_date, end_date = get_weekspan(date)\n self.assertEqual(start_date.isoweekday(), 1)\n self.assertEqual(end_date.isoweekday(), 7)\n self.assertTrue(start_date.toordinal() <= date.toordinal() <= end_date.toordinal())", "def get_first_day_of_the_week(self):\n if SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=1\n ).exists():\n return 1\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=2\n ).exists():\n return 2\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=3\n ).exists():\n return 3\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=4\n ).exists():\n return 4\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=5\n ).exists():\n return 5\n else:\n return 6", "def sleep_in(weekday, vacation):\r\n if not weekday or vacation:\r\n return True\r\n return False", "def sleep_in(weekday, vacation):\r\n if not weekday or vacation:\r\n return True\r\n else:\r\n return False", "def is_weekend(date):\n \n return date.weekday() == 5 or date.weekday() == 6", "def check_weekday_of_date(self, date):\n return date.isoweekday() % 7", "def is_weekly_emails(self):\n return self._tag == 'weekly_emails'", "def isoweekday(self):\n # 1-Jan-0001 is a Monday\n return self.toordinal() % 7 or 7", "def is_workfree(date):\n \n return date.weekday() == 6 or is_holiday(date)", "def is_working_day_appointment(self):\n # function helps hide appointments on weekend\n return 0 <= self.date.weekday() <= 4", "def is_weekday(dtObj):\n return dtObj.weekday() < 5", "def is_salary(self):\n if self.wage_type == \"weekly_salary\":\n return True\n return False", "def iso_equal(self):\n\n if date(self.time_stamp.year, 1, 1).weekday() in (0, 1, 2, 3, 6):\n return True\n return False", "def test_thursday(self):\n date = datetime.date(1989, 5, 4)\n self.assertEqual(date.isoweekday(), 4)\n start_date, end_date = get_weekspan(date)\n self.assertEqual(start_date.isoweekday(), 1)\n self.assertEqual(end_date.isoweekday(), 7)\n self.assertTrue(start_date.toordinal() <= date.toordinal() <= end_date.toordinal())", "def is_weekday(day, halfDay):\n hours, days = halfDay.split('x')\n if day <= int(days)-1:\n return True\n else:\n return False", "def check_weekday(self, date):\n week_next = self.next_seven_day()\n today = datetime.date.today().strftime('%Y-%m-%d')\n if not date or date > week_next or date < today: # check the date is within one week\n return False, \"Sorry you can only booking consultation up to next one week. 
Your booking date must before {}\".format(week_next)\n try:\n day_as_string = self.get_the_weekday(date)\n if day_as_string == \"Saturday\" or day_as_string == \"Sunday\":\n logger.info(\"Sorry, there is no consultation on weekends\")\n return False, \"Sorry, there is no consultation on weekends\"\n else:\n logger.info(\"It is on next {}\".format(day_as_string))\n return True, \"Your booking has been made on {} {}\".format(day_as_string, date)\n except ValueError as e:\n logger.error(str(e))\n return False, \"Please try again\"", "def isoweekday(self):\n return 0", "def isoweekday(self):\n return 0", "def test_weekends():\n\n assert not datetime.datetime(2002, 3, 9) in TRADING_DATES\n assert not datetime.datetime(2002, 3, 10) in TRADING_DATES", "def isFullySubscribed(self) -> bool:\n\t\treturn self.subscribedCount == len(self.subscribedTopics)", "def filterWeekends(combinedPairRDDRecord):\n recordRDD = combinedPairRDDRecord[1]\n dayOfWeek = recordRDD[0][0]\n \n if dayOfWeek != 'Saturday' and dayOfWeek != 'Sunday':\n return True\n else:\n return False", "def test_weekly_training_is_weekly(self):\n self.assertIsInstance(self.weekly_training.is_weekly, bool)\n self.assertTrue(self.weekly_training.is_weekly)", "def dateweek(line, date):\r\n\tindex = datetime.weekday(date)\r\n\tdateweek = '%s%s%s' % (date.day, cn2en.DATE_WEEK, cn2en.WEEKDAYS[index])\r\n\t\r\n\treturn dateweek == line", "def _want_subscription() -> bool:\n prompt = (\n 'Would you be willing, once your first certificate is successfully issued, '\n 'to share your email address with the Electronic Frontier Foundation, a '\n \"founding partner of the Let's Encrypt project and the non-profit organization \"\n \"that develops Certbot? We'd like to send you email about our work encrypting \"\n \"the web, EFF news, campaigns, and ways to support digital freedom. \")\n return display_util.yesno(prompt, default=False)" ]
[ "0.7999227", "0.77904016", "0.74677527", "0.6989771", "0.6985255", "0.6812778", "0.6110862", "0.5900883", "0.5791354", "0.5783273", "0.5782799", "0.57391125", "0.5735744", "0.56574214", "0.56529266", "0.5615679", "0.556945", "0.549226", "0.5433178", "0.5399697", "0.53904605", "0.53646445", "0.53111285", "0.53111285", "0.53019303", "0.52643687", "0.5259272", "0.52370065", "0.5210245", "0.5208608" ]
0.8656813
0
Returns true if the subscription has a Thursday product.
def has_thursday(self):
    return self.products.filter(type="S", weekday=4).exists()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_wednesday(self):\n return self.products.filter(type=\"S\", weekday=3).exists()", "def has_tuesday(self):\n return self.products.filter(type=\"S\", weekday=2).exists()", "def has_weekend(self):\n return self.products.filter(type=\"S\", weekday=10).exists()", "def has_friday(self):\n return self.products.filter(type=\"S\", weekday=5).exists()", "def has_monday(self):\n return self.products.filter(type=\"S\", weekday=1).exists()", "def is_weekend() -> bool:\n return datetime.today().weekday() > 3", "def test_thursday(self):\n date = datetime.date(1989, 5, 4)\n self.assertEqual(date.isoweekday(), 4)\n start_date, end_date = get_weekspan(date)\n self.assertEqual(start_date.isoweekday(), 1)\n self.assertEqual(end_date.isoweekday(), 7)\n self.assertTrue(start_date.toordinal() <= date.toordinal() <= end_date.toordinal())", "def get_first_day_of_the_week(self):\n if SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=1\n ).exists():\n return 1\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=2\n ).exists():\n return 2\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=3\n ).exists():\n return 3\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=4\n ).exists():\n return 4\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=5\n ).exists():\n return 5\n else:\n return 6", "def is_weekday(dtObj):\n return dtObj.weekday() < 5", "def is_weekly_emails(self):\n return self._tag == 'weekly_emails'", "def iso_equal(self):\n\n if date(self.time_stamp.year, 1, 1).weekday() in (0, 1, 2, 3, 6):\n return True\n return False", "def isOnSaleSoon(ticket):\n return ticket.start_time > timezone.now()", "def sleep_in(weekday, vacation):\r\n if not weekday or vacation:\r\n return True\r\n return False", "def sleep_in(weekday, vacation):\r\n if not weekday or vacation:\r\n return True\r\n else:\r\n return False", "def is_salary(self):\n if self.wage_type == \"weekly_salary\":\n return True\n return False", "def check_weekday_of_date(self, date):\n return date.isoweekday() % 7", "def is_weekend(date):\n \n return date.weekday() == 5 or date.weekday() == 6", "def test_wednesday(self):\n date = datetime.date(1988, 5, 4)\n self.assertEqual(date.isoweekday(), 3)\n start_date, end_date = get_weekspan(date)\n self.assertEqual(start_date.isoweekday(), 1)\n self.assertEqual(end_date.isoweekday(), 7)\n self.assertTrue(start_date.toordinal() <= date.toordinal() <= end_date.toordinal())", "async def is_in_stock(cls, product: Product, session: ClientSession) -> bool:\n if not cls.is_expected_fqdn(product.fqdn):\n raise UnexpectedFQDN\n resp = await session.request(\n method=\"GET\", url=product.url, raise_for_status=True\n )\n try:\n return cls._is_in_stock(await resp.text())\n except:\n raise # re-raise exception to caller", "def has_picked_week(self, week):\n return self.find_pick_for_week(week, key_only=True) is not None", "def is_recurring(self):\n return self.__is_recurring", "def test_weekly_training_is_weekly(self):\n self.assertIsInstance(self.weekly_training.is_weekly, bool)\n self.assertTrue(self.weekly_training.is_weekly)", "def is_workfree(date):\n \n return date.weekday() == 6 or is_holiday(date)", "def membership_valid(self):\n\n today = date.today()\n\n if self.dues_paid is None:\n return False\n\n months = 12 if self.dues_paid_year else 6\n dues_due = datetime.combine(self.dues_paid, datetime.min.time()) + relativedelta(months=+months)\n dues_due = 
dues_due.date()\n\n return dues_due > today", "def is_market_hours():\n now = datetime.datetime.now()\n day = now.weekday()\n time = now.hour * 100 + now.minute\n\n if day > 4:\n return False\n\n if 930 <= time < 1600:\n return True\n\n return False", "def is_purchase(event_as_json):\n event = json.loads(event_as_json)\n if event['event_type'] == 'purchase':\n return True\n return False", "def is_dayofweek(day, today):\n if isinstance(today, datetime):\n int_day = int(day)\n if today.weekday() == int_day - 1:\n return True\n return False\n else:\n raise Exception(\"{} is not a datetime instance\".format(today))", "def is_today(self, dt: datetime.datetime) -> bool:\n\n if self is Day.DAILY:\n return True\n day = dt.weekday()\n if self is Day.WEEKDAY:\n return day < 5\n if self is Day.WEEKEND:\n return day >= 5\n return Day(day) == self", "def products_made(self, product) -> bool:\n return self.product_idx(product) is not None", "def is_working_day_appointment(self):\n # function helps hide appointments on weekend\n return 0 <= self.date.weekday() <= 4" ]
[ "0.8011751", "0.77116203", "0.7494331", "0.72076064", "0.68151414", "0.6368107", "0.61029273", "0.5982623", "0.57970923", "0.57579887", "0.56541497", "0.5562794", "0.55601287", "0.5555926", "0.55064213", "0.54724693", "0.54631317", "0.54424846", "0.54281855", "0.5418709", "0.5378454", "0.53750616", "0.5357815", "0.5300304", "0.52950215", "0.52656406", "0.52561367", "0.52422345", "0.52288735", "0.5225323" ]
0.8805305
0
Returns true if the subscription has a Friday product.
def has_friday(self):
    return self.products.filter(type="S", weekday=5).exists()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_weekend(self):\n return self.products.filter(type=\"S\", weekday=10).exists()", "def has_wednesday(self):\n return self.products.filter(type=\"S\", weekday=3).exists()", "def has_thursday(self):\n return self.products.filter(type=\"S\", weekday=4).exists()", "def has_tuesday(self):\n return self.products.filter(type=\"S\", weekday=2).exists()", "def has_monday(self):\n return self.products.filter(type=\"S\", weekday=1).exists()", "def is_weekend() -> bool:\n return datetime.today().weekday() > 3", "def is_workfree(date):\n \n return date.weekday() == 6 or is_holiday(date)", "def get_first_day_of_the_week(self):\n if SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=1\n ).exists():\n return 1\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=2\n ).exists():\n return 2\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=3\n ).exists():\n return 3\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=4\n ).exists():\n return 4\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=5\n ).exists():\n return 5\n else:\n return 6", "def check_weekday_of_date(self, date):\n return date.isoweekday() % 7", "def is_recurring(self):\n return self.__is_recurring", "def check_fees(prod_info):\n if prod_info == True:\n\n return (True)\n\n else:\n\n return(False)", "def test_friday(self):\n date = datetime.date(1984, 5, 4)\n self.assertEqual(date.isoweekday(), 5)\n start_date, end_date = get_weekspan(date)\n self.assertEqual(start_date.isoweekday(), 1)\n self.assertEqual(end_date.isoweekday(), 7)\n self.assertTrue(start_date.toordinal() <= date.toordinal() <= end_date.toordinal())", "async def is_in_stock(cls, product: Product, session: ClientSession) -> bool:\n if not cls.is_expected_fqdn(product.fqdn):\n raise UnexpectedFQDN\n resp = await session.request(\n method=\"GET\", url=product.url, raise_for_status=True\n )\n try:\n return cls._is_in_stock(await resp.text())\n except:\n raise # re-raise exception to caller", "def check_weekday(self, date):\n week_next = self.next_seven_day()\n today = datetime.date.today().strftime('%Y-%m-%d')\n if not date or date > week_next or date < today: # check the date is within one week\n return False, \"Sorry you can only booking consultation up to next one week. 
Your booking date must before {}\".format(week_next)\n try:\n day_as_string = self.get_the_weekday(date)\n if day_as_string == \"Saturday\" or day_as_string == \"Sunday\":\n logger.info(\"Sorry, there is no consultation on weekends\")\n return False, \"Sorry, there is no consultation on weekends\"\n else:\n logger.info(\"It is on next {}\".format(day_as_string))\n return True, \"Your booking has been made on {} {}\".format(day_as_string, date)\n except ValueError as e:\n logger.error(str(e))\n return False, \"Please try again\"", "def iso_equal(self):\n\n if date(self.time_stamp.year, 1, 1).weekday() in (0, 1, 2, 3, 6):\n return True\n return False", "def is_weekday(dtObj):\n return dtObj.weekday() < 5", "def is_weekend(date):\n \n return date.weekday() == 5 or date.weekday() == 6", "def has_shipping_event_occurred(self, event_type, quantity=None):\n if not quantity:\n quantity = self.quantity\n return self.shipping_event_quantity(event_type) == quantity", "def is_working_day_appointment(self):\n # function helps hide appointments on weekend\n return 0 <= self.date.weekday() <= 4", "def checker(self, product):\n for item in self.instock:\n if item == product:\n return True\n return False", "def _day_rule_matches(self, rule, dt):\n if dt.weekday() == 4:\n sat = dt + datetime.timedelta(days=1)\n if super(SiteHolidays, self)._day_rule_matches(rule, sat):\n return True\n elif dt.weekday() == 0:\n sun = dt - datetime.timedelta(days=1)\n if super(SiteHolidays, self)._day_rule_matches(rule, sun):\n return True\n return super(SiteHolidays, self)._day_rule_matches(rule, dt)", "def is_weekday(day, halfDay):\n hours, days = halfDay.split('x')\n if day <= int(days)-1:\n return True\n else:\n return False", "def test_is_payday_positive6(self):\n # Overriding first_payday\n self.first_payday = date_class(2020,12,24)\n date_to_check = date_class(2021,1,8)\n is_payday = self.pay_cycle.is_payday(date_to_check)\n assert is_payday == True", "def has_fiscal_period(row: pd.Series) -> bool:\n return any(row.iloc[1:].astype(str).map(partial(parse_fiscal_period, prefix=str(row.iloc[0]))))", "def isFeas(self, referencetuple):\n return referencetuple[self.__primalidx__] == self.__feas__", "def is_relevant(event):\n\n if \"Install\" not in event:\n return False\n for package in event[\"Install\"]:\n name = package[\"Name\"]\n if (\n installed.is_installed(name)\n and history.last_installed(name) == event[\"Start-Date\"]\n ):\n return True\n if (\n installed.is_multiarch_installed(name)\n and history.last_multiarch_installed(name) == event[\"Start-Date\"]\n ):\n return True\n return False", "def is_FriCASElement(x):\n return isinstance(x, FriCASElement)", "def is_satisfied(self, item: Product) -> bool:\n return self.satisfied(item)", "def is_registered(self):\n return self.faucet is not None", "def is_satisfied(self, item: Product) -> bool:\n return item.colour == self.colour" ]
[ "0.7637973", "0.7567508", "0.75327075", "0.7065873", "0.65174216", "0.6368904", "0.6181212", "0.61077476", "0.5917382", "0.58119035", "0.57857114", "0.57834", "0.57660586", "0.5702861", "0.56998616", "0.5680689", "0.5678202", "0.5603491", "0.55724514", "0.5518139", "0.5485771", "0.53923047", "0.53869396", "0.5374579", "0.5364021", "0.5360133", "0.53502434", "0.5349093", "0.5344634", "0.5314863" ]
0.8796624
0
Returns true if the subscription has a Weekend product.
def has_weekend(self):
    return self.products.filter(type="S", weekday=10).exists()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_wednesday(self):\n return self.products.filter(type=\"S\", weekday=3).exists()", "def has_thursday(self):\n return self.products.filter(type=\"S\", weekday=4).exists()", "def has_tuesday(self):\n return self.products.filter(type=\"S\", weekday=2).exists()", "def is_weekend() -> bool:\n return datetime.today().weekday() > 3", "def has_friday(self):\n return self.products.filter(type=\"S\", weekday=5).exists()", "def has_monday(self):\n return self.products.filter(type=\"S\", weekday=1).exists()", "async def is_in_stock(cls, product: Product, session: ClientSession) -> bool:\n if not cls.is_expected_fqdn(product.fqdn):\n raise UnexpectedFQDN\n resp = await session.request(\n method=\"GET\", url=product.url, raise_for_status=True\n )\n try:\n return cls._is_in_stock(await resp.text())\n except:\n raise # re-raise exception to caller", "def is_salary(self):\n if self.wage_type == \"weekly_salary\":\n return True\n return False", "def available(self) -> bool:\n return self._product and self._product.online", "def is_subscribed(self) -> bool:\n return bool(self._subscriptions)", "def is_weekend(date):\n \n return date.weekday() == 5 or date.weekday() == 6", "def is_weekly_emails(self):\n return self._tag == 'weekly_emails'", "def is_recurring(self):\n return self.__is_recurring", "def subscription_required(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"subscription_required\")", "def _subscribed(self, account_id):\n sql = \"\"\"SELECT 1 FROM hive_subscriptions\n WHERE community_id = :community_id\n AND account_id = :account_id\"\"\"\n return bool(DB.query_one(\n sql, community_id=self.community_id, account_id=account_id))", "def subscription_required(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"subscription_required\")", "def get_is_subscribed(self, obj):\n user = self.context['request'].user\n if not user.is_authenticated:\n return None\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n return profile in obj.subscribed_users.all()", "def isFullySubscribed(self) -> bool:\n\t\treturn self.subscribedCount == len(self.subscribedTopics)", "def is_workfree(date):\n \n return date.weekday() == 6 or is_holiday(date)", "def is_subscriber(self):\n try:\n return self.get_subscription().get('@type') != 'free'\n except Exception:\n # If can't retrieve, assume not paired and not a subscriber yet\n return False", "def has_picked_week(self, week):\n return self.find_pick_for_week(week, key_only=True) is not None", "def test_weekly_training_is_weekly(self):\n self.assertIsInstance(self.weekly_training.is_weekly, bool)\n self.assertTrue(self.weekly_training.is_weekly)", "def is_subscribed(user_id, profile_user_id):\n\n subscription = Subscription.query.filter(\n Subscription.user_id == user_id,\n Subscription.subscribe_to_id == profile_user_id\n ).first()\n print(\"IS SUBSCRIBED\")\n print(subscription)\n print(subscription is not None)\n return subscription is not None", "def has_subscribers(cls, topic):\n\t\tif (cls.all().filter('topic_hash =', utils.sha1_hash(topic))\n\t\t\t\t.filter('subscription_state =', cls.STATE_VERIFIED).get() is not None):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def is_satisfied(self, item: Product) -> bool:\n return self.satisfied(item)", "def test_one_off_training_is_not_weekly(self):\n self.assertIsInstance(self.one_off_training.is_weekly, bool)\n self.assertFalse(self.one_off_training.is_weekly)", "def is_subscribed(self, inst, channel):\r\n if channel not in self._channels:\r\n return 
False\r\n return inst in self._channels[channel].subscribers", "def is_payout_available(self):\r\n return False # Disabled because artist receives payout directly from fan\r\n # return self.has_ended and not self.is_payout_requested and self.amount_raised_online and not self.is_deleted\r", "def has_active_subscription(self, count=False):\n subs = self.subscriptions.filter(active=True)\n return subs.exists() if count is False else subs.count()", "def _want_subscription() -> bool:\n prompt = (\n 'Would you be willing, once your first certificate is successfully issued, '\n 'to share your email address with the Electronic Frontier Foundation, a '\n \"founding partner of the Let's Encrypt project and the non-profit organization \"\n \"that develops Certbot? We'd like to send you email about our work encrypting \"\n \"the web, EFF news, campaigns, and ways to support digital freedom. \")\n return display_util.yesno(prompt, default=False)" ]
[ "0.748854", "0.69645387", "0.68662715", "0.6862472", "0.6481014", "0.64291966", "0.6000979", "0.5917448", "0.57905054", "0.57651114", "0.57306504", "0.5691392", "0.5668606", "0.5651247", "0.56261396", "0.5589581", "0.55890775", "0.5559175", "0.5558002", "0.5557032", "0.5551415", "0.5543576", "0.55003655", "0.54895437", "0.5454558", "0.5426137", "0.54179716", "0.540775", "0.54047656", "0.5379173" ]
0.82367826
0
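
has_monday through has_weekend all run the same filter with a different weekday code (1–5, plus 10 for the weekend edition). A sketch of a single parametrized helper that the named methods could wrap; the helper name is hypothetical and not part of the original model:

def has_product_on(self, weekday_code):
    """True if the subscription has a type "S" product for the given weekday code."""
    return self.products.filter(type="S", weekday=weekday_code).exists()

# The named helpers then become one-line wrappers, for example:
def has_tuesday(self):
    return self.has_product_on(2)

def has_weekend(self):
    return self.has_product_on(10)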
Returns the unsubscription reason.
def get_unsubscription_reason(self):
    unsubscription_reasons = dict(settings.UNSUBSCRIPTION_REASON_CHOICES)
    return unsubscription_reasons.get(self.unsubscription_reason, "N/A")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_unsubscription_channel(self):\n unsubscription_channels = dict(settings.UNSUBSCRIPTION_CHANNEL_CHOICES)\n return unsubscription_channels.get(self.unsubscription_channel, \"N/A\")", "def reason(self) -> str:\n return pulumi.get(self, \"reason\")", "def get_unsubscription_type(self):\n unsubscription_types = dict(UNSUBSCRIPTION_TYPE_CHOICES)\n return unsubscription_types.get(self.unsubscription_type, \"N/A\")", "def getreason(self):\n return self.__reason", "def reason(self) -> str:\n return self._reason", "def reason(self) -> Optional[str]:\n return self._reason", "def getReason():", "def disabled_reason(self) -> Sequence[str]:\n return pulumi.get(self, \"disabled_reason\")", "def disconnect_reason(self):\n return self._disconnect_reason", "def suspension_reason(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"suspension_reason\")", "def error_reason(self):\n return self._error_reason", "def suspension_reason(self) -> Sequence[str]:\n return pulumi.get(self, \"suspension_reason\")", "def unregistered(self, reason: str):\n log.info(\"Unregistered, Reason: {}\".format(reason))\n pass", "def get_disconnected_reason(self):\n return self._server_disconnected_reason", "def state_reason(self):\n return self._state_reason", "def unsubscribe_url(self) -> str:\n return self[\"Sns\"][\"UnsubscribeUrl\"]", "def Reason(self):\n if self.force_auto_sync:\n self.get('Reason')\n return self._Reason", "def unsubscribed_member_count(self):\n return self._unsubscribed_member_count", "def cancel_reason(self):\n return self._dict.get('cancel_reason')", "def status_reason(self):\n return self.status.split()[2]", "def reason(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"reason\")", "def reason(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"reason\")", "def _get_lsp_frr_down_reason(self):\n return self.__lsp_frr_down_reason", "def SupportReasonUnknown(self):\n return self._get_attribute('supportReasonUnknown')", "def transition_reason(self):\n return self._transition_reason", "def connection_failure_reason(self):\n return self._connection_failure_reason", "def ungettext(self):\n return self._translations.ngettext", "def unsubscribe( self, mess, args):\n user = self.get_sender_username(mess)\n if not user in self.users:\n return 'You are not subscribed!'\n else:\n user = self.users.pop(user)\n self.message_queue.append('_%s has left the channel_' % user)\n self.log.info( '%s unsubscribed from the broadcast.' % user)\n self.save_state()\n return 'You are now unsubscribed.'", "def suspension_reason(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceSuspensionReasonItem']]]]:\n return pulumi.get(self, \"suspension_reason\")", "def reason(self) -> ConfirmClaimRequestpropertiesReason:\n return self._reason" ]
[ "0.67562884", "0.6677292", "0.66479945", "0.66324896", "0.6481102", "0.6330465", "0.622456", "0.6197353", "0.61534196", "0.6085014", "0.6073488", "0.60605896", "0.6048066", "0.6036894", "0.60239834", "0.6007807", "0.5993624", "0.5964749", "0.59406304", "0.5928856", "0.5863522", "0.5828042", "0.5810941", "0.58004826", "0.55807084", "0.55769783", "0.5508845", "0.55065215", "0.54622483", "0.5404995" ]
0.8568551
0
Returns the unsubscription channel.
def get_unsubscription_channel(self):
    unsubscription_channels = dict(settings.UNSUBSCRIPTION_CHANNEL_CHOICES)
    return unsubscription_channels.get(self.unsubscription_channel, "N/A")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_unsubscription_reason(self):\n unsubscription_reasons = dict(settings.UNSUBSCRIPTION_REASON_CHOICES)\n return unsubscription_reasons.get(self.unsubscription_reason, \"N/A\")", "def reason(self) -> str:\n return pulumi.get(self, \"reason\")", "def get_unsubscription_type(self):\n unsubscription_types = dict(UNSUBSCRIPTION_TYPE_CHOICES)\n return unsubscription_types.get(self.unsubscription_type, \"N/A\")", "def getreason(self):\n return self.__reason", "def reason(self) -> str:\n return self._reason", "def reason(self) -> Optional[str]:\n return self._reason", "def getReason():", "def disabled_reason(self) -> Sequence[str]:\n return pulumi.get(self, \"disabled_reason\")", "def disconnect_reason(self):\n return self._disconnect_reason", "def suspension_reason(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"suspension_reason\")", "def error_reason(self):\n return self._error_reason", "def suspension_reason(self) -> Sequence[str]:\n return pulumi.get(self, \"suspension_reason\")", "def unregistered(self, reason: str):\n log.info(\"Unregistered, Reason: {}\".format(reason))\n pass", "def get_disconnected_reason(self):\n return self._server_disconnected_reason", "def state_reason(self):\n return self._state_reason", "def unsubscribe_url(self) -> str:\n return self[\"Sns\"][\"UnsubscribeUrl\"]", "def Reason(self):\n if self.force_auto_sync:\n self.get('Reason')\n return self._Reason", "def unsubscribed_member_count(self):\n return self._unsubscribed_member_count", "def cancel_reason(self):\n return self._dict.get('cancel_reason')", "def status_reason(self):\n return self.status.split()[2]", "def reason(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"reason\")", "def reason(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"reason\")", "def _get_lsp_frr_down_reason(self):\n return self.__lsp_frr_down_reason", "def SupportReasonUnknown(self):\n return self._get_attribute('supportReasonUnknown')", "def transition_reason(self):\n return self._transition_reason", "def connection_failure_reason(self):\n return self._connection_failure_reason", "def ungettext(self):\n return self._translations.ngettext", "def unsubscribe( self, mess, args):\n user = self.get_sender_username(mess)\n if not user in self.users:\n return 'You are not subscribed!'\n else:\n user = self.users.pop(user)\n self.message_queue.append('_%s has left the channel_' % user)\n self.log.info( '%s unsubscribed from the broadcast.' % user)\n self.save_state()\n return 'You are now unsubscribed.'", "def suspension_reason(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceSuspensionReasonItem']]]]:\n return pulumi.get(self, \"suspension_reason\")", "def reason(self) -> ConfirmClaimRequestpropertiesReason:\n return self._reason" ]
[ "0.8568551", "0.6677292", "0.66479945", "0.66324896", "0.6481102", "0.6330465", "0.622456", "0.6197353", "0.61534196", "0.6085014", "0.6073488", "0.60605896", "0.6048066", "0.6036894", "0.60239834", "0.6007807", "0.5993624", "0.5964749", "0.59406304", "0.5928856", "0.5863522", "0.5828042", "0.5810941", "0.58004826", "0.55807084", "0.55769783", "0.5508845", "0.55065215", "0.54622483", "0.5404995" ]
0.67562884
1
Returns the payment type.
def get_payment_type(self): payment_types = dict(settings.SUBSCRIPTION_PAYMENT_METHODS) return payment_types.get(self.payment_type, "N/A")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def payment_type(self) -> str:\n return pulumi.get(self, \"payment_type\")", "def pay_type(self) -> str:\n return pulumi.get(self, \"pay_type\")", "def pay_type(self) -> str:\n return pulumi.get(self, \"pay_type\")", "def pay_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"pay_type\")", "def pay_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"pay_type\")", "def pay_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"pay_type\")", "def payment_channel_type(self) -> Optional[str]:\n return pulumi.get(self, \"payment_channel_type\")", "def payment_channel_type(self) -> Optional[str]:\n return pulumi.get(self, \"payment_channel_type\")", "def payment_mode(self):\n return self._payment_mode", "def policy_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"policy_type\")", "def type(self) -> str:\n return pulumi.get(self, \"type\")", "def type(self) -> str:\n return pulumi.get(self, \"type\")", "def type(self) -> str:\n return pulumi.get(self, \"type\")", "def type(self) -> str:\n return pulumi.get(self, \"type\")", "def type(self) -> str:\n return pulumi.get(self, \"type\")", "def type(self) -> str:\n return pulumi.get(self, \"type\")", "def type(self) -> str:\n return pulumi.get(self, \"type\")", "def type(self) -> str:\n return pulumi.get(self, \"type\")", "def type(self) -> str:\n return pulumi.get(self, \"type\")", "def type(self) -> str:\n return pulumi.get(self, \"type\")", "def type(self) -> str:\n return pulumi.get(self, \"type\")", "def type(self) -> str:\n return pulumi.get(self, \"type\")", "def type(self) -> str:\n return pulumi.get(self, \"type\")", "def type(self) -> str:\n return pulumi.get(self, \"type\")", "def type(self) -> str:\n return pulumi.get(self, \"type\")", "def type(self) -> str:\n return pulumi.get(self, \"type\")", "def type(self) -> str:\n return pulumi.get(self, \"type\")", "def type(self) -> str:\n return pulumi.get(self, \"type\")", "def type(self) -> str:\n return pulumi.get(self, \"type\")", "def type(self) -> str:\n return pulumi.get(self, \"type\")" ]
[ "0.92547166", "0.843507", "0.843507", "0.82871956", "0.80102414", "0.8000726", "0.7407751", "0.7407751", "0.7096854", "0.6870054", "0.6848388", "0.6848388", "0.6848388", "0.6848388", "0.6848388", "0.6848388", "0.6848388", "0.6848388", "0.6848388", "0.6848388", "0.6848388", "0.6848388", "0.6848388", "0.6848388", "0.6848388", "0.6848388", "0.6848388", "0.6848388", "0.6848388", "0.6848388" ]
0.8578038
1
Returns all the activities on this campaign, for a specific seller. Activities on a campaign imply that the contact has been scheduled to be called in the future.
def get_activities_by_seller(self, seller, status=None, type=None, datetime=None): acts = Activity.objects.filter(campaign=self, seller=seller).order_by('datetime') if status: acts = acts.filter(status__in=status) if type: acts = acts.filter(activity_type__in=type) if date: acts = acts.filter(datetime__lte=datetime) return acts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_activities(cls):\n objs = cls.objects\n return objs", "def activities(self):\r\n return v3.Activities(self)", "def activities(self):\n return self._activities", "def get_already_contacted(self, seller_id):\n return self.contactcampaignstatus_set.filter(\n seller_id=seller_id, status=2\n )", "def getVotersToContact(self):\n two_days_ago = date.today() - timedelta(2)\n year_ago = date.today() - timedelta(365)\n return self.voters.filter(\n Q(campaignstovoters__last_served=None) | Q(campaignstovoters__last_served__lt=two_days_ago),\n Q(campaignstovoters__last_contacted=None) | Q(campaignstovoters__last_contacted__lt=year_ago),\n campaignstovoters__is_active=True,\n is_active=True)", "def activities(self):\r\n return activities.Activities(self)", "def systray_get_activities(self):\n activities = super(Users, self).systray_get_activities()\n for activity in activities:\n if activity.get('model') == 'mailing.mailing':\n activities.remove(activity)\n query = \"\"\"SELECT m.mailing_type, count(*), act.res_model as model, act.res_id,\n CASE\n WHEN %(today)s::date - act.date_deadline::date = 0 Then 'today'\n WHEN %(today)s::date - act.date_deadline::date > 0 Then 'overdue'\n WHEN %(today)s::date - act.date_deadline::date < 0 Then 'planned'\n END AS states\n FROM mail_activity AS act\n JOIN mailing_mailing AS m ON act.res_id = m.id\n WHERE act.res_model = 'mailing.mailing' AND act.user_id = %(user_id)s \n GROUP BY m.mailing_type, states, act.res_model, act.res_id;\n \"\"\"\n self.env.cr.execute(query, {\n 'today': fields.Date.context_today(self),\n 'user_id': self.env.uid,\n })\n activity_data = self.env.cr.dictfetchall()\n \n user_activities = {}\n for act in activity_data:\n if not user_activities.get(act['mailing_type']):\n if act['mailing_type'] == 'sms':\n module = 'mass_mailing_sms'\n name = _('SMS Marketing')\n else:\n module = 'mass_mailing'\n name = _('Email Marketing')\n icon = module and modules.module.get_module_icon(module)\n res_ids = set()\n user_activities[act['mailing_type']] = {\n 'name': name,\n 'model': 'mailing.mailing',\n 'type': 'activity',\n 'icon': icon,\n 'total_count': 0, 'today_count': 0, 'overdue_count': 0, 'planned_count': 0,\n 'res_ids': res_ids,\n }\n user_activities[act['mailing_type']]['res_ids'].add(act['res_id'])\n user_activities[act['mailing_type']]['%s_count' % act['states']] += act['count']\n if act['states'] in ('today', 'overdue'):\n user_activities[act['mailing_type']]['total_count'] += act['count']\n\n for mailing_type in user_activities.keys():\n user_activities[mailing_type].update({\n 'actions': [{'icon': 'fa-clock-o', 'name': 'Summary',}],\n 'domain': json.dumps([['activity_ids.res_id', 'in', list(user_activities[mailing_type]['res_ids'])]])\n })\n activities.extend(list(user_activities.values()))\n break\n\n return activities", "def activities(self):\r\n return resources.Activities(self)", "def by_activity(cls,site_id=0,activity=None):\n return meta.DBSession.query(Activity).filter_by(site_id=site_id,activity=activity).all()", "def find_campaigns_as_caller(caller):\n\n \"\"\"Get Campaigns for Caller\"\"\"\n campaigns_as_caller = caller.campaigns_as_caller.filter(\n status__in=[x.value[0] for x in call_campaign_statuses_for_caller],\n ).order_by('-date_created')\n\n \"\"\"Check Call Tool Feature Access for Campaigns\"\"\"\n campaigns = [x for x in campaigns_as_caller if has_call_feature_access_for_local_group(\n x.local_group\n )]\n\n return campaigns", "def get_activities(self, activity_ids=None, max_records=50):\r\n return 
self.connection.get_all_activities(self, activity_ids, max_records)", "def get_activities():\n pass", "def get_not_contacted(self, seller_id):\n return self.contactcampaignstatus_set.filter(seller_id=seller_id, status__in=[1, 3])", "def get_activity_list(self):\n return self._request_activity_list(self.athlete)", "def get_activities(self, type=None):\n return flattrclient._get_query_dict(type=type)", "def get_sorted_activities(self):\n return helpers.get_sorted_activities(self)", "def get_activities(ts_activity, access_token):\n params = {'after': ts_activity, 'access_token': access_token}\n url = \"https://www.strava.com/api/v3/activities\"\n response = return_json(url, \"GET\", parameters=params)\n return response", "def imps_by_me(self):\n return self.caller.roster.accounthistory_set.last().initiated_contacts.all()", "def get_activities(self, user_id=None, group_id=None, app_id=None,\n activity_id=None, start_index=0, count=0):\n raise NotImplementedError()", "def get_recipients(self):\n return [\n self.obj.activity.owner\n ] + [\n slot_participant.participant.user for slot_participant\n in self.obj.slot_participants.all()\n if (\n slot_participant.status == 'registered' and\n slot_participant.participant.status == 'accepted'\n )\n ]", "def get_campaigns(self, uuid=None):\n params = self._build_params(uuid=uuid)\n return self._get_query('campaigns', params, Campaign)", "def _get_campaigns(self, params):\n return self._api.account.get_campaigns(params={**params, **self._state_filter()}, fields=[self.state_pk])", "def __ui_list_all_activities(self):\n activities_list = self.__activity_service.service_get_list_of_activities()\n if len(activities_list) == 0:\n print(\"The list of activities is empty!\\n\")\n else:\n for activity in activities_list:\n print(activity)\n print(\"\")", "def collect_activities(self, user_id, release, params=None):\n params = params or {}\n filter_params = {'user_id': user_id, 'release': release}\n filter_params.update(params)\n activities = []\n while True:\n resp = requests.get(self.url, filter_params)\n content = json.loads(resp.content)\n activities.extend(content['activity'])\n filter_params['start_record'] += self.page_size\n if len(content['activity']) == 0:\n break\n return activities", "def get_drip_campaigns(self):\n return list(DripCampaign.objects(user_id=self.user_id))", "def get_all_items(bucket_id, created_by):\n return BucketActivities.query.filter_by(bucket_id=bucket_id, created_by=created_by)", "def yesterday_spend_campaign(self):\n accounts = active_adwords_accounts()\n for account in accounts:\n try:\n if settings.DEBUG:\n adwords_cron_campaign_stats(account.dependent_account_id)\n else:\n adwords_cron_campaign_stats.delay(account.dependent_account_id)\n except (ConnectionRefusedError, ReddisConnectionError, KombuOperationalError):\n logger = Logger()\n warning_message = 'Failed to created celery task for cron_campaigns.py for account ' + str(\n account.dependent_account_name)\n warning_desc = 'Failed to create celery task for cron_campaigns.py'\n logger.send_warning_email(warning_message, warning_desc)\n break\n\n return 'yesterday_spend_campaign'", "def dynamics365_cds_activities(self) -> 'outputs.Dynamics365DataConnectorDataTypesResponseDynamics365CdsActivities':\n return pulumi.get(self, \"dynamics365_cds_activities\")", "def get_administrator_receivers(self):\n result = []\n services = self.find_tags(\"receiver\")\n for s in services:\n for action in s.findall(\"./intent-filter/action\"):\n if \"android.app.action.DEVICE_ADMIN_ENABLED\" 
in action.attrib.values():\n result.append(s.attrib['{http://schemas.android.com/apk/res/android}name'])\n # print(result)\n return result", "def activities(self):\n if \"activities\" in self._prop_dict:\n return ActivitiesCollectionPage(self._prop_dict[\"activities\"])\n else:\n return None" ]
[ "0.59208065", "0.5899923", "0.5818823", "0.5736359", "0.5720688", "0.55833286", "0.5477893", "0.540893", "0.5273918", "0.52573407", "0.5247901", "0.5240242", "0.52282554", "0.5195234", "0.51873195", "0.5182521", "0.51658654", "0.501627", "0.5011079", "0.4955191", "0.48948267", "0.4874831", "0.4806107", "0.47283638", "0.47205454", "0.4715883", "0.47052035", "0.46871787", "0.4687146", "0.46781632" ]
0.68809056
0
Returns the ContactCampaignStatus objects for all Contacts that have not been called yet (status=1)
def get_not_contacted(self, seller_id): return self.contactcampaignstatus_set.filter(seller_id=seller_id, status__in=[1, 3])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_already_contacted(self, seller_id):\n return self.contactcampaignstatus_set.filter(\n seller_id=seller_id, status=2\n )", "def campaign_status(self):\n return self._campaign_status", "def get_active_contact(self):\n list_contact = Contact.objects.filter(phonebook__campaign=self.id,\n status=CONTACT_STATUS.ACTIVE).all()\n if not list_contact:\n return False\n return list_contact", "def get_all_active_members(debug, contactsUrl):\n\n valid_date = str(datetime.date.today() - datetime.timedelta(days=7)) # 7 days ago in yyyy-mm-dd format\n\n #params = {'$filter': 'member eq true AND Status eq Active',\n # '$async': 'false'}\n params = {'$filter': \"member eq true AND ( Status eq Active OR ( Status eq PendingRenewal AND 'Renewal due' ge \" + valid_date + \"))\",\n '$async': 'false'}\n request_url = contactsUrl + '?' + urllib.parse.urlencode(params)\n if debug: print('Making api call to get contacts')\n return api.execute_request(request_url).Contacts", "def _get_campaigns(self, params):\n return self._api.account.get_campaigns(params={**params, **self._state_filter()}, fields=[self.state_pk])", "def contact_status(request):\n\n if request.user.is_authenticated():\n try:\n return {\n 'has_contact': Contact.objects.filter(user=request.user),\n \n }\n except DoesNotExist:\n pass\n return {\n \n }", "def find_campaigns_as_caller(caller):\n\n \"\"\"Get Campaigns for Caller\"\"\"\n campaigns_as_caller = caller.campaigns_as_caller.filter(\n status__in=[x.value[0] for x in call_campaign_statuses_for_caller],\n ).order_by('-date_created')\n\n \"\"\"Check Call Tool Feature Access for Campaigns\"\"\"\n campaigns = [x for x in campaigns_as_caller if has_call_feature_access_for_local_group(\n x.local_group\n )]\n\n return campaigns", "def test_get_non_existent_campaigns_returns_empty_list(self):\n response = self.client.get(self.endpoint_url)\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_body, {\"campaigns\": []})", "def fetch_contact_messages(self, org, contact, created_after, created_before):\n pass", "def ListAllContacts(self):\n feed = self.gd_client.GetContacts()\n self.contacts = self.CleanPhoneNumbers(self.GetContactsInfo(feed))\n return self.contacts", "def related_reports(self):\n return Report.objects.exclude(contact_email__isnull=True).filter(contact_email__iexact=self.contact_email).order_by('status', '-create_date')[:1000]", "def get_patient_contacted_status(args):\n person_id = args[\"PersonID\"]\n c = get_most_recent_row_by_person_id(\"Contacted\", person_id,\n timestamp=\"UpdatedDateTime\", schema=app_schema)\n h = get_table_by_person_id(\"Contacted\", person_id,\n schema=app_schema).sort_values(by=[\"UpdatedDateTime\"], ascending=False)\n return {\"current\": c, \"historical\": h}", "def get_com_status(self, **kwargs):\n for com in self._coms:\n self.__chk_com(port_name=com)", "def available_statuses(self):\n return self.pipeline.get(self.status, ())", "def available_statuses(self):\n return self.pipeline.get(self.status, ())", "def test_get_existent_campaigns_returns_campaigns_list(self):\n test_campaign = return_canned_campaign()\n test_campaign.create()\n response = self.client.get(self.endpoint_url)\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response_body, {\"campaigns\": [{\"id\": 1, \"name\": \"Test Campaign\"}]}\n )", "def list_campaigns(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), params=kwargs)", "def 
test_get_filter_no_effective_contacts(self):\n data = {\"type_contact\": 2}\n response = self.client.get(reverse('contacts-filter'), data)\n # import pdb; pdb.set_trace()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"count\"], 2)", "def get_list_of_campaigns(self, limit=0, offset=0):\n logger.info(\"Function call: get_list_of_campaigns\")\n return self.__handle_result(self.__send_request('campaigns', 'GET', {'limit': limit or 0, 'offset': offset or 0}))", "def customers_presence(self):\n return self._customers_presence", "def getAllCampaigns(service):\n # Using AWQL to retrieve campaigns.\n query = (adwords.ServiceQueryBuilder()\n .Select('Id', 'Name', 'Status', 'StartDate', 'EndDate',\n 'BudgetId', 'BudgetStatus', 'BudgetName', 'Amount',\n 'BudgetReferenceCount', 'IsBudgetExplicitlyShared')\n .Limit(0, pageSize)\n .Build())\n campaigns = []\n for page in query.Pager(service):\n if page['entries']:\n for campaign in page['entries']:\n campaigns.append(campaign)\n else:\n pass\n return campaigns", "def imps_by_me(self):\n return self.caller.roster.accounthistory_set.last().initiated_contacts.all()", "def get_running_campaign(self):\n kwargs = {}\n kwargs['status'] = 1\n tday = datetime.utcnow().replace(tzinfo=utc)\n kwargs['startingdate__lte'] = datetime(tday.year, tday.month, tday.day,\n tday.hour, tday.minute, tday.second, tday.microsecond).replace(tzinfo=utc)\n kwargs['expirationdate__gte'] = datetime(tday.year, tday.month, tday.day,\n tday.hour, tday.minute, tday.second, tday.microsecond).replace(tzinfo=utc)\n\n s_time = \"%s:%s:%s\" % (\n str(tday.hour), str(tday.minute), str(tday.second))\n kwargs['daily_start_time__lte'] = datetime.strptime(s_time, '%H:%M:%S')\n kwargs['daily_stop_time__gte'] = datetime.strptime(s_time, '%H:%M:%S')\n\n # weekday status 1 - YES\n # self.model._meta.get_field(tday.strftime(\"%A\").lower()).value()\n kwargs[tday.strftime(\"%A\").lower()] = 1\n\n return Campaign.objects.filter(**kwargs)", "def get_comment_statuses(self):\n comment_statuses = self.session.query(CommentStatus).all()\n return comment_statuses", "def deps_status(self):\n if not self.deps:\n return [self.S_OK]\n\n return [d.status for d in self.deps]", "def get_contacts(self):\n contacts = Membership.objects.filter(entity = self, key_contact = True).order_by('importance_to_entity')\n return contacts", "def find_campaigns_as_admin(call_profile):\n\n \"\"\"Check Feature Access and Local Group Permissions\"\"\"\n user = call_profile.user\n local_group = find_local_group_by_user(user)\n if local_group is not None and has_call_permission_for_local_group(\n user,\n local_group,\n 'calls.change_callcampaign'\n ):\n return local_group.callcampaign_set.all().order_by(\n '-date_created'\n )\n\n \"\"\"Otherwise return empty list\"\"\"\n return CallCampaign.objects.none()", "def get_latest_statuses(self):\n self.status_lock.acquire()\n status = copy.deepcopy(self.cached_status)\n self.status_lock.release()\n return status", "def get_all(self) -> List[Status]:\n return self.__mapper.map_all(\n self.__repository.get_all(),\n Status\n )", "def list_active_customers():\n active_customers = 0\n for customer in cm.Customers:\n if customer.status == \"Active \":\n active_customers += 1\n return active_customers" ]
[ "0.6204409", "0.5799874", "0.5657505", "0.56244934", "0.5607043", "0.53916603", "0.53543097", "0.5330877", "0.5271085", "0.52295774", "0.5223064", "0.5221222", "0.52025133", "0.51303506", "0.51303506", "0.50963914", "0.50958073", "0.50875974", "0.50099576", "0.49826843", "0.49739078", "0.49525318", "0.49360093", "0.49300033", "0.49116597", "0.4906825", "0.49067482", "0.4883957", "0.48788595", "0.48767307" ]
0.6219367
0
Returns the count of ContactCampaignStatus objects for all Contacts that have not been called yet (status=1)
def get_not_contacted_count(self, seller_id): return self.get_not_contacted(seller_id).count()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_subscribers(self):\n return self.request(\"count:Contact\", [ None ])", "def status_counts(self):\n return self._status_counts", "def get_already_contacted_count(self, seller_id):\n return self.get_already_contacted(seller_id).count()", "def campaign_status(self):\n return self._campaign_status", "def contact_count(self, *args, **kwargs) -> Any:\n pass", "def get_already_contacted(self, seller_id):\n return self.contactcampaignstatus_set.filter(\n seller_id=seller_id, status=2\n )", "def get_not_contacted(self, seller_id):\n return self.contactcampaignstatus_set.filter(seller_id=seller_id, status__in=[1, 3])", "def count_status(list_of_results, domain_status):\n count = 0\n for dict in list_of_results:\n if dict[\"DomainStatus\"] == domain_status:\n count += 1\n return count", "def list_active_customers():\n try:\n active_customer_count = 0\n for _ in Customer.select().where(Customer.status == 'Active'):\n active_customer_count += 1\n return active_customer_count\n except Exception as unknown_error:\n print(f'Error. Not able to count number of active customers. {unknown_error}')", "def find_all_count(cls):\n return cls.query.filter(\n FormProcessMapper.status == str(FormProcessMapperStatus.Active.value)\n ).count()", "def count_messages(self, statuses=DEFAULT_MESSAGE_STATUSES):\n return self.request(\"count:Message\", [{\"status\": statuses}])", "def list_active_customers():\n try:\n active_customer_count = 0\n for _ in Customer.select().where(Customer.status == 'Active'):\n active_customer_count += 1\n logger.info(\n f\"Successfully counted active customers {active_customer_count}\"\n )\n return active_customer_count\n except Exception as unknown_error:\n logger.error(f\"Error. Failed to count customers. {unknown_error}\")\n print(\n f'Error. Not able to count number of active customers.'\n ' {unknown_error}'\n )", "def list_active_customers():\n active_customers = 0\n for customer in cm.Customers:\n if customer.status == \"Active \":\n active_customers += 1\n return active_customers", "def _get_campaigns(self, params):\n return self._api.account.get_campaigns(params={**params, **self._state_filter()}, fields=[self.state_pk])", "def contact_status(request):\n\n if request.user.is_authenticated():\n try:\n return {\n 'has_contact': Contact.objects.filter(user=request.user),\n \n }\n except DoesNotExist:\n pass\n return {\n \n }", "def count_codon_all(self):\n return Counter(list(self))", "def get_all_active_members(debug, contactsUrl):\n\n valid_date = str(datetime.date.today() - datetime.timedelta(days=7)) # 7 days ago in yyyy-mm-dd format\n\n #params = {'$filter': 'member eq true AND Status eq Active',\n # '$async': 'false'}\n params = {'$filter': \"member eq true AND ( Status eq Active OR ( Status eq PendingRenewal AND 'Renewal due' ge \" + valid_date + \"))\",\n '$async': 'false'}\n request_url = contactsUrl + '?' 
+ urllib.parse.urlencode(params)\n if debug: print('Making api call to get contacts')\n return api.execute_request(request_url).Contacts", "def test_get_filter_no_effective_contacts(self):\n data = {\"type_contact\": 2}\n response = self.client.get(reverse('contacts-filter'), data)\n # import pdb; pdb.set_trace()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"count\"], 2)", "def test_get_non_existent_campaigns_returns_empty_list(self):\n response = self.client.get(self.endpoint_url)\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_body, {\"campaigns\": []})", "def comment_count(self):\n return self.comments.filter_by(state=0).count()", "def no_locked_budgets(self) -> int:\n count = 0\n for budget in self.budgets.values():\n if budget.locked:\n count += 1\n return count", "def counts(self):\n counts = {\n 'total': self.app.db.jobs.count(),\n 'failed': self.app.db.jobs.find({'status': 'failed'}).count(),\n 'pending': self.app.db.jobs.find({'status': 'pending'}).count(),\n 'done': self.app.db.jobs.find({'status': 'done'}).count(),\n }\n return counts", "def list_active_customers():\n db_customers = Customers.select()\n LOGGER.debug(\"Calculating number of active customers\")\n # Technically used this in Lesson 03, but it is a comprehension. Another method added below.\n number_active = sum([int(x.status) for x in db_customers])\n LOGGER.info(\"There are %d active customers\", number_active)\n\n return number_active", "def available_statuses(self):\n return self.pipeline.get(self.status, ())", "def available_statuses(self):\n return self.pipeline.get(self.status, ())", "def list_active_customers():\n init_database()\n return Customer.select().where(Customer.active_status).count()", "def counts_by_test_result_status(self, status):\n return len([\n [key, event] for (key, event) in self.result_events.items()\n if event.get(\"status\", \"\") == status])", "def get_com_status(self, **kwargs):\n for com in self._coms:\n self.__chk_com(port_name=com)", "def list_active_customers():\n customer_active = Customer.select().where(Customer.status == 'Active')\n print('{} Active Customers'.format(len(customer_active)))\n return len(customer_active)", "def num_events_hosted(self, status=None):\n if status == EventStatus.LIVE:\n return len([event for event in self.events if event.is_ongoing()])\n elif status == EventStatus.PAST:\n return len([event for event in self.events if event.has_ended()])\n else:\n return len([event for event in self.events if not event.is_draft()])" ]
[ "0.58729506", "0.5697539", "0.56552345", "0.5644677", "0.5614512", "0.55899376", "0.5523953", "0.5406303", "0.5382926", "0.53309304", "0.532123", "0.5316718", "0.53129387", "0.5263166", "0.5259178", "0.52466035", "0.5164468", "0.51618105", "0.5160587", "0.5086446", "0.50863725", "0.5083086", "0.5079815", "0.50606215", "0.50606215", "0.5051437", "0.5033492", "0.5030135", "0.5012835", "0.49998033" ]
0.5831577
1
Returns the ContactCampaignStatus objects for all Contacts that have already been called (status=2, 3)
def get_already_contacted(self, seller_id): return self.contactcampaignstatus_set.filter( seller_id=seller_id, status=2 )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_active_contact(self):\n list_contact = Contact.objects.filter(phonebook__campaign=self.id,\n status=CONTACT_STATUS.ACTIVE).all()\n if not list_contact:\n return False\n return list_contact", "def find_campaigns_as_caller(caller):\n\n \"\"\"Get Campaigns for Caller\"\"\"\n campaigns_as_caller = caller.campaigns_as_caller.filter(\n status__in=[x.value[0] for x in call_campaign_statuses_for_caller],\n ).order_by('-date_created')\n\n \"\"\"Check Call Tool Feature Access for Campaigns\"\"\"\n campaigns = [x for x in campaigns_as_caller if has_call_feature_access_for_local_group(\n x.local_group\n )]\n\n return campaigns", "def get_all_active_members(debug, contactsUrl):\n\n valid_date = str(datetime.date.today() - datetime.timedelta(days=7)) # 7 days ago in yyyy-mm-dd format\n\n #params = {'$filter': 'member eq true AND Status eq Active',\n # '$async': 'false'}\n params = {'$filter': \"member eq true AND ( Status eq Active OR ( Status eq PendingRenewal AND 'Renewal due' ge \" + valid_date + \"))\",\n '$async': 'false'}\n request_url = contactsUrl + '?' + urllib.parse.urlencode(params)\n if debug: print('Making api call to get contacts')\n return api.execute_request(request_url).Contacts", "def campaign_status(self):\n return self._campaign_status", "def _get_campaigns(self, params):\n return self._api.account.get_campaigns(params={**params, **self._state_filter()}, fields=[self.state_pk])", "def contact_status(request):\n\n if request.user.is_authenticated():\n try:\n return {\n 'has_contact': Contact.objects.filter(user=request.user),\n \n }\n except DoesNotExist:\n pass\n return {\n \n }", "def get_not_contacted(self, seller_id):\n return self.contactcampaignstatus_set.filter(seller_id=seller_id, status__in=[1, 3])", "def get_patient_contacted_status(args):\n person_id = args[\"PersonID\"]\n c = get_most_recent_row_by_person_id(\"Contacted\", person_id,\n timestamp=\"UpdatedDateTime\", schema=app_schema)\n h = get_table_by_person_id(\"Contacted\", person_id,\n schema=app_schema).sort_values(by=[\"UpdatedDateTime\"], ascending=False)\n return {\"current\": c, \"historical\": h}", "def get_com_status(self, **kwargs):\n for com in self._coms:\n self.__chk_com(port_name=com)", "def update_contacts(self, contact_list):\n updated_contacts = 0\n request_list = list()\n\n # stale_contacts contains all old contacts at first, all current\n # contacts get then removed so that the remaining can get deleted\n stale_contacts = set(self.contacts)\n\n for contact in contact_list:\n c = Persona.query.get(contact[\"id\"])\n\n if c is None:\n c = Persona(id=contact[\"id\"], _stub=True)\n\n if c._stub is True:\n request_list.append(contact[\"id\"])\n\n try:\n # Old and new contact; remove from stale list\n stale_contacts.remove(c)\n except KeyError:\n # New contact\n self.contacts.append(c)\n updated_contacts += 1\n\n # Remove old contacts that are not new contacts\n for contact in stale_contacts:\n self.contacts.remove(contact)\n\n app.logger.info(\"Updated {}'s contacts: {} added, {} removed, {} requested\".format(\n self.username, updated_contacts, len(stale_contacts), len(request_list)))\n\n return request_list", "def fetch_contact_messages(self, org, contact, created_after, created_before):\n pass", "def get_contacts(self):\n contacts = Membership.objects.filter(entity = self, key_contact = True).order_by('importance_to_entity')\n return contacts", "def related_reports(self):\n return 
Report.objects.exclude(contact_email__isnull=True).filter(contact_email__iexact=self.contact_email).order_by('status', '-create_date')[:1000]", "def get_status(self, rows):\n\n\t\taccount_status = {}\n\t\tfor row in rows:\n\t\t\t(account_number, status) = (int(row[0]), row[2])\n\t\t\tif account_status.has_key(account_number):\n\t\t\t\taccount_status[account_number].append(status)\n\t\t\t\t# Log account information if account has more than 1 current active status\n\t\t\t\tself.log.debug(\"Multiple Current Statuses for Account Number:\" + account_number)\n\t\t\telse:\n\t\t\t\taccount_status[account_number] = [status]\n\n\t\treturn account_status", "def available_statuses(self):\n return self.pipeline.get(self.status, ())", "def available_statuses(self):\n return self.pipeline.get(self.status, ())", "def add_to_campaign(self, campaign_id):\n campaign = Campaign.objects.get(pk=campaign_id)\n if not ContactCampaignStatus.objects.filter(\n contact=self, campaign=campaign\n ).exists():\n # We first create the big object that will hold the status for the campaign\n ContactCampaignStatus.objects.create(contact=self, campaign=campaign)\n return _(\"Contact %s (ID: %s) added to campaign\") % (self.name, self.id)\n else:\n raise Exception(\n _(\"Contact %s (ID: %s) already in campaign\") % (self.name, self.id)\n )", "def list_campaigns(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), params=kwargs)", "def ListAllContacts(self):\n feed = self.gd_client.GetContacts()\n self.contacts = self.CleanPhoneNumbers(self.GetContactsInfo(feed))\n return self.contacts", "def get_contacts(self):\n\n\t\treturn self.__contacts", "def getallcontacts(self):\n feed_url = self.contacts_client.GetFeedUri(projection='full')\n total_read = 0\n while True:\n print('Retrieving contacts... 
(%d retrieved so far)' % total_read)\n feed = self.contacts_client.get_feed(uri=feed_url,\n auth_token=None,\n desired_class=gdata.contacts.data.ContactsFeed)\n total_read += len(feed.entry)\n for entry in feed.entry:\n yield entry\n next_link = feed.GetNextLink()\n if next_link is None:\n print('All contacts retrieved: %d total' % total_read)\n break\n feed_url = next_link.href", "def get_latest_statuses(self):\n self.status_lock.acquire()\n status = copy.deepcopy(self.cached_status)\n self.status_lock.release()\n return status", "def test_get_existent_campaigns_returns_campaigns_list(self):\n test_campaign = return_canned_campaign()\n test_campaign.create()\n response = self.client.get(self.endpoint_url)\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response_body, {\"campaigns\": [{\"id\": 1, \"name\": \"Test Campaign\"}]}\n )", "def test_get_filter_effective_contacts(self):\n data = {\"type_contact\": 1}\n response = self.client.get(reverse('contacts-filter'), data)\n # import pdb; pdb.set_trace()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"count\"], 2)", "def getVotersToContact(self):\n two_days_ago = date.today() - timedelta(2)\n year_ago = date.today() - timedelta(365)\n return self.voters.filter(\n Q(campaignstovoters__last_served=None) | Q(campaignstovoters__last_served__lt=two_days_ago),\n Q(campaignstovoters__last_contacted=None) | Q(campaignstovoters__last_contacted__lt=year_ago),\n campaignstovoters__is_active=True,\n is_active=True)", "def contact_list(self):\n return self._contact_list", "def get_contacts_list(self):\n return [(id + 1, contact) for id, contact in enumerate(self.contact_list)]", "def imps_by_me(self):\n return self.caller.roster.accounthistory_set.last().initiated_contacts.all()", "def get_list_of_campaigns(self, limit=0, offset=0):\n logger.info(\"Function call: get_list_of_campaigns\")\n return self.__handle_result(self.__send_request('campaigns', 'GET', {'limit': limit or 0, 'offset': offset or 0}))", "def check_status_code_for_all_calls(context, status):\n wrong_calls = [code for code in context.api_call_results if code != status]\n assert not wrong_calls, \"Wrong code returned {n} times: {codes}\".format(\n n=len(wrong_calls), codes=wrong_calls\n )" ]
[ "0.57124084", "0.56400394", "0.5551901", "0.55213046", "0.55115336", "0.54854417", "0.54402715", "0.54215056", "0.5379803", "0.53058594", "0.53005534", "0.51895696", "0.51827735", "0.51389533", "0.50814724", "0.50814724", "0.505643", "0.5013041", "0.50040466", "0.49269885", "0.49207124", "0.49136937", "0.48982498", "0.489619", "0.4892112", "0.48869306", "0.48776907", "0.48729104", "0.48678836", "0.48583648" ]
0.6468503
0
Returns the seller of the contact this activity is assigned to.
def get_contact_seller(self): return self.contact.seller
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def seller(self):\n if \"seller\" in self._prop_dict:\n return self._prop_dict[\"seller\"]\n else:\n return None", "def seller_from_user(context: Dict[str, Any]) -> Optional[Seller]:\n user: User = context[\"user\"]\n if user.is_authenticated:\n try:\n seller = Seller.objects.get(username=user.username)\n except:\n seller = None\n return seller\n return None", "def user(self):\n return self.contact.user", "def get_receiver(self):\n return self.receiver", "def seller(self, seller):\n\n self._seller = seller", "def recipient(self):\n return self._recipient", "def contact(self):\n return self._contact", "def contact(self):\n return self._contact", "def Besucher(self):\n return self.getAnsprechpartner()", "def get_seller_id(vehicle, api):\n seller = vehicle[SELLER]\n try:\n address = ADDRESS_FORMAT.format(seller[STREET_ADDRESS], seller[CITY], seller[STATE])\n except KeyError:\n send_slack_message(text=f'Address error for seller: {seller} and vehicle: {vehicle}')\n return -1\n\n # Search for existing seller\n db_seller = api.seller_get(address=address)\n if db_seller == -1:\n return -1\n elif len(db_seller) >= 1:\n return db_seller[0]['id']\n\n # New seller, add it to sellers table\n payload = {\n 'phone_number': seller.get(PHONE_NUMBER),\n 'name': seller[NAME],\n 'address': address,\n 'latitude': seller.get(LAT),\n 'longitude': seller.get(LNG),\n }\n new_seller = api.seller_post(**payload)\n return new_seller['id'] if new_seller != -1 else -1", "def get_seller_surname(self, id):\n try:\n MySQLConnector().execute_query('select surname from salemen where id = {0};'.format(id))\n surname = MySQLConnector().get_results()[0][0]\n except Error as er:\n logging.getLogger(__name__).error(\"Something went wrong with database %s\" % er)\n return surname", "def RecipientScreenName(self):\n return self._recipient_screen_name", "def receiver(self) -> str:\n return self._receiver", "def assignee(self):\n membership = UnitMembershipFactory(unit=self.unit)\n return membership.user", "def book_for_sale(self):\n try:\n return self.book_set.filter(book_type=get_model('books', 'Book').TO_SELL)[0]\n except:\n None", "def get_provider(self):\r\n if self.provided_by:\r\n return list(self.provided_by)[0]", "def sponsor(self) -> object:\n return self._sponsor", "def identifier(self):\n return self.contact.identifier", "def get_customer(self):\n return self._customer", "def get_customer(self):\n return self._customer", "def assigned_to(self) -> Optional[str]:\n return pulumi.get(self, \"assigned_to\")", "def receiver(self):\n l = self.link\n if l and l.is_receiver:\n return l\n else:\n return None", "def consumer_site_name(self):\n # get the consumer sitename from the lti request\n try:\n return self.request.POST[\"tool_consumer_instance_guid\"]\n except MultiValueDictKeyError:\n # in the case of OpenEDX, resource_link_id format is defined in settings.py file.\n # it is defined as follow: ``consumer-site-id_xblock-id``\n # example: ``dns.fr-724d6c2b5fcc4a17a26b9120a1d463aa``\n # except\n return self.request.POST.get(\"resource_link_id\", \"\").rsplit(\"-\", 1)[0]", "def getManufacturer(self):\n return self.manufacturer", "def Contact(self):\n return self.__contact", "def customer(self):\n return self.__customer", "def _get_supplier_(obj, line):\n \n iNo = 0\n strRet = None\n for item in obj.order_line:\n iNo += 1\n if (item.id == line.id):\n if (len(item.product_id.seller_ids)>0):\n strRet = item.product_id.seller_ids[0] and item.product_id.seller_ids[0].name.name or None\n break\n \n \n return strRet", "def 
source_contact(self):\n return self._source_contact", "def issued_by(self):\n return self._issued_by", "def get_seller_name(self, id):\n try:\n MySQLConnector().execute_query('select name from salemen where id = {0};'.format(id))\n name = MySQLConnector().get_results()[0][0]\n MySQLConnector().execute_query('select surname from salemen where id = {0};'.format(id))\n surname = MySQLConnector().get_results()[0][0]\n MySQLConnector().execute_query('select surname from salemen where id = {0};'.format(id))\n surname = MySQLConnector().get_results()[0][0]\n name_surname = name +', ' + surname\n except Error as er:\n logging.getLogger(__name__).error(\"Something went wrong with database %s\" % er)\n return name_surname" ]
[ "0.7485591", "0.5901869", "0.58646476", "0.57309294", "0.5610337", "0.55715734", "0.55709654", "0.55709654", "0.5546228", "0.5507627", "0.5496031", "0.5492068", "0.5451132", "0.5379029", "0.5316402", "0.52811015", "0.52564305", "0.5247079", "0.5247", "0.5247", "0.52305055", "0.5210369", "0.51889086", "0.5183171", "0.5181932", "0.5154748", "0.5137883", "0.51070815", "0.5106108", "0.5097695" ]
0.7961372
0
Returns a description of the direction of the activity. That can be In or Out.
def get_direction(self): directions = dict(ACTIVITY_DIRECTION_CHOICES) return directions.get(self.direction, "N/A")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def direction(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"direction\")", "def direction(self) -> str:\n return pulumi.get(self, \"direction\")", "def get_direction(self):\n return self.direction", "def get_direction(self):\r\n return self.__direction", "def direction(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"direction\")", "def traffic_direction(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"traffic_direction\")", "def direction(self) -> Optional[str]:\n return self._direction", "def direction(self):\n _direction = self._custom.get(\"direction\")\n if _direction is not None:\n return _direction\n\n _direction = self._infer_direction()\n self._custom[\"direction\"] = _direction\n\n return _direction", "def direction(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"direction\")", "def current_direction(self) -> str:\n if self._device.fan_dir == SENSEME_DIRECTION_FORWARD:\n return DIRECTION_FORWARD\n return DIRECTION_REVERSE", "def traffic_direction(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"traffic_direction\")", "def get_direction(self):\n return self.actual_coordinates[2]", "def direction(self):\n return self._direction.copy()", "def current_direction(self):\n return self._attributes.get(\"current_direction\")", "def getDirection(self):\n if 'N' in str(self.trip_update.trip.trip_id):\n direction = 'northbound'\n if 'S' in str(self.trip_update.trip.trip_id):\n direction = 'southbound'\n return direction", "def direction(self):\n if self._is_hit:\n return Direction.NOT_MOVING\n return self._dir", "def getDirection(self, direction: str):\n return direction", "def direction(self):\n return self.cfg.direction", "def traffic_direction(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"traffic_direction\")", "def direction(self) -> int:\n return self._direction", "def current_direction(self):\n return self.wink.current_fan_direction()", "def getDirection(self):\n return self.listener.direction", "def get_direction(event):\n return event['result']['parameters']['direction']", "def get_direction(strategy_context):\n direction_param = strategy_context['strategy']['opt_params'][0]\n\n if 'direction' in strategy_context['strategy']:\n warnings.warn(\"'direction' parameter in strategy_context['strategy']['direction'] is obsolete, \"\n \"please remove it to suppress this warning\")\n\n if direction_param.name.lower() != 'direction':\n raise ValueError('First OptParam of strategy must be Direction')\n\n for dir_value in direction_param.array:\n if dir_value != -1 and dir_value != 1:\n raise ValueError(\"Direction OptParam value must be -1 or 1\")\n\n if len(direction_param.array) == 1:\n if direction_param.array[0] == 1:\n return 1, 'Long'\n elif direction_param.array[0] == -1:\n return -1, 'Short'\n\n elif len(direction_param.array) == 2:\n return 0, 'Bidir'\n else:\n raise ValueError(\"Direction OptParam must contain 1 or 2 elements\")", "def getDirection(self):\n return self.ray.direction", "def direction(self):\n return atan2d(self.y, self.x)", "def direction(self):\n return self._dir", "def getDirection (self, time):\n return self._response.getDirection(time)", "def direction(self) -> np.ndarray:\n return self._direction", "def direction(self, segment_index, t):\n\n return self.segments[segment_index].direction(t)" ]
[ "0.7737809", "0.7479199", "0.74217033", "0.7307552", "0.7224062", "0.7208195", "0.7044395", "0.70183897", "0.70051634", "0.6928032", "0.691956", "0.68777025", "0.6851188", "0.6831645", "0.6790336", "0.67377764", "0.6731791", "0.67219067", "0.67007256", "0.6678404", "0.6668446", "0.6664572", "0.6657483", "0.66520506", "0.663244", "0.6517568", "0.6396553", "0.6375924", "0.63436925", "0.6339121" ]
0.7751099
0
Returns the last activity for the contact, on this exact campaign.
def get_last_activity(self): return Activity.objects.filter(campaign=self.campaign, status="P", contact=self.contact).latest("id")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_last_activity(self):\n last_activities = self.get_last_activities(n=1)\n return last_activities[0]", "def last_activity(self):\n if self.activity_set.exists():\n return self.activity_set.latest(\"id\")\n else:\n return None", "def last_time(self) -> datetime:\n return self.activities[-1].timestamp", "def getactivity(self) -> Optional[ba.Activity]:\n stats = self._stats()\n if stats is not None:\n return stats.getactivity()\n return None", "def get_activity():\n try:\n activity = Activity.objects.filter(active=1).latest('id')\n except Activity.DoesNotExist:\n activity = None\n return activity", "def activity(self):\n return self._activity", "def __last_activity(cls, card_info: Dict[str, str]) -> DateTime:\n last_activity_field = \"dateLastActivity\" if \"dateLastActivity\" in card_info else \"createdAt\"\n return cls.__parse_date_time(card_info[last_activity_field])", "def getactivity(self) -> Optional[ba.Activity]:\n if self._activity is None:\n return None\n return self._activity()", "def last_activity(self):\n\t\tif not self.lastseen:\n\t\t\treturn 'nooit'\n\n\t\t# this was once made by myself (Erick)\n\t\t# return self.lastseen.strftime(\"%d %B %Y, %H:%M:%S\")\n\n\t\t# this is later added by the author of the Flask Tutorial (Michael)\n\t\treturn moment(self.lastseen).format('LLL')", "def get_last_time(client):\n try:\n activity = None\n activities = client.get_activities(limit=10)\n # for else in python if you don't know please google it.\n for a in activities:\n if a.type == \"Run\":\n activity = a\n break\n else:\n return 0\n # add 30 minutes to make sure after the end of this activity\n end_date = activity.start_date + activity.elapsed_time + timedelta(minutes=30)\n return int(datetime.timestamp(end_date) * 1000)\n except:\n return 0", "def recent_email_sent(self):\n recent_contact_activity = self.activity().filter(verb='Contacted complainant:', description__contains='Email sent').first()\n if recent_contact_activity:\n try:\n email = recent_contact_activity.description.split(\"'\")[1]\n except IndexError:\n email = None\n return email\n return None", "def when_last_chat_with(self, actor_label):\n query = read_query('trust/when_last_chat_with') % actor_label\n response = self._submit_query(query)\n\n return response[0]['time']['value'].split('/')[-1] if response != [] else ''", "def last(self) -> 'outputs.CommitmentPeriodResponse':\n return pulumi.get(self, \"last\")", "def getActivity(self):\n return self.activity", "def get_last_event(self):\n return self.last_event_code", "def get_last_action(self):\n return self.a_", "def get_last_time(self):\n \n return self._last", "def get_last_task(self):\n return self.get_task_by_index(-1)", "def fetch(self, activity):\n return None, None", "async def getLastEmergency(self):\n last_emergency = await self.director.getItemVariableValue(\n self.item_id, \"LAST_EMERGENCY\"\n )\n return last_emergency", "def get_last_conversations(self):\n email_token = auth.current_user()[0]\n user_data, last_messages = self.friend_database.get_conversations(email_token)\n last_messages = [{k:v for k,v in m._asdict().items() if k != \"hidden_to\"} for m in last_messages]\n for i in range(len(last_messages)):\n last_messages[i][\"timestamp\"] = last_messages[i][\"timestamp\"].isoformat()\n response = []\n for i in range(len(last_messages)):\n response.append({\"user\": user_data[i], \"last_message\": last_messages[i]})\n return json.dumps(response), 200", "def last(self):\n data = self._http_get(\"last\")\n return data.json()", "def contact(self):\n 
return self._contact", "def contact(self):\n return self._contact", "def last_hit(self):\n return self._last_hit", "def get_masscan_last_output(self):\n return self._masscan_last_output", "def resulting_contact(self):\n return self._resulting_contact", "def get_last_result(self):\n return self.last_result", "def last_attempt(self) -> 'outputs.AttemptResponse':\n return pulumi.get(self, \"last_attempt\")", "def report(self):\n self.last_contacted = time.time()" ]
[ "0.8021665", "0.7515905", "0.68185985", "0.67223203", "0.6343814", "0.6304953", "0.63015753", "0.61637557", "0.6048075", "0.5997703", "0.59606355", "0.58743024", "0.57338977", "0.5712074", "0.5694064", "0.56693697", "0.5622185", "0.5594319", "0.55657274", "0.55362064", "0.55154335", "0.55109483", "0.54806376", "0.54806376", "0.54413915", "0.5436009", "0.5427226", "0.5410688", "0.54067487", "0.5390218" ]
0.87714857
0
Returns a description of the resolution for this campaign on this contact.
def get_campaign_resolution(self): campaign_resolutions = dict(CAMPAIGN_RESOLUTION_CHOICES) return campaign_resolutions.get(self.campaign_resolution, "N/A")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Resolution(self):\n\t\treturn self._get_attribute('resolution')", "def resolution(self):\n return self._resolution", "def res_description(self):\n return self.get(\"res_description\", decode=True)", "def get_resolution(self):\n return self.__resolution", "def get_description(self):\n return self['contact_name']", "def getResolution(self):\n return self.resolution", "def resolution(self) -> int:\n return self._resolution", "def resolution(self):\n return next(iter(self.resolutions()), None)", "def resolution(self) -> int:\n return self.options.resolution", "def description(self):\n return self._domain.description", "def get_description(self):\n return self['contactgroup_name']", "def info(self):\n return (f\"Match id: {self._id}\\n\"\n f\"dire_score: {self.dire_score}\\n\"\n f\"dire_team: {self.dire_team}\\n\"\n f\"duration: {self.duration}\\n\"\n f\"game_mode: {self.game_mode}\\n\"\n f\"patch: {self.patch}\\n\"\n f\"radiant_score: {self.radiant_score}\\n\"\n f\"radiant_team: {self.radiant_team}\\n\"\n f\"radiant_win: {self.radiant_win}\\n\"\n f\"skill: {self.skill}\\n\"\n f\"start_time: {self.start_time}\\n\")", "def resolution(self):\n assert self.has_video\n\n return self.__resolution", "def description(self):\n return self.visual_desc", "def get_display_info(self):\n return self.display_info", "def description(self):\n if \"description\" in self._prop_dict:\n return self._prop_dict[\"description\"]\n else:\n return None", "def display(self):\n caldesc = settings.CALENDAR_DESC[self.type]\n if \"display\" in caldesc:\n return caldesc[\"display\"](self)\n else:\n return self.pretty_duration()", "def getDescription(self):\n return self._description", "def get_description(self):\n return self._description", "def description(self):\n\n return self._get_field(\"description\")", "def getDescription(self):\n return self.description", "def long_description(self):\n return self._long_description", "def get_description(self):\n\n return self._description", "def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")", "def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")", "def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")", "def getResolution(self):\n # load it each time, since this setting is not limited to a single user\n projectSettingsDB = self.loadProjectSettings()\n try:\n resolution = projectSettingsDB[\"Resolution\"]\n return resolution\n except KeyError:\n msg = \"Database Error while reading projectSettings.json\"\n logger.error(msg)\n return None", "def response_description(self):\n return self._response_description", "def description(self):\n return self.definition.description", "def get_description(self):\n if self._visited is False:\n self._visited = True\n return self._desc\n else:\n return self._sdesc" ]
[ "0.66166127", "0.6521925", "0.6436431", "0.63877213", "0.62083656", "0.61682487", "0.596611", "0.5934394", "0.59258974", "0.5867041", "0.5824322", "0.578653", "0.57848024", "0.5735983", "0.5665601", "0.55896026", "0.558924", "0.556708", "0.55272025", "0.5522097", "0.5521857", "0.5511862", "0.55088496", "0.5508273", "0.5508273", "0.5508273", "0.55005753", "0.5485276", "0.5475121", "0.5472311" ]
0.73374283
0
Set the 'cancelled' flag so the thread exits.
def cancel(self): self.cancelled = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cancel(self):\n self.cancelled.set()", "def mark_cancelled(self):\n self.status = STATUS_CANCELED", "def do_uncancel(self):\r\n self.write({'cancelled': False})", "def set_status_update_waiter_cancelled(self):\n self.set_state(CHANNEL_MOVE_STATE_CANCELLED)\n self.set_status_update_waiter()", "def do_cancel(self):\r\n self.write({'cancelled': True})", "def cancel(self) -> None:\n with self._lock:\n if self.__thread and self.__thread.is_alive():\n self.__cancel.set()\n self.__thread.join()\n\n self.__cancel.clear()", "def cancel(self):\n if not self.is_cancelled:\n self.will_change_value_for('is_cancelled')\n self.cancelled = True\n # remove our dependencies so that we're ready, properly behaved operations\n # will honor the cancel flag\n self.dependencies.clear()\n self.did_change_value_for('is_cancelled')\n \n if not self.is_executing and not self.is_finished:\n with self.changing('is_finished'):\n self.finished = True", "def cancel(self):\n self.__canceled = True", "def _on_future_cancelled(self, promise):\n promise.setCanceled()", "def handleCancel(self):\n self.isTerminated = True\n self.terminate()", "def cancel(self):\n self.stop()\n self.make_callback('canceled')", "def _chain_cancel(_):\n if recvd.done():\n return\n if f.cancelled():\n recvd.cancel()", "def cancel():", "def _cancel(self, d):\n if self._finished:\n return\n try:\n raise CancelledError()\n except:\n self._caught_failure = failure.Failure()\n self._iterate()", "def stop(self):\n if not self._thread or self._abort:\n return\n\n self._abort = True\n self._thread.join()", "def cancel_inner():\n kernel32.SetEvent(cancel_event)", "def cancel(self):\n GameLoop.getInstance()._cancelation_token = True", "def cancel(self) -> asyncio.Future:\n pass # pragma: no cover", "def cancel(self):\n if self.cancelled() or self.done():\n return False\n self._is_done = True\n self._is_cancelled = True\n return True", "def set_abort_flag(self):\r\n self.abort_flag = True", "def set_abort_flag(self):\r\n self.abort_flag = True", "def cancel(self):\n pass", "def _cancel(self):\n self.waiter.set_result_if_pending(None)\n \n timer = self.timer\n if (timer is not None):\n self.timer = None\n timer.cancel()", "def cancelled(self):\n return self._is_cancelled", "def cancel(self):\n LOG.debug('cancelling %s', self)\n self._cancelled = True\n self.clear_callbacks() # not strictly necessary, but may release references\n while True:\n try:\n self._waitables.get_nowait().put_nowait(self.sentinel)\n except queue.Empty:\n break", "def cancel(self):\n self.waiter.set_result_if_pending(True)\n \n timer = self.timer\n if (timer is not None):\n self.timer = None\n timer.cancel()", "def future(self, cancel):", "def cancel(self):\n\n self.end()\n super().cancel()", "def cancel(self):", "def cancel(self):" ]
[ "0.75206125", "0.73317796", "0.725674", "0.71834475", "0.71784335", "0.7120356", "0.7109169", "0.6906426", "0.69026905", "0.683166", "0.67981595", "0.67532504", "0.6739393", "0.66940963", "0.6657798", "0.6641062", "0.6619799", "0.6615156", "0.6611922", "0.6600137", "0.6600137", "0.6540987", "0.6536481", "0.65248924", "0.6503098", "0.64854413", "0.64832634", "0.64816755", "0.6465512", "0.6465512" ]
0.74287105
1
Change the Node's sleep time between function calls.
def set_sleep_time(self, time): self.sleep_time = time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sleep(self, amount: float):\n time.sleep(amount)", "def sleep(sleep_time=0.250):\n time.sleep(sleep_time)", "def deepsleep(time_ms: int = None) -> None:", "def sleep_for(timeToSleep):\r\n time.sleep(timeToSleep)", "async def sleep(cls, delay: float) -> None:", "def sleep(seconds):\n time.sleep(seconds)", "def sleep(seconds):\n time.sleep(seconds)", "def sleep(self):\n time.sleep(0.2)", "def sleep(seconds):\r\n time.sleep(seconds)", "def sleep(self):\n self.sleep_after(1) # Can't be 0, that means 'don't sleep'", "def sleep(self, seconds=60):\n\t\ttime.sleep(seconds)", "def sleep(seconds):\n\n return Sleep(seconds)", "async def sleep(self, sleep_time):\n await asyncio.sleep(sleep_time)", "def sleep(interval):\n time.sleep(interval) # pragma: no cover", "def delay():\r\n time.sleep(2)", "def sleep(secs=1.0):\n time.sleep(secs)", "def set_sleep_time(self, milliseconds:int):\n self.send_command(f\"configure mainLoopSleepTime {milliseconds}\")", "def RandomDelay():\r\n sleep(random())", "def delay(seconds):\n\n # Perform the delay\n time.sleep(seconds)", "def sleep(self, seconds):\n time.sleep(seconds)", "def watch(self, func, seconds=3600):\n func\n time.sleep(seconds)", "def test_sleep():\n time.sleep(3600 * 24)", "def lightleep(time_ms: int = None) -> None:", "def sleep(self):\n current_time = time.time()\n\n if not self.next_time: # first call\n self.next_time = current_time + self.period\n return\n\n delta = self.next_time - current_time\n if delta > 0:\n time.sleep(delta)\n self.next_time += self.period", "def _sleep(self, sleep_time: float = 10) -> None:\n sleep_until_interrupt(sleep_time, lambda: self.stopped, interval=0.5)", "def sleep(seconds):\n # After load and initializing the PvAPI Python's built-in 'sleep' function\n # stops working (returns too early). The is a replacement.\n from time import sleep,time\n t = t0 = time()\n while t < t0+seconds: sleep(t0+seconds - t); t = time()", "def sleep_for(self, duration):\n raise NotImplementedError()", "def sleep(self):\n if self._stop is not None:\n timeLeft = max(self._stop - time.time(), 0) \n sleep = min(self._sleep, timeLeft)\n else:\n sleep = self._sleep\n time.sleep(sleep)", "def randomized_sleep(duration):\n sleep(duration + duration * random.random())", "def delay(ms: int, /) -> None:" ]
[ "0.7682915", "0.7612475", "0.7465841", "0.73927754", "0.7347401", "0.7290041", "0.7290041", "0.7264841", "0.7252344", "0.71803594", "0.7167534", "0.7126728", "0.7065027", "0.7045028", "0.7004403", "0.69281256", "0.6792978", "0.67551804", "0.67542213", "0.6740669", "0.67119396", "0.66845906", "0.6680043", "0.66797125", "0.66763103", "0.66333956", "0.6620252", "0.6567302", "0.6557237", "0.65394497" ]
0.76355064
1
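A short usage sketch for a setter like the positive document above; the Node class and its poll loop are assumptions made only to show how sleep_time is typically consumed between function calls:

import time

class Node:
    def __init__(self, sleep_time=1.0):
        self.sleep_time = sleep_time  # seconds to wait between function calls

    def set_sleep_time(self, seconds):
        # same behaviour as the document above (parameter renamed here so it
        # does not shadow the time module)
        self.sleep_time = seconds

    def poll(self, func, iterations=3):
        # call func repeatedly, sleeping self.sleep_time between calls
        for _ in range(iterations):
            func()
            time.sleep(self.sleep_time)

node = Node()
node.set_sleep_time(0.1)
node.poll(lambda: print("tick"))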
Register environment selection flags.
def register_env_select_flags(
    parser: ArgumentParser,
    default: CliEnv | None,
    multiple: bool = True,  # noqa: FBT001, FBT002
    group_only: bool = False,  # noqa: FBT001, FBT002
) -> ArgumentParser:
    if multiple:
        group = parser.add_argument_group("select target environment(s)")
        add_to: ArgumentParser = group.add_mutually_exclusive_group(required=False)  # type: ignore[assignment]
    else:
        add_to = parser
    if not group_only:
        if multiple:
            help_msg = "enumerate (ALL -> all environments, not set -> use <env_list> from config)"
        else:
            help_msg = "environment to run"
        add_to.add_argument("-e", dest="env", help=help_msg, default=default, type=CliEnv)
    if multiple:
        help_msg = "labels to evaluate"
        add_to.add_argument("-m", dest="labels", metavar="label", help=help_msg, default=[], type=str, nargs="+")
        help_msg = (
            "factors to evaluate (passing multiple factors means 'AND', passing this option multiple times means 'OR')"
        )
        add_to.add_argument(
            "-f",
            dest="factors",
            metavar="factor",
            help=help_msg,
            default=[],
            type=str,
            nargs="+",
            action="append",
        )
        help_msg = "exclude all environments selected that match this regular expression"
        add_to.add_argument("--skip-env", dest="skip_env", metavar="re", help=help_msg, default="", type=str)
    return add_to
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def env_callback(args):\n return {\n \"MY_CUSTOM_FLAG\": args.my_custom_flag\n }", "def register_globals ():\n\n # This feature is used to determine which OS we're on.\n # In future, this may become <target-os> and <host-os>\n # TODO: check this. Compatibility with bjam names? Subfeature for version?\n os = sys.platform\n feature.feature ('os', [os], ['propagated', 'link-incompatible'])\n\n\n # The two OS features define a known set of abstract OS names. The host-os is\n # the OS under which bjam is running. Even though this should really be a fixed\n # property we need to list all the values to prevent unknown value errors. Both\n # set the default value to the current OS to account for the default use case of\n # building on the target OS.\n feature.feature('host-os', __os_names)\n feature.set_default('host-os', default_host_os())\n\n feature.feature('target-os', __os_names, ['propagated', 'link-incompatible'])\n feature.set_default('target-os', default_host_os())\n\n feature.feature ('toolset', [], ['implicit', 'propagated' ,'symmetric'])\n\n feature.feature ('stdlib', ['native'], ['propagated', 'composite'])\n\n feature.feature ('link', ['shared', 'static'], ['propagated'])\n feature.feature ('runtime-link', ['shared', 'static'], ['propagated'])\n feature.feature ('runtime-debugging', ['on', 'off'], ['propagated'])\n\n\n feature.feature ('optimization', ['off', 'speed', 'space'], ['propagated'])\n feature.feature ('profiling', ['off', 'on'], ['propagated'])\n feature.feature ('inlining', ['off', 'on', 'full'], ['propagated'])\n\n feature.feature ('threading', ['single', 'multi'], ['propagated'])\n feature.feature ('rtti', ['on', 'off'], ['propagated'])\n feature.feature ('exception-handling', ['on', 'off'], ['propagated'])\n\n # Whether there is support for asynchronous EH (e.g. catching SEGVs).\n feature.feature ('asynch-exceptions', ['off', 'on'], ['propagated'])\n\n # Whether all extern \"C\" functions are considered nothrow by default.\n feature.feature ('extern-c-nothrow', ['off', 'on'], ['propagated'])\n\n feature.feature ('debug-symbols', ['on', 'off'], ['propagated'])\n feature.feature ('define', [], ['free'])\n feature.feature ('undef', [], ['free'])\n feature.feature ('include', [], ['free', 'path']) #order-sensitive\n feature.feature ('cflags', [], ['free'])\n feature.feature ('cxxflags', [], ['free'])\n feature.feature ('asmflags', [], ['free'])\n feature.feature ('linkflags', [], ['free'])\n feature.feature ('archiveflags', [], ['free'])\n feature.feature ('version', [], ['free'])\n\n feature.feature ('location-prefix', [], ['free'])\n\n feature.feature ('action', [], ['free'])\n\n\n # The following features are incidental, since\n # in themself they have no effect on build products.\n # Not making them incidental will result in problems in corner\n # cases, for example:\n #\n # unit-test a : a.cpp : <use>b ;\n # lib b : a.cpp b ;\n #\n # Here, if <use> is not incidental, we'll decide we have two\n # targets for a.obj with different properties, and will complain.\n #\n # Note that making feature incidental does not mean it's ignored. 
It may\n # be ignored when creating the virtual target, but the rest of build process\n # will use them.\n feature.feature ('use', [], ['free', 'dependency', 'incidental'])\n feature.feature ('dependency', [], ['free', 'dependency', 'incidental'])\n feature.feature ('implicit-dependency', [], ['free', 'dependency', 'incidental'])\n\n feature.feature('warnings', [\n 'on', # Enable default/\"reasonable\" warning level for the tool.\n 'all', # Enable all possible warnings issued by the tool.\n 'off'], # Disable all warnings issued by the tool.\n ['incidental', 'propagated'])\n\n feature.feature('warnings-as-errors', [\n 'off', # Do not fail the compilation if there are warnings.\n 'on'], # Fail the compilation if there are warnings.\n ['incidental', 'propagated'])\n\n feature.feature('c++-template-depth',\n [str(i) for i in range(64,1024+1,64)] +\n [str(i) for i in range(20,1000+1,10)] +\n # Maximum template instantiation depth guaranteed for ANSI/ISO C++\n # conforming programs.\n ['17'],\n ['incidental', 'optional', 'propagated'])\n\n feature.feature ('source', [], ['free', 'dependency', 'incidental'])\n feature.feature ('library', [], ['free', 'dependency', 'incidental'])\n feature.feature ('file', [], ['free', 'dependency', 'incidental'])\n feature.feature ('find-shared-library', [], ['free']) #order-sensitive ;\n feature.feature ('find-static-library', [], ['free']) #order-sensitive ;\n feature.feature ('library-path', [], ['free', 'path']) #order-sensitive ;\n # Internal feature.\n feature.feature ('library-file', [], ['free', 'dependency'])\n\n feature.feature ('name', [], ['free'])\n feature.feature ('tag', [], ['free'])\n feature.feature ('search', [], ['free', 'path']) #order-sensitive ;\n feature.feature ('location', [], ['free', 'path'])\n\n feature.feature ('dll-path', [], ['free', 'path'])\n feature.feature ('hardcode-dll-paths', ['true', 'false'], ['incidental'])\n\n\n # This is internal feature which holds the paths of all dependency\n # dynamic libraries. On Windows, it's needed so that we can all\n # those paths to PATH, when running applications.\n # On Linux, it's needed to add proper -rpath-link command line options.\n feature.feature ('xdll-path', [], ['free', 'path'])\n\n #provides means to specify def-file for windows dlls.\n feature.feature ('def-file', [], ['free', 'dependency'])\n\n # This feature is used to allow specific generators to run.\n # For example, QT tools can only be invoked when QT library\n # is used. In that case, <allow>qt will be in usage requirement\n # of the library.\n feature.feature ('allow', [], ['free'])\n\n # The addressing model to generate code for. Currently a limited set only\n # specifying the bit size of pointers.\n feature.feature('address-model', ['16', '32', '64'], ['propagated', 'optional'])\n\n # Type of CPU architecture to compile for.\n feature.feature('architecture', [\n # x86 and x86-64\n 'x86',\n\n # ia64\n 'ia64',\n\n # Sparc\n 'sparc',\n\n # RS/6000 & PowerPC\n 'power',\n\n # MIPS/SGI\n 'mips1', 'mips2', 'mips3', 'mips4', 'mips32', 'mips32r2', 'mips64',\n\n # HP/PA-RISC\n 'parisc',\n\n # Advanced RISC Machines\n 'arm',\n\n # Combined architectures for platforms/toolsets that support building for\n # multiple architectures at once. 
\"combined\" would be the default multi-arch\n # for the toolset.\n 'combined',\n 'combined-x86-power'],\n\n ['propagated', 'optional'])\n\n # The specific instruction set in an architecture to compile.\n feature.feature('instruction-set', [\n # x86 and x86-64\n 'native', 'i486', 'i586', 'i686', 'pentium', 'pentium-mmx', 'pentiumpro', 'pentium2', 'pentium3',\n 'pentium3m', 'pentium-m', 'pentium4', 'pentium4m', 'prescott', 'nocona', 'core2', 'corei7', 'corei7-avx', 'core-avx-i',\n 'conroe', 'conroe-xe', 'conroe-l', 'allendale', 'merom', 'merom-xe', 'kentsfield', 'kentsfield-xe', 'penryn', 'wolfdale',\n 'yorksfield', 'nehalem', 'sandy-bridge', 'ivy-bridge', 'haswell', 'k6', 'k6-2', 'k6-3', 'athlon', 'athlon-tbird', 'athlon-4', 'athlon-xp',\n 'athlon-mp', 'k8', 'opteron', 'athlon64', 'athlon-fx', 'k8-sse3', 'opteron-sse3', 'athlon64-sse3', 'amdfam10', 'barcelona',\n 'bdver1', 'bdver2', 'bdver3', 'btver1', 'btver2', 'winchip-c6', 'winchip2', 'c3', 'c3-2', 'atom',\n\n # ia64\n 'itanium', 'itanium1', 'merced', 'itanium2', 'mckinley',\n\n # Sparc\n 'v7', 'cypress', 'v8', 'supersparc', 'sparclite', 'hypersparc', 'sparclite86x', 'f930', 'f934',\n 'sparclet', 'tsc701', 'v9', 'ultrasparc', 'ultrasparc3',\n\n # RS/6000 & PowerPC\n '401', '403', '405', '405fp', '440', '440fp', '505', '601', '602',\n '603', '603e', '604', '604e', '620', '630', '740', '7400',\n '7450', '750', '801', '821', '823', '860', '970', '8540',\n 'power-common', 'ec603e', 'g3', 'g4', 'g5', 'power', 'power2',\n 'power3', 'power4', 'power5', 'powerpc', 'powerpc64', 'rios',\n 'rios1', 'rsc', 'rios2', 'rs64a',\n\n # MIPS\n '4kc', '4kp', '5kc', '20kc', 'm4k', 'r2000', 'r3000', 'r3900', 'r4000',\n 'r4100', 'r4300', 'r4400', 'r4600', 'r4650',\n 'r6000', 'r8000', 'rm7000', 'rm9000', 'orion', 'sb1', 'vr4100',\n 'vr4111', 'vr4120', 'vr4130', 'vr4300',\n 'vr5000', 'vr5400', 'vr5500',\n\n # HP/PA-RISC\n '700', '7100', '7100lc', '7200', '7300', '8000',\n\n # Advanced RISC Machines\n 'armv2', 'armv2a', 'armv3', 'armv3m', 'armv4', 'armv4t', 'armv5',\n 'armv5t', 'armv5te', 'armv6', 'armv6j', 'iwmmxt', 'ep9312'],\n\n ['propagated', 'optional'])\n\n feature.feature('conditional', [], ['incidental', 'free'])\n\n # The value of 'no' prevents building of a target.\n feature.feature('build', ['yes', 'no'], ['optional'])\n\n # Windows-specific features\n feature.feature ('user-interface', ['console', 'gui', 'wince', 'native', 'auto'], [])\n feature.feature ('variant', [], ['implicit', 'composite', 'propagated', 'symmetric'])\n\n\n variant ('debug', ['<optimization>off', '<debug-symbols>on', '<inlining>off', '<runtime-debugging>on'])\n variant ('release', ['<optimization>speed', '<debug-symbols>off', '<inlining>full',\n '<runtime-debugging>off', '<define>NDEBUG'])\n variant ('profile', ['release'], ['<profiling>on', '<debug-symbols>on'])", "def register_env_creator(self):\n raise NotImplementedError(\"Subclasses should implement this to call ray.tune.registry.register_env\")", "def AddRegisterFlagsToParser(parser):\n _AddDNSSettingsFlagsToParser(parser, mutation_op=MutationOp.REGISTER)\n _AddContactSettingsFlagsToParser(parser, mutation_op=MutationOp.REGISTER)\n _AddPriceFlagsToParser(parser, MutationOp.REGISTER)\n\n messages = apis.GetMessagesModule('domains', API_VERSION_FOR_FLAGS)\n notice_choices = ContactNoticeEnumMapper(messages).choices.copy()\n notice_choices.update({\n 'hsts-preloaded':\n ('By sending this notice you acknowledge that the domain is '\n 'preloaded on the HTTP Strict Transport Security list in browsers. 
'\n 'Serving a website on such domain will require an SSL certificate. '\n 'See https://support.google.com/domains/answer/7638036 for details.')\n })\n base.Argument( # This is not a go/gcloud-style#commonly-used-flags.\n '--notices',\n help='Notices about special properties of certain domains or contacts.',\n metavar='NOTICE',\n type=arg_parsers.ArgList(element_type=str,\n choices=notice_choices)).AddToParser(parser)", "def test_environment_marker_extras(self, data):\n reqset = self.basic_reqset()\n req = InstallRequirement.from_editable(\n data.packages.join(\"LocalEnvironMarker\"))\n reqset.add_requirement(req)\n finder = PackageFinder([data.find_links], [], session=PipSession())\n reqset.prepare_files(finder)\n # This is hacky but does test both case in py2 and py3\n if sys.version_info[:2] in ((2, 7), (3, 4)):\n assert reqset.has_requirement('simple')\n else:\n assert not reqset.has_requirement('simple')", "def test_expand_environment_variables(self):\n include_prefixes = ['-I']\n db = CppProperties(include_prefixes)\n environ['TEST_VARIABLE_TO_EXPAND'] = '/lib_include_dir'\n\n expected = [\n Flag('-I', path.normpath('/lib_include_dir')),\n ]\n\n scope = SearchScope(from_folder=_get_test_folder('environment'))\n self.assertEqual(expected, db.get_flags(search_scope=scope))", "def register_environment(name, factory, override=False):\n global registered_environment\n\n if name in registered_environment:\n warning(f'{name} was already registered, use override=True to ignore')\n\n if not override:\n return\n\n registered_environment[name] = factory", "def set_c_flags_hook(build_ext, ext):\n std_flag = get_c_std_flag(build_ext.compiler)\n if std_flag is not None:\n ext.extra_compile_args.append(std_flag)", "def register_environment(env_info):\n prospective_env_info = VirtualEnvInfo(env_info)\n for _env in virtualenvs:\n if _env == prospective_env_info:\n _env.merge(prospective_env_info)\n else:\n virtualenvs.append(prospective_env_info)\n\n if len(virtualenvs) == 0:\n virtualenvs.append(prospective_env_info)", "def _create_extra_environment(self):\n return {}", "def register_opts():\n _register_api_opts()\n _register_db_opts()", "def setup_flags(self):\n self.io_args.color = self.io_args.color_full\n self.io_args.rig_in = self.io_args.rig\n self.io_args.matches = os.path.join(self.io_args.output_root, \"matches.json\")\n self.io_args.rig_out = os.path.join(self.io_args.output_root, \"rig.json\")", "def envs(self, envs):\n self._instructions_setter('ENV', envs)", "def use_flags(*funcs):\n\n global GLOBAL_STATUS\n if funcs:\n GLOBAL_STATUS.discard('ERRORS')\n GLOBAL_STATUS.add('FLAGS')\n else:\n GLOBAL_STATUS.discard('ERRORS')\n GLOBAL_STATUS.discard('FLAGS')\n\n for name in _get_func_names(funcs):\n if 'error' not in name and 'flag' not in name:\n globals()[name] = globals()[name].flag", "def add_env(self, env):\n pass", "def test_build_system_flags_not_implemented(self, temp_env):\n s = spack.spec.Spec(\"mpileaks cppflags=-g\").concretized()\n s.package.flag_handler = build_system_flags\n try:\n spack.build_environment.setup_package(s.package, False)\n assert False\n except NotImplementedError:\n assert True", "def fiddle_with_flags():\n flags['c++'] += '-arch x86_64 -bundle'\n flags['c'] += '-arch x86_64'", "def define_flags():\n define_flag = {\n 'boolean': flags.DEFINE_boolean,\n 'float': flags.DEFINE_float,\n 'integer': flags.DEFINE_integer,\n 'string': flags.DEFINE_string,\n }\n for name, param_spec in six.iteritems(proparams._DEFAULT_PARAMS):\n define_flag[param_spec.flag_type](name, 
param_spec.default_value, param_spec.description)\n flags.declare_key_flag(name)", "def register_env(cls, to_register=None, *, name: Optional[str] = None):\n from habitat import RLEnv\n\n return cls._register_impl(\"env\", to_register, name, assert_type=RLEnv)", "def test_setFlags(self):\n self._flagsTest('setFlags', b'FLAGS')", "def register_envvars(self, *envvars):\n invalid_envvars = [\n envvar\n for envvar in envvars\n if re.match(r\"^\\w+$\", envvar, flags=re.ASCII) is None\n ]\n if invalid_envvars:\n raise WorkflowError(\n f\"Invalid environment variables requested: {', '.join(map(repr, invalid_envvars))}. \"\n \"Environment variable names may only contain alphanumeric characters and the underscore. \"\n )\n undefined = set(var for var in envvars if var not in os.environ)\n if self.check_envvars and undefined:\n raise WorkflowError(\n \"The following environment variables are requested by the workflow but undefined. \"\n \"Please make sure that they are correctly defined before running Snakemake:\\n\"\n \"{}\".format(\"\\n\".join(undefined))\n )\n self.envvars.update(envvars)", "def SetupEnvironment(self):\n pass", "def register_options(cls, register):", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def add_env_arg(self, name, value):\n self._env[name] = value" ]
[ "0.5905224", "0.56406796", "0.5604462", "0.55191755", "0.5496644", "0.5454322", "0.5442913", "0.54183125", "0.5416355", "0.53807175", "0.5366575", "0.53401583", "0.53310597", "0.5293394", "0.5283587", "0.52786773", "0.5274902", "0.52701205", "0.52452815", "0.52281153", "0.5196968", "0.5140297", "0.5133237", "0.5118673", "0.5118673", "0.5118673", "0.5118673", "0.5118673", "0.5118673", "0.5102203" ]
0.66334325
0
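A simplified, self-contained sketch of the argparse pattern the positive document above relies on (an argument group containing a mutually exclusive group for -e/-m/-f). The tox-specific CliEnv type is replaced with plain strings, so this is an illustration of the pattern, not tox's actual API:

from argparse import ArgumentParser

def add_env_select_flags(parser):
    group = parser.add_argument_group("select target environment(s)")
    add_to = group.add_mutually_exclusive_group(required=False)
    add_to.add_argument("-e", dest="env", default=None, help="environment(s) to run")
    add_to.add_argument("-m", dest="labels", metavar="label", nargs="+", default=[],
                        help="labels to evaluate")
    add_to.add_argument("-f", dest="factors", metavar="factor", nargs="+", default=[],
                        action="append",
                        help="factors to evaluate ('AND' within one -f, 'OR' across repeats)")
    parser.add_argument("--skip-env", dest="skip_env", metavar="re", default="",
                        help="exclude environments matching this regular expression")
    return add_to

parser = ArgumentParser()
add_env_select_flags(parser)
print(parser.parse_args(["-e", "py311"]))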
epsilon decays as the current episode gets higher, because we want the agent to explore more in earlier episodes (when it hasn't learned anything) and explore less in later episodes (when it has learned something); i.e. we assume that episode number is directly related to learning
def epsilon(current_episode, num_episodes):
    # return 1 - (current_episode/num_episodes)
    return .5 * .9**current_episode
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def q_learning(env, agent, num_episodes, batch_size, epsilon, epsilon_min, epsilon_decay, folder):\n \n # Keeps track of useful statistics\n stats = plotting.EpisodeStats(\n episode_lengths=np.zeros(num_episodes),\n episode_rewards=np.zeros(num_episodes))\n\n\n for i_episode in range(num_episodes):\n if epsilon > epsilon_min and i_episode > 500:\n # complete random exploration 500 episodes, \n # then decrase exploration till epsilon less than epsilon_min\n epsilon *= epsilon_decay\n sys.stdout.flush()\n\n state = env.reset()\n state = np.reshape(state, [1, env.nS])\n\n \n for t in range(MAX_STEP):\n\n ## Decide action\n action = agent.act(state, epsilon)\n ## Advance the game to the next frame based on the action\n next_state, reward, done, _ = env.step(action)\n\n env.my_render(folder)\n\n stats.episode_rewards[i_episode] += reward\n stats.episode_lengths[i_episode] = t+1\n\n next_state = np.reshape(next_state, [1, env.nS])\n ## Remember the previous state, action, reward, and done\n agent.remember(state, action, reward, next_state, done)\n ## make next_state the new current state for the next frame.\n state = next_state ## change to copy.copy(next_state), if it is a array\n\n if len(agent.memory) > batch_size:\n agent.replay(batch_size) \n\n if done: \n break\n \n mean_score = stats.episode_rewards[i_episode]/stats.episode_lengths[i_episode]\n print(\"episode: {}/{}, score: {}, e: {:.2}, steps:{}, mean score:{:.2}\"\n .format(i_episode, num_episodes, stats.episode_rewards[i_episode], epsilon, \n stats.episode_lengths[i_episode], \n mean_score))\n #if(i_episode > 200):\n write_csv(folder, i_episode, stats.episode_lengths[i_episode], mean_score)\n if(i_episode%50 == 0):\n agent.save(folder + \"_qn\" + str(i_episode) + \".h5\") \n agent.save(folder + \"_qn-final\" + \".h5\") \n\n return stats", "def train(n_episodes=1000, max_n_steps=300, eps_start=1.0, eps_end=0.01, eps_decay=0.995, strCheckpointFile='checkpoint.pth'):\n\n global env\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n num_saves = 0\n for i_episode in range(1, n_episodes + 1):\n env_info = env.reset(train_mode=True)[brain_name] # reset the environment\n state = env_info.vector_observations[0] # get the current state\n score = 0 # initialize the score\n last_t = max_n_steps\n for t in range(max_n_steps):\n action = agent.act(state, eps) # agent returns an epsilon-greedy action based on state\n env_info = env.step(action)[brain_name] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n agent.step(state, action, reward, next_state, done) # records experience and learns (depending on settings)\n state = next_state\n score += reward\n if done:\n last_t = t + 1\n break\n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay * eps) # decrease epsilon\n print('\\rEpisode {}\\tNum steps: {}\\tAverage Score: {:.2f}'.format(i_episode, last_t, np.mean(scores_window)))\n # if i_episode % 100 == 0:\n # print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n if np.mean(scores_window) >= 13: # win condition in course\n if num_saves == 0:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode - 100, np.mean(scores_window)))\n 
print('\\nTraining will continue and the checkpoint will be overwritten every 100 episodes')\n print('\\nSaving a checkpoint now, you may interrupt code execution with eg Ctrl+C')\n torch.save(agent.qnetwork_local.state_dict(), strCheckpointFile)\n else:\n if i_episode % 100 == 0:\n print('\\nSaving another checkpoint now, you may interrupt code execution with eg Ctrl+C')\n torch.save(agent.qnetwork_local.state_dict(), strCheckpointFile)\n num_saves += 1\n\n env.close()\n\n # plot the scores\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.plot(np.arange(len(scores)), scores)\n plt.ylabel('Score')\n plt.xlabel('Episode #')\n # plt.show()\n plt.savefig('training_score_by_episode.png')\n return scores", "def run_epsilon(env, num_of_bandits, iterations, episodes):\n\n # Initialize total mean rewards array per episode by zero\n epsilon_rewards = np.zeros(iterations)\n\n for i in range(episodes):\n print(f\"Running Epsilon episode:{i}\")\n\n n = 1\n action_count_per_bandit = np.ones(num_of_bandits)\n mean_reward = 0\n total_rewards = np.zeros(iterations)\n mean_reward_per_bandit = np.zeros(num_of_bandits)\n env.reset()\n epsilon = 0.5\n\n for j in range(iterations):\n a = get_epsilon_action(epsilon, env, mean_reward_per_bandit)\n\n observation, reward, done, info = env.step(a)\n\n # Update counts\n n += 1\n action_count_per_bandit[a] += 1\n\n # Update mean rewards\n mean_reward = mean_reward + (\n reward - mean_reward) / n\n\n # Update mean rewards per bandit\n mean_reward_per_bandit[a] = mean_reward_per_bandit[a] + (\n reward - mean_reward_per_bandit[a]) / action_count_per_bandit[a]\n\n # Capture mean rewards per iteration\n total_rewards[j] = mean_reward\n\n # Update mean episode rewards once all the iterations of the episode are done\n epsilon_rewards = epsilon_rewards + (total_rewards - epsilon_rewards) / (i + 1)\n\n return epsilon_rewards", "def update_epsilon(self):\n\t\tif self.epsilon > self.epsilon_min:\n\t\t\tself.epsilon *= self.epsilon_decay", "def update_epsilon(self):\n self.epsilon = self.epsilon * self.decay", "def current_epsilon(self):\n t = self.action_requests\n T = self.exploration_period\n if(t >= T):\n return self.epsilon_final\n\n epsilon0 = self.epsilon_initial\n epsilonT = self.epsilon_final\n\n return epsilon0 - (t * (epsilon0 - epsilonT)) / T", "def hdqn_learning(\n env,\n agent,\n num_episodes,\n exploration_schedule,\n gamma=1.0,\n ):\n ###############\n # RUN ENV #\n ###############\n # Keep track of useful statistics\n stats = plotting.EpisodeStats(\n episode_lengths=np.zeros(num_episodes),\n episode_rewards=np.zeros(num_episodes))\n n_thousand_episode = int(np.floor(num_episodes / 1000))\n visits = np.zeros((n_thousand_episode, env.nS))\n total_timestep = 0\n meta_timestep = 0\n ctrl_timestep = defaultdict(int)\n\n for i_thousand_episode in range(n_thousand_episode):\n for i_episode in range(1000):\n episode_length = 0\n current_state = env.reset()\n visits[i_thousand_episode][current_state-1] += 1\n encoded_current_state = one_hot_state(current_state)\n \n done = False\n while not done:\n meta_timestep += 1\n # Get annealing exploration rate (epislon) from exploration_schedule\n meta_epsilon = exploration_schedule.value(total_timestep)\n goal = agent.select_goal(encoded_current_state, meta_epsilon)[0]\n encoded_goal = one_hot_goal(goal+1)\n\n total_extrinsic_reward = 0\n goal_reached = False\n s1 = encoded_current_state\n while not done and not goal_reached:\n total_timestep += 1\n episode_length += 1\n ctrl_timestep[goal] += 1\n # Get annealing 
exploration rate (epislon) from exploration_schedule\n ctrl_epsilon = exploration_schedule.value(total_timestep)\n joint_state_goal = np.concatenate([encoded_current_state, encoded_goal], axis=1)\n action = agent.select_action(joint_state_goal, ctrl_epsilon)[0]\n ### Step the env and store the transition\n next_state, extrinsic_reward, done, _ = env.step(action)\n # Update statistics\n stats.episode_rewards[i_thousand_episode*1000 + i_episode] += extrinsic_reward\n stats.episode_lengths[i_thousand_episode*1000 + i_episode] = episode_length\n visits[i_thousand_episode][next_state-1] += 1\n\n encoded_next_state = one_hot_state(next_state)\n intrinsic_reward = agent.get_intrinsic_reward(goal+1, next_state)\n goal_reached = next_state == (goal+1)\n\n joint_next_state_goal = np.concatenate([encoded_next_state, encoded_goal], axis=1)\n #print (joint_state_goal, action, joint_next_state_goal, intrinsic_reward, done)\n agent.ctrl_replay_memory.push(joint_state_goal, action, joint_next_state_goal, intrinsic_reward, done)\n # Update Both meta-controller and controller\n agent.update_meta_controller(gamma)\n agent.update_controller(gamma)\n\n total_extrinsic_reward += extrinsic_reward\n current_state = next_state\n encoded_current_state = encoded_next_state\n # Goal Finished\n agent.meta_replay_memory.push(s1, goal, encoded_next_state, total_extrinsic_reward, done)\n\n return agent, stats, visits", "def run_episode(self):\n # Reset environment\n self.agent.env.reset()\n done = False\n step_count = 0\n total_reward = 0\n\n while not done:\n reward, done = self.agent.explore()\n step_count += 1\n if step_count % 100 == 0:\n print('step count {}'.format(step_count))\n total_reward += self.agent.params['gamma']**step_count * reward\n return step_count, total_reward", "def epsilon_delta(self):", "def evaluate(self, env, num_episodes, max_episode_length=None\n , show_detail = False):\n episode_counter = 1;\n average_reward = 0;\n average_episode_length = 0;\n time_this, ob_this, is_terminal = env.reset()\n\n ob_this = self._preprocessor.process_observation(time_this, ob_this)\n\n obs_this_net = self._preprocessor.process_observation_for_network(\n ob_this, self._min_array, self._max_array)\n\n state_this_net = np.append(obs_this_net[0:13], obs_this_net[14:]).reshape(1,16)\n setpoint_this = ob_this[6:8]\n \n this_ep_reward = 0;\n this_ep_length = 0;\n while episode_counter <= num_episodes:\n action_mem = self.select_action(state_this_net, stage = 'testing');\n # covert command to setpoint action \n action = self._policy.process_action(setpoint_this, action_mem)\n\n time_next, ob_next, is_terminal = env.step(action)\n \n ob_next = self._preprocessor.process_observation(time_next, ob_next)\n\n setpoint_next = ob_next[6:8]\n\n obs_next_net = self._preprocessor.process_observation_for_network(\n ob_next, self._min_array, self._max_array)\n \n state_next_net = np.append(obs_next_net[0:13], obs_next_net[14:]).reshape(1,16)\n \n #10:PMV, 11: Occupant number , -2: power\n reward = self._preprocessor.process_reward(obs_next_net[12:15])\n \n this_ep_reward += reward;\n \n #Check if exceed the max_episode_length\n if max_episode_length is not None and \\\n this_ep_length >= max_episode_length:\n is_terminal = True;\n #Check whether to start a new episode\n if is_terminal:\n time_this, ob_this, is_terminal = env.reset()\n ob_this = self._preprocessor.process_observation(time_this, ob_this)\n setpoint_this = ob_this[6:8]\n obs_this_net = self._preprocessor.process_observation_for_network(\n ob_this, self._min_array, 
self._max_array)\n\n state_this_net = np.append(obs_this_net[0:13], \n obs_this_net[14:]).reshape(1,16)\n\n average_reward = (average_reward * (episode_counter - 1) \n + this_ep_reward) / episode_counter;\n average_episode_length = (average_episode_length \n * (episode_counter - 1) \n + this_ep_length) / episode_counter;\n \n episode_counter += 1;\n if show_detail:\n logging.info ('Episode ends. Cumulative reward is %0.04f '\n 'episode length is %d, average reward by now is %0.04f,'\n ' average episode length by now is %d.' %(this_ep_reward,\n this_ep_length,\n average_reward,\n average_episode_length));\n this_ep_length = 0;\n this_ep_reward = 0;\n \n else:\n ob_this = ob_next\n setpoint_this = setpoint_next\n state_this_net = state_next_net\n time_this = time_next\n this_ep_length += 1;\n return (average_reward, average_episode_length);", "def dqn(agent, n_episodes=1500, eps_start=1.0, eps_end=0.01, eps_decay=0.995, score_threshold=13.0):\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n for i_episode in range(0, n_episodes):\n brain_info = env.reset(train_mode=True)[brain_name]\n state = brain_info.vector_observations[0]\n score = 0\n while True:\n action = agent.act(state, eps, training_mode=True)\n env_info = env.step(action)[brain_name]\n next_state = env_info.vector_observations[0]\n reward = env_info.rewards[0]\n done = env_info.local_done[0]\n agent.step(state, action, reward, next_state, done)\n score += reward\n state = next_state\n if done:\n break \n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay*eps) # decrease epsilon\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end=\"\")\n if i_episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n if np.mean(scores_window)>=score_threshold:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))\n torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth')\n break\n return scores", "def run_episode(self, mode=0, eps=0.):\n if mode==0:\n eps = 0.\n done = False\n score = 0 \n \n while not done:\n state = self.env_info.vector_observations[0] # get the current state\n action = self.agent.act(state, eps=eps) # get an action using epsilon greedy policy\n self.env_info = self.env.step(action)[self.brain_name] # send the action to the environment\n next_state = self.env_info.vector_observations[0] # get the next state\n reward = self.env_info.rewards[0] # get the reward\n done = self.env_info.local_done[0] # see if episode has finished\n \n if mode == 1:\n self.agent.step(state, action, reward, next_state, done)\n \n score += reward\n \n self.reset_env() # reset the environment\n \n return score", "def evaluate(self, env, num_episode, epsilon):\n num_environment = env.num_process\n env.reset()\n reward_of_each_environment = np.zeros(num_environment)\n rewards_list = []\n\n num_finished_episode = 0\n\n while num_finished_episode < num_episode:\n old_state, action, reward, new_state, is_terminal = env.get_state()\n action = self.get_action(new_state, epsilon)\n env.take_action(action)\n for i, r, is_t in zip(range(num_environment), reward, is_terminal):\n if not is_t:\n reward_of_each_environment[i] += r\n else:\n rewards_list.append(reward_of_each_environment[i])\n reward_of_each_environment[i] 
= 0\n num_finished_episode += 1\n return np.mean(rewards_list), np.std(rewards_list), self.epsilon", "def Q_learning_test(env,alpha,gamma,episodes, q_table):\n %time\n \n # For plotting metrics\n all_epochs = []\n all_penalties = []\n rewards = []\n \n total_reward = 0\n \n for i in range(1, episodes+1):\n state = env.reset()\n episode_rewards = []\n\n epochs, penalties, reward, = 0, 0, 0\n done = False\n\n while not done:\n \n action = np.argmax(q_table[state]) # Exploit learned values by choosing optimal values\n next_state, reward, done, info = env.step(action) \n\n\n if reward == -10:\n penalties += 1\n \n state = next_state\n episode_rewards.append(reward)\n epochs += 1\n \n if done == True:\n break \n if epochs == 1000:\n break \n \n total_reward += reward\n rewards.append(np.sum(episode_rewards))\n \n if i % 1000 == 0:\n clear_output(wait=True)\n print(f\"Episode: {i}\")\n\n \n print(\"Training finished.\\n\")\n \n \n plt.plot(savgol_filter(rewards, 1001, 3, mode = \"interp\"))\n plt.title(\"Smoothened testing reward per episode\", pad = 30 , size = BIGGER_SIZE)\n plt.xlabel('Episodes', labelpad = 20);\n plt.ylabel('Total Reward', labelpad = 20);\n plt.tick_params(axis='both', which='major', labelsize=16);\n plt.tick_params(axis='both', which='minor', labelsize=16);\n #plt.xlim(100000, 200000);\n #plt.ylim(0,50)\n # plt.xticks(np.arange(0, episodes+1, 5000));\n # plt.yticks(np.arange(min(rewards), max(rewards)+1, 1000));", "def new_episode(self, scores):\n\n # Keep track of an average score for use with annealing epsilon,\n # TODO: this currently lives in new_episode() because we only want to\n # update epsilon each episode, not each timestep, currently. This should\n # be further investigate about moving this into the epsilon property\n # itself instead of here\n avg_across = np.clip(len(scores), 1, 50)\n self.avg_score = np.array(scores[-avg_across:]).mean()\n\n self.memory.init_n_step()\n self.episode += 1", "def q_learning(env, learning, discount, epsilon, min_eps, episodes):\n # [18.00000072 14.00000006]\n num_states = (env.observation_space.high - env.observation_space.low) * \\\n np.array([10, 100]) # >> [18.00000072 14.00000006]\n num_states = np.round(num_states, 0).astype(int) + 1 # >> [19 15]\n\n # Initialize Q table\n # env.action_space.n return the number of action that our agent can make (here 3, left, cease, right)\n Q = np.random.uniform(low=-1, high=1, size=(num_states[0], num_states[1], env.action_space.n))\n\n # Initialize variable to track rewards\n reward_list = []\n ave_reward_list = []\n\n # Calculate episodic reduction in epsilon\n reduction = (epsilon - min_eps) / (episodes / 2)\n\n for i in range(episodes):\n # Initialize parameters\n done = False\n tot_reward, reward = 0, 0\n state = env.reset()\n\n # Discretize state\n state_adj = adjust_state(state)\n\n while done != True:\n # Render env for last five eps\n if i >= (episodes - 20):\n env.render()\n\n # Determine next action - epsilon greedy strategy\n if np.random.random() < 1 - epsilon:\n action = np.argmax(Q[state_adj[0], state_adj[1]])\n else:\n action = np.random.randint(0, env.action_space.n)\n\n # Get next state and reward\n state2, reward, done, info = env.step(action)\n\n # Discretize state2\n state2_adj = adjust_state(state2)\n\n # Allow for terminal states // .5 on env_space[0] represent the flag position\n if done and state2[0] >= .5:\n Q[state_adj[0], state_adj[1], action] = reward\n\n # adjust Q value for current state\n else:\n '''work on this, it's complicated but far from 
non-understandable'''\n delta = learning*(reward + discount*np.max(Q[state2_adj[0], state2_adj[1]]) -\n Q[state_adj[0], state_adj[1], action])\n Q[state_adj[0], state_adj[1], action] += delta\n\n tot_reward += reward\n state_adj = state2_adj\n\n # Decay epsilon\n if epsilon > min_eps:\n epsilon -= reduction\n\n # Track rewards\n reward_list.append(tot_reward)\n\n if (i+1) % 100 == 0:\n ave_reward = np.mean(reward_list)\n ave_reward_list.append(ave_reward)\n reward_list = []\n print(f'Episode {i+1} Average Reward: {ave_reward}')\n\n env.close()\n\n return ave_reward_list", "def train_dqn(self, n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995):\n self.scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n for i_episode in range(1, n_episodes+1):\n env_info = self.env.reset(train_mode=True)[self.brain_name] # reset the environment\n state = env_info.vector_observations[0] # get the current state\n score = 0 # initialize the score\n for t in range(max_t):\n action = self.agent.act(state, eps)\n env_info = self.env.step(action)[self.brain_name] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n self.agent.step(state, action, reward, next_state, done)\n state = next_state\n score += reward\n if done:\n break \n scores_window.append(score) # save most recent score\n self.scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay*eps) # decrease epsilon\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end=\"\")\n if i_episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n # we use 15.0 just to be sure\n if np.mean(scores_window)>=self.threshold:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))\n break\n return self.scores", "def set_epsilon(self,epsilon):\r\n\t\tself.epsilon = epsilon", "def _epsilon_greedy(self, info_state, legal_actions, epsilon):\n probs = np.zeros(self._num_actions)\n if np.random.rand() < epsilon:\n action = np.random.choice(legal_actions)\n probs[legal_actions] = 1.0 / len(legal_actions)\n else:\n info_state = np.reshape(info_state, [1, -1])\n q_values = self._session.run(\n self._q_values, feed_dict={self._info_state_ph: info_state})[0]\n legal_q_values = q_values[legal_actions]\n action = legal_actions[np.argmax(legal_q_values)]\n probs[action] = 1.0\n return action, probs", "def Q_learning_train(env,alpha,gamma,epsilon,episodes):\n %time\n # For plotting metrics\n all_epochs = []\n all_penalties = []\n rewards = []\n \n #Initialize Q table of 22500 x 8 size (22500 states and 8 actions) with all zeroes\n q_table = np.zeros([env.observation_space.n, env.action_space.n]) \n \n for i in range(1, episodes+1):\n state = env.reset()\n episode_rewards = []\n\n epochs, penalties, reward, = 0, 0, 0\n done = False\n\n while not done:\n if random.uniform(0, 1) < epsilon:\n action = env.action_space.sample() # Explore action space randomly\n else:\n action = np.argmax(q_table[state]) # Exploit learned values by choosing optimal values\n\n next_state, reward, done, info = env.step(action) \n\n old_value = q_table[state, action]\n next_max = np.max(q_table[next_state])\n\n new_value = (1 - alpha) * old_value + alpha * 
(reward + gamma * next_max)\n q_table[state, action] = new_value\n\n if reward == -10:\n penalties += 1\n \n\n state = next_state\n episode_rewards.append(reward)\n epochs += 1\n \n if done == True:\n break \n if epochs == 1000:\n break \n rewards.append(np.sum(episode_rewards))\n \n if i % 1000 == 0:\n clear_output(wait=True)\n print(f\"Episode: {i}\")\n \n \n print(\"Training finished.\\n\")\n \n plt.plot(savgol_filter(rewards, 1001, 3, mode = \"interp\"))\n plt.title(\"Smoothened training reward per episode\", pad = 30, size = BIGGER_SIZE)\n plt.legend()\n plt.xlabel('Episodes', labelpad = 20);\n plt.ylabel('Total Reward', labelpad = 20);\n plt.tick_params(axis='both', which='major');\n plt.tick_params(axis='both', which='minor');\n #plt.xlim(0, 60000);\n #plt.ylim(0,50)\n #plt.xticks(np.arange(0, episodes+1, 5000));\n #plt.yticks(np.arange(min(rewards), max(rewards)+1, 1000));", "def evaluate(self, environment, max_reward=1.0):\n episode_reward = 0.0\n state = environment.reset()\n\n for step_idx in range(self.max_episode_steps):\n reward, action_idx, new_state, is_done = environment.step(state, self)\n \n state = new_state\n episode_reward += reward\n\n if is_done or episode_reward >= max_reward:\n break\n\n self.fitness = episode_reward\n return episode_reward", "def stopEpisode(self):\n if self.episodesSoFar < self.numTraining:\n self.accumTrainRewards += self.episodeRewards\n else:\n self.accumTestRewards += self.episodeRewards\n self.episodesSoFar += 1\n if self.episodesSoFar >= self.numTraining:\n # Take off the training wheels\n self.epsilon = 1.0 # no exploration\n self.lr = 0.0 # no learning", "def _epsilon(self, step):\n if step < 0:\n return self._start\n elif step > self._steps:\n return self._stop\n else:\n return self._step_size * step + self._start", "def terminal_test(self):\n\n for self.cur_ep in tqdm.tqdm(range(1, self.episodes + 1), ascii=True, unit='episodes'):\n\n # Nombre de passages dans la boucle principale\n step = 1\n\n cur_state = self.env.reset()\n\n done = False\n\n while not done:\n\n # Choix au hasard entre :\n if np.random.random() > self.epsilon:\n # Action à partir de la q-table\n action = np.argmax(self.agent.get_q_values(np.array(cur_state)))\n\n else:\n # Action random\n action = np.random.randint(0, self.env.ACTION_SPACE_SIZE)\n\n # On effectue une action avec le serpent\n new_state, reward, done = self.env.step(action)\n\n # Ajout d'un exemple dans la mémoire\n self.agent.update_training_set((cur_state, action, reward, new_state, done))\n\n # Entrainement éventuel\n self.agent.train()\n\n cur_state = new_state\n step += 1\n\n if self.epsilon > self.MIN_EPSILON:\n self.epsilon *= self.EPSILON_DECAY\n self.epsilon = max(self.MIN_EPSILON, self.epsilon)\n\n if self.save_model:\n self.agent.save_model(self.model_file_name)", "def executeEpisode(self):\n trainExamples = []\n board = self.game.getInitBoard()\n player = 1\n episodeStep = 0\n\n while True:\n episodeStep += 1\n canonicalBoard = self.game.getCanonicalForm(board, player)\n temp = int(episodeStep < self.args.tempThreshold)\n\n pi = self.mcts.getActionProb(canonicalBoard, temp=temp)\n sym = self.game.getSymmetries(canonicalBoard, pi)\n for b, p in sym:\n trainExamples.append([b, player, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n board, player = self.game.getNextState(board, player, action)\n\n r = self.game.getGameEnded(board, player)\n\n if r != 0:\n ex = [(x[0], x[2], r * ((-1) ** (x[1] != player))) for x in trainExamples]\n return ex", "def run_episode(self):\n 
self.reset_episode()\n obs = self.env.reset()\n while True:\n action = self.Policy[self.env.stateDict[obs]]\n new_obs, reward, done, _ = self.env.step(action)\n if self.mode=='debug':\n print(\"PrevObs:{}, Action:{}, Obs:{}, Reward:{}, Done:{}\"\n .format(obs, action, new_obs,reward,done))\n self.totalReward += reward\n self.totalSteps += 1\n if done:\n break\n else:\n obs = new_obs\n return self.totalReward", "def dqn(n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995):\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n g_losses = []\n g_losses_window = deque(maxlen=100)\n s_losses = []\n s_losses_window = deque(maxlen=100)\n for i_episode in range(1, n_episodes+1):\n env_info = env.reset(train_mode=True)\n score = 0\n ball_reward_val = 0.0\n \n g_states = env_info[g_brain_name].vector_observations # get initial state (goalies)\n s_states = env_info[s_brain_name].vector_observations # get initial state (strikers)\n# s2_states = env_info[s2_brain_name].vector_observations # get initial state (strikers)\n\n g_scores = np.zeros(num_g_agents) # initialize the score (goalies)\n s_scores = np.zeros(num_s_agents) # initialize the score (strikers) \n# s2_scores = np.zeros(num_s2_agents) # initialize the score (strikers) \n \n #for t in range(max_t):\n while True:\n action_g_0 = g_agent.act(g_states[0], eps) # always pick state index 0\n action_s_0 = s_agent.act(s_states[0], eps)\n action_s_2 = s_agent.act(s_states[2], eps)\n# action_s2_0 = s2_agent.act(s2_states[0], eps) \n# action_s2_0 = np.asarray( [np.random.choice(s2_action_size)] )\n \n # Set other team to random\n action_g_1 = np.asarray( [np.random.choice(g_action_size)] ) \n action_s_1 = np.asarray( [np.random.choice(s_action_size)] )\n action_s_3 = np.asarray( [np.random.choice(s_action_size)] )\n# action_s2_1 = np.asarray( [np.random.choice(s2_action_size)] )\n \n # Train simultaneously\n #action_g_1 = g_agent.act(g_states[1], eps) # always pick state index 1\n #action_s_1 = s_agent.act(s_states[1], eps) \n \n # Combine actions\n actions_g = np.array( (action_g_0, action_g_1) ) \n actions_s = np.array( (action_s_0, action_s_1, action_s_2, action_s_3 ) )\n# actions_s2 = np.array( (action_s2_0, action_s2_1) )\n# actions = dict( zip( [g_brain_name, s_brain_name, s2_brain_name], [actions_g, actions_s, actions_s2] ) )\n actions = dict( zip( [g_brain_name, s_brain_name], [actions_g, actions_s] ) )\n \n env_info = env.step(actions) \n # get next states\n g_next_states = env_info[g_brain_name].vector_observations \n s_next_states = env_info[s_brain_name].vector_observations\n# s2_next_states = env_info[s2_brain_name].vector_observations\n \n # check if episode finished\n done = np.any(env_info[g_brain_name].local_done)\n \n # get reward and update scores\n g_rewards = env_info[g_brain_name].rewards\n s_rewards = env_info[s_brain_name].rewards\n# s2_rewards = env_info[s2_brain_name].rewards\n \n # Modify RED striker reward -Only when goal is scored\n if done:\n new_s_reward = modify_reward(s_rewards[0])\n s_rewards[0] = new_s_reward\n new_s_reward = modify_reward(s_rewards[2])\n s_rewards[2] = new_s_reward\n# new_s2_reward = modify_reward(s2_rewards[0])\n# s2_rewards[0] = new_s2_reward\n \n # Update scores\n g_scores += g_rewards\n s_scores += s_rewards\n# s2_scores += s2_rewards\n \n # Add in ball reward for striker\n ball_reward_val += ball_reward(s_states[0])\n \n # store experiences\n g_agent.step(g_states[0], 
action_g_0, g_rewards[0], \n g_next_states[0], done)\n s_agent.step(s_states[0], action_s_0, s_rewards[0] + ball_reward(s_states[0]), # adding ball reward\n s_next_states[0], done)\n s_agent.step(s_states[2], action_s_2, s_rewards[2] + ball_reward(s_states[2]), # adding ball reward\n s_next_states[2], done)\n# s2_agent.step(s2_states[0], action_s2_0, s2_rewards[0] + ball_reward(s2_states[0]), # adding ball reward\n# s2_next_states[0], done)\n\n if done:\n break\n \n g_states = g_next_states\n s_states = s_next_states\n# s2_states = s2_next_states\n \n # learn\n if len(g_agent.memory) > 64: #check memory to batch size\n goalie_loss = g_agent.learn(g_agent.memory.sample(), 0.99) # discount = 0.99\n striker_loss = s_agent.learn(s_agent.memory.sample(), 0.99) # discount = 0.99 \n# _ = s2_agent.learn(s2_agent.memory.sample(), 0.99) # discount = 0.99 \n \n g_losses.append(goalie_loss.item())\n g_losses_window.append(goalie_loss.item())\n #print(goalie_loss.item())\n s_losses.append(striker_loss.item())\n s_losses_window.append(striker_loss.item())\n \n score = g_scores[0] + s_scores[0] #+ s2_scores[0]\n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n \n eps = max(eps_end, eps_decay*eps) # decrease epsilon\n print('\\rEpisode {}\\tAverage Score: {:.2f}\\t Goalie Loss:' \\\n '{:.5f}\\t Striker Loss: {:.5f}' \\\n '\\t Ball Reward: {:.2f}'.format(i_episode, \\\n np.mean(scores_window), \\\n np.mean(g_losses_window), \\\n np.mean(s_losses_window), \\\n ball_reward_val), end=\"\")\n #print(s_states[0][0:56])\n if i_episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}\\t Goalie Loss:' \\\n '{:.5f}\\t Striker Loss: {:.5f}\\n' \\\n '\\t Ball Reward: {:.2f}'.format(i_episode, \\\n np.mean(scores_window), \\\n np.mean(g_losses_window), \\\n np.mean(s_losses_window), \\\n ball_reward_val))\n \n # TODO: ---------- CHANGE OUTPUT FILE NAMES ----------\n torch.save(g_agent.qnetwork_local.state_dict(), 'goalie3_dqn_V1_mod.pth')\n torch.save(s_agent.qnetwork_local.state_dict(), 'striker3_dqn_V1_mod.pth')\n return scores", "def evaluate(self, env, num_episodes, max_episode_length=None):\n self.mode = 'test'\n\n average_episode_length = 0\n rewards = []\n\n for i in range(num_episodes):\n state = env.reset()\n t = 0\n episode_reward = 0.0\n while True:\n t += 1\n action, _ = self.select_action(state)\n next_state, reward, is_terminal, debug_info = env.step(action)\n episode_reward += reward\n average_episode_length += 1\n\n if is_terminal or (max_episode_length is not None and t > max_episode_length):\n break\n\n state = next_state\n\n rewards.append(episode_reward)\n self.mode = 'train'\n return np.mean(rewards), np.std(rewards), average_episode_length / num_episodes", "def get_epsilon(step: int):\n return (epsilon_0 - epsilon) * math.exp(-step) + epsilon", "def dqn(\n env,\n n_episodes=10000,\n max_t=1000,\n eps_start=1.0,\n eps_end=0.005,\n eps_decay=0.995,\n train_mode=True,\n):\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n action_size = brain.vector_action_space_size\n env_info = env.reset(train_mode=train_mode)[brain_name]\n state_size = len(env_info.vector_observations[0])\n\n agent = Agent(state_size=state_size, action_size=action_size, seed=1)\n\n for i_episode in range(1, n_episodes + 1):\n state = 
env_info.vector_observations[0]\n score = 0\n for _ in range(max_t):\n action = np.int32(agent.act(state, eps))\n env_info = env.step(action)[\n brain_name\n ] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n\n agent.step(state, action, reward, next_state, done)\n state = next_state\n score += reward\n if done:\n env.reset(train_mode=train_mode)[brain_name]\n break\n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay * eps) # decrease epsilon\n print(\n \"\\rEpisode {}\\tAverage Score: {:.2f}\".format(\n i_episode, np.mean(scores_window)\n ),\n end=\"\",\n )\n if i_episode % 100 == 0:\n print(\n \"\\rEpisode {}\\tAverage Score: {:.2f}\".format(\n i_episode, np.mean(scores_window)\n )\n )\n if np.mean(scores_window) >= 13.0:\n print(\n \"\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}\".format(\n i_episode - 100, np.mean(scores_window)\n )\n )\n torch.save(agent.qnetwork_local.state_dict(), \"checkpoint_vanilla.pth\")\n break\n return scores" ]
[ "0.7373344", "0.72590876", "0.71999145", "0.7132823", "0.70544076", "0.7023157", "0.69378114", "0.6925371", "0.6911052", "0.68459004", "0.6752915", "0.67482215", "0.668959", "0.6643346", "0.6621843", "0.66143537", "0.6612828", "0.6591157", "0.65705395", "0.65690637", "0.6567207", "0.65615326", "0.6560272", "0.6558582", "0.65570086", "0.65508574", "0.6539623", "0.65389615", "0.65366477", "0.6533782" ]
0.8238254
0
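For illustration only, the two schedules mentioned in the positive document — the commented-out linear decay and the exponential decay that is actually returned — evaluated side by side:

def epsilon_linear(current_episode, num_episodes):
    # decays from 1.0 at episode 0 to 0.0 at the final episode
    return 1 - (current_episode / num_episodes)

def epsilon_exponential(current_episode):
    # starts at 0.5 and shrinks by 10% per episode, as in the document above
    return .5 * .9 ** current_episode

for ep in (0, 10, 50, 100):
    print(ep, round(epsilon_linear(ep, 100), 3), round(epsilon_exponential(ep), 6))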
Parse an iPhoto AlbumData.xml file, keeping the interesting bits.
def parseAlbumData(self, filename):
        doc = parse(filename)
        stack = []
        last_top_key = None
        if self.use_album:
            album_list_key = "List of Albums"
        else:
            album_list_key = "List of Rolls"
        for event, node in doc:
            if event == START_ELEMENT:
                stack.append(node)
                level = len(stack)
                if level == 3:
                    if node.nodeName == 'key':
                        doc.expandNode(node)
                        last_top_key = self.getText(node)
                        stack.pop()
                    elif last_top_key == 'List of Keywords':
                        doc.expandNode(node)
                        self.keywords = self.dePlist(node)
                        stack.pop()
                    elif last_top_key == 'List of Faces':
                        doc.expandNode(node)
                        self.faces = dict([
                            (k, v['name'])
                            for k,v in self.dePlist(node, ['name']).items()
                        ])
                        stack.pop()
                    elif last_top_key == 'Major Version':
                        doc.expandNode(node)
                        major_version = self.dePlist(node)
                        stack.pop()
                        if major_version != self.major_version:
                            raise iPhotoLibraryError, \
                                "Sorry, I can't understand version %i iPhoto Libraries." % major_version
                    elif last_top_key == 'Minor Version':
                        doc.expandNode(node)
                        minor_version = self.dePlist(node)
                        stack.pop()
                        if minor_version > self.minor_version:
                            self.status(
                                "\nI don't recognise iPhoto libraries when the minor version is %i, but let's try anyway.\n" % minor_version,
                                force=True
                            )
                elif level == 4:
                    # process large items individually so we don't
                    # load them all into memory.
                    if last_top_key == album_list_key:
                        doc.expandNode(node)
                        self.albums.append(self.dePlist(node))
                        stack.pop()
                    elif last_top_key == 'Master Image List':
                        doc.expandNode(node)
                        if node.nodeName == 'key':
                            last_image_key = self.getText(node)
                        else:
                            self.images[last_image_key] = self.dePlist(
                                node, self.interesting_image_keys
                            )
                        stack.pop()
            elif event == END_ELEMENT:
                stack.pop()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def album_parser(data):\n album_ids = []\n for item in data['data']:\n album_ids.append(item['id'])\n return album_ids", "def parse_data(self):\n\n msg = self.xml['dom'].childNodes[0]\n self.data = xml_to_dicts(msg, False)\n\n # Get some metadata together\n self.id = \"%s:%s\" % (self.data['src']['name']['#cdata'], self.data['src']['id']['#cdata'])\n self.temp = self.data['tmpr']['#cdata']\n self.watts = self.data['ch1']['watts']['#cdata']\n\n # Time - CurrentCost and local\n self.date = {}\n self.date['cc'] = [ int(self.data['date'][k]['#cdata']) for k in ('dsb','hr','min','sec') ]\n self.date['now'] = localtime()", "def _parse_metadata ( self ):\n self.date = []\n self.atcorr_refl = []\n self.saa = []\n self.sza = []\n self.vaa = []\n self.vza = []\n self.res = []\n self._mask = []\n for md_file in self.metadata:\n # This is required to get rid of the namespace cruft\n it = xml.etree.ElementTree.iterparse ( md_file )\n for _, el in it:\n el.tag = el.tag.split('}', 1)[1] # strip all namespaces\n tree = it.root\n\n dirname = os.path.dirname ( md_file )\n\n self.date.append( datetime.datetime.strptime(\n tree.find(\"global_metadata/acquisition_date\").text,\n \"%Y-%m-%d\") )\n\n for c in tree.findall (\"global_metadata/corner\"):\n if c.attrib['location'] == \"UL\":\n ulx = float ( c.attrib['longitude'] )\n uly = float ( c.attrib['latitude'] )\n else:\n lrx = float ( c.attrib['longitude'] )\n lry = float ( c.attrib['latitude'] )\n\n self.vaa.append ( get_vaa ( lrx, lry, ulx, uly ) )\n\n #self.atcorr_refl.append( os.path.join ( dirname, tree[1][2].text ) )\n self.saa.append(\n float ( tree.find(\"global_metadata/solar_angles\").attrib['azimuth'] ) )\n self.sza.append(\n float ( tree.find(\"global_metadata/solar_angles\").attrib['zenith'] ) )\n self.vza.append( 0.0 ) # Note that LDCM can look sideways a bit!\n self.res.append( 30. 
) # 30m\n\n images = []\n mask = []\n for b in tree.findall(\"bands/band\"):\n if b.attrib['product'] == \"toa_refl\":\n fname = b.find(\"file_name\").text\n if fname.find ( \"qa.tif\" ) < 0:\n images.append ( os.path.join ( dirname, fname ) )\n elif b.attrib['product'] == \"cfmask\":\n mask = os.path.join ( dirname, fname )\n # Create VRT?\n subprocess.call ([\"gdalbuildvrt\", \"-overwrite\", \"-separate\",\n os.path.join ( dirname, md_file.replace(\".xml\", \"_crop.vrt\" )) ] + images )\n self.atcorr_refl.append ( os.path.join ( dirname,\n md_file.replace(\".xml\", \"_crop.vrt\" )) )\n self._mask.append( mask )", "def load_infos(self):\n xml = self.api.photos_getInfo(photo_id=self.id)\n xml = xml.find(\"photo\")\n out = xml.attrib\n out[\"title\"] = xml.find(\"title\").text\n out[\"description\"] = xml.find(\"description\").text\n out[\"dates\"] = xml.find(\"dates\").attrib\n\n # Load urls\n out[\"urls\"] = {}\n for url_xml in xml.find(\"urls\").findall(\"url\"):\n out[\"urls\"][url_xml.attrib[\"type\"]] = url_xml.text\n\n # Load tags\n out[\"tags\"] = []\n for tag_xml in xml.find(\"tags\").findall(\"tag\"):\n tag = tag_xml.attrib\n tag[\"tag\"] = tag_xml.text\n out[\"tags\"].append(tag)\n\n return out", "def parse(self, data):\n self.links = []\n self.images = []\n self.current_tags = []\n self.reset()\n self.feed(data)", "def __parse(self):\n\t\tparser=xml.sax.make_parser()\n\t\tparser.setContentHandler(OSMXMLFileParser(self))\n\t\tparser.parse(self.filename)\n\t\n\t\t# convert them back to lists\n\t\tself.nodes = self.nodes.values()\n\t\tself.ways = self.ways.values()\n\t\tself.relations = self.relations.values()", "def _parse_metadata ( self ):\n self.date = []\n self.atcorr_refl = []\n self.saa = []\n self.sza = []\n self.vaa = []\n self.vza = []\n self.res = []\n self._mask = []\n\n for md_file in self.metadata:\n tree = xml.etree.ElementTree.ElementTree ( file=md_file ).getroot()\n dirname = os.path.dirname ( md_file )\n try:\n self.date.append(\n datetime.datetime.strptime(tree[0][1].text, \"%Y-%m-%d %H:%M:%S\") )\n except:\n self.date.append(\n datetime.datetime.strptime(tree[0][1].text, \"%Y-%m-%d %H:%M:%S.%f\") )\n self.atcorr_refl.append(\n os.path.join ( dirname, tree[1][2].text ) )\n self.saa.append( float ( tree[4][10][0].text ) )\n self.sza.append( float ( tree[4][10][1].text ) )\n self.vaa.append( float ( tree[4][10][2].text ) )\n self.vza.append( float ( tree[4][10][3].text ) )\n self.res.append( float ( tree[2][1].text ) )\n self._mask.append( os.path.join ( dirname, tree[1][5].text ) )", "def _parse(self, infile):\n raise NotImplementedError()", "def parse_data(fp):\n pass", "def parse(self):\n try:\n self.validate()\n except Exception as e:\n raise AssetmapError(e)\n\n tree = ET.parse(self.path)\n root = tree.getroot()\n # ElementTree prepends the namespace to all elements, so we need to extract\n # it so that we can perform sensible searching on elements.\n assetmap_ns = get_namespace(root.tag)\n\n self.id = get_element_text(root, \"Id\", assetmap_ns).split(\":\")[2]\n self.annotation_text = get_element_text(root, \"AnnotationText\", assetmap_ns)\n self.volume_count = int(get_element_text(root, \"VolumeCount\", assetmap_ns))\n self.issue_date = parse_date(get_element_text(root, \"IssueDate\", assetmap_ns))\n self.issuer = get_element_text(root, \"Issuer\", assetmap_ns)\n self.creator = get_element_text(root, \"Creator\", assetmap_ns)\n\n asset_list = get_element(root, \"AssetList\", assetmap_ns)\n # Get the data from the ASSETMAP file\n for asset in 
asset_list.getchildren():\n asset_id = get_element_text(asset, \"Id\", assetmap_ns).split(\":\")[2]\n for chunklist in get_element_iterator(asset, \"ChunkList\", assetmap_ns):\n \"\"\"\n The code below assumes that there will only ever be one chunk in a chunklist. Chunking is\n used to split files up into smaller parts, usually in order to provide compatability with older\n filesystems, which is not applicable for our uses.\n \"\"\"\n for chunk in chunklist.getchildren():\n v = get_element_text(chunk, \"VolumeIndex\", assetmap_ns)\n o = get_element_text(chunk, \"Offset\", assetmap_ns)\n l = get_element_text(chunk, \"Length\", assetmap_ns)\n\n a = {\n \"path\": get_element_text(chunk, \"Path\", assetmap_ns),\n \"volume_index\": int(v) if v is not None else v,\n \"offset\": int(o) if o is not None else o,\n \"length\": int(l) if l is not None else l\n }\n\n self.assets[asset_id] = AssetData(**a)", "def _parse_preset(self, xmldata):\r\n\r\n raise NotImplementedError", "def _parseImageXml(self, xml, topImage):\n if not topImage or topImage.pixelInfo.get('magnificaiton'):\n return\n topImage.parse_image_description(xml)\n if not topImage._description_record:\n return\n try:\n xml = topImage._description_record\n # Optrascan metadata\n scanDetails = xml.get('ScanInfo', xml.get('EncodeInfo'))['ScanDetails']\n mag = float(scanDetails['Magnification'])\n # In microns; convert to mm\n scale = float(scanDetails['PixelResolution']) * 1e-3\n topImage._pixelInfo = {\n 'magnification': mag,\n 'mm_x': scale,\n 'mm_y': scale,\n }\n except Exception:\n pass", "def parse(self, infile):\r\n raise NotImplementedError()", "def _loadData(self, data):\n self._data = data\n self.addedAt = utils.toDatetime(data.attrib.get('addedAt'))\n self.art = data.attrib.get('art')\n self.artBlurHash = data.attrib.get('artBlurHash')\n self.fields = self.findItems(data, media.Field)\n self.guid = data.attrib.get('guid')\n self.key = data.attrib.get('key', '')\n self.lastRatedAt = utils.toDatetime(data.attrib.get('lastRatedAt'))\n self.lastViewedAt = utils.toDatetime(data.attrib.get('lastViewedAt'))\n self.librarySectionID = utils.cast(int, data.attrib.get('librarySectionID'))\n self.librarySectionKey = data.attrib.get('librarySectionKey')\n self.librarySectionTitle = data.attrib.get('librarySectionTitle')\n self.listType = 'video'\n self.ratingKey = utils.cast(int, data.attrib.get('ratingKey'))\n self.summary = data.attrib.get('summary')\n self.thumb = data.attrib.get('thumb')\n self.thumbBlurHash = data.attrib.get('thumbBlurHash')\n self.title = data.attrib.get('title')\n self.titleSort = data.attrib.get('titleSort', self.title)\n self.type = data.attrib.get('type')\n self.updatedAt = utils.toDatetime(data.attrib.get('updatedAt'))\n self.userRating = utils.cast(float, data.attrib.get('userRating'))\n self.viewCount = utils.cast(int, data.attrib.get('viewCount', 0))", "def _loadData(self, data):\n self._data = data\n self.id = utils.cast(int, data.attrib.get('id'))\n self.accountID = utils.cast(int, data.attrib.get('accountID'))\n self.serverId = utils.cast(int, data.attrib.get('serverId'))\n self.machineIdentifier = data.attrib.get('machineIdentifier')\n self.name = data.attrib.get('name')\n self.lastSeenAt = utils.toDatetime(data.attrib.get('lastSeenAt'))\n self.numLibraries = utils.cast(int, data.attrib.get('numLibraries'))\n self.allLibraries = utils.cast(bool, data.attrib.get('allLibraries'))\n self.owned = utils.cast(bool, data.attrib.get('owned'))\n self.pending = utils.cast(bool, data.attrib.get('pending'))", "def 
_parse_mfoutfile(mfoutfilename):\n \n # Read the file\n file_string = open(mfoutfilename, 'r').read()\n\n # Remove any Windows linefeed characters (courtesy of Microsoft Exchange/Outlook)\n regex_microsoftsucks = re.compile(r\"\"\"\\r\\n\"\"\")\n file_string = re.sub(regex_microsoftsucks, '\\n', file_string)\n \n # Remove comments from the text\n regex_comment = re.compile(r\"\"\"(^\\s?#.+|^\\s?)\\n\"\"\")\n file_string = re.sub(regex_comment, '', file_string)\n \n # Remove internal double returns from the data\n regex_returns = re.compile(r\"\"\"\\n{2,}\"\"\")\n file_string = re.sub(regex_returns, '\\n', file_string)\n \n # Strip whitespace from the beginning and end of the data\n file_string = file_string.strip()\n\n # Split the data into a list based on the data_x tags\n regex_data_tag_split = re.compile(r\"\"\"\\n(?=data_.+?\\n)\"\"\")\n split_list = re.split(r\"\"\"\\n(?=data_.+?\\n)\"\"\", file_string, flags=re.MULTILINE|re.DOTALL)\n \n # Separate the data tags from the corresponding data\n regex_data_tag_name = re.compile(r\"\"\"(data_.+?)\\n(.+)\"\"\")\n tag_data_list = [re.search(r\"\"\"(data_.+?)\\n(.+)\"\"\", x, flags=re.MULTILINE|re.DOTALL) \n for x in split_list]\n \n # Put the list into an ordered dictionary with the tag as a key\n tag_data_dict = OrderedDict([(x.group(1).strip(), x.group(2)) \n for x in tag_data_list])\n \n return tag_data_dict", "def parse_file(self, filepath):\n\n xml_file = open(filepath, \"r\")\n xml = xml_file.read()\n content = \"\"\n\n xml_file.close()\n\n for line in xml.replace(\"&amp;\", \"&\").split(\"\\n\"):\n if content != \"\":\n content += \" \"\n content += re.sub(\"(<(P|F).*?>)|(<\\\\/P>)\", \"\", line).strip()\n # XML cleanning\n\n start_offset = \"<START_OFFSET_DUCFileRep>\"\n content = start_offset + content\n content = content.replace(\"</LP>\", \"</LP>%s\"%start_offset)\n content = content.replace(\"</TEXT>\", \"</TEXT>%s\"%start_offset)\n content = re.sub(\"%s.*?<LP>(.*?)<\\\\/LP>\"%start_offset, \"\\\\1\", content)\n content = re.sub(\"%s.*?<TEXT>(.*?)<\\\\/TEXT>\"%start_offset, \"\\\\1\", content)\n content = re.sub(\"%s.*\"%start_offset, \"\", content)\n\n self.set_content(content)", "def parse_xml_data(self, xml_post_data):\n try:\n read_handler = parse_xmlrpc(xml_post_data)\n except:\n raise ValueError(ERR_MSG % xml_post_data[:50])\n else:\n # Tried to do this with self.update but it was failing :S\n for k, v in read_handler.get_data_container().items():\n self[k] = v", "def _loadData(self, data):\n self._data = data\n self.friend = self._initpath == self.key\n self.allowCameraUpload = utils.cast(bool, data.attrib.get('allowCameraUpload'))\n self.allowChannels = utils.cast(bool, data.attrib.get('allowChannels'))\n self.allowSync = utils.cast(bool, data.attrib.get('allowSync'))\n self.email = data.attrib.get('email')\n self.filterAll = data.attrib.get('filterAll')\n self.filterMovies = data.attrib.get('filterMovies')\n self.filterMusic = data.attrib.get('filterMusic')\n self.filterPhotos = data.attrib.get('filterPhotos')\n self.filterTelevision = data.attrib.get('filterTelevision')\n self.home = utils.cast(bool, data.attrib.get('home'))\n self.id = utils.cast(int, data.attrib.get('id'))\n self.protected = utils.cast(bool, data.attrib.get('protected'))\n self.recommendationsPlaylistId = data.attrib.get('recommendationsPlaylistId')\n self.restricted = data.attrib.get('restricted')\n self.thumb = data.attrib.get('thumb')\n self.title = data.attrib.get('title', '')\n self.username = data.attrib.get('username', '')\n self.servers = 
self.findItems(data, MyPlexServerShare)\n for server in self.servers:\n server.accountID = self.id", "def _loadData(self, data):\n Video._loadData(self, data)\n self.audioLanguage = data.attrib.get('audioLanguage', '')\n self.collections = self.findItems(data, media.Collection)\n self.guids = self.findItems(data, media.Guid)\n self.index = utils.cast(int, data.attrib.get('index'))\n self.key = self.key.replace('/children', '') # FIX_BUG_50\n self.labels = self.findItems(data, media.Label)\n self.leafCount = utils.cast(int, data.attrib.get('leafCount'))\n self.parentGuid = data.attrib.get('parentGuid')\n self.parentIndex = utils.cast(int, data.attrib.get('parentIndex'))\n self.parentKey = data.attrib.get('parentKey')\n self.parentRatingKey = utils.cast(int, data.attrib.get('parentRatingKey'))\n self.parentStudio = data.attrib.get('parentStudio')\n self.parentTheme = data.attrib.get('parentTheme')\n self.parentThumb = data.attrib.get('parentThumb')\n self.parentTitle = data.attrib.get('parentTitle')\n self.ratings = self.findItems(data, media.Rating)\n self.subtitleLanguage = data.attrib.get('audioLanguage', '')\n self.subtitleMode = utils.cast(int, data.attrib.get('subtitleMode', '-1'))\n self.viewedLeafCount = utils.cast(int, data.attrib.get('viewedLeafCount'))\n self.year = utils.cast(int, data.attrib.get('year'))", "def load_data():\n\n current_artist = None\n current_album = None\n artist_list = []\n\n print(\"Loading music data...\")\n\n with open(\"albums.txt\", \"r\") as albums:\n for line in albums:\n # bind variables to the fields of each line (stripping new line char and splitting on tabs)\n artist_field, album_field, year_field, song_field = tuple(line.strip(\"\\n\").split(\"\\t\"))\n year_field = int(year_field) # convert string value to int\n print(f\"\\t{artist_field}:{album_field}:{year_field}:{song_field}\")\n\n # Creates an Artist object for the artists read from artist_field using the current_artist tracker.\n # If there's no current_artist, simply creates an Artist object for the first artist read.\n if current_artist is None:\n current_artist = Artist(artist_field)\n # If there is a current current_artist, uses that tracker to detect when the artist being read changes.\n # When this happens, adds current current_album to the artists album list,\n # appends the artist to the artist list, creates a new Artist object for the next artist being read,\n # and resets current_album to None.\n elif current_artist.name != artist_field:\n current_artist.add_album(current_album)\n artist_list.append(current_artist)\n current_artist = Artist(artist_field)\n current_album = None\n\n # Creates an Album object for the albums read from album_field using the current_album tracker.\n # Follows a very similar process to the current_artist assignment above.\n if current_album is None:\n current_album = Album(album_field, year_field, current_artist)\n elif current_album.name != album_field:\n current_artist.add_album(current_album)\n current_album = Album(album_field, year_field, current_artist)\n\n # Creates a new Song object, and adds it to current_album object\n current_song = Song(song_field, current_artist)\n current_album.add_song(current_song)\n\n # Add final artist/album to their respective lists\n if current_artist is not None:\n if current_album is not None:\n current_artist.add_album(current_album)\n artist_list.append(current_artist)\n\n print(f\"A total of {len(artist_list)} artists were loaded.\")\n print()\n print(\"=\" * 40)\n print()\n return artist_list", "def 
parse_data(self, file_data):\n if self.data:\n all_api_reference = self.data.findAll([\"h4\",\"h3\"])\n for api in all_api_reference: \n title = api.text.replace(' #', '')\n\n href = self.parse_link(file_data, api)\n\n content = self.parse_content(api)\n\n example = self.parse_example(api)\n\n section = {\n 'title': title,\n 'href': href,\n 'content': content,\n 'example': example\n }\n self.parsed_data.append(section)", "def parser(self):\n\t\tdom = ET.parse(self.input_filename)\n\t\tself.doc = dom.getroot()", "def parse_dataset(self, data):\n pass", "def parse(self, data):\n raise NotImplementedError", "def parsexml(self):\n raise NotImplementedError", "def _loadData(self, data):\n Video._loadData(self, data)\n self.audienceRating = utils.cast(float, data.attrib.get('audienceRating'))\n self.audienceRatingImage = data.attrib.get('audienceRatingImage')\n self.audioLanguage = data.attrib.get('audioLanguage', '')\n self.autoDeletionItemPolicyUnwatchedLibrary = utils.cast(\n int, data.attrib.get('autoDeletionItemPolicyUnwatchedLibrary', '0'))\n self.autoDeletionItemPolicyWatchedLibrary = utils.cast(\n int, data.attrib.get('autoDeletionItemPolicyWatchedLibrary', '0'))\n self.childCount = utils.cast(int, data.attrib.get('childCount'))\n self.collections = self.findItems(data, media.Collection)\n self.contentRating = data.attrib.get('contentRating')\n self.duration = utils.cast(int, data.attrib.get('duration'))\n self.enableCreditsMarkerGeneration = utils.cast(int, data.attrib.get('enableCreditsMarkerGeneration', '-1'))\n self.episodeSort = utils.cast(int, data.attrib.get('episodeSort', '-1'))\n self.flattenSeasons = utils.cast(int, data.attrib.get('flattenSeasons', '-1'))\n self.genres = self.findItems(data, media.Genre)\n self.guids = self.findItems(data, media.Guid)\n self.index = utils.cast(int, data.attrib.get('index'))\n self.key = self.key.replace('/children', '') # FIX_BUG_50\n self.labels = self.findItems(data, media.Label)\n self.languageOverride = data.attrib.get('languageOverride')\n self.leafCount = utils.cast(int, data.attrib.get('leafCount'))\n self.locations = self.listAttrs(data, 'path', etag='Location')\n self.network = data.attrib.get('network')\n self.originallyAvailableAt = utils.toDatetime(data.attrib.get('originallyAvailableAt'), '%Y-%m-%d')\n self.originalTitle = data.attrib.get('originalTitle')\n self.rating = utils.cast(float, data.attrib.get('rating'))\n self.ratings = self.findItems(data, media.Rating)\n self.roles = self.findItems(data, media.Role)\n self.seasonCount = utils.cast(int, data.attrib.get('seasonCount', self.childCount))\n self.showOrdering = data.attrib.get('showOrdering')\n self.similar = self.findItems(data, media.Similar)\n self.studio = data.attrib.get('studio')\n self.subtitleLanguage = data.attrib.get('audioLanguage', '')\n self.subtitleMode = utils.cast(int, data.attrib.get('subtitleMode', '-1'))\n self.tagline = data.attrib.get('tagline')\n self.theme = data.attrib.get('theme')\n self.useOriginalTitle = utils.cast(int, data.attrib.get('useOriginalTitle', '-1'))\n self.viewedLeafCount = utils.cast(int, data.attrib.get('viewedLeafCount'))\n self.year = utils.cast(int, data.attrib.get('year'))", "def parse_data(self):\n\n try:\n if self.is_bytes:\n self.data = etree.XML(self.manifest)\n else:\n with open(self.manifest) as fh:\n self.data = etree.XML(fh.read().encode())\n except etree.XMLSyntaxError:\n raise InvalidManifest('Not an XML file')\n\n self.tree = etree.ElementTree(self.data)\n\n self.find_remotes()\n self.find_defaults()\n 
self.find_projects()\n\n return self.generate_manifest_dict()", "def test_parse_metadata_0(self):\n data = ET.parse(\"data/metadata_0.xml\")\n data_str = ET.tostring(data.getroot())\n\n dict = tesse_ros_bridge.utils.parse_metadata(data_str)\n self.assertEqual(dict['position'], [-5.692576, 2.499105, 10.63836])\n self.assertEqual(dict['quaternion'], [0, 0.5372996, 0, 0.8433914])\n self.assertEqual(dict['velocity'], [0, -0.0004944276, 0])\n self.assertEqual(dict['ang_vel'], [0, 0, 0])\n self.assertEqual(dict['acceleration'], [0, 0.001516496, 0])\n self.assertEqual(dict['ang_accel'], [0, 0, 0])\n self.assertEqual(dict['time'], 7.935)\n self.assertEqual(dict['collision_status'], False)", "def _loadData(self, data):\n self._data = data\n self.createdAt = utils.toDatetime(data.attrib.get('createdAt'))\n self.email = data.attrib.get('email')\n self.friend = utils.cast(bool, data.attrib.get('friend'))\n self.friendlyName = data.attrib.get('friendlyName')\n self.home = utils.cast(bool, data.attrib.get('home'))\n self.id = utils.cast(int, data.attrib.get('id'))\n self.server = utils.cast(bool, data.attrib.get('server'))\n self.servers = self.findItems(data, MyPlexServerShare)\n self.thumb = data.attrib.get('thumb')\n self.username = data.attrib.get('username', '')\n for server in self.servers:\n server.accountID = self.id" ]
[ "0.65253925", "0.59940094", "0.5880419", "0.5795567", "0.57881397", "0.5780533", "0.57344276", "0.57285273", "0.57080084", "0.5671317", "0.56547713", "0.56530684", "0.5651814", "0.56417334", "0.5634651", "0.5559751", "0.552812", "0.5520787", "0.5499363", "0.54895693", "0.54556364", "0.543432", "0.5431772", "0.54317176", "0.54307747", "0.539366", "0.5325763", "0.53092074", "0.53061867", "0.5304013" ]
0.74945784
0
Given a DOM node, convert the plist (fragment) it refers to and return the corresponding Python data structure. If interesting_keys is a list, "dict" keys will be filtered so that only those nominated are returned (for ALL descendant dicts). Numeric keys aren't filtered.
def dePlist(self, node, interesting_keys=None): ik = interesting_keys dtype = node.nodeName if dtype == 'string': return self.getText(node) elif dtype == 'integer': try: return int(self.getText(node)) except ValueError: raise iPhotoLibraryError, \ "Corrupted Library; unexpected value '%s' for integer" % \ self.getText(node) elif dtype == 'real': try: return float(self.getText(node)) except ValueError: raise iPhotoLibraryError, \ "Corrupted Library; unexpected value '%s' for real" % \ self.getText(node) elif dtype == 'array': return [self.dePlist(c, ik) for c in node.childNodes \ if c.nodeType == Node.ELEMENT_NODE] elif dtype == 'dict': d = {} last_key = None for c in node.childNodes: if c.nodeType != Node.ELEMENT_NODE: continue # TODO: catch out-of-order keys/values if c.nodeName == 'key': last_key = self.getText(c) else: # value if interesting_keys: # check to see if we're interested if last_key not in interesting_keys \ and not last_key.isdigit(): continue # nope. d[intern(str(last_key))] = self.dePlist(c, ik) return d elif dtype == 'true': return True elif dtype == 'false': return False elif dtype == 'data': return base64.decodestring(self.getText(c)) elif dtype == 'date': return self.appleDate(self.getText(c)) else: raise Exception, "Don't know what a %s is." % dtype
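The dePlist entry above decodes a plist XML fragment by dispatching on the element tag (string, integer, real, array, dict, true/false, data, date) and optionally filtering dict keys while always keeping numeric keys. A minimal Python 3 sketch of the same dispatch, written against xml.dom.minidom nodes, is given below; the get_text helper, the ISO 8601 date format, and the small demo at the end are assumptions of this sketch, and every branch reads its text from node itself.

```python
import base64
from datetime import datetime
from xml.dom.minidom import Node, parseString


def get_text(node):
    # Concatenate the text children of an element node.
    return "".join(c.data for c in node.childNodes if c.nodeType == Node.TEXT_NODE)


def de_plist(node, interesting_keys=None):
    """Convert a plist XML fragment into the matching Python structure."""
    tag = node.nodeName
    if tag == "string":
        return get_text(node)
    if tag == "integer":
        return int(get_text(node))
    if tag == "real":
        return float(get_text(node))
    if tag == "true":
        return True
    if tag == "false":
        return False
    if tag == "data":
        return base64.b64decode(get_text(node))
    if tag == "date":
        # Standard plist dates are ISO 8601 with a trailing Z (assumed here).
        return datetime.strptime(get_text(node), "%Y-%m-%dT%H:%M:%SZ")
    if tag == "array":
        return [de_plist(c, interesting_keys)
                for c in node.childNodes if c.nodeType == Node.ELEMENT_NODE]
    if tag == "dict":
        result, last_key = {}, None
        for c in node.childNodes:
            if c.nodeType != Node.ELEMENT_NODE:
                continue
            if c.nodeName == "key":
                last_key = get_text(c)
            elif interesting_keys and last_key not in interesting_keys and not last_key.isdigit():
                continue  # key filtered out; numeric keys are always kept
            else:
                result[last_key] = de_plist(c, interesting_keys)
        return result
    raise ValueError(f"unsupported plist element: {tag}")


doc = parseString("<dict><key>Count</key><integer>3</integer></dict>")
print(de_plist(doc.documentElement))  # {'Count': 3}
```

For a complete plist file, plistlib.loads already performs this conversion; the point of a hand-rolled walker like the one above is the per-key filtering applied to all descendant dicts.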
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_plist(path: str) -> dict:\n return _read_plist(path, plistlib.FMT_XML)", "def __getitem__(self, key):\n if isinstance(key, list):\n return plist([self[k] for k in key], root=plist([KeyValue(k, self[k]) for k in key]))\n else:\n return dict.__getitem__(self, key)", "def elem2dict(node):\n result = {}\n\n for element in node.iterchildren():\n # Remove namespace prefix\n key = element.tag.split('}')[1] if '}' in element.tag else element.tag\n key = key[:1].lower() + key[1:]\n\n # Process element as tree element if the inner XML contains non-whitespace content\n if element.text and element.text.strip():\n value = element.text\n else:\n value = elem2dict(element)\n if key in result:\n if type(result[key]) is list:\n result[key].append(value)\n else:\n tempvalue = result[key].copy()\n result[key] = [tempvalue, value]\n else:\n result[key] = value\n return result", "def node_properties_as_dict( node, type_hints=True, verbose=0 ):\n\n prop_dict = {}\n\n for p,v in node.props.items():\n property_val = LopperDT.property_value_decode( v.value, 0, LopperFmt.COMPOUND, LopperFmt.DEC )\n prop_dict[v.name] = property_val\n if type_hints:\n prop_dict['__{}_type__'.format(v.name)] = LopperDT.property_type_guess( v.value )\n\n return prop_dict", "def flatten_sections_filter_keys(report, key_filter=None):\n allitems = []\n\n if not key_filter:\n def walk_dict(d):\n if isinstance(d, dict):\n for kk in d:\n if isinstance(d[kk], dict):\n walk_dict(d[kk])\n else:\n allitems.append((kk, d[kk]))\n else:\n raise Exception('did not handle this case')\n return\n walk_dict(report.sections)\n\n return allitems\n else:\n def walk_dict(d):\n if isinstance(d, dict):\n for kk in d:\n if isinstance(d[kk], dict):\n walk_dict(d[kk])\n else:\n if key_filter(kk):\n allitems.append((kk, d[kk]))\n else:\n raise Exception('did not handle this case')\n return\n walk_dict(report.sections)\n\n return allitems", "def __getitem__(self, key):\n if self.__pepth__ != 0:\n return plist.__getattr__(self, '__getitem__')(key)\n try:\n if (isinstance(key, list)\n and plist(key).all(isinstance, int)):\n return plist([self[k] for k in key]) # Don't pass root -- we are uprooting\n elif isinstance(key, slice):\n if self is self.__root__:\n return plist(list.__getitem__(self, key))\n return plist(list.__getitem__(self, key), root=plist(list.__getitem__(self.__root__, key)))\n else:\n return list.__getitem__(self, key)\n except TypeError as first_exception:\n try:\n if isinstance(key, list):\n return plist([self[i][k] for i, k in enumerate(key)]) # Don't pass root -- we are uprooting\n if isinstance(key, tuple):\n try:\n return plist([x[key] for x in self], root=self.__root__)\n except Exception:\n return plist([tuple(x[k] for k in key) for x in self], root=self.__root__)\n return plist([x[key] for x in self], root=self.__root__)\n except Exception as second_exception:\n raise TypeError('Failed to apply index to self or elements.\\nself exception: %s\\nelements exception: %s' % (str(first_exception), str(second_exception)))", "def __getitem__(self, key):\n if isinstance(key, list):\n return plist([self[k] for k in key], root=plist([KeyValue(k, self[k]) for k in key]))\n else:\n return defaultdict.__getitem__(self, key)", "def xml_children_as_dict(node):\n return dict((e.tag, e.text) for e in node)", "def visit_binary_layout(self, node, children):\n # Combine all child dictionaries\n items = {k: v for d in children for k, v in d.items()}\n return items", "def parse_node(node):\n # Get everything in this tag\n data = [parse_node(child) for 
child in node.childNodes]\n if len(data) == 0:\n ret_list = []\n attribute_dictionary_list_int = []\n else:\n # Flatten the lists\n ret_list = []\n attribute_dictionary_list_int = []\n for item in data:\n if type(item) == tuple:\n if len(item[0]) > 0:\n ret_list += item[0]\n attribute_dictionary_list_int += item[1]\n\n if node.nodeName == 'g':\n # Group found\n # Analyse group properties\n group = dom2dict(node)\n if 'transform' in group.keys():\n trafo = group['transform']\n\n # Convert all transformations into a matrix operation\n am = parse_trafo(trafo)\n am = np.array([am[::2], am[1::2], [0., 0., 1.]])\n\n # Apply transformation to all elements of the paths\n def xy(p):\n return np.array([p.real, p.imag, 1.])\n\n def z(coords):\n return coords[0] + 1j * coords[1]\n\n ret_list = [Path(*[bpoints2bezier([z(np.dot(am, xy(pt)))\n for pt in seg.bpoints()])\n for seg in path])\n for path in ret_list]\n return ret_list, attribute_dictionary_list_int\n elif node.nodeName == 'path':\n # Path found; parsing it\n path = dom2dict(node)\n d_string = path['d']\n return [parse_path(d_string)] + ret_list, [\n path] + attribute_dictionary_list_int\n elif convert_polylines_to_paths and node.nodeName == 'polyline':\n attrs = dom2dict(node)\n path = parse_path(polyline2pathd(node['points']))\n return [path] + ret_list, [attrs] + attribute_dictionary_list_int\n elif convert_polygons_to_paths and node.nodeName == 'polygon':\n attrs = dom2dict(node)\n path = parse_path(polygon2pathd(attrs['points']))\n return [path] + ret_list, [attrs] + attribute_dictionary_list_int\n elif convert_lines_to_paths and node.nodeName == 'line':\n line = dom2dict(node)\n d_string = ('M' + line['x1'] + ' ' + line['y1'] +\n 'L' + line['x2'] + ' ' + line['y2'])\n path = parse_path(d_string)\n return [path] + ret_list, [line] + attribute_dictionary_list_int\n elif convert_ellipses_to_paths and node.nodeName == 'ellipse':\n attrs = dom2dict(node)\n path = parse_path(ellipse2pathd(attrs))\n return [path] + ret_list, [attrs] + attribute_dictionary_list_int\n\t\telif convert_circles_to_paths and node.nodeName == 'circle':\n\t\t\tattrs = dom2dict(node)\n path = parse_path(ellipse2pathd(attrs))\n return [path] + ret_list, [attrs] + attribute_dictionary_list_int\n\t\telif convert_rectangles_to_paths and node.nodeName == 'rect':\n attrs = dom2dict(node)\n path = parse_path(rect2pathd(attrs))\n return [path] + ret_list, [attrs] + attribute_dictionary_list_int", "def visit_Dict(self, node):\n self.generic_visit(node)\n if all(isinstance(key, ast.Str) for key in node.keys):\n keywords = [ ast.keyword(arg=key.s, value=value)\n for key, value in zip(node.keys, node.values) ]\n return to_call(to_name('dict'), keywords=keywords)\n return node", "def test_etree_to_dict(self):\r\n person_d = importer.etree_to_dict(self.person_tree.getroot())\r\n assert person_d == {\r\n 'world-crises': [\r\n {'crises': []},\r\n {'organizations': []},\r\n {'people': [\r\n {'person': [\r\n {'name': 'Bob TestPerson'},\r\n {'alternate-names': 'TestDude'},\r\n {'kind': 'TestPersonKind'},\r\n {'description': 'PersonTestDescription'},\r\n {'location': [\r\n {'city': 'Test Person City'},\r\n {'country': 'United States'}]},\r\n {'images': [\r\n {'image': [\r\n {'source': 'http://www.testimage.com'},\r\n {'description': 'Description of TestImage'}]}]},\r\n {'maps': [\r\n {'map': [\r\n {'source': 'http://maps.google.com'},\r\n {'description': 'Map Description'}]}]},\r\n {'videos': [{'youtube': 'r_8om4dsEmw'}]},\r\n {'social': [{'twitter': '@billgates'}]},\r\n {'citations': 
[\r\n {'citation': [\r\n {'source': 'http://en.wikipedia.org/wiki/Test'},\r\n {'description': 'Wiki'}]}]},\r\n {'external-links': [\r\n {'external-link': [\r\n {'source': 'http://www.zombo.com/'},\r\n {'description': 'Test Link'}]}]}],\r\n 'id': 'p-algore'}]}]}", "def extract(data, key):\n for d in data:\n if d.startswith(key):\n return d.replace(key+':','').strip() #remove the parser tag then remove the spaces", "def extract_key_nodes(self, key, nodes=None):\n if nodes is None:\n nodes = []\n if self.name == key:\n nodes.append(self)\n for i in range(len(self.children)):\n self.children[i].extract_key_nodes(key, nodes=nodes)", "def make_unpack_map(node):\n return dict(zip(node.names, node.iternodes()))", "def drop_keys(d):\n if isinstance(d, dict):\n return {\n k: drop_keys(v)\n for k, v in d.items()\n if k not in [\"propNames\", \"package\"]\n and v is not None\n and not (k == \"children\" and v == \"\")\n }\n elif isinstance(d, list):\n return [drop_keys(x) for x in d]\n return d", "def build_dictionary_element_tree(element, node_attr_fields=NODE_FIELDS, way_attr_fields=WAY_FIELDS,\n default_tag_type='regular'):\n node_attribs = {}\n way_attribs = {}\n way_nodes = []\n tags = [] # Handle secondary tags the same way for both node and way elements\n \n if element == None:\n print ('Element is Null')\n return None\n \n if element.tag == 'node':\n check = check_id(element.attrib['id'])\n \n if not check:\n print ('Node ID is Null or not a number: ', element.attrib['id'])\n fix_it.node_id_bad[element.attrib['id']] += 1\n return None\n \n for attr in element.attrib:\n if attr in node_attr_fields:\n node_attribs[attr] = element.attrib[attr]\n \n for child in element:\n temp = { }\n \n if 'cityracks.' in child.attrib['k']:\n child.attrib['k'] = child.attrib['k'].replace('cityracks.','')\n \n m = correct_chars_re.search(child.attrib['k']) # No match returns None\n\n if not m: \n print ('Node key -- Problem character! ', 'key = ', child.attrib['k'], ' value = ', child.attrib['v'])\n fix_it.counts['node child key eliminated'] += 1\n infoKey = 'node key: ' + child.attrib['k']\n fix_it.bad_keys[infoKey] += 1\n continue # eliminate the problematic child tag\n \n # Fix value\n fixed = fix_it.fixer(child, 'Node') # Correct or eliminate the child <tag> value\n # Function fix_it returns None if there is a data problem\n if fixed == '$skip':\n fix_it.counts['node tag skipped'] += 1\n continue\n \n if not fixed:\n fix_it.counts['node child value eliminated'] += 1\n continue # Eliminate this child tag\n else:\n temp['id'] = element.attrib['id'] # Save the fixed child tag for writing into csv file\n temp['value'] = fixed\n \n if ':' in child.attrib['k']:\n k = child.attrib['k'].split(':',1)\n temp['type'] = k[0]\n temp['key'] = k[1]\n else:\n temp['key'] = child.attrib['k']\n temp['type'] = default_tag_type\n \n fix_it.counts['node tag count'] += 1 # count the child tags not eliminated\n tags.append(temp)\n \n return {'node': node_attribs, 'node_tags': tags}\n \n elif element.tag == 'way':\n check = check_id(element.attrib['id'])\n \n if not check:\n print ('Way ID is Null or not a number: ', element.attrib['id'])\n fix_it.way_id_bad[element.attrib['id']] += 1\n return None\n \n for attr in element.attrib: \n if attr in way_attr_fields:\n way_attribs[attr] = element.attrib[attr]\n \n position = 0\n for child in element:\n temp = { }\n \n if child.tag == 'tag':\n m = correct_chars_re.search(child.attrib['k']) # No match returns None\n \n if not m:\n print ('Way key -- Problem char! 
', 'key = ', child.attrib['k'], ' value = ', child.attrib['v'])\n fix_it.counts['way child key eliminated'] += 1\n infoKey = 'way key: ' + child.attrib['k']\n fix_it.bad_keys[infoKey] += 1\n continue # eliminate the problematic child tag\n \n # Fix value\n fixed = fix_it.fixer(child, 'Way') # Correct or eliminate the child <tag> value\n # Function fix_it returns None if there is a data problem\n if fixed == '$skip':\n fix_it.counts['way tag skipped'] += 1\n continue\n \n if not fixed:\n fix_it.counts['way child value eliminated'] += 1\n continue # Eliminate this child tag\n else:\n temp['id'] = element.attrib['id'] # Save the fixed child tag for writing into csv file\n temp['value'] = fixed\n\n if ':' in child.attrib['k']:\n k = child.attrib['k'].split(':',1)\n temp['type'] = k[0]\n temp['key'] = k[1]\n else:\n temp['key'] = child.attrib['k']\n temp['type'] = default_tag_type\n \n fix_it.counts['way tag count'] += 1 # count the child tags not eliminated\n tags.append(temp)\n \n elif child.tag == 'nd':\n check = check_id(child.attrib['ref'])\n \n if not check:\n print ('Way Node reference is Null or not a number: ', child.attrib['ref'])\n fix_it.way_node_reference_bad[child.attrib['ref']] += 1\n continue\n \n temp['id'] = element.attrib['id']\n temp['node_id'] = child.attrib['ref']\n temp['position'] = position\n position += 1\n fix_it.counts['way node tag count'] += 1 # count the child tags not eliminated\n way_nodes.append(temp)\n \n #print ('way_attribs:\\n', way_attribs)\n #print ('way_nodes:\\n', way_nodes)\n #print ('way_tags:\\n', tags)\n #print ('---------------\\n')\n return {'way': way_attribs, 'way_nodes': way_nodes, 'way_tags': tags}", "def jsonify(node) -> Dict:\n return {\n **{\n \"u{0}\".format(i):\n ui.tolist() for (i, ui) in enumerate(node.u)\n },\n **{\n \"observed\": node.observed\n },\n **{\n \"phi{0}\".format(i):\n phii.tolist() for (i, phii) in enumerate(node.phi)\n },\n **{\n \"f\": node.f.tolist(),\n \"g\": node.g.tolist()\n }\n }", "def lookup(key, keys, document, with_keys=False):\n if isinstance(document, list):\n for d in document:\n for result in lookup(keys, d, with_keys=with_keys):\n yield result\n\n if isinstance(document, dict):\n for k, v in document.items():\n if key == k and document.get(\"kind\",) == \"Video\":\n if with_keys:\n yield {k: document.get(k) for k in keys}\n else:\n yield v\n if isinstance(v, dict):\n for result in lookup(key, keys, v, with_keys=with_keys):\n yield result\n elif isinstance(v, list):\n for d in v:\n for result in lookup(key, keys, d, with_keys=with_keys):\n yield result", "def _lookup_element(lst, key):\n if not lst:\n return {}\n for ele in lst:\n if not ele or not isinstance(ele, dict):\n continue\n if key in ele:\n return ele[key]\n return {}", "def nodedict2xarraydict(nodedict):\n return {name:getXarray(node) for name,node in nodedict.items()}", "def extract_element(html, xpaths, process_dict_fn=None):\n # Attempt to parse the html, aborting here if it is not parseable\n try:\n lxml_html = lxml.html.fromstring(html)\n except lxml.etree.ParserError:\n return None\n\n # Get all elements specified and combine scores\n extracted_strings = defaultdict(dict)\n for extraction_xpath, score in xpaths:\n found_elements = lxml_html.xpath(extraction_xpath)\n found_elements = found_elements if isinstance(found_elements, list) else [found_elements]\n for found_element in found_elements:\n element = normalise_whitespace(found_element)\n if element:\n try:\n extracted_strings[element]['score'] += score\n 
extracted_strings[element]['xpaths'].append(extraction_xpath)\n extracted_strings[element]['xpaths'].sort()\n except KeyError:\n extracted_strings[element]['score'] = score\n extracted_strings[element]['xpaths'] = [extraction_xpath]\n\n # Edit the dictionary\n if process_dict_fn:\n extracted_strings = process_dict_fn(extracted_strings)\n\n return extracted_strings", "def extract_node(self, node, skip_node=False):\n\n return {\n child.tag: (\n child.text\n if len(child) == 0\n else self.extract_node(child)\n )\n for child in node\n }", "def value_from_data_key(node, key):\n if key == 'tags_inher':\n return node.tags\n elif key == 'children_heading':\n return [c.heading for c in node.children]\n elif key in ('parent_heading',\n 'previous_same_level_heading',\n 'next_same_level_heading',\n ):\n othernode = getattr(node, key.rsplit('_', 1)[0])\n if othernode and not othernode.is_root():\n return othernode.heading\n else:\n return\n else:\n return getattr(node, key)", "def parse(cls, el):\n if isinstance(el, list):\n for i, x in enumerate(el):\n el[i] = HashableDict.parse(x)\n elif isinstance(el, dict):\n d = HashableDict()\n for k, v in el.iteritems():\n d[k] = HashableDict.parse(v)\n return d\n return el", "def convert_heading_node_to_dict(heading_node):\n children = []\n for child in heading_node.children:\n children.append(convert_heading_node_to_dict(child))\n heading_node_data = {\n \"text\": heading_node.title,\n \"slug\": heading_node.title_slug,\n \"level\": heading_node.level,\n \"children\": children,\n }\n return heading_node_data", "def get_by_list_of_keys(dictionary: Dict, key_path: List[Any]) -> Dict:\n if len(key_path) == 1:\n return dictionary[key_path[0]]\n else:\n return get_by_list_of_keys(dictionary[key_path[0]], key_path[1:])", "def _collect(node: dict, exact: bool) -> Iterable[int]:\n keys = set(node.keys()) - KEYS # only prefix nodes\n suffixes = [] if exact else node.get(SUFFIXKEY, [])\n return chain(suffixes, node.get(ITEMSKEY, []), *(_collect(node[key], exact) for key in keys))", "def traverse(self, prt):\n lst = []\n for e in self.items:\n if(e is not None):\n prt(e[1])\n lst.append(e[1])\n return lst", "def scrape(etree, xpaths):\n return {k: get_xpath_val(apply_xpath(v, etree, k), v.path)\n if isinstance(v, lxml.etree.XPath)\n else [scrape(i, v[1]) for i in apply_xpath(v[0], etree, k)]\n for k, v in xpaths.items()}" ]
[ "0.5124263", "0.5013636", "0.4812486", "0.4779492", "0.47377607", "0.4701311", "0.46952295", "0.4666734", "0.46113923", "0.45621026", "0.45284662", "0.45052168", "0.44883114", "0.44732088", "0.44579843", "0.44518983", "0.4401898", "0.438814", "0.43835205", "0.4365073", "0.43540734", "0.4348048", "0.43311876", "0.43240517", "0.4315327", "0.43081757", "0.42889962", "0.42738315", "0.42636192", "0.42622736" ]
0.6786015
0
Walk through the events or albums (depending on the value of albums) in this library and apply each function in the list funcs to each
def walk(self, funcs): if self.use_album: targetName = "AlbumName" albums = [a for a in self.albums if a.get("Album Type", None) == "Regular"] else: targetName = "RollName" albums = self.albums i = 0 for folder in albums: i += 1 if self.use_album: folderDate = None else: folderDate = self.appleDate(folder["RollDateAsTimerInterval"]) images = folder["KeyList"] folderName = folder[targetName] #as we process albums/events in the iPhoto library, remove that album #from the list of import_albums we'll be importing at the end if self.import_albums: for ia in self.import_albums: for album_name in ia['album_names']: album_name = unicode(album_name, 'utf-8') if folderName == album_name: self.import_albums.remove(ia) if folderDate and self.use_date: date = '%(year)d%(delim)s%(month)02d%(delim)s%(day)02d' % { 'year': folderDate.year, 'month': folderDate.month, 'day': folderDate.day, 'delim': self.date_delimiter } if re.match("[A-Z][a-z]{2} [0-9]{1,2}, [0-9]{4}", folderName): outputPath = date elif re.match("[0-9]{4}.[0-9]{2}.[0-9]{2} ?.*", folderName): outputPath = folderName else: outputPath = date + " " + folderName if self.year_dir: outputPath = os.path.join(str(folderDate.year), outputPath) else: outputPath = folderName # Deconflict output directories targetFileDir = os.path.join(self.dest_dir, outputPath) if self.deconflict: j = 1 while targetFileDir in self.output_dirs: targetFileDir = os.path.join(self.dest_dir, outputPath + " %02d"%j) j += 1 self.output_dirs.add(targetFileDir) self.status("* Processing %i of %i: %s (%i images)...\n" % ( i, len(albums), folderName, len(images) )) for imageId in images: for func in funcs: func(imageId, targetFileDir, folderDate) self.status("\n") if self.import_missing: self.status("importing folders:\n") for ia in self.import_albums: self.status(ia["album_dir"] + "\n") #using the "Auto Import" dir in iPhoto was unpredictable with respect to the resulting event name. #Using AppleScript to import the event, seams to always result in the event being properly named if not self.test: #There is probably a better way to do this. I noticed I had an album with an ' in it that errored... escaped_dir = ia["album_dir"].replace("'", "\\'").replace('"', '\\"') os.system('''osascript -e ' tell application "iPhoto" import from "%s" end tell ' ''' % escaped_dir)
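The walk entry above iterates over albums or events, derives a dated output folder name, deconflicts duplicate output directories, and then applies every callback in funcs to every image of the folder. A stripped-down sketch of that pattern is shown below, with the album structure flattened into plain dicts; the field names 'name', 'date', and 'images' are assumptions of this sketch, not the library's real keys.

```python
import os


def walk_albums(albums, dest_dir, funcs, use_date=True):
    """Apply every callback in `funcs` to every image of every album.

    `albums` is assumed to be a list of dicts with 'name', 'date' (a
    datetime or None) and 'images' (a list of image ids); each callback
    receives (image_id, target_dir, album_date).
    """
    seen_dirs = set()
    for album in albums:
        name, date, images = album["name"], album.get("date"), album["images"]
        folder = f"{date:%Y-%m-%d} {name}" if (use_date and date) else name
        target = os.path.join(dest_dir, folder)
        # Deconflict output directories that would otherwise collide.
        suffix = 1
        while target in seen_dirs:
            target = os.path.join(dest_dir, f"{folder} {suffix:02d}")
            suffix += 1
        seen_dirs.add(target)
        for image_id in images:
            for func in funcs:
                func(image_id, target, date)
```

A call such as walk_albums(albums, dest_dir, [copy_image, write_metadata]) then plays the role of the original walk(funcs); both callback names are placeholders here.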
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def album_list(self):\n\n artist_id = self.addon_args[\"artist_id\"][0]\n\n xbmcplugin.setContent(self.addon_handle, \"albums\")\n\n for album in self.connection.walk_artist(artist_id):\n self.add_album(album)\n\n xbmcplugin.addSortMethod(\n self.addon_handle, xbmcplugin.SORT_METHOD_UNSORTED)\n xbmcplugin.addSortMethod(\n self.addon_handle, xbmcplugin.SORT_METHOD_ALBUM)\n xbmcplugin.addSortMethod(\n self.addon_handle, xbmcplugin.SORT_METHOD_ARTIST)\n xbmcplugin.addSortMethod(\n self.addon_handle, xbmcplugin.SORT_METHOD_VIDEO_YEAR)\n\n xbmcplugin.endOfDirectory(self.addon_handle)", "def collect_functions(self):\n if not self.functions:\n for item in dir(self.file_import):\n new_function = getattr(self.file_import, item)\n # if it is a YMLMetadataCollector wrapper, add it to the list.\n if (\n callable(new_function)\n and isinstance(new_function, FunctionType)\n and \"YMLMetadataCollector\" in repr(new_function)\n ):\n self.functions.append(new_function)", "def exec_event_functions(self):\n for name, fdict in self._event_functions.items():\n exec_func=False\n if self.is_eventCodePresent(fdict['eventCode']) \\\n and (self.ievent % fdict['nevents']) == 0:\n exec_func = True\n \n det_class = psutils.getattr_complete(self,fdict['det'])\n \n if exec_func and det_class.is_in_keys:\n# print 'executing',det_class._name, fdict['attr']\n func = psutils.getattr_complete(det_class,fdict['attr']) \n func(**fdict['kwargs'])", "def run_callbacks(self, **kwargs):\n for callback in self.CALLBACKS:\n getattr(self, callback)(**kwargs)", "def __init__(self, events):\n for event in events:\n #do stuff\n pass", "def call_ball_func(ball_list, func_name, action):\r\n for ball in ball_list:\r\n if func_name == \"speed\":\r\n ball.change_speed(action)\r\n elif func_name == \"size\":\r\n ball.change_size(action)\r\n elif func_name == \"movement\":\r\n ball.change_movement(action)\r\n elif func_name == \"play\":\r\n ball.start_and_stop(action)", "def execute(self):\n\t\tfor callback in self:\n\t\t\tcallback()", "def albums_by_genre_list(self):\n\n genre = self.addon_args[\"foldername\"][0].decode(\"utf-8\")\n\n xbmcplugin.setContent(self.addon_handle, \"albums\")\n\n for album in self.connection.walk_album_list_genre(genre):\n self.add_album(album, show_artist=True)\n\n xbmcplugin.endOfDirectory(self.addon_handle)", "def walk(self, members, callables):\n answer = [self._answer(members, callables)]\n for item in self.items:\n if isinstance(item, Pulse):\n answer.append(item._answer(members, callables))\n else:\n answer.append(item.walk(members, callables))\n\n return answer", "def caller():\n\n for func in funcs:\n func()", "def run_all(artist_name, client_id, client_secret):\n\n parent_dir = \"./data\"\n artist_dir = f\"{artist_name}\"\n base_path = os.path.join(parent_dir, artist_dir)\n\n # Define save-path for album art\n\n album_cover_dir = \"Album_Art/\"\n album_cover_path = os.path.join(base_path, album_cover_dir)\n try:\n os.makedirs(album_cover_path)\n except FileExistsError:\n pass\n\n # Define save-path for album analysis audio frames\n\n album_info_dir = \"Album_Info/\"\n album_info_path = os.path.join(base_path, album_info_dir)\n try:\n os.makedirs(album_info_path)\n except FileExistsError:\n pass\n\n sp = get_spotify_credentials(client_id=client_id, client_secret=client_secret)\n\n album_names, album_name_uri_dict, album_img_url_dict = get_album_details(sp=sp, artist_name=artist_name)\n\n for album_name in tqdm(album_names):\n\n get_album_art(album_name=album_name, 
album_img_url_dict=album_img_url_dict,\n album_cover_path=album_cover_path)\n\n for album_name in tqdm(album_names):\n\n get_album_audio_features(sp=sp, album_name=album_name, album_name_dict=album_name_uri_dict,\n album_info_path=album_info_path)\n\n get_album_audio_analysis(sp=sp, album_name=album_name, album_name_dict=album_name_uri_dict,\n album_info_path=album_info_path)", "def _add_function_to_labels_toggles(self, fun):\n for s_group in self.labels_toggles:\n for w in s_group:\n w.on_trait_change(fun, 'value')", "def _multiple_callbacks(callbacks, *args, **kwargs):\n if isinstance(callbacks, list):\n for cb in callbacks:\n cb(*args, **kwargs)\n return\n if callbacks:\n callbacks(*args, **kwargs)", "def __call__(self, *args, **kwargs):\n for hook in self:\n logger.debug(f\"Executing hook function '{hook}'.\")\n try:\n hook(*args, **kwargs)\n except Exception as error:\n logger.error(\n \"Error occurred during execution of \"\n \"hook '{}': {}.\".format(hook, error)\n )\n raise", "def callable_hooks(self):\n for hook in self.custom_hooks:\n options = rh.hooks.HookOptions(hook,\n self.custom_hook(hook),\n self.tool_paths)\n yield (hook, functools.partial(rh.hooks.check_custom,\n options=options))\n\n for hook in self.builtin_hooks:\n options = rh.hooks.HookOptions(hook,\n self.builtin_hook_option(hook),\n self.tool_paths)\n yield (hook, functools.partial(rh.hooks.BUILTIN_HOOKS[hook],\n options=options))", "def register(self, funcs):\n for name, func in funcs.items():\n self.functions[name] = func", "def __init__(self, sources, folder):\r\n # TODO: include possibility of multiple aggregating functions?\r\n self._acc = folder\r\n if type(sources) is not list:\r\n sources = [sources]\r\n\r\n self._sources = sources\r\n for k in range(len(sources)):\r\n if type(sources[k]) == type(self):\r\n sources[k] = OnEveryDt(1, sources[k])\r\n self._events = [event.subscribe(source, _(self)._update, self) for source in self._sources]", "def register_func_list(self, func_and_handler):\n for func, handler in func_and_handler:\n self._function_dispatch.register(func, handler)\n self.dispatch.cache_clear()", "def get_albums(self):\n self.artist = self.artists_list.currentText()\n self.c_albums = [x['album'] for x in dmlc.list_albums(self.artist)\n if [x['album'] in self.albums_map[self.artist]]]\n self.albums_list.clear()\n self.albums_list.addItems(self.c_albums)\n self.update_navigation_buttons()", "def walk(self, members, callables):\n answer = [self._answer(members, callables),\n self.context._answer(members, callables)]\n for item in self.items:\n if isinstance(item, Pulse):\n answer.append(item._answer(members, callables))\n else:\n answer.append(item.walk(members, callables))\n\n return answer", "def test_album_tracks(bot, monkeypatch):\n song = Song('wintersun', 'beyond the dark sun')\n monkeypatch.setattr(bot.SP, 'get_album_tracks', lambda x: [])\n monkeypatch.setattr(bot, 'get_album_tracks_lastfm', lambda x: ['lastfm'])\n assert bot.get_album_tracks(song)[0] == 'lastfm'\n\n monkeypatch.setattr(bot.SP, 'get_album_tracks', lambda x: ['spotify'])\n assert get_album_tracks(song)[0] == 'spotify'", "def dispatch_module_event(self, event: str, *args, **kwargs):\n return [callback(event, *args, **kwargs) for callback in self.event_registry[event]]", "def function(self, func):\n blocks = []\n\n for block in idaapi.FlowChart(func):\n blocks.append(self.block(block))\n\n return blocks", "def sequential(self, func, args_dict=None):\n for uri, cf in self._cfs.items():\n args = self._process_args_dict(cf, 
uri, args_dict)\n func(*args)", "async def events(self) -> Iterable[Event]:", "def addfunctions(dtls, bunchdt):\n snames = [\n \"BuildingSurface:Detailed\",\n \"Wall:Detailed\",\n \"RoofCeiling:Detailed\",\n \"Floor:Detailed\",\n \"FenestrationSurface:Detailed\",\n \"Shading:Site:Detailed\",\n \"Shading:Building:Detailed\",\n \"Shading:Zone:Detailed\",\n ]\n for sname in snames:\n if sname.upper() in bunchdt:\n surfaces = bunchdt[sname.upper()]\n for surface in surfaces:\n func_dict = {\n \"area\": fh.area,\n \"height\": fh.height, # not working correctly\n \"width\": fh.width, # not working correctly\n \"azimuth\": fh.azimuth,\n \"tilt\": fh.tilt,\n \"coords\": fh.getcoords, # needed for debugging\n }\n try:\n surface.__functions.update(func_dict)\n except KeyError as e:\n surface.__functions = func_dict\n # add common functions\n # for name in dtls:\n # for idfobject in bunchdt[name]:\n # idfobject.__functions\n # idfobject['__functions']['fieldnames'] = fieldnames\n # idfobject['__functions']['fieldvalues'] = fieldvalues\n # idfobject['__functions']['getrange'] = GetRange(idfobject)\n # idfobject['__functions']['checkrange'] = CheckRange(idfobject)", "def _call_followers(self, event):\n context = self.get_context(event)\n if context is not None:\n for callback in self.followers.get(context, []):\n callback(event)", "def run_functions(self):\n for function in self.functions:\n try:\n function()\n except Exception as err:\n logger.exception(\n f\"[red]Failed running and collecting data for function: {function.__name__}[/red]\"\n )\n logger.error(traceback.format_exc())\n logger.error(f\"[red]{err}[/red]\")\n logger.error(\"Continuing..\")", "def bunch__functions(idfobject): \n funcdct = idfobject.__functions\n funcsresults = [(key, funcdct[key](idfobject)) for key in funcdct.keys()]\n return funcsresults", "def map(self, func, *args, **kwds):\r\n if not self.extensions:\r\n # FIXME: Use a more specific exception class here.\r\n raise RuntimeError('No %s extensions found' % self.namespace)\r\n response = []\r\n for e in self.extensions:\r\n self._invoke_one_plugin(response.append, func, e, args, kwds)\r\n return response" ]
[ "0.5338826", "0.5199902", "0.51033753", "0.508161", "0.5081003", "0.50747555", "0.50526106", "0.5039024", "0.50192744", "0.50101346", "0.49962336", "0.49476117", "0.49463356", "0.49420202", "0.49274322", "0.4924771", "0.49216843", "0.49067512", "0.48841634", "0.4882843", "0.488199", "0.48753655", "0.48702234", "0.4853826", "0.48359078", "0.4831446", "0.48249972", "0.4812949", "0.48116806", "0.47985157" ]
0.6805687
0
Copy an image from the library to a folder in the dest_dir. The name of the folder is based on folderName and folderDate; if folderDate is None, it's only based upon the folderName. If use_metadata is True, also write the image metadata from the library to the copy. If use_faces is True, faces will be saved as keywords.
def copyImage(self, imageId, folderName, folderDate, modified = False): try: image = self.images[imageId] except KeyError: raise iPhotoLibraryError, "Can't find image #%s" % imageId if not os.path.exists(folderName): try: if not self.test: os.makedirs(folderName) except OSError, why: raise iPhotoLibraryError, \ "Can't create %s: %s" % (folderName, why[1]) self.status(" Created %s\n" % folderName) #Unedited images only have ImagePath, edited images have both ImagePath and OriginalPath, #except for some corrupted iPhoto libraries, where some images only have OriginalPath. #Trying to satisfy both conditions with this nested logic. if self.originals: if "OriginalPath" in image and modified == False: mFilePath = image["OriginalPath"] else: mFilePath = image["ImagePath"] else: if not "ImagePath" in image: mFilePath = image["OriginalPath"] else: mFilePath = image["ImagePath"] basename = os.path.basename(mFilePath) if (self.all_versions): if (modified == False): self.copyImage(self, imageId, folderName, folderDate, True) else: basename += '_modified' # Deconflict ouput filenames tFilePath = os.path.join(folderName, basename) if self.deconflict: j = 1 while tFilePath in self.output_files: tFilePath = os.path.join(folderName, "%02d_"%j + basename) j += 1 self.output_files.add(tFilePath) # Skip unchanged files, unless we're writing metadata. if not self.use_metadata and os.path.exists(tFilePath): mStat = os.stat(mFilePath) tStat = os.stat(tFilePath) if not self.ignore_time_delta and abs(tStat[stat.ST_MTIME] - mStat[stat.ST_MTIME]) <= 10: self.status("-") return if tStat[stat.ST_SIZE] == mStat[stat.ST_SIZE]: self.status("-") return if not self.test and os.path.exists(mFilePath): shutil.copy2(mFilePath, tFilePath) md_written = False if self.use_metadata: md_written = self.writePhotoMD(imageId, tFilePath) if md_written: self.status("+") else: self.status(".")
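The copyImage entry above creates the destination folder, deconflicts file names, skips files whose size and timestamp suggest they have not changed, and copies with shutil.copy2. The core skip-then-copy logic can be sketched as below; the function name, its parameters, and the combined size-and-mtime test are simplifications of this sketch, and the original additionally chooses between ImagePath and OriginalPath and can write metadata and faces afterwards.

```python
import os
import shutil


def copy_if_changed(src_path, target_dir, seen=None, time_delta=10):
    """Copy src_path into target_dir, skipping files that look unchanged.

    A destination file is treated as unchanged when it already exists with
    the same size and a modification time within time_delta seconds of the
    source; shutil.copy2 preserves timestamps, so the check stays
    meaningful on repeated runs.
    """
    os.makedirs(target_dir, exist_ok=True)
    basename = os.path.basename(src_path)
    dst_path = os.path.join(target_dir, basename)
    if seen is not None:
        # Deconflict file names that would otherwise collide in this run.
        counter = 1
        while dst_path in seen:
            dst_path = os.path.join(target_dir, f"{counter:02d}_{basename}")
            counter += 1
        seen.add(dst_path)
    if os.path.exists(dst_path):
        src_stat, dst_stat = os.stat(src_path), os.stat(dst_path)
        if (src_stat.st_size == dst_stat.st_size
                and abs(src_stat.st_mtime - dst_stat.st_mtime) <= time_delta):
            return dst_path  # looks unchanged; skip the copy
    shutil.copy2(src_path, dst_path)
    return dst_path
```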
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def form_sample_folder(self, input_folder, target_folder, sample_name):\n print(f'processing {sample_name} folder.')\n # first make a subfolder to contain the images - e.g. 'target_folder/sample_name'\n sample_dir = join(target_folder, sample_name)\n if not os.path.exists(sample_dir):\n mkdir(sample_dir)\n # resize and move the mask images - e.g. 'target_folder/sample_name/imgs_necrosis.png'\n img_file_nec = join(input_folder, 'Necrosis',\n 'Tissue Slides.'+sample_name+'.png')\n img_res = self.process_img(img_file_nec, self.rescale_ratio)\n img_nec = img_res.copy()\n cv2.imwrite(join(sample_dir, 'necrosis.png'), img_res)\n\n img_file_perf = join(input_folder, 'Perfusion',\n 'Tissue Slides.'+sample_name+'.png')\n img_res = self.process_img(img_file_perf, self.rescale_ratio)\n cv2.imwrite(join(sample_dir, 'perfusion.png'), img_res)\n\n # resize and move the maker HE and EF5 images\n files = listdir(input_folder)\n img_files = [x for x in files if x.split(\n '.')[-1] in ('tif', 'jpg', 'png')]\n for img_file in img_files:\n if (sample_name+'_' in img_file) or (sample_name+'-' in img_file):\n if ('HE-G' in img_file) or ('HE-green' in img_file) or ('HEgreen' in img_file):\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n if not os.path.exists(join(sample_dir, 'HE-green.png')):\n cv2.imwrite(join(sample_dir, 'HE-green.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n elif ('HE-R' in img_file) or ('HE-red' in img_file) or ('HEred' in img_file):\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n if not os.path.exists(join(sample_dir, 'HE-red.png')):\n cv2.imwrite(join(sample_dir, 'HE-red.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n elif ('HE-B' in img_file) or ('HE-blue' in img_file) or ('HE-blue' in img_file):\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n if not os.path.exists(join(sample_dir, 'HE-blue.png')):\n cv2.imwrite(join(sample_dir, 'HE-blue.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n elif 'EF5' in img_file:\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n img_ef5 = img_res.copy()\n if not os.path.exists(join(sample_dir, 'EF5.png')):\n cv2.imwrite(join(sample_dir, 'EF5.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n\n masked_ef5 = (img_ef5 * (img_nec <= 0)).astype(img_ef5.dtype)\n cv2.imwrite(join(sample_dir, 'EF5_masked.png'), masked_ef5)\n assert len(listdir(sample_dir)) == 7\n return", "def select_img(input_folder, output_folder, key_words):\n files = listdir(input_folder)\n img_files = [x for x in files if x.split('.')[-1] in ('tif', 'jpg', 'png')]\n img_files = [x for x in img_files if key_words in x]\n if exists(output_folder):\n warnings.warn(\"output folder existed, might be overwitten!\")\n for img_file in img_files:\n warnings.warn(f\"output file {img_file} already exists!\")\n if input(\"Do you really want to proceed? 
(y/n)\") == \"y\":\n break\n else:\n raise Exception\n else:\n mkdir(output_folder)\n\n for img_file in img_files:\n copyfile(join(input_folder, img_file), join(output_folder, img_file))", "def CpSrcDest( IMGFOLDER = '..\\\\Data\\\\',\n EXTENSION = \".jpg\",\n DESTINATION_FOLDER='..\\\\Data\\\\AllBirds\\\\'):\n for i, imag in enumerate(BirdPhotos):\n filename= IMGFOLDER+ imag + EXTENSION\n cpCommand = \"copy \" + filename + \" \" + DESTINATION_FOLDER\n os.system(cpCommand)\n return", "def save_image(image, output_folder, output_name):\n\n\tfolder_path = compute_path(output_folder, 'dataset')\n\tos.makedirs(folder_path, exist_ok=True)\n\n\tfile_path = os.path.join(folder_path, output_name + '.png')\n\timage.save(file_path)", "def copy_image(SourceImageName=None, DestinationImageName=None, DestinationRegion=None, DestinationImageDescription=None):\n pass", "def copyAsset(self, src, dst, **kw):\n if self.isfile(src):\n self.copyfile(src, dst)\n else:\n # copy folder\n if not self.exists(dst):\n self.makedirs(dst)\n for name in self.listdir(src):\n self.copyAsset(self.joinpath(src, name), self.joinpath(dst, name), copycache=0)\n\n # copy cache\n cache_src = self.cache_path(src)\n if not os.path.exists(cache_src):\n return\n\n cache_dst = self.cache_path(dst)\n cache_dst_parent = os.path.dirname(cache_dst)\n if not os.path.exists( cache_dst_parent ):\n os.makedirs(cache_dst_parent )\n if not os.path.exists(cache_dst):\n ucopytree(cache_src, cache_dst)", "def copy_folder(src: str, dest: str) -> None:\n\tuux.show_info(\"Copying folder \" + src + \" => \" + dest)\n\n\tif not os.path.exists(src):\n\t\tuux.show_error(\"Unable to copy, '\" + src + \"' does not exist.\")\n\t\treturn\n\n\tmkdir(dest)\n\n\tfor fn in os.listdir(src):\n\t\tif os.path.isfile(src + fn):\n\t\t\ttry:\n\t\t\t\tcopy_file(src + fn, dest)\n\t\t\texcept IOError as ex:\n\t\t\t\tuux.show_error(\"Failed to copy file, \" + os.strerror(ex.errno))", "def moveImage(image, dest):\n if not os.path.exists(dest):\n os.mkdir(dest)\n move(image, dest)", "def sort_by_date_taken(src_dir, dst_dir=None, file_operation='cp', filename_extensions=['jpg'], **kwargs):\n\n def _get_date_taken(path):\n \"\"\"\n get date when picture was taken from exif metadata\n :param path: path of the picture\n :return: DateTimeOriginal (exif id 36867)\n \"\"\"\n return Image.open(path)._getexif()[36867]\n\n def _get_date_modified(path):\n \"\"\"\n get date when the file was modified for the last time (for images/videos this equals the date when the file was taken)\n :param path: path of the file\n :return: date of last file change\n \"\"\"\n return str(datetime.datetime.fromtimestamp(os.path.getmtime(path)))\n\n def _create_dir_name(date, dir_structure='ymd', is_exif=True):\n \"\"\"\n create the directory path\n :param date: exif data of the picture\n :param dir_structure: structure of dir (example: 'ymd' - 'YYYY\\YYYY_MM\\YYYY_MM_DD; 'yd' - YYYY\\YYYY_MM_DD)\n :return: relative path/name of the directory\n \"\"\"\n if is_exif:\n date_split = date.split(' ')[0].split(':')\n else:\n date_split = date.split(' ')[0].split('-')\n dir_name = '\\\\'\n if 'y' in dir_structure:\n dir_name += date_split[0] + '\\\\'\n if 'm' in dir_structure:\n dir_name += '_'.join(d for d in date_split[:2]) + '\\\\'\n if 'd' in dir_structure:\n dir_name += '_'.join(d for d in date_split[:3]) + '\\\\'\n return dir_name\n\n # set dst_dir to src_dir if not specified\n if dst_dir is None:\n dst_dir = src_dir\n # find all files with specified file name extension\n files = []\n for 
filename_extension in filename_extensions:\n if 'read_recursive' in kwargs.keys() and kwargs['read_recursive']:\n files += glob.glob(src_dir + \"\\\\**\\\\*.\" + filename_extension, recursive=True)\n else:\n files += glob.glob(src_dir + \"\\\\*.\" + filename_extension)\n print(\"copying \" + str(len(files)) + \" files from \" + src_dir + \" to \" + dst_dir + '\\n')\n for num, file in enumerate(files):\n # create the name of directory structure\n if file.split('.')[-1].lower() in [\"jpg\", \"jpeg\", \"jpe\", \"jfif\", \"tiff\", \"tif\"]: # if exif data is stored in file header\n if 'dir_structure' in kwargs.keys():\n dir_name = _create_dir_name(_get_date_taken(file), dir_structure=kwargs['dir_structure'])\n else:\n dir_name = _create_dir_name(_get_date_taken(file))\n else: # use date of change to determine creation\n if 'dir_structure' in kwargs.keys():\n dir_name = _create_dir_name(_get_date_modified(file), dir_structure=kwargs['dir_structure'], is_exif=False)\n else:\n dir_name = _create_dir_name(_get_date_modified(file), is_exif=False)\n date_dir = dst_dir + \"\\\\\" + dir_name + \"\\\\\"\n # create new date directory if it doesn't exists\n os.makedirs(date_dir, exist_ok=True)\n if file_operation in ['copy', 'cp']:\n # copy file to new dir\n shutil.copy2(file, date_dir + file.split(\"\\\\\")[-1]) # also copies files metadata\n elif file_operation in ['move', 'mv']:\n # move file to new dir\n shutil.move(file, date_dir + file.split(\"\\\\\")[-1])\n\n # print the number of files left\n sys.stdout.write(\"\\r\" + str(len(files)-num) + \" files left\")\n sys.stdout.flush()\n\n sys.stdout.write('\\r')\n sys.stdout.flush()\n print(str(len(files)) + \" files sorted\")", "def prepare_image_folder(\n *,\n src_folder: str,\n tgt_folder: str,\n to_index: bool,\n prefix: Optional[str],\n preparation_pack: Optional[Dict[str, Any]],\n force_rerun: bool,\n extensions: Optional[Set[str]],\n make_labels_in_parallel: bool,\n save_data_in_parallel: bool,\n num_jobs: int,\n train_all_data: bool,\n valid_split: Union[int, float],\n max_num_valid: int,\n lmdb_config: Optional[Dict[str, Any]],\n use_tqdm: bool,\n strict: bool,\n) -> Tuple[str, Optional[List[str]]]:\n\n if prefix is not None:\n src_folder = os.path.join(prefix, src_folder)\n tgt_folder = os.path.join(prefix, tgt_folder)\n if preparation_pack is None:\n preparation_pack = dict(type=\"default\", info={})\n preparation: IPreparation = IPreparation.from_pack(preparation_pack)\n preparation.prepare_src_folder(src_folder)\n\n if train_all_data:\n valid_split = 0\n folder_hash = hash_dict(\n dict(\n src_folder=src_folder,\n to_index=to_index,\n preparation_pack=preparation_pack,\n extensions=extensions,\n train_all_data=train_all_data,\n valid_split=valid_split,\n max_num_valid=max_num_valid,\n lmdb_config=lmdb_config,\n strict=strict,\n )\n )\n if not force_rerun and preparation.is_ready(tgt_folder, valid_split, folder_hash):\n return tgt_folder, preparation.extra_labels\n\n if os.path.isdir(tgt_folder):\n print_warning(f\"'{tgt_folder}' already exists, it will be removed\")\n shutil.rmtree(tgt_folder)\n os.makedirs(tgt_folder, exist_ok=True)\n\n print_info(\"collecting hierarchies\")\n\n def hierarchy_callback(hierarchy: List[str], path: str) -> None:\n hierarchy = hierarchy[prefix_idx:]\n if not preparation.filter(hierarchy):\n return None\n hierarchy_list.append(hierarchy)\n all_img_paths.append(path)\n\n all_img_paths: List[str] = []\n hierarchy_list: List[List[str]] = []\n if extensions is None:\n extensions = default_image_extensions\n 
prefix_idx = 0\n if prefix is not None:\n prefix_idx = len(prefix.split(os.path.sep))\n walk(src_folder, hierarchy_callback, extensions)\n\n def get_labels(\n label_fn: Callable,\n label_name: Optional[str] = None,\n ) -> List[Any]:\n def task(h: List[str]) -> Any:\n try:\n args = (h,) if label_name is None else (label_name, h)\n return label_fn(*args)\n except Exception as err:\n err_path = \"/\".join(h)\n print_error(f\"error occurred ({err}) when getting label of {err_path}\")\n return None\n\n if not make_labels_in_parallel:\n return [task(h) for h in tqdm(hierarchy_list)]\n parallel = Parallel(num_jobs, use_tqdm=use_tqdm)\n num_files = len(hierarchy_list)\n random_indices = np.random.permutation(num_files).tolist()\n shuffled = [hierarchy_list[i] for i in random_indices]\n groups = parallel.grouped(task, shuffled).ordered_results\n shuffled_results: List[Any] = sum(groups, [])\n final_results = [None] * num_files\n for idx, rs in zip(random_indices, shuffled_results):\n final_results[idx] = rs\n return final_results\n\n print_info(\"making labels\")\n labels = get_labels(preparation.get_label)\n excluded_indices = {i for i, label in enumerate(labels) if label is None}\n extra_labels_dict: Optional[Dict[str, List[str]]] = None\n extra_labels = preparation.extra_labels\n extra_label_fn = preparation.get_extra_label\n if extra_labels is not None:\n extra_labels_dict = {}\n print_info(\"making extra labels\")\n for el_name in extra_labels:\n extra_labels_dict[el_name] = get_labels(extra_label_fn, el_name)\n for extra_labels in extra_labels_dict.values():\n for i, extra_label in enumerate(extra_labels):\n if extra_label is None:\n excluded_indices.add(i)\n\n # exclude samples\n if excluded_indices:\n if not strict:\n print_warning(f\"{len(excluded_indices)} samples will be excluded\")\n else:\n raise ValueError(\n \"\\n\".join(\n [\n \"following samples are invalid:\",\n *[f\"* {all_img_paths[i]}\" for i in excluded_indices],\n \"please check the log history for more details\",\n ]\n )\n )\n for i in sorted(excluded_indices)[::-1]:\n labels.pop(i)\n if extra_labels_dict is not None:\n for sub_labels in extra_labels_dict.values():\n sub_labels.pop(i)\n all_img_paths.pop(i)\n\n def get_raw_2idx(raw_labels: List[Any]) -> Dict[Any, Any]:\n return {\n v: numpy_token if isinstance(v, str) and v.endswith(\".npy\") else v\n for v in raw_labels\n }\n\n def check_dump_mappings(l2i: Dict[Any, Any]) -> bool:\n all_indices = set(l2i.values())\n if len(all_indices) > 1:\n return True\n return list(all_indices)[0] != numpy_token\n\n numpy_token = \"[NUMPY]\"\n if to_index:\n label2idx = {label: i for i, label in enumerate(sorted(set(labels)))}\n labels_dict = {\"\": [label2idx[label] for label in labels]}\n dump_mappings = True\n else:\n labels_dict = {\"\": labels}\n label2idx = get_raw_2idx(sorted(set(labels)))\n dump_mappings = check_dump_mappings(label2idx)\n\n open_file_from = lambda folder: lambda file: open(\n os.path.join(folder, file), \"w\", encoding=\"utf-8\"\n )\n open_tgt_file = open_file_from(tgt_folder)\n\n if dump_mappings:\n with open_tgt_file(f\"{LABEL_KEY}2idx.json\") as f:\n json.dump(label2idx, f, ensure_ascii=False)\n with open_tgt_file(f\"idx2{LABEL_KEY}.json\") as f:\n json.dump({v: k for k, v in label2idx.items()}, f, ensure_ascii=False)\n\n if extra_labels_dict is not None:\n for el_name, label_collection in extra_labels_dict.items():\n if not to_index:\n labels_dict[el_name] = label_collection # type: ignore\n extra2idx = get_raw_2idx(sorted(set(label_collection)))\n 
dump_mappings = check_dump_mappings(extra2idx)\n else:\n extra2idx = {\n extra_label: i # type: ignore\n for i, extra_label in enumerate(sorted(set(label_collection)))\n }\n labels_dict[el_name] = [extra2idx[el] for el in label_collection]\n dump_mappings = True\n if dump_mappings:\n with open_tgt_file(f\"{el_name}2idx.json\") as f:\n json.dump(extra2idx, f, ensure_ascii=False)\n with open_tgt_file(f\"idx2{el_name}.json\") as f:\n eld = {v: k for k, v in extra2idx.items()}\n json.dump(eld, f, ensure_ascii=False)\n\n # prepare core\n def save(indices: np.ndarray, d_num_jobs: int, dtype: str) -> None:\n def record(idx: int) -> Optional[Tuple[str, Dict[str, Any]]]:\n split_folder = os.path.join(tgt_folder, dtype)\n os.makedirs(split_folder, exist_ok=True)\n img_path = all_img_paths[idx]\n ext = os.path.splitext(img_path)[1]\n new_img_path = os.path.join(split_folder, f\"{idx}{ext}\")\n try:\n preparation.copy(img_path, new_img_path)\n key = os.path.abspath(new_img_path)\n idx_labels: Dict[str, Any] = {}\n for label_t, t_labels in labels_dict.items():\n idx_labels[label_t] = {key: t_labels[idx]}\n return key, idx_labels\n except Exception as err:\n print_error(f\"error occurred with {img_path} : {err}\")\n return None\n\n print_info(f\"saving {dtype} dataset\")\n results: List[Tuple[str, Dict[str, Any]]]\n indices = indices.copy()\n np.random.shuffle(indices)\n if not save_data_in_parallel:\n results = [record(i) for i in indices] # type: ignore\n else:\n parallel = Parallel(d_num_jobs, use_tqdm=use_tqdm)\n results = sum(parallel.grouped(record, indices).ordered_results, [])\n d_valid_indices = [i for i, r in enumerate(results) if r is not None]\n results = [results[i] for i in d_valid_indices]\n valid_paths = [all_img_paths[idx] for idx in indices[d_valid_indices]]\n new_paths, all_labels_list = zip(*results)\n merged_labels = shallow_copy_dict(all_labels_list[0])\n for sub_labels_ in all_labels_list[1:]:\n for k, v in shallow_copy_dict(sub_labels_).items():\n merged_labels[k].update(v)\n print_info(\n \"\\n\".join(\n [\n \"\",\n \"=\" * 100,\n f\"num {dtype} samples : {len(next(iter(merged_labels.values())))}\",\n f\"num {dtype} label types : {len(merged_labels)}\",\n \"-\" * 100,\n \"\",\n ]\n )\n )\n open_dtype_file = open_file_from(os.path.join(tgt_folder, dtype))\n\n with open_dtype_file(\"paths.json\") as f_:\n json.dump(new_paths, f_, ensure_ascii=False)\n path_mapping = dict(zip(new_paths, valid_paths))\n with open_dtype_file(\"path_mapping.json\") as f_:\n json.dump(path_mapping, f_, ensure_ascii=False)\n for label_type, type_labels in merged_labels.items():\n delim = \"_\" if label_type else \"\"\n label_file = f\"{label_type}{delim}{LABEL_KEY}.json\"\n with open_dtype_file(label_file) as f_:\n json.dump(type_labels, f_, ensure_ascii=False)\n # lmdb\n if lmdb_config is None or lmdb is None:\n if lmdb_config is not None:\n msg = \"`lmdb` is not installed, so `lmdb_config` will be ignored\"\n print_warning(msg)\n else:\n local_lmdb_config = shallow_copy_dict(lmdb_config)\n local_lmdb_config.setdefault(\"path\", default_lmdb_path(tgt_folder, dtype))\n local_lmdb_config.setdefault(\"map_size\", 1099511627776 * 2)\n db = lmdb.open(**local_lmdb_config)\n context = db.begin(write=True)\n d_num_samples = len(results)\n iterator = zip(range(d_num_samples), new_paths, all_labels_list)\n if use_tqdm:\n iterator = tqdm(iterator, total=d_num_samples, desc=\"lmdb\")\n for i, path, i_labels in iterator:\n i_new_labels = {}\n for k, v in i_labels.items():\n vv = v[path]\n if isinstance(vv, str):\n 
if vv.endswith(\".npy\"):\n vv = np.load(vv)\n i_new_labels[k] = vv\n context.put(\n str(i).encode(\"ascii\"),\n dill.dumps(LMDBItem(np.array(Image.open(path)), i_new_labels)),\n )\n context.put(\n \"length\".encode(\"ascii\"),\n str(d_num_samples).encode(\"ascii\"),\n )\n context.commit()\n db.sync()\n db.close()\n # dump READY\n with open_dtype_file(READY_FILE) as f_:\n f_.write(folder_hash)\n\n num_sample = len(all_img_paths)\n if isinstance(valid_split, float):\n if valid_split < 0.0 or valid_split >= 1.0:\n raise ValueError(\"`valid_split` should be within [0, 1)\")\n valid_split = max(1, min(max_num_valid, int(round(num_sample * valid_split))))\n assert isinstance(valid_split, int)\n\n if valid_split <= 0:\n save(np.arange(num_sample), max(1, num_jobs), DatasetSplit.TRAIN)\n return tgt_folder, extra_labels\n\n train_portion = (num_sample - valid_split) / num_sample\n label_indices_mapping: Dict[Any, List[int]] = {}\n for i, label in enumerate(labels):\n if isinstance(label, str) and label.endswith(\".npy\"):\n label = numpy_token\n label_indices_mapping.setdefault(label, []).append(i)\n tuple(map(random.shuffle, label_indices_mapping.values()))\n train_indices_list: List[List[int]] = []\n valid_indices_list: List[List[int]] = []\n for label_indices in label_indices_mapping.values():\n num_label_samples = len(label_indices)\n num_train = int(round(train_portion * num_label_samples))\n num_train = min(num_train, num_label_samples - 1)\n if num_train == 0:\n train_indices_list.append([label_indices[0]])\n else:\n train_indices_list.append(label_indices[:num_train])\n valid_indices_list.append(label_indices[num_train:])\n\n def propagate(src: List[List[int]], tgt: List[List[int]]) -> None:\n resolved = 0\n src_lengths = list(map(len, src))\n sorted_indices = np.argsort(src_lengths).tolist()[::-1]\n while True:\n for idx in sorted_indices:\n if len(src[idx]) > 1:\n tgt[idx].append(src[idx].pop())\n resolved += 1\n if resolved == diff:\n break\n if resolved == diff:\n break\n\n diff = sum(map(len, valid_indices_list)) - valid_split\n if diff > 0:\n propagate(valid_indices_list, train_indices_list)\n elif diff < 0:\n diff *= -1\n propagate(train_indices_list, valid_indices_list)\n merged_train_indices: List[int] = sorted(set(sum(train_indices_list, [])))\n merged_valid_indices: List[int] = sorted(set(sum(valid_indices_list, [])))\n train_indices = np.array(merged_train_indices)\n valid_indices = np.array(merged_valid_indices)\n\n save(train_indices, max(1, num_jobs), DatasetSplit.TRAIN)\n save(valid_indices, max(1, num_jobs // 2), DatasetSplit.VALID)\n return tgt_folder, extra_labels", "def copyFiles(img, lbl):\n if not os.path.exists(os.path.join(out_root_dir, \"images\")):\n os.makedirs(os.path.join(out_root_dir, \"images\"))\n if not os.path.exists(os.path.join(out_root_dir,\"labels\")):\n os.makedirs(os.path.join(out_root_dir, \"labels\"))\n # copy\n for i,f in enumerate(img):\n img_dstdir = os.path.join(out_root_dir, \"images\", os.path.basename(f))\n lbl_srcdir = os.path.join(in_root_dir, \"labels\", os.path.basename(f))\n lbl_dstdir = os.path.join(out_root_dir, \"labels\", os.path.basename(f))\n \n # copy images and labels\n try:\n # copy image\n copyfile(f, img_dstdir)\n except:\n print(f\"Error trying copy image file {f}\")\n \n try:\n # copy label\n copyfile(lbl_srcdir, lbl_dstdir)\n except:\n print(f\"Error trying copy label file {lbl_srcdir}\")", "def save(file: str, name: str, kind: str, folder=None, convert=False):\n\n # all folders in the project\n base_folders = 
sublime.active_window().folders()\n # create the image folder in the first folder\n image_folder = osp.join(base_folders[0], IMAGE_FOLDER_NAME)\n # exact or converted copy of the image\n copy = osp.join(image_folder, name)\n # a relative version of the image_folder for display in the status message\n image_folder_rel = osp.relpath(\n image_folder, osp.dirname(base_folders[0]))\n\n if osp.exists(copy):\n sublime.status_message(\"%s is already in %s\" %\n (name, image_folder_rel))\n return\n\n if kind == \"file\" and folder:\n sublime.status_message(\"%s is already in %s\" %\n (name, osp.relpath(osp.dirname(file), folder)))\n return\n\n ch_rec = check_recursive(base_folders, name)\n if ch_rec:\n folder, root = ch_rec\n sublime.status_message(\"%s is already in %s\" %\n (name, osp.relpath(root, folder)))\n return\n\n if not osp.exists(image_folder):\n os.mkdir(image_folder)\n\n if convert:\n # create a converted copy\n magick(file, copy)\n else:\n # create an exact copy\n shutil.copyfile(file, copy)\n\n sublime.status_message(\"%s saved in %s\" % (name, image_folder_rel))", "def save(file: str, name: str, kind: str, folder=None, convert=False):\n\n # all folders in the project\n base_folders = sublime.active_window().folders()\n # create the image folder in the first folder\n image_folder = osp.join(base_folders[0], Settings.image_folder_name)\n # exact or converted copy of the image\n copy = osp.join(image_folder, name)\n # a relative version of the image_folder for display in the status message\n image_folder_rel = osp.relpath(image_folder, osp.dirname(base_folders[0]))\n\n if osp.exists(copy):\n sublime.status_message(\"%s is already in %s\" % (name, image_folder_rel))\n return\n\n if kind == \"file\" and folder:\n sublime.status_message(\"%s is already in %s\" % (name, osp.relpath(osp.dirname(file), folder)))\n return\n\n ch_rec = check_recursive(base_folders, name)\n if ch_rec:\n folder, root = ch_rec\n sublime.status_message(\"%s is already in %s\" % (name, osp.relpath(root, folder)))\n return\n\n if not osp.exists(image_folder):\n os.mkdir(image_folder)\n\n if convert:\n # create a converted copy\n magick(file, copy)\n else:\n # create an exact copy\n shutil.copyfile(file, copy)\n\n sublime.status_message(\"%s saved in %s\" % (name, image_folder_rel))", "def _save_image(self, image_name, image, output_dir):\n dst = '{}/{}'.format(output_dir, self._image_filename(image_name))\n os.makedirs(output_dir, exist_ok=True)\n try:\n with open(dst, 'wb') as f:\n for chunk in image.save(named=self.image_registry_name(image_name)):\n f.write(chunk)\n log.info('Image {} saved as {}'.format(image_name, dst))\n except Exception as err:\n if os.path.isfile(dst):\n os.remove(dst)\n raise err", "def copy_to_folder(destination_folder, source_dir_list, use_prefix=True):\n prefix_dict = {\"AD\": \"ad\", \"MCI\": \"mci\", \"CN\": \"cn\"}\n for dir_path in source_dir_list:\n patient_type = os.path.basename(os.path.dirname(dir_path))\n prefix = prefix_dict[patient_type]\n dir_basename = prefix + \"_\" + os.path.basename(dir_path)\n target_path = os.path.join(destination_folder, dir_basename)\n copytree(dir_path, target_path)", "def subsample_imageset(self, source_folder_name, destination_folder_name, sample_step=4):\n photo_list = self.get_photo_list(source_folder_name)\n for i in range(0, len(photo_list), sample_step):\n copyfile(source_folder_name + '/' + photo_list[i], destination_folder_name + '/' + photo_list[i])", "def add(name = None, file_path = None, folder = False):\n if not folder:\n if name is 
None:\n name = input(\"No name found. Please enter your name: \")\n\n if file_path is None:\n print(\"No file path found. Taking picture.\")\n img_array = take_picture()\n else:\n img_array = io.imread(\"pic_file_path\")\n\n add_image(name, img_array)\n\n else:\n if name is None:\n name = input(\"No name found. Please enter name person in folder contents: \")\n\n if file_path is None:\n file_path = input(\"No file path found. Please enter file path: \")\n\n for filename in os.listdir(file_path):\n if filename.endswith(\".pkl\"):\n img_array = io.imread(filename)\n add_image(name, img_array)", "def setup_image_folder(path_to_images):\n\n print(\"setup images folder...\")\n\n if os.path.isdir(path_to_images):\n print(\"folder already exists: remove...\")\n shutil.rmtree(path_to_images)\n\n os.mkdir(path_to_images)\n print(\"folder created\")", "def save_output_image_to_directory(self):\n curr_directory = os.path.dirname(os.path.abspath(__file__))\n images_dir = curr_directory + \"/images/\"\n if not os.path.exists(images_dir):\n os.makedirs(images_dir)\n self.output_image_name = md5(str(uuid4()).encode()).hexdigest() + \".png\"\n image_file_name = images_dir + self.output_image_name\n self.output_image.save(image_file_name)\n logger.info(\"Image file saved locally : %s\", image_file_name)", "def merge_folders():\r\n from shutil import copyfile\r\n # Merge all folders into main folder\r\n grp_img_dir = os.listdir('Group_Test_Images')\r\n \r\n for grp_img_folder in grp_img_dir:\r\n image_folders = os.listdir('Group_Test_Images'+'/'+grp_img_folder)\r\n \r\n for img_label in image_folders:\r\n new_directory = 'Group_Test_Images'+'/'+img_label\r\n \r\n try:\r\n os.makedirs(new_directory)\r\n except FileExistsError:\r\n # directory already exists\r\n pass\r\n \r\n file_names = os.listdir('Group_Test_Images'+'/'+grp_img_folder+'/'+img_label)\r\n \r\n for file in file_names:\r\n copyfile('Group_Test_Images'+'/'+grp_img_folder+'/'+img_label+'/'+file, new_directory+'/'+file)", "def save_image(image: FileStorage, folder: str = None, name: str = None) -> str:\n return IMAGE_SET.save(image, folder, name)", "def move(self, **kwargs):\n if os.path.exists(self.old_artifact_path):\n if os.path.exists(self.target):\n shutil.rmtree(self.target)\n log.info(\"Copying %s on the local filesystem\" % self.type)\n shutil.copytree(self.old_artifact_path, self.target)\n else:\n log.warning(\"Not moving docs, because the build dir is unknown.\")", "def copy_one_image( class_id, orig_path, train_dir, test_dir, train_prop ) :\n\n img = skimage.io.imread(orig_path)\n\n img_st = { 'class_id' : class_id,\n 'orig_path' : orig_path,\n 'shape0' : img.shape[0],\n 'shape1' : img.shape[1],\n 'shape2' : img.shape[2] }\n\n dest_dir = train_dir if random.random() < train_prop else test_dir\n fname = os.path.basename( orig_path )\n\n dest_path = \"%s/%s_%02d.ppm\" % (dest_dir, fname[:-4], class_id )\n\n shutil.copy( orig_path, dest_path )\n return img_st, dest_path", "def copierImage(imageSrc, cheminDst):\n\tfichierImage = imageSrc.getFilename()\n\tnomImage = Blender.sys.basename(fichierImage)\n\tcheminImage = Blender.sys.dirname(fichierImage)\n\timageSrc.setFilename(Blender.sys.join(cheminDst, nomImage))\n\timageSrc.save()\n\timageSrc.setFilename(fichierImage)\n\treturn nomImage", "def move_images(self, image_subdirs, location, folders=True):\n image_files = []\n\n for image in image_subdirs:\n if folders:\n destination = os.path.join(location, image_subdirs[image], os.path.basename(image)) \n else:\n destination = 
os.path.join(location, os.path.basename(image))\n\n for subdir in os.listdir(location):\n image_file = os.path.join(self.image_location, subdir, os.path.basename(image))\n try: \n shutil.copyfile(image_file, destination)\n break\n except Exception:\n pass # We've checked the wrong directory.\n \n image_files.append(image)\n\n return image_files", "def copydir(self, destination, **kwargs):\n assert _os.path.isdir(self.__str__()) == True\n _shutil.copy(self.__str__(), destination, **kwargs)", "def rename(source_dir,dest_dir):\n keep_going(text=\"This script will backup the original folder to dest_dir/Source/** and remove the original folder. It will make copies of the original files and rename them in directories called Darks, Flats, etc. Do you wish to continue? Answer Y or N.\")\n\n ## Backup Original Source Folder\n dir_util.copy_tree(source_dir, dest_dir + '/Source')\n\n data = []\n for file in os.listdir(\"./\" + source_dir): # put in your path directory\n if file.endswith(\".fits\"): # what does the file end with?\n data.append(os.path.join(source_dir, file))\n\n n = len(data)\n obj, itime, filt, renamed, datemod, count, flatmod, mod = ([] for i in range(8))\n for i in range(0, n):\n header = fits.getheader(data[i])\n Name, Date, Number, Ext = data[i].split(\".\")\n obj.append(header['OBJECT'])\n itime.append(header['ITIME'])\n filt.append(header['FWINAME'])\n mod.append((header['OBJECT'] + header['FWINAME']))\n flatmod.append((header['OBJECT'] + header['FWINAME'] + Date))\n datemod.append(datetime.strptime(Date, \"%Y%m%d\").date())\n if flatmod[i] in flatmod:\n count = flatmod.count(flatmod[i])\n if ('Lamp' in obj[i] or 'Flat' in obj[i]):\n renamed.append((dest_dir + '/Flats/' + str(datemod[i]) + '/' + 'K' + header['OBJECT'] + str(count) + \".fits\"))\n os.makedirs(os.path.dirname(dest_dir + '/Flats/' + str(datemod[i]) + '/'), exist_ok=True)\n elif ('Dark' in obj[i]) or ('dark' in obj[i]):\n renamed.append((dest_dir + '/Darks/' + str(datemod[i]) + '/' + 'K' + header['OBJECT'] + str(count) + \".fits\"))\n os.makedirs(os.path.dirname(dest_dir + '/Darks/' + str(datemod[i]) + '/'), exist_ok=True)\n elif ('Sky' in obj[i]) or ('sky' in obj[i]):\n renamed.append((dest_dir + '/Skys/' + str(datemod[i]) + '/' + 'K' + header['OBJECT'] + header['FWINAME'] + str(\n count) + \".fits\"))\n os.makedirs(os.path.dirname(dest_dir + '/Skys/' + str(datemod[i]) + '/'), exist_ok=True)\n else:\n renamed.append((dest_dir + '/Objects/' + header['OBJECT'].upper() + '/' + str(datemod[i]) + '/' + 'K' + list(header['CAMNAME'])[0].title() + header['OBJECT'].upper() +\n header['FWINAME'] + str(\n count) + \".fits\"))\n os.makedirs(os.path.dirname(dest_dir + '/Objects/' + header['OBJECT'].upper() + '/' + str(datemod[i]) + '/'), exist_ok=True)\n os.rename(data[i], renamed[i])\n\n ## REMOVE LEFT OVER original Folders\n shutil.rmtree(source_dir)\n\n lists = [data, mod, datemod, itime, flatmod, renamed]\n data_headers = pd.concat([pd.Series(x) for x in lists], axis=1)\n\n return data_headers", "def copy_image(src_project, img1, dest_project, img2):\n with BMI(_username, _password, src_project) as bmi:\n ret = bmi.copy_image(img1, dest_project, img2)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo(\"Success\")\n else:\n click.echo(ret[constants.MESSAGE_KEY])", "def copy(self, dest, overwrite=False):\n dest = normpath(dest)\n try:\n remote = get_remote(dest)\n except ValueError: # Nothing exists at dest, nothing to worry about.\n remote = None\n else: # Something exists here.\n if not overwrite:\n raise 
ValueError(\"Something exists at %s\" % remote.uri)\n try:\n if self.hash == remote.hash: # Nothing to update.\n pdbox.info(\n \"%s and %s are identical\" % (self.uri, remote.uri),\n )\n return\n except AttributeError: # RemoteFolder doesn't have a hash.\n pass\n\n if not pdbox._args.get(\"dryrun\"):\n if overwrite and remote:\n # There's no way to copy and overwrite at the same time,\n # so delete the existing file first.\n remote.delete()\n\n result = execute(pdbox.dbx.files_copy_v2, self.path, dest)\n pdbox.debug(\"Metadata respones: %s\" % result.metadata)\n\n pdbox.info(\"Copied %s to %s\" % (self.uri, dbx_uri(dest)))\n if not pdbox._args.get(\"dryrun\"): # Return the newly created object.\n return get_remote(None, meta=result.metadata)", "def _copy_metadata(from_dir, to_dir):\n if not FLAGS.dry_run:\n tf.io.gfile.makedirs(to_dir)\n for fname in tfds.core.utils.list_info_files(from_dir):\n from_path = os.path.join(from_dir, fname)\n to_path = os.path.join(to_dir, fname)\n logging.info('cp %s %s', from_path, to_path)\n if not FLAGS.dry_run:\n tf.io.gfile.copy(from_path, to_path, overwrite=True)" ]
[ "0.5375309", "0.51825666", "0.508288", "0.5036496", "0.49278748", "0.49242926", "0.49131545", "0.48717657", "0.4862905", "0.4858619", "0.48510352", "0.48286095", "0.48278707", "0.48068732", "0.47598246", "0.47391027", "0.47248337", "0.4705147", "0.47013077", "0.4696376", "0.46732405", "0.46695673", "0.46671525", "0.46511748", "0.46298674", "0.4619787", "0.46127605", "0.46116522", "0.45807484", "0.45795837" ]
0.595224
0
Write the metadata from the library for imageId to filePath. If filePath is None, write it to the photo in the library. If use_faces is True, iPhoto face names will be written to keywords.
def writePhotoMD(self, imageId, filePath=None):
    try:
        image = self.images[imageId]
    except KeyError:
        raise iPhotoLibraryError, "Can't find image #%s" % imageId
    if not filePath:
        if self.originals:
            if "OriginalPath" in image:
                mFilePath = image["OriginalPath"]
            else:
                mFilePath = image["ImagePath"]
        else:
            if not "ImagePath" in image:
                mFilePath = image["OriginalPath"]
            else:
                mFilePath = image["ImagePath"]
    caption = image.get("Caption", None)
    rating = image.get("Rating", None)
    comment = image.get("Comment", None)
    keywords = set([self.keywords[k] for k in image.get("Keywords", [])])
    if self.use_faces:
        keywords.update([self.faces[f['face key']]
                         for f in image.get("Faces", [])
                         if self.faces.has_key(f['face key'])]
        )
    if caption or comment or rating or keywords:
        try:
            md = pyexiv2.ImageMetadata(filePath)
            md.read()
            if caption:
                md["Iptc.Application2.Headline"] = [caption]
            if rating:
                md["Xmp.xmp.Rating"] = rating
            if comment:
                md["Iptc.Application2.Caption"] = [comment]
            if keywords:
                md["Iptc.Application2.Keywords"] = list(keywords)
            if not self.test:
                md.write(preserve_timestamps=True)
            return True
        except IOError, why:
            self.status("\nProblem setting metadata (%s) on %s\n" % (
                unicode(why.__str__(), errors='replace'), filePath
            ))
            return False
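For context, a minimal standalone sketch of the pyexiv2 calls the method above relies on; the file path and the metadata values are illustrative assumptions, not taken from the library.

import pyexiv2

# Hypothetical example file and values; only the keys and calls mirror writePhotoMD.
md = pyexiv2.ImageMetadata("/tmp/IMG_0001.jpg")
md.read()
md["Iptc.Application2.Headline"] = ["Beach day"]           # caption
md["Xmp.xmp.Rating"] = 4                                   # star rating
md["Iptc.Application2.Caption"] = ["Low tide at sunset"]   # comment
md["Iptc.Application2.Keywords"] = ["Alice", "vacation"]   # keywords, incl. face names
md.write(preserve_timestamps=True)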
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addPhoto(fileName, personName):\n\n #Check if image is a jpg\n if (fileName[-4:] != \".jpg\"):\n print(\"\\n[!] File extenstion must be .jpg!\\n\")\n return\n\n #Check image exists\n if (not os.path.isfile(fileName)):\n print(\"\\n[!] File does not exist!\\n\")\n return\n\n #Check no illegal characters in file name\n for c in ILLEGAL_FILE_NAMES:\n if (c in personName):\n print(\"\\n[!] Provided name contains an illegal argument\\n\")\n return\n\n #Load image\n image = face_recognition.load_image_file(fileName)\n\n #Use the name in the filename as the identity key\n identity = os.path.splitext(os.path.basename(fileName))[0]\n\n #Get the face location\n locationsHog = hogDetectFaceLocations(image)\n\n locationsHaar = haarDetectFaceLocations(image)\n\n #Get the face encoding\n encodingsHaar = face_recognition.face_encodings(image, locationsHaar)\n encodingsHog = face_recognition.face_encodings(image, locationsHog)\n\n #check if exactly one face is in the photo\n if ((len(encodingsHaar) == 0) or (len(encodingsHog) == 0)):\n print(\"\\n[!] No face detected in the provided photo\\n\")\n return\n\n elif ((len(encodingsHaar) > 1) or (len(encodingsHog) > 1)):\n print(\"\\n[!] More than one face detected in the provided photo\\n\")\n return\n\n #Set path to respective dataset\n directoryToAddTo = DATABASE_PATH + personName\n\n #Look for directory\n exists = False\n for subdir, dirs, files in os.walk(DATABASE_PATH):\n if (subdir == directoryToAddTo):\n exists = True\n\n #If directory doesnt exist, make it\n if (not exists):\n os.mkdir(directoryToAddTo)\n\n #Save data to file\n np.savetxt((directoryToAddTo + \"/\" + identity + \"Haar.txt\"),\n encodingsHaar[0])\n np.savetxt((directoryToAddTo + \"/\" + identity + \"Hog.txt\"),\n encodingsHog[0])\n\n print(\"\\n[*] Face successfully added!\\n\")", "def write_image(self, image_name, image):\n raise NotImplementedError", "def write_image(self, name: str, image_path: str):\n # TODO: implement\n raise NotImplementedError(\"We are working on this!\")", "def write(self, instream: typ.BinaryIO, filepath: str,\r\n filename: str = None) -> None:\r\n if filename is not None:\r\n filename = path.basename(filename)\r\n if self.fs_type == 'FAT':\r\n allocator_metadata = self.fs.write(instream, filepath)\r\n self.metadata.add_file(filename, allocator_metadata)\r\n elif self.fs_type == 'NTFS':\r\n allocator_metadata = self.fs.write(instream, filepath)\r\n self.metadata.add_file(filename, allocator_metadata)\r\n else:\r\n raise NotImplementedError()", "def add_image_face():\n\n try:\n img = decode_image(request.files[\"image\"].read())\n except Exception as e:\n log.error(e)\n data = {\"error\": \"Error while loading image\"}\n return jsonify(data), 500\n save_picture = False\n if request.args.get(\"save\") == \"true\":\n save_picture = True\n \n face_img, _ = processor.extract_faces()\n #TODO\n #1. get several images if possible\n #2. save face_img array as picture if save_picture == True\n #3. pipe face_img array to embedder --> embedder needs to be modified to not from a folder, but from array of face_img\n #4. 
get the embedder result, insert to a pickle object --> can be section ID, or whatever", "def save(self, path, filename=None, overwrite=False):\n \n if filename is None and self.metadata is None:\n raise ValueError(\"If the image has no 'metadata', you must specify a filename\")\n elif filename is not None:\n pass\n elif filename is None and self.metadata is not None:\n filename = os.path.basename(self.metadata[\"pfilename\"])\n \n full_image_path = os.path.join(path, filename)\n \n if overwrite and os.path.exists(full_image_path):\n os.remove(full_image_path)\n \n self.fits.writeto(full_image_path)", "def new_face_metadata(face_image, name = None, camera_id = None, confidence = None, print_name = False):\n #camera_id = source_info.get('camera_id', None)\n #confidence = source_info.get('confidence', None)\n #source_type = source_info.get('source_type', None)\n\n if name is None:\n #source_info['name'] = source_info['camera_id'] + '_' + source_info['source_type'] + '_' + str(get_timestamp())\n name = camera_id + '_' + str(com.get_timestamp())\n else:\n if print_name:\n print('Saving face: {}'.format(name))\n\n today_now = datetime.now()\n\n return {\n 'name': name,\n 'face_id': 0,\n 'camera_id': camera_id,\n 'first_seen': today_now,\n 'first_seen_this_interaction': today_now,\n 'image': face_image,\n 'confidence': confidence,\n 'last_seen': today_now,\n 'seen_count': 1,\n 'seen_frames': 1\n }\n # 'source_type': source_type,", "def write(self, image):\n raise NotImplementedError()", "def proc_fid(out, fid):\n img = imageHash.get(fid, [])\n if len(img) == 2:\n out[img[\"mime\"]] = (img[\"img\"], fid)\n return out", "def write(self, path):\n\n annotation = copy.deepcopy(self.annotation)\n\n for image_info in annotation['images']:\n image_info['file_name'] = os.path.relpath(image_info['file_name'],\n os.path.dirname(path))\n\n with open(path, 'w') as read_file:\n json.dump(annotation, read_file)", "def write_stewicombo_metadata(file_name, metadata_dict, category=''):\n meta = set_stewicombo_meta(file_name, category=category)\n meta.tool_meta = metadata_dict\n write_metadata_to_file(paths, meta)", "def write(self, filename):\n\n self.__image.save(filename)", "def saveIntermidiary(self, filepath: str):\n if self.intermediaryImage is None:\n print('No intermidiary image, try run find first')\n return\n self.save(filepath, self.intermediaryImage)", "def add_image(self, file_name, content):\n self.face_fs.put(content, filename=file_name)", "def _writeDataToFile(self,idata,resultkeys,path,imgname, filename):\n\t\tfilepathname = path+'/'+filename\n\t\tif os.path.exists(filepathname):\n\t\t\tos.remove(filepathname)\n\t\tif idata is None:\n\t\t\treturn\n\t\tresultfile=open(filepathname,'w')\n\t\tresultlines=[]\n\t\tfor info in idata:\n\t\t\tresultline = ''\n\t\t\tfor infokey in resultkeys:\n\t\t\t\ttry:\n\t\t\t\t\t# For data object, save in file as its dbid\n\t\t\t\t\tresult = info[infokey].dbid\n\t\t\t\texcept:\n\t\t\t\t\tresult = info[infokey]\n\n\t\t\t\t# For image, save in file as its filename\n\t\t\t\tif infokey == 'image':\n\t\t\t\t\tresult=imgname\n\n\t\t\t\t# Separate the results by tabs\n\t\t\t\ttry:\n\t\t\t\t\tresultline += str(result) + '\\t'\n\t\t\t\texcept:\n\t\t\t\t\tresultline += '\\t'\n\t\t\tresultlines.append(resultline)\n\t\tresultlinestxt = '\\n'.join(resultlines) +\"\\n\"\n\t\tresultfile.write(resultlinestxt)\n\t\tresultfile.close()", "def writeToMetadata(self, context):\n pass", "def set_png_metadata(self, png_filename, metadata):\n\t\tim = Image.open(png_filename)\n\t\t\n\t\t# This 
hack works-around PIL's broken png metadata support. Disovered here:\n\t\t# http://blog.client9.com/2007/08/python-pil-and-png-metadata-take-2.html\n\t\tmeta = PngImagePlugin.PngInfo()\n\t\t\n\t\t# These meta-data entries are added (eroneously) by PIL, ignore them\n\t\treserved = ('interlace', 'gamma', 'dpi', 'transparency', 'aspect')\n\t\t\n\t\t# Add in the new metadata\n\t\timg_metadata = im.info.copy()\n\t\timg_metadata.update(metadata)\n\t\t\n\t\t# Add to the PNG\n\t\tfor k,v in img_metadata.iteritems():\n\t\t\tif k not in reserved:\n\t\t\t\tmeta.add_text(k,v)\n\t\t\n\t\t# Write it out\n\t\tim.save(png_filename, pnginfo=meta)", "def save_png_with_metadata(fig, filename, fig_kwds, kwds):\n from PIL import Image, PngImagePlugin\n fig.savefig(filename, **fig_kwds)\n \n im = Image.open(filename)\n meta = PngImagePlugin.PngInfo()\n \n for key in kwds:\n meta.add_text(str(key), str(kwds[key])) \n \n im.save(filename, \"png\", pnginfo=meta)", "def write_metadata_to_file(self, path):\n return write_metadata_to_ma_file(path, self)", "def write(img, path):\n create_directories_for_file_name(path)\n writer = sitk.ImageFileWriter()\n writer.Execute(img, path, True)", "def write_metadata(config, train_image_metadata, val_image_metadata):\n with open(config.ImageDataConfig.preprocessed_image_metadata_filename, 'wb') as f:\n pickle.dump({'train': train_image_metadata, 'val': val_image_metadata}, f, pickle.HIGHEST_PROTOCOL)", "def save(self, filepath=None):\n # Create dictionary for all metadata, options, and profile \n data_dict = {\n \"total_samples\": self.total_samples,\n \"encoding\": self.encoding,\n \"file_type\": self.file_type,\n \"row_has_null_count\": self.row_has_null_count,\n \"row_is_null_count\": self.row_is_null_count,\n \"hashed_row_dict\": self.hashed_row_dict,\n \"_samples_per_update\": self._samples_per_update,\n \"_min_true_samples\": self._min_true_samples,\n \"options\": self.options,\n \"chi2_matrix\": self.chi2_matrix,\n \"_profile\": self.profile,\n \"_col_name_to_idx\": self._col_name_to_idx,\n \"times\": self.times,\n }\n\n self._save_helper(filepath, data_dict)", "def save_asset_metadata(self, asset_metadata, user_id, import_only=False):\n return self.save_asset_metadata_list([asset_metadata, ], user_id, import_only)", "def save(self, filepath=None):\n # Create dictionary for all metadata, options, and profile\n data_dict = {\n \"total_samples\": self.total_samples,\n \"sample\": self.sample,\n \"encoding\": self.encoding,\n \"file_type\": self.file_type,\n \"_samples_per_update\": self._samples_per_update,\n \"_min_true_samples\": self._min_true_samples,\n \"_empty_line_count\": self._empty_line_count,\n \"memory_size\": self.memory_size,\n \"options\": self.options,\n \"_profile\": self.profile,\n \"times\": self.times,\n }\n self._save_helper(filepath, data_dict)", "def imwrite(filename, img, *args, **kwargs):\n ext = os.path.splitext(filename)[1]\n if ext.lower() == '.pfm':\n write_pfm(filename, img, *args, **kwargs)\n else:\n cv2.imwrite(filename, img, *args, **kwargs)", "def save(self, filepath):\n self.drawer.flush()\n self.img.save(filepath)", "def _update_metadata_imagedata(metadata, out_filebase, i):\n metadata['FITSImageFilename'] = [out_filebase + FITS_EXT]\n metadata['PNGImageFileName'] = [out_filebase + PNG_EXT]\n metadata['PNGThumbNailFileName'] = [out_filebase + '_tnail' + PNG_EXT]\n\n image_keys = [\"IntegrationTime\", \"RightAscension\", \"Declination\",\n \"DecRa\", \"Targets\", \"KatpointTargets\"]\n for key in image_keys:\n metadata[key] = 
[metadata[key][i]]", "def save(self, data, file_id=None, metadata={}):\n pass", "def save_plot(self, outdir, end, fid=None, hypo=None,\n fhkey=None, truth=None):\n import matplotlib.pyplot as plt\n plt.rcParams['text.usetex'] = True\n save_name = \"\"\n if hasattr(self, 'labels') and \\\n not self.analysis_type == 'profile_scan':\n if isinstance(self.labels, dict):\n wanted_labels = self.labels[self.labels.keys()[0]]\n else:\n wanted_labels = self.labels\n if truth is None:\n if wanted_labels.dict['data_name'] == '':\n save_name += \"data_\"\n else:\n save_name += \"true_%s_\"%wanted_labels.dict['data_name']\n else:\n save_name += \"true_%s_\"%truth \n if self.detector is not None:\n save_name += \"%s_\"%self.detector\n if self.selection is not None:\n save_name += \"%s_\"%self.selection\n if ((fid is not None) and (hypo is not None)) and (fhkey is not None):\n raise ValueError(\n \"Got a fid, hypo and fhkey specified. Please use fid \"\n \"and hypo OR fhkey (from which fid and hypo will be \"\n \"extracted) but not both.\"\n )\n if fid is not None:\n save_name += \"fid_%s_\"%wanted_labels.dict['%s_name'%fid]\n if hypo is not None:\n if hypo == 'both':\n save_name += \"both_hypos_%s_%s_\"%(\n wanted_labels.dict['h0_name'],\n wanted_labels.dict['h1_name']\n )\n else:\n save_name += \"hypo_%s_\"%wanted_labels.dict['%s_name'%hypo]\n if fhkey is not None:\n hypo = self.get_hypo_from_fiducial_hypo_key(fhkey=fhkey)\n fid = self.get_fid_from_fiducial_hypo_key(fhkey=fhkey)\n save_name += \"fid_%s_\"%wanted_labels.dict['%s_name'%fid]\n save_name += \"hypo_%s_\"%wanted_labels.dict['%s_name'%hypo]\n save_name += end\n for fileformat in self.formats:\n full_save_name = save_name + '.%s'%fileformat\n plt.savefig(os.path.join(outdir, full_save_name))", "def write_itk_image(image, path):\n\n writer = itk.ImageFileWriter()\n writer.SetFileName(path)\n\n if os.path.splitext(path)[1] == '.nii':\n Warning('You are converting nii, ' + \\\n 'be careful with type conversions')\n\n writer.Execute(image)" ]
[ "0.50928354", "0.50730085", "0.50216943", "0.4995194", "0.49944973", "0.49923807", "0.49398592", "0.49126682", "0.48790294", "0.48692298", "0.4854108", "0.47708428", "0.47395146", "0.4722252", "0.47185796", "0.47097844", "0.47096056", "0.46954182", "0.4680011", "0.46773535", "0.4661166", "0.4646005", "0.46459138", "0.46448928", "0.4624847", "0.4612142", "0.458848", "0.45698646", "0.455357", "0.45519167" ]
0.7204329
0
start(force_restart=0, timeout=60) Start remote slave processes. Description: Start the remote slave interpreters in the cluster. The timeout value is specified in seconds and defaults to 60. The timeout starts counting down only after ssh/rsh has tried to start all the remote processes. This means the actual time spent in the function could be much longer than 60 seconds, depending on how long rsh/ssh takes. It's possible the 60 second timeout will be too short for large clusters, but I hope not! Caveats: start() is not supported on MSWindows because of the lack of standard/robust support for remote startup and background processing in the CMD shell.
def start(self,force_restart=0,timeout=60):
    if not force_restart and self.is_running():
        return
    # start the worker processes.
    for worker in self.workers:
        worker.start_server()
    if not self.is_running():
        print ' Starting Servers'
        print ' |----|----|----15---|----|----30---|----|----45---' \
              '|----|----60'
        print '0.',
        stop_watch = timer()
        stop_watch.start()
        minute = 0
        import sys
        while not self.is_running():
            if stop_watch.current_lap() > 1:
                sys.stdout.write('.')
                stop_watch.mark_lap()
            elapsed = stop_watch.elapsed()
            if (elapsed - minute * 60) > 60:
                minute = minute + 1
                print
                print minute,
            if elapsed > timeout:
                raise TimeoutError
        print 'servers running!'
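A hedged usage sketch of start(); the cluster class name and the worker host list are assumptions made for illustration, and only the start() behaviour comes from the code above.

# Hypothetical: 'machine_cluster' and the (host, port) pairs are illustrative only.
cluster = machine_cluster([('node1', 10000), ('node2', 10000)])
try:
    # Blocks until every remote slave interpreter is reachable, or raises
    # TimeoutError once the 120 second limit is exceeded (the default is 60).
    cluster.start(force_restart=0, timeout=120)
except TimeoutError:
    print 'slave interpreters did not come up in time'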
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start_slave_worker():\n print(\"Starting slave worker\")\n r = req.patch(f\"{SLAVE_API_URL}/formation/worker\", json=API_PAYLOAD_1, headers=SLAVE_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to start the worker dyno on slave\")\n print(r.text)\n return False\n #wait a bit for the worker process to start up\n print(\"Waiting a bit\")\n time.sleep(10)\n return True", "def start_node(self, node, override_cfg_params=None):\n node.account.mkdirs(RedpandaService.DATA_DIR)\n node.account.mkdirs(os.path.dirname(RedpandaService.CONFIG_FILE))\n\n self.write_conf_file(node, override_cfg_params)\n\n if self.coproc_enabled():\n self.start_wasm_engine(node)\n\n cmd = (f\"nohup {self.find_binary('redpanda')}\"\n f\" --redpanda-cfg {RedpandaService.CONFIG_FILE}\"\n f\" --default-log-level {self._log_level}\"\n f\" --logger-log-level=exception=debug:archival=debug \"\n f\" --kernel-page-cache=true \"\n f\" --overprovisioned \"\n f\" --smp {self._num_cores} \"\n f\" --memory 6G \"\n f\" --reserve-memory 0M \"\n f\" >> {RedpandaService.STDOUT_STDERR_CAPTURE} 2>&1 &\")\n\n node.account.ssh(cmd)\n\n wait_until(\n lambda: Admin.ready(node).get(\"status\") == \"ready\",\n timeout_sec=RedpandaService.READY_TIMEOUT_SEC,\n err_msg=f\"Redpanda service {node.account.hostname} failed to start\",\n retry_on_exc=True)", "def start_frida(self, daemonize=True, restart=False):\n if not self.available():\n return False\n\n if self.is_frida_running():\n if not restart:\n return True\n\n self.kill_frida()\n\n if not daemonize:\n if self._alternate_frida_name:\n result = self.su_cmd('frida-server &')\n else:\n result = self.su_cmd('frida &')\n else:\n # with nox it starts frida fine but keeps running\n # without return so it needs some timeout here\n if self._alternate_frida_name:\n result = self.su_cmd('frida-server -D', timeout=5)\n else:\n result = self.su_cmd('frida -D', timeout=5)\n\n if result and 'Unable to start server' in result:\n return False\n\n return self.is_frida_running()", "def restart(verbose=False, force=False):\n\n _prepare_execution(verbose)\n _validate_components_prepared('restart')\n _validate_force(force, 'restart')\n\n stop(verbose, force)\n start(verbose)\n _print_time()", "def launch_engines(engines:int, profile:str):\n print(ColorText(f\"\\nLaunching ipcluster with {engines} engines...\").bold())\n\n def _launch(engines, profile):\n subprocess.call([shutil.which('ipcluster'), 'start', '-n', str(engines), '--daemonize'])\n\n # first see if a cluster has already been started\n started = False\n try:\n print(\"\\tLooking for existing engines ...\")\n lview,dview = get_client(profile=profile)\n if len(lview) != engines:\n lview,dview = wait_for_engines(engines, profile)\n started = True\n except (OSError, ipyparallel.error.NoEnginesRegistered, ipyparallel.error.TimeoutError):\n print(\"\\tNo engines found ...\")\n\n # if not, launch 'em\n if started is False:\n print(\"\\tLaunching engines ...\")\n # pid = subprocess.Popen([shutil.which('ipcluster'), 'start', '-n', str(engines)]).pid\n x = threading.Thread(target=_launch, args=(engines,profile,), daemon=True)\n x.daemon=True\n x.start()\n lview,dview = wait_for_engines(engines, profile)\n\n return lview,dview", "def start_remote_ipcluster(\n n,\n profile=\"pbs\",\n hostname=\"hpc05\",\n username=None,\n password=None,\n env_path=None,\n timeout=300,\n):\n if env_path is None:\n env_path = \"\"\n python_exec = \"python\"\n else:\n python_exec = os.path.join(env_path, \"bin\", \"python\")\n\n with setup_ssh(hostname, username, 
password) as ssh:\n cmd = f\"import hpc05; hpc05.start_ipcluster({n}, '{profile}', '{env_path}', {timeout})\"\n cmd = f'{python_exec} -c \"{cmd}\"'\n stdin, stdout, stderr = ssh.exec_command(cmd, get_pty=True)\n wait_for_succesful_start(stdout, timeout=timeout)", "def restart(verbose=False, force=False):\n\n _load_config_and_logger(verbose)\n _validate_manager_installed('restart')\n _validate_force(force, 'restart')\n\n stop(verbose, force)\n start(verbose)\n _print_time()", "def start_ipcluster(n, profile, env_path=None, timeout=300):\n log_file_pattern = os.path.expanduser(\n f\"~/.ipython/profile_{profile}/log/ipcluster-*.log\"\n )\n for f in glob.glob(log_file_pattern):\n # Remove old log files.\n os.remove(f)\n\n pid_pattern = os.path.expanduser(f\"~/.ipython/profile_{profile}/pid/*\")\n for f in glob.glob(pid_pattern):\n # Remove old pid files.\n os.remove(f)\n\n ipcluster = \"ipcluster\"\n if env_path:\n ipcluster = os.path.join(os.path.expanduser(env_path), \"bin\", ipcluster)\n\n print(f\"Launching {n} engines in a ipcluster.\")\n cmd = f\"{ipcluster} start --profile={profile} --n={n} --log-to-file --daemonize &\"\n\n # For an unknown reason `subprocess.Popen(cmd.split())` doesn't work when\n # running `start_remote_ipcluster` and connecting to it, so we use os.system.\n os.system(cmd + (\"> /dev/null 2>&1\" if not VERBOSE else \"\"))\n for i in range(timeout):\n print_same_line(f\"Waiting for {i} seconds for the log-file.\")\n time.sleep(1) # We wait a bit since we need the log file to exist\n\n # We don't PIPE stdout of the process above because we need a detached\n # process so we tail the log file.\n with suppress(IndexError):\n log_file = glob.glob(log_file_pattern)[0]\n break\n print(f\"Found the log-file ({log_file}) in {i} seconds.\")\n\n wait_for_succesful_start(log_file, timeout=timeout)", "def run_cluster(autoscaling: bool = False, **options) -> None:\n if autoscaling:\n thread = AutoScalingCluster.new(**options)\n else:\n thread = RemoteCluster.new(**options)\n try:\n thread.join()\n except Exception:\n thread.stop()\n raise", "def start():\n\n start_server()", "def run_with_exceptions(self: RemoteCluster) -> None:\n self.server.start()\n time.sleep(2) # NOTE: give the server a chance to start\n log.debug(f'Launching clients: {self.client_argv}')\n self.clients = Popen(self.client_argv, shell=True, stdout=sys.stdout, stderr=sys.stderr,\n env={**os.environ, **load_task_env()})\n self.clients.wait()\n self.server.join()", "def _start_servers(self):\n for user, host, port in self.server_addresses:\n remoteHost = \"%s@%s\" % (user, host)\n logger.info(\"starting remote server %s:%s\", host, port)\n command = (\"cd ~/goaway;\" +\n \"find . 
-name '*.pyc' -delete ;\" +\n \"DEBUG=true goaway/cmdserver.py %s %s %s >> server.std.log 2>&1\" % (\n host,\n port,\n self._config.remote_path,\n ))\n logger.debug(\"Starting server:%s remoteHost with command:%s\" % (remoteHost, command))\n ## subprocess.call blocks, while subprocces.Popen doesn't block.\n sshPopen = subprocess.Popen([\"ssh\", remoteHost, command],\n shell = False, stdout= subprocess.PIPE, stderr = subprocess.PIPE)\n self._start_local_server()", "def start_remote_and_connect(\n n,\n profile=\"pbs\",\n hostname=\"hpc05\",\n username=None,\n password=None,\n culler=True,\n culler_args=None,\n env_path=None,\n timeout=300,\n folder=None,\n client_kwargs=None,\n kill_old_ipcluster=True,\n):\n if kill_old_ipcluster:\n kill_remote_ipcluster(hostname, username, password, env_path)\n print(\"Killed old intances of ipcluster.\")\n\n start_remote_ipcluster(n, profile, hostname, username, password, env_path, timeout)\n time.sleep(2)\n\n # all arguments for `connect_ipcluster` except `local`.\n return connect_ipcluster(\n n,\n profile=profile,\n hostname=hostname,\n username=username,\n password=password,\n culler=culler,\n culler_args=culler_args,\n env_path=env_path,\n local=False,\n timeout=timeout,\n folder=folder,\n client_kwargs=client_kwargs,\n )", "def run():\n server = current_server()\n server._auto_stop = True\n return start()", "def test_snat_with_kubelet_restart_on_slave(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = self.inputs.k8s_slave_ips)\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def start(\n context,\n user=get_local_user(),\n remote=False,\n instance=None,\n stack=None,\n services=None,\n):\n command = \"start\"\n run_command_with_services(context, user, remote, instance, stack, command, services)", "def _start_server():\n args = [sys.executable] + sys.argv\n args.insert(args.index('wserver'), 'server')\n args.remove('wserver')\n pid = os.spawnv(os.P_NOWAIT, sys.executable, args)\n return pid", "def start_with_retry(self, server, port_name, max_retries,\n expect_launch=True,\n **kwargs):\n launch_msg = None\n for i in range(max_retries):\n exitcode, out, err = server.start(expect_exit=not expect_launch,\n **kwargs)\n name = server.server_name\n self.assertEqual(0, exitcode, \"Failed to spin up the %s server. 
\"\n \"Got: %s\" % (name, err))\n launch_msg = self.wait_for_servers([server], expect_launch)\n if launch_msg:\n server.stop()\n server.bind_port = get_unused_port()\n setattr(self, port_name, server.bind_port)\n else:\n self.launched_servers.append(server)\n break\n self.assertTrue(launch_msg is None, launch_msg)", "def _standby_clone():\n # manualy:\n # $ mkdir -p /var/lib/postgresql/9.1/testscluster/\n # $ rsync -avz --rsh='ssh -p2222' [email protected]:/var/lib/postgresql/9.1/testscluster/ /var/lib/postgresql/9.1/testscluster/\n\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n puts(green('Start cloning the master'))\n repmgr_clone_command = 'repmgr -D %(slave_pgdata_path)s -d %(sync_db)s -p %(cluster_port)s -U %(sync_user)s -R postgres --verbose standby clone %(pgmaster_ip)s' % env\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n res = sudo(repmgr_clone_command, user='postgres')\n if 'Can not connect to the remote host' in res or 'Connection to database failed' in res:\n puts(\"-\" * 40)\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n puts(\"Master server is %s reachable.\" % red(\"NOT\"))\n puts(\"%s you can try to CLONE the slave manually [%s]:\" % (green(\"BUT\"), red(\"at your own risk\")))\n puts(\"On the slave server:\")\n puts(\"$ sudo -u postgres rsync -avz --rsh='ssh -p%(master_ssh_port)s' postgres@%(pgmaster_ip)s:%(master_pgdata_path)s %(slave_pgdata_path)s --exclude=pg_xlog* --exclude=pg_control --exclude=*.pid\" % env)\n puts(\"Here:\")\n puts(\"$ fab <cluster_task_name> finish_configuring_slave\")\n abort(\"STOP...\")", "def _platformix_start(self, context, fake_reply): # TODO: Recursive option to start nested platforms\r\n assert fake_reply is None, \"platformix_start replies shouldn't be faked!\"\r\n if self._worker.running: # If already running - just do nothing\r\n self._reply(context, proto_success(\"already running\", \"state\"), None)\r\n return\r\n if self._worker.stopping: # If in the middle of stop - do nothing\r\n self._reply(context, proto_failure(\"stop is in progress\"), None)\r\n return\r\n new_thread = False\r\n if self._worker.starting: # If were already starting - update reply list\r\n if context not in self._context[\"reply_to\"]:\r\n new_thread = True\r\n self._context[\"reply_to\"].append(context)\r\n self._notify(context, \"waiting\")\r\n else:\r\n new_thread = True\r\n self._worker.starting = True\r\n self._context = {\"action\": \"start\", \"reply_to\": [context],\r\n \"waiting_for\": [], \"wait_ignore\": []}\r\n self._notify(context, \"received start signal\")\r\n # TODO: do recursive start? parent->childs? 
and call only root platforms to get up and running?\r\n # TODO: lock as validation can intersect with stop action since stop can be called from different threads\r\n if not self._validate_context({\"action\": \"start\"}):\r\n return\r\n if not self._worker.starting: # NOTE: in case if starting were interrupted by stop - just return\r\n return\r\n # Update waiting list\r\n self._context[\"waiting_for\"] = [w for w in self._worker.wait if self._worker.farm.is_running(w) is False]\r\n # If there is some platforms to wait - notify about this\r\n if self.waiting_count > 0 and new_thread:\r\n self._worker.register_reply_handler(context,\r\n self._platformix_start_reply_handler, [], {},\r\n timeout=self._worker.start_max_wait, force=True)\r\n self._notify(context, \"waiting\")\r\n # If no one left to wait for - do stop at last\r\n elif not self._worker.start_in_progress and self.waiting_count == 0:\r\n for c in self._context[\"reply_to\"]:\r\n try:\r\n self._worker.unregister_reply_handler(c, True, {}, dont_check=True)\r\n except AssertionError:\r\n pass\r\n self._worker.start_in_progress = True\r\n self._notify_all(self._context[\"reply_to\"], \"launching\")\r\n if self._validate_context({\"action\": \"start\"}):\r\n result = self._worker.start(self._context[\"reply_to\"])\r\n result_error = not isinstance(result, ProtocolReply)\r\n else:\r\n result = None\r\n result_error = False\r\n if self._validate_context({\"action\": \"start\"}):\r\n reply_to = self._context[\"reply_to\"]\r\n self._context = None\r\n else:\r\n return # TODO: probably need to fid a way to reply failure in that case\r\n assert result_error is False, \"Worker should return result as ProtocolReply instance\"\r\n if result is not None:\r\n if result.success:\r\n self._reply_all(reply_to, result, None)\r\n else:\r\n self._reply_all(reply_to, result, None)", "def connect_multiprocess(service=VoidService, config={}, remote_service=VoidService, remote_config={}, args={}):\n from multiprocessing import Process\n\n listener = socket.socket()\n listener.bind((\"localhost\", 0))\n listener.listen(1)\n remote_server = partial(_server, listener, remote_service, remote_config, args)\n t = Process(target=remote_server)\n t.start()\n host, port = listener.getsockname()\n return connect(host, port, service=service, config=config)", "def _start(self):\r\n self.lock_perimeter.set() # don't check perimeter while resetting.\r\n\r\n for name in self.process_names:\r\n p = subprocess.Popen(name)\r\n self.process_list.append(p)\r\n print(\"[toycar_restart._start()]: started Hobot control node!\")\r\n\r\n print(\"[toycar_restart._start()]: resetting toy car.\")\r\n while True:\r\n ret = self.reset_to(\r\n self.param_reset_to,\r\n max_speed=self.param_reset_speed,\r\n turning_radius=self.param_reset_radius,\r\n step_size=0.1, max_length=500\r\n )\r\n if ret:\r\n for _ in range(5):\r\n self.pub_control.publish(\r\n Control(**{'throttle': 0.0})\r\n )\r\n time.sleep(0.1)\r\n break\r\n time.sleep(2.0)\r\n self._kill_launchfile()\r\n print(\"[toycar_restart._start()]: toy car reset finished!.\")\r\n\r\n self.lock_perimeter.clear()\r\n\r\n return", "def connect_subproc():\n return factory.connect_subproc([sys.executable, \"-u\", SERVER_FILE, \"-q\", \"-m\", \"stdio\"], \n SlaveService)", "def is_running(self,timeout=0):\n\n # wait for them to start\n import time\n st = time.time()\n still_waiting = 1\n while still_waiting:\n try:\n # Send a simple command to all workers\n # and wait till they handle it successfully\n self.exec_code(\"1==1\")\n except 
ClusterError:\n still_waiting = 1\n elapsed = time.time() - st\n if elapsed > timeout:\n # We've run out of time.\n return 0\n else:\n still_waiting = 0\n wait_time = time.time() - st\n # should we somehow dessiminate worker topology (ids)\n # to all machines here?\n return 1", "def start_server():\n if not os.path.exists(cf.get(\"Selenium\", \"server_path\")):\n jar_name = \"selenium-server-standalone-2.25.0.jar\"\n server_jar = os.path.join(tempfile.gettempdir(), jar_name)\n if not os.path.exists(server_jar):\n r = requests.get(\"http://selenium.googlecode.com/files/%s\" % jar_name)\n jar_on_disk = open(server_jar, \"wb\")\n jar_on_disk.write(r.content)\n jar_on_disk.close()\n else:\n server_jar = cf.get(\"Selenium\", \"server_path\")\n \n s = subprocess.Popen(['java', '-jar', server_jar], \n stdout=tempfile.TemporaryFile(), \n stderr=tempfile.TemporaryFile()).pid\n pidfile = open(pid_file_path, \"w\")\n pidfile.write(str(s))\n pidfile.close()\n\n # make sure the server is actually up\n server_up = False\n waiting = 0\n while server_up == False and waiting < 60:\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((\"localhost\", 4444))\n s.close()\n server_up = True\n except socket.error:\n time.sleep(1)\n waiting = waiting + 1\n server_up = False\n\n return server_up", "def restart_cluster():\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"cluster\", \"restart\", \"--manager\")\n else:\n cmd = _traffic_line(\"-M\")\n\n return _subprocess(cmd)", "def runClusterBootstrap(c_session, i, b, boot_restart_file, options):\n\n # open log\n log = open(options.log,\"a\",0)\n log.write(\"\\n\\n %s: Cluster bootstrapping enabled. Running job \"\\\n \"%05i\\n\" % (timeStr(), i+1)\n )\n\n c_job = c_session.createJobTemplate()\n\n # run itself!\n #c_job.remoteCommand = \"%s %s\" % (sys.executable, sys.argv[0])\n c_job.remoteCommand = \"%s\" % sys.executable\n\n fileout_details = os.path.splitext(os.path.basename(options.outfile))\n thisjob_fileout = \"%s/%s_%04i%s\" % (options.tmpdir,\n fileout_details[0],\n i+1,\n fileout_details[1])\n log_details = os.path.splitext(os.path.basename(options.log))\n thisjob_log = \"%s/%s_%04i%s\" % (options.tmpdir,\n fileout_details[0],\n i+1,\n log_details[1])\n\n args = [sys.argv[0],\n \"-d\", options.datapath,\n \"-a\", options.feature_annot,\n \"-o\", thisjob_fileout,\n \"-l\", thisjob_log,\n \"-r\", options.script_file,\n \"-b\", str(b),\n \"--tmpdir\", options.tmpdir,\n \"--bootstrapslave\"]\n\n c_job.args = args\n\n if options.verbose:\n log.write(\"\\t\\tdrmaa command line:\\n\\t\\t%s %s\\n\" \\\n \"\" % (c_job.remoteCommand, \" \".join(c_job.args)))\n\n c_job.outputPath = \":%s\" % options.tmpdir\n c_job.errorPath = \":%s\" % options.tmpdir\n\n # pass current working directory (not that this is needed really, but hey!)\n c_job.nativeSpecification = \"-cwd\"\n\n # add support for different cluster queue specifications\n c_job.nativeSpecification = \"-clear -q '%s' %s\" \\\n \"\" % (options.clustq, c_job.nativeSpecification)\n\n if options.verbose:\n log.write(\"\\t\\tdrmaa output intermediates written to: %s\\n\" \\\n \"\" % options.tmpdir)\n\n c_job.jobEnvironment = os.environ\n jobid = c_session.runJob(c_job)\n\n log.write(\"\\t\\tJob submitted with id: %s\\n\" % jobid)\n\n log.close()\n\n return(jobid, \"%s/generic_wrapper.py.o%s\" % (options.tmpdir, jobid),\n \"%s/%s\" % (options.tmpdir, thisjob_fileout), thisjob_log)", "def test_glusterd_restart_stop_start(self):\n # restart glusterd on all servers\n g.log.info(\"Restart glusterd on all 
servers\")\n ret = restart_glusterd(self.servers)\n self.assertTrue(ret, \"Failed to restart glusterd on all servers\")\n g.log.info(\"Successfully restarted glusterd on all servers\")\n\n # Check if glusterd is running on all servers(expected: active)\n g.log.info(\"Check if glusterd is running on all servers\"\n \"(expected: active)\")\n ret = is_glusterd_running(self.servers)\n self.assertEqual(ret, 0, \"Glusterd is not running on all servers\")\n g.log.info(\"Glusterd is running on all the servers\")\n\n # Stop glusterd on all servers\n g.log.info(\"Stop glusterd on all servers\")\n ret = stop_glusterd(self.servers)\n self.assertTrue(ret, \"Failed to stop glusterd on all servers\")\n g.log.info(\"Successfully stopped glusterd on all servers\")\n\n # Check if glusterd is running on all servers(expected: not running)\n g.log.info(\"Check if glusterd is running on all servers\"\n \"(expected: not running)\")\n ret = is_glusterd_running(self.servers)\n self.assertNotEqual(ret, 0, \"Glusterd is still running on some \"\n \"servers\")\n g.log.info(\"Glusterd not running on any servers as expected.\")\n\n # Start glusterd on all servers\n g.log.info(\"Start glusterd on all servers\")\n ret = start_glusterd(self.servers)\n self.assertTrue(ret, \"Failed to start glusterd on all servers\")\n g.log.info(\"Successfully started glusterd on all servers\")\n\n # Check if glusterd is running on all servers(expected: active)\n g.log.info(\"Check if glusterd is running on all servers\"\n \"(expected: active)\")\n ret = is_glusterd_running(self.servers)\n self.assertEqual(ret, 0, \"Glusterd is not running on all servers\")\n g.log.info(\"Glusterd is running on all the servers\")\n\n # Wait for all the glusterd's to establish communication.\n time.sleep(30)\n\n # Validate all the peers are in connected state\n g.log.info(\"Validating all the peers are in Cluster and Connected\")\n ret = self.are_peers_in_connected_state()\n self.assertTrue(ret, \"Validating Peers to be in Cluster Failed\")\n g.log.info(\"All peers are in connected state\")\n\n self.test_method_complete = True", "def start_vnc_server(self, nReserved = 0):\n\t\treturn Job(SDK.PrlVm_StartVncServer(self.handle, nReserved)[0])", "def run_server(\n server_host=bridge.DEFAULT_HOST,\n server_port=DEFAULT_SERVER_PORT,\n response_timeout=bridge.DEFAULT_RESPONSE_TIMEOUT,\n background=True,\n):\n server = bridge.BridgeServer(\n server_host=server_host,\n server_port=server_port,\n loglevel=logging.INFO,\n response_timeout=response_timeout,\n local_call_hook=hook_local_call,\n local_eval_hook=hook_local_eval,\n local_exec_hook=hook_local_exec,\n )\n\n if background:\n server.start()\n print(\n \"Server launching in background - will continue to run after launch script finishes...\\n\"\n )\n else:\n server.run()" ]
[ "0.59675866", "0.590452", "0.56354386", "0.56137675", "0.5592708", "0.5580869", "0.55625385", "0.5503754", "0.5502141", "0.5361299", "0.53241587", "0.53121626", "0.5281268", "0.52802044", "0.52685004", "0.5257645", "0.52353615", "0.52231896", "0.5218573", "0.52134603", "0.51663834", "0.5131409", "0.51189613", "0.5115765", "0.5104953", "0.51008075", "0.5090467", "0.5045677", "0.503646", "0.49986354" ]
0.67416304
0
stop() Tell all remote slaves to terminate. Description: stop calls sys.exit(0) on all the slave processes so that they terminate gracefully. Note that if, for some reason, you are unable to connect to a remote process due to a socket error, you'll have to kill that slave process by hand.
def stop(self):
    for worker in self.workers:
        import sys; sys.stdout.flush()
        try:
            worker.exec_code('import sys;sys.exit(0)')
        except:
            #should really do something here to
            # trap non-SystemExit errors.
            pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def terminate_slaves(self):\n self.master.terminate_slaves()", "def stop_all():\n\twhile _running:\n\t\t_running[0].stop(noerror=True)", "def stop(self):\n self.log.info(\"Stopping servers\")\n if self.runner.job.yaml_params.is_nvme():\n self.kill()\n self.storage_reset()\n # Make sure the mount directory belongs to non-root user\n self.log.info(\"Changing ownership of mount to non-root user\")\n cmd = \"sudo chown -R {0}:{0} /mnt/daos*\".format(getpass.getuser())\n pcmd(self._hosts, cmd, False)\n else:\n try:\n self.runner.stop()\n except CommandFailure as error:\n raise ServerFailed(\"Failed to stop servers:{}\".format(error))", "def shutdown(self) -> None:\n for worker in self.remote_workers:\n worker.shutdown.remote()\n worker.__ray_terminate__.remote()", "def stop(self):\n response = self._shell_client.exec_command('{} stop'.format(self._executable_path), async=False)\n\n if not response.is_success():\n self._logger.error('clusterrunner stop failed on host {} with output: {}, error: {}'.format(\n self.host, response.raw_output, response.raw_error))", "def kill(self):\n \n self.killSlavePids()", "def terminate(self):\n while self._conns:\n conn = self._conns.pop()\n try:\n conn.send((self.EXIT, ()))\n except BrokenPipeError:\n pass\n conn.close()\n while self._processes:\n p = self._processes.pop()\n p.join(1)\n if p.exitcode is None:\n # Force termination if necessary\n p.terminate()\n p.join()\n self._running = False", "def stop(self):\r\n for srv in self._servers:\r\n srv.stop()", "def stop( self ):\n log.info( \"Stopping mesosbox\" )\n self.__patch_etc_hosts( { 'mesos-master': None } )", "def stop(self):\n logging.debug(\"footprint/stop entered\")\n logging.info(\"Stopping cloud instances\")\n print \"Stopping machines\"\n for machine in self.machines:\n logging.debug(\"stopping %s\" % machine)\n server = self.machines[machine]\n server.stop()\n \n # monitor until all the machines are down\n active_machines = 1\n while active_machines:\n running = 0\n active_machines = 0\n for machine in self.machines:\n server = self.machines[machine]\n try:\n tmp = cs.servers.get(self.machines[machine].id)\n active_machines = 1\n running = running + 1 \n except novaclient.exceptions.NotFound:\n continue\n # if running == 0:\n # break\n time.sleep(10)\n sys.stdout.write(\".\")\n sys.stdout.flush()\n \n logging.info(\"Stopping Networks\")\n print\n print \"Stopping networks\"\n \n for network in self.networks:\n logging.debug(\"stopping %s\" % str(network))\n n = self.networks[network]\n n.stop()\n \n while True:\n running = 0\n # print self.networks\n for network in self.networks:\n n = self.networks[network]\n\n try:\n tmp = cn.find(id=n.id)\n running = running + 1\n except pyrax.exceptions.NotFound:\n continue\n if running == 0:\n break\n time.sleep(1)\n sys.stdout.write(\".\")\n sys.stdout.flush()", "def stop(self):\n\n # immediate is necessary if it's in recovery (for now).\n # we don't care the result.\n master = gp.MasterStop(\"Stopping Master Standby\",\n self.datadir, mode='immediate')\n master.run()", "def stop_machines(args):\n session = Session()\n finder = MachineFinder(args.finder)\n machines = finder.find_machines(session)\n print \"Machines Found: %d\" % (len(machines))\n if len(machines) > 0:\n print\n print \"Stopping the Agent on machines:\"\n for machine in machines:\n sys.stdout.write(machine.hostname + \"...\")\n sys.stdout.flush()\n machine.stop_agent()\n print \"Done\"", "def stop(self: RemoteCluster, wait: bool = False, timeout: int = None) -> None:\n 
self.server.stop(wait=wait, timeout=timeout)\n self.clients.terminate()\n super().stop(wait=wait, timeout=timeout)", "def stop_server(setname=None, hosts=None):\n global SESSIONS # pylint: disable=global-variable-not-assigned\n try:\n if setname is None:\n for _key, val in SESSIONS.items():\n val.send_signal(signal.SIGINT)\n time.sleep(5)\n if val.poll() is None:\n val.kill()\n val.wait()\n else:\n SESSIONS[setname].send_signal(signal.SIGINT)\n time.sleep(5)\n if SESSIONS[setname].poll() is None:\n SESSIONS[setname].kill()\n SESSIONS[setname].wait()\n print(\"<SERVER> server stopped\")\n\n except Exception as error:\n print(\"<SERVER> Exception occurred: {0}\".format(str(error)))\n raise ServerFailed(\"Server didn't stop!\")\n\n if not hosts:\n return\n\n # Make sure the servers actually stopped. Give them time to stop first\n # pgrep exit status:\n # 0 - One or more processes matched the criteria.\n # 1 - No processes matched.\n # 2 - Syntax error in the command line.\n # 3 - Fatal error: out of memory etc.\n time.sleep(5)\n result = pcmd(\n hosts, \"pgrep '(daos_server|daos_io_server)'\", False, expect_rc=1)\n if len(result) > 1 or 1 not in result:\n bad_hosts = [\n node for key in result if key != 1 for node in list(result[key])]\n kill_server(bad_hosts)\n raise ServerFailed(\n \"DAOS server processes detected after attempted stop on {}\".format(\n \", \".join([str(result[key]) for key in result if key != 1])))\n\n # we can also have orphaned ssh processes that started an orted on a\n # remote node but never get cleaned up when that remote node spontaneiously\n # reboots\n subprocess.call([\"pkill\", \"^ssh$\"])", "def terminate_all(self):\n self._stop_all('terminate')", "def stop():\n\n for unix_socket in [\n self.robot_status_sender,\n self.ssl_wrapper_sender,\n self.ssl_referee_sender,\n self.tactic_override,\n self.sensor_proto_sender,\n self.world_listener,\n ]:\n unix_socket.force_stop()\n self.primitive_listener.force_stop()", "def stop_slave_worker():\n print(\"Stopping slave worker\")\n r = req.patch(f\"{SLAVE_API_URL}/formation/worker\", json=API_PAYLOAD_0, headers=SLAVE_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to stop the worker dyno on slave\")\n print(r.text)\n return False\n #wait a bit for the worker process to stop\n print(\"Waiting a bit\")\n time.sleep(2)\n return True", "def clean_exit(self):\n # First, stop all the plugins\n logger.info(\"Stopping all plugins...\")\n for plugin_name in self.name_to_plugin_class:\n self.zmq_pub_socket.send_multipart((plugin_name.encode(), \"EXIT\".encode()))\n if plugin_name in self.name_to_exit_event:\n self.name_to_exit_event[plugin_name].set()\n\n time.sleep(2.5)\n\n # Next, stop the tcp server\n logger.info(\"Stopping tcp server...\")\n self.tcp_server_exit_event.set()", "def __shutdownParallel(self):\n if self._parallelLib == ParallelLibEnum.ray and self._server is not None and not self.rayInstanciatedOutside:\n # we need to ssh and stop each remote node cluster (ray)\n servers = []\n if 'remoteNodes' in self.runInfoDict:\n servers += self.runInfoDict['remoteNodes']\n if 'headNode' in self.runInfoDict:\n servers += [self.runInfoDict['headNode']]\n # get local enviroment\n localEnv = os.environ.copy()\n localEnv[\"PYTHONPATH\"] = os.pathsep.join(sys.path)\n for nodeAddress in servers:\n self.raiseAMessage(\"Shutting down ray at address: \"+ nodeAddress)\n command=\"ray stop\"\n rayTerminate = 
utils.pickleSafeSubprocessPopen(['ssh',nodeAddress.split(\":\")[0],\"COMMAND='\"+command+\"'\",\"RAVEN_FRAMEWORK_DIR='\"+self.runInfoDict[\"FrameworkDir\"]+\"'\",self.runInfoDict['RemoteRunCommand']],shell=False,env=localEnv)\n rayTerminate.wait()\n if rayTerminate.returncode != 0:\n self.raiseAWarning(\"RAY FAILED TO TERMINATE ON NODE: \"+nodeAddress)\n # shutdown ray API (object storage, plasma, etc.)\n ray.shutdown()\n elif self._parallelLib == ParallelLibEnum.dask and self._server is not None and not self.rayInstanciatedOutside:\n self._server.close()\n if self._daskScheduler is not None:\n self._daskScheduler.terminate()", "def killSlavePids(self):\n for pid in self._all_processes_pid:\n self._sudoKillSubprocessFromPid(pid)\n # The code below is commented out, we will just wipe out the whole self._all_processes_pid[] list below\n #while pid in self._all_processes_pid: self._all_processes_pid.remove(pid) # Remove references to this child's PID in the list of children\n if not self._slave_dhcp_client_proc is None:\n self._slave_dhcp_client_proc.wait() # Wait for sudo child (our only direct child)\n \n self._all_processes_pid = [] # Empty our list of PIDs\n \n self._slave_dhcp_client_pid = None \n self._slave_dhcp_client_proc = None", "def stop_all_instances(self):\n print '# Stopping all the instances'\n number = self.compute.stop_all_instances()\n print '%d instances were stopped' % number", "def stop_all():\n subprocess.check_call(\n ['./run.py --down'], shell=True,\n cwd=orc8_docker_path,\n )\n subprocess.check_call(\n 'docker-compose down', shell=True,\n cwd=feg_docker_integ_test_path,\n )\n subprocess.check_call(\n 'vagrant halt magma', shell=True,\n cwd=agw_path,\n )", "def stop(self, *args):\n if args[0] == 'all':\n for k, v in self.processers.items():\n if v:\n try:\n v.terminate()\n except:\n pass\n print 'Killed %s.' % k\n\n self.processers = dict.fromkeys(self.processers.keys())\n else:\n seq = args[0]\n try:\n self.processers['process%s' % seq].terminate()\n self.processers['process%s' % seq] = None\n print 'Killed process%s.' % seq\n except:\n print 'Have no process%s.' % seq", "def stop(self):\n for process in self.process:\n process.stop()", "def stop_subprocesses():\n global message_interface\n global c_library_interface\n if message_interface:\n message_interface.stop()\n if c_library_interface:\n c_library_interface.stop()", "def stop():\n server = current_server()\n server.stop()", "def remote_kill():", "def terminate_workers(terminate=True):\n\n if terminate:\n print_rank(\"Terminating worker processes\")\n for worker_rank in range(1, size()):\n _send(COMMAND_TERMINATE, worker_rank)", "def stop():\n tidyup()\n shutdown_server()\n return \"Parando Servidor\"", "def stop():\n tidyup()\n shutdown_server()\n return \"Parando Servidor\"" ]
[ "0.7355106", "0.66860104", "0.6615295", "0.6604181", "0.65941006", "0.6561907", "0.65497446", "0.65311134", "0.64733106", "0.64280826", "0.635675", "0.6324678", "0.63125056", "0.6295344", "0.6278813", "0.62739265", "0.6267419", "0.6254669", "0.62507206", "0.6219218", "0.62020385", "0.62005347", "0.6198947", "0.6196356", "0.61957103", "0.61912024", "0.6143855", "0.61354613", "0.6134541", "0.6134541" ]
0.67412335
1
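The stop() entry above only asks each slave to exit itself; if a worker's socket is already broken, the master cannot tell whether the remote process actually died. Below is a minimal usage sketch of that shutdown pattern, assuming a cluster object exposing stop() and a caller-maintained mapping of worker ids to local pids (i.e. slaves launched on the same machine); the shutdown_cluster name and the pid map are illustrative assumptions, not part of the source above.

import os
import signal

def shutdown_cluster(cluster, slave_pids):
    # Ask every slave to run sys.exit(0) (socket errors are swallowed internally).
    cluster.stop()
    # Fall back to killing by hand any local slave that is still alive.
    for worker_id, pid in slave_pids.items():
        try:
            os.kill(pid, 0)          # probe only: raises OSError if the process is gone
        except OSError:
            continue                 # slave terminated gracefully
        os.kill(pid, signal.SIGTERM) # "kill the slave process by hand"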
is_running(timeout=0) -> 0 or 1 Check that all the slave processes in the cluster are up and running. If a timeout is specified, is_running will repeatedly poll the cluster until it either gets a positive result or gives up and returns 0 after the specified number of seconds.
def is_running(self,timeout=0):
    # wait for them to start
    import time
    st = time.time()
    still_waiting = 1
    while still_waiting:
        try:
            # Send a simple command to all workers
            # and wait till they handle it successfully
            self.exec_code("1==1")
        except ClusterError:
            still_waiting = 1
            elapsed = time.time() - st
            if elapsed > timeout:
                # We've run out of time.
                return 0
        else:
            still_waiting = 0
    wait_time = time.time() - st
    # should we somehow dessiminate worker topology (ids)
    # to all machines here?
    return 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_instance_running(self):\n try:\n self.instance.wait(timeout=1)\n except psutil.TimeoutExpired:\n pass\n return self.instance.is_running()", "def is_running(self) -> bool:\n return self.executor.is_alive() if self.executor else False", "async def running(self, timeout=600):\n if self.are_tests_finished():\n await self.stop()\n return False\n\n await asyncio.sleep(30)\n\n return self._running", "def is_running(self):\n qstat = self._grep_qstat('running')\n if qstat:\n return True\n return False", "async def is_running(self, **kwargs: Any) -> bool:\n ...", "async def is_running(self, **kwargs: Any) -> bool:\n return True", "def is_running(self):\n status = self.get_status_response()\n return ((status[1] & 2) == 2)\n #end is_running()", "def is_running(ssh):\n\tif get_status(ssh) == \"running\":\n\t\treturn True\n\treturn False", "def is_running(self):\n # type: () -> bool\n return self._run_state.is_running()", "def is_running(self):\n self.__condition.acquire()\n result = self.__is_running\n self.__condition.release()\n return result", "def isRunning(self):\n if not self.hasBeenStarted():\n return False\n \n if not self._slave_dhcp_client_proc.poll(): # Poll our direct child (sudo)\n return False\n \n for pid in self._all_processes_pid:\n if not self._checkPid(pid):\n return False\n \n return True", "def _is_running(self):\n return self._run_state.is_running()", "def is_running(self):\n return all(p.status == 'running' for p in self.values())", "def is_running(self):\n\t\treturn self._running", "def is_running(self) -> bool:\n return False", "def check_running(self, fail_on_error=True):\n status = True\n state = self.check_mount_state(self.running_hosts)\n if state[\"unmounted\"] or state[\"nodirectory\"]:\n self.log.error(\n \"Error: dfuse not running on %s\",\n str(state[\"unmounted\"].union(state[\"nodirectory\"])))\n status = False\n if fail_on_error:\n raise CommandFailure(\"dfuse not running\")\n return status", "def is_running(self) -> Awaitable[bool]:\n return self.instance.is_running()", "def is_running(self):\n return self.running", "def is_running(self):\n return self.running", "def is_running(self):\n return self.running", "def check_yarn_service(master, ec2_opts, num_nodes):\n output = spark_ec2.ssh_read(master, ec2_opts, \"/root/ephemeral-hdfs/bin/yarn node -list -all |grep RUNNING |wc -l\")\n # Ok if one slave is down\n return int(output) >= int(num_nodes) - 1", "def is_running(self) -> bool:\n return self._is_running", "def is_running(self) -> bool:\r\n return self.__running", "def _isrunning(self):\n return self.dp.state()==PyTango.DevState.RUNNING", "def is_running(self):\n data = self._poll()\n return data.get('building', False)", "def is_running(self):\n\t\treturn self in _running", "def is_running(self):\n\n return self._state == \"RUNNING\"", "def checkRunning(procname):\n return procdata.checkRunning(procname)", "def _checkTorcsServer(self):\n isRunning = False\n if self.torcsServerProcess is not None:\n if self.torcsServerProcess.poll() is None:\n isRunning = True\n return isRunning", "def is_server_running(self, shut_off_is_down: bool = False) -> bool:\n out = self.cloud_cli.run_cloud_cmd(\n f\"compute --project={self.project} instances describe --zone={self.zone} {self.name} --format=json\")\n try:\n out = json.loads(out.strip())\n except json.JSONDecodeError:\n return False\n return True" ]
[ "0.6553389", "0.6502707", "0.6413581", "0.6377632", "0.6294592", "0.6243133", "0.623014", "0.62272906", "0.616547", "0.61529255", "0.61501044", "0.61483437", "0.6140799", "0.60886705", "0.6068144", "0.6033086", "0.60108936", "0.59781617", "0.59781617", "0.59781617", "0.5975004", "0.59649813", "0.5941734", "0.59380317", "0.5931601", "0.59191936", "0.5901552", "0.5899547", "0.5896145", "0.58911645" ]
0.78950197
0
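is_running() above is essentially a ping loop: it keeps issuing a trivial exec_code("1==1") until every slave answers or the timeout expires. The following standalone sketch shows the same wait-until-ready pattern written against a generic ping callable; the wait_until_ready name and the poll_interval argument are assumptions added for illustration.

import time

def wait_until_ready(ping, timeout=0.0, poll_interval=0.1):
    # ping is any zero-argument callable that raises on failure,
    # e.g. lambda: cluster.exec_code("1==1").
    start = time.time()
    while True:
        try:
            ping()
        except Exception:
            if time.time() - start > timeout:
                return 0              # gave up: cluster not (fully) running
            time.sleep(poll_interval)
        else:
            return 1                  # every worker handled the command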
_send(package, addendum=None) Send a package to all slaves. Description: _send takes a package packed up by a packer object (see sync_cluster) and sends it to each of the slave processes. addendum is either None or a list with one entry per slave process. Each entry is a small package of additional information that is to be sent to a specific slave process; it contains data needed only by that process. Implementation Notes: The send is done synchronously to each worker in turn. The entire package is sent to slave0 before moving on and sending the message to slave1. If a socket error occurs while trying to send data to a given slave, the offending worker is pushed into the self.had_send_error list. Also, self.send_exc is a dictionary keyed by (err_type, err_msg) whose values are the ids of the offending workers. This information is used in _recv to skip receiving from slaves that failed on send, and also for error reporting.
def _send(self,package,addendum=None):
    if addendum:
        N = len(addendum)
        assert(N <= len(self.workers))
    else:
        N = len(self.workers)
    self.send_exc = {}
    self.had_send_error = []
    for i in range(N):
        try:
            if not addendum:
                self.workers[i].send(package)
            else:
                self.workers[i].send(package,addendum[i])
        except socket.error, msg:
            import sys
            err_type, err_msg = str,sys.exc_info()[:2]
            self.had_send_error.append(self.workers[i])
            key = (err_type,err_msg)
            try:
                self.send_exc[key].append(self.workers[i].id)
            except:
                self.send_exc[key] = [self.workers[i].id]
        # else - handle other errors?
    self.Nsent = N
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send(self, data, unitindices=None, confirm=False):\n\n # TEST\n confirm = False\n\n if unitindices is None:\n unitindices = xrange(self.workers)\n if not confirm:\n [self.parent_conns[i].send(data) for i in unitindices]\n else:\n while True:\n [self.parent_conns[i].send(data) for i in unitindices]\n # if send OK, terminate, otherwise, send again to the units\n # which returned False\n unitindices = nonzero(array([not self.parent_conns[i].recv()\n for i in unitindices]))[0]\n if len(unitindices) == 0:\n break\n log_debug(\"pickling error, sending data again\")", "def _send_recv(self,package,addendum=None):\n self._send(package,addendum)\n self.last_results = self._recv()\n if(len(self.send_exc) or len(self.recv_exc)):\n self.handle_error()\n return self.last_results", "def _chunk_send(self, metrics):\n messages = self._create_messages(metrics)\n request = self._create_request(messages)\n packet = self._create_packet(request)\n\n response = None\n\n for host_addr in self.zabbix_uri:\n logger.debug('Sending data to %s', host_addr)\n\n # create socket object\n connection_ = socket.socket()\n if self.socket_wrapper:\n connection = self.socket_wrapper(connection_)\n else:\n connection = connection_\n\n connection.settimeout(self.timeout)\n\n try:\n # server and port must be tuple\n connection.connect(host_addr)\n connection.sendall(packet)\n except socket.timeout:\n logger.error('Sending failed: Connection to %s timed out after %d seconds', host_addr, self.timeout)\n connection.close()\n continue\n except socket.error as err:\n # In case of error we should close connection, otherwise\n # we will close it after data will be received.\n logger.warning('Sending failed: %s', getattr(err, 'msg', str(err)))\n connection.close()\n continue\n\n try:\n response = self._get_response(connection)\n\n logger.debug('%s response: %s', host_addr, response)\n except socket.error as err:\n logger.error('Sending failed: %s', getattr(err, 'msg', str(err)))\n raise socket.error(response)\n\n break\n\n if response is None:\n logger.error('Sending failed: no servers available')\n raise socket.error()\n\n if response and (\"response\" not in response or response.get('response') != 'success'):\n logger.debug('Response error: %s}', response)\n raise socket.error(response)\n\n return response", "def _send(self):\n while self.socket is not None:\n try:\n data = self._get_data_from_send_queue()\n if self.socket is not None:\n header = self._create_data_header(data)\n with self.socket_lock:\n self.socket.sendall(header + data)\n except Exception as err:\n getLogger(__name__).debug((\"Unexpected exception occurred,\"\n \" send thread may be in a\"\n \" corrupted state\\n\"\n \"Error: {}\".format(err)))", "def sendall(self, data):\n if self._socket is None:\n raise OperationalError(\"MySQLx Connection not available\")\n try:\n self._socket.sendall(data)\n except OSError as err:\n raise OperationalError(f\"Unexpected socket error: {err}\") from err", "def send_all(self, data, sender=None):\n for client in self.clients:\n if client == sender:\n continue\n client.send(data)", "def sendToClients(self, data):\n for client in self.__clients:\n result = client.write(data)\n if (result < 0):\n print \"Error writing to\", self.__clientName(client), \"-\", client.errorString()\n elif (result <> len(data)):\n print \"Only wrote\", result, \"of\", len(data), \"bytes to\", self.__clientName(client)", "def _send(self):\n data = self.output_buffer.view()\n if not data:\n return\n if self.closed():\n raise self.Error(\"Failed to write 
to closed connection {!r}\".format(self.server.address))\n if self.defunct():\n raise self.Error(\"Failed to write to defunct connection {!r}\".format(self.server.address))\n self.socket.sendall(data)\n self.output_buffer.clear()", "def _send(self, msg, buffers=None):\n if self.comm is not None and (self.comm.kernel is not None if hasattr(self.comm, \"kernel\") else True):\n self.comm.send(data=msg, buffers=buffers)", "def send(self,data):\r\n # Get the data length\r\n fullDataLength = len(data)\r\n \r\n # Input sanity\r\n if fullDataLength == 0:\r\n raise ValueError, \"Cannot send a null data-set!\"\r\n \r\n # Send chunks of data until it is all sent\r\n while True:\r\n # Check if the socket is closed\r\n self._handleClosed()\r\n \r\n # Make sure we have available outgoing bandwidth\r\n self.socketLocks[\"outgoing\"].acquire()\r\n try:\r\n self.socketLocks[\"outgoing\"].release()\r\n except:\r\n # Some weird timing issues can cause an exception, but it is harmless\r\n pass\r\n \r\n # Check if the socket is closed\r\n self._handleClosed()\r\n \r\n # Get our own lock\r\n self.socketLocks[\"send\"].acquire()\r\n \r\n # How much outgoing traffic is available?\r\n outgoingAvailable = self.bufferInfo[\"outgoing\"]\r\n \r\n # If we can, just send it all at once\r\n if len(data) < outgoingAvailable:\r\n try:\r\n # Instruct the multiplexer object to send our data\r\n self.mux._send(self.id, data)\r\n except AttributeError:\r\n # The multiplexer may be closed\r\n # Check if the socket is closed\r\n self._handleClosed()\r\n \r\n # Reduce the size of outgoing avail\r\n self.bufferInfo[\"outgoing\"] -= len(data)\r\n \r\n # Release the lock\r\n self.socketLocks[\"send\"].release()\r\n \r\n # We need to explicitly leave the loop\r\n break\r\n \r\n # We need to send chunks, while waiting for more outgoing B/W\r\n else:\r\n # Get a chunk of data, and send it\r\n chunk = data[:outgoingAvailable]\r\n try:\r\n # Instruct the multiplexer object to send our data\r\n self.mux._send(self.id, chunk)\r\n except AttributeError:\r\n # The multiplexer may be closed\r\n # Check if the socket is closed\r\n self._handleClosed()\r\n \r\n # Reduce the size of outgoing avail\r\n self.bufferInfo[\"outgoing\"] = 0\r\n\r\n # Lock the outgoing lock, so that we block until we get a MULTIPLEXER_CONN_BUF_SIZE message\r\n self.socketLocks[\"outgoing\"].acquire()\r\n \r\n # Trim data to only what isn't sent syet\r\n data = data[outgoingAvailable:]\r\n \r\n # Release the lock\r\n self.socketLocks[\"send\"].release()\r\n \r\n # If there is no data left to send, then break\r\n if len(data) == 0:\r\n break\r\n \r\n # Return bytes sent, which is always the full message\r\n # since we will block indefinately until everything is sent.\r\n return fullDataLength", "def send(self,data,address,isBinary=False):\n if DEBUG: print \"In class Server, function, send\"\n #dest = self.resolve_address(address)\n peer_to_send_message = None\n #or uid in self.clientManager.peer_servers:\n #pdb.set_trace()\n #peer_server = self.clientManager.connections[uid]\n #if peer_server.ip == address:\n #peer_to_send_message = peer_server\n #pdb.set_trace()\n return self.connection_manager.send(data,address,isBinary)\n \n #for client in self.clientManager.connections.keys():\n #pdb.set_trace()\n #self.clientManager.connections[client].sendMessage(\"------From RBAnalysis---Hi\")", "def _send(self, data, newline=\"\\r\\n\", sock=None):\n self.outbuff.append(data+newline)\n for msg in self.outbuff:\n if self.print_raw:\n logger.debug(msg.strip())\n 
self.sock.sendall((msg+newline).encode(\"utf-8\"))", "def send(x, communicator, rank, tag=0):\n chainer.utils.experimental('chainermn.functions.send')\n\n if rank == communicator.rank:\n raise ValueError(\n 'rank must be different from communicator rank, '\n 'otherwise deadlock occurs')\n\n xp = backend.get_array_module(*x)\n\n # Dummy variable to retain gradient computation of send,\n # otherwise the corresponding recv will cause deadlock in backward\n # in the case where all inputs for this function does not require_grad.\n dummy_var = chainer.Variable(xp.array([], dtype=xp.float32))\n\n if isinstance(x, list) or isinstance(x, tuple):\n inputs = x + type(x)([dummy_var])\n delegate_variable = Send(\n communicator, peer_rank=rank, peer_tag=tag)(*inputs)\n else:\n delegate_variable = Send(\n communicator, peer_rank=rank, peer_tag=tag)(x, dummy_var)\n\n delegate_variable.name = 'delegate_variable'\n return delegate_variable", "def send_cmds(self, cmds):\r\n self.socket.sendall(cmds)", "def _do_send_packet(self, seqnum):\n sch_packet = self._sending_window[seqnum]\n if sch_packet.retries >= constants.MAX_RETRANSMISSIONS:\n self.shutdown()\n else:\n self._proto.send_datagram(sch_packet.rudp_packet, self.relay_addr)\n sch_packet.timeout_cb = REACTOR.callLater(\n sch_packet.timeout,\n self._do_send_packet,\n seqnum\n )\n sch_packet.retries += 1\n self._cancel_ack_timeout()", "def send_all(self, data):\n\n for client in self.clients:\n try:\n client.send(data)\n except Exception:\n self.clients.remove(client)", "def mpi_send(data, dest: int, tag: int) -> None:\n MPI.COMM_WORLD.send(data, dest=dest, tag=tag)", "def _send(self, batch):\n return self.agent.emitBatch(batch)", "def ol_mpi_send(data, dest: int, tag: int):\n import numba_mpi\n\n def impl(data, dest: int, tag: int) -> None:\n \"\"\"reduce a single number across all cores\"\"\"\n status = numba_mpi.send(data, dest, tag)\n assert status == 0\n\n return impl", "def send_data(self, data):\r\n try:\r\n self.sock.sendto(data, self.addr)\r\n except Exception:\r\n print(\"Cant't send a package\")", "def mxt_send(self, data):\n return self._mxt_sock.send(data)", "def impl(data, dest: int, tag: int) -> None:\n status = numba_mpi.send(data, dest, tag)\n assert status == 0", "async def _send(self, sock, command, **args):\n packet = encode_packet(command, **args)\n _LOGGER.debug(\n \"Sending packet to controller %s <%s>\", self._address, packet\n )\n res = await sock_sendto(sock, packet, self._address)\n if res != len(packet):\n raise OSError(\"Could not send all of packet\")", "def nonblocking_send(self, data):\n try:\n if len(data) == 0:\n return None\n self.amount_so_far += self.socket.send(data[self.amount_so_far:])\n except Exception as exc:\n active_sockets_dict.pop(self.socket, None)\n self.socket.close()\n print(\"An error occurred: %s\\n\" % exc)\n return -1\n ret = self.is_send_done()\n return ret", "def send(self, group=0):\n self._data1 = group\n super().send(data1=self._data1)", "def send(self, *args):\n self.clean_outbox()\n msg, tag = self.process_outgoing(args)\n req = self.mpi_comm.isend(msg, dest=self.remote_rank, tag=tag)\n self._outbox.append(req)", "def send(self):\n if(self.target):\n try:\n self.message = self.message +\"\\r\\n\"\n self.target[0].send(self.message)\n except socket.error, err:\n print err", "def _send(self, msg, binaries):\r\n WebSocketClientProtocol.sendMessage(self, msg)\r\n\r\n for data in binaries:\r\n binMsg = data[0] + data[1].getvalue()\r\n WebSocketClientProtocol.sendMessage(self, binMsg, binary=True)", 
"def send(self, send_conn):\n #Connect to port\n while send_conn is None:\n self.send_socket.bind((self.host, self.send_port))\n self.send_socket.listen(1)\n send_conn = self.send_socket.accept()\n # send_adr = self.send_socket.accept()\n print 'send connected'\n #Send comands from queue\n while True:\n print 'loop sender'\n cmd = self.command_queue.get(True)\n send_conn.sendall(cmd)", "def _send_command(self, command):\n command_sequence = [command, self._checksum(command)]\n\n for retry in range(0, self.__RETRY_MAX_NUM):\n bytes_sent = self._port_handle.write(command_sequence)\n if bytes_sent != len(command_sequence):\n raise DfuException('Serial port write error: tried to send {} '\n 'bytes, but {} was '\n 'sent.'.format(len(command_sequence),\n bytes_sent))\n\n if self._is_acknowledged():\n break\n\n self._port_handle.flushInput()\n self._port_handle.flushOutput()\n else:\n raise DfuException(\n 'Command {} failed after '\n '{} retries.'.format(hex(command), retry + 1))" ]
[ "0.61424744", "0.580894", "0.5774594", "0.5474106", "0.5426062", "0.5318972", "0.53153694", "0.5213522", "0.52068084", "0.5200758", "0.51669294", "0.5166509", "0.51515573", "0.5150636", "0.5056886", "0.50452703", "0.503232", "0.5028009", "0.5002065", "0.49930203", "0.49819425", "0.4960919", "0.49570572", "0.49363825", "0.49279243", "0.49231222", "0.49095207", "0.49054226", "0.49028555", "0.4895302" ]
0.8050387
0
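The _send() row above encodes a fan-out-with-error-collection convention: one common package goes to every slave, an optional addendum list is paired index-by-index with the workers, and socket failures are recorded per worker instead of aborting the broadcast. Here is a compact, self-contained sketch of that pattern; broadcast and the caller-supplied send(worker, payload) transport function are illustrative stand-ins for the real socket code, not part of the source above.

import socket

def broadcast(workers, package, send, addendum=None):
    # Returns (sent_count, errors) where errors maps a stringified error
    # to the list of worker indices that failed, mirroring send_exc above.
    if addendum is not None:
        assert len(addendum) <= len(workers)
    n = len(addendum) if addendum is not None else len(workers)
    errors = {}
    for i in range(n):
        payload = package if addendum is None else (package, addendum[i])
        try:
            send(workers[i], payload)
        except socket.error as err:
            errors.setdefault(str(err), []).append(i)
    return n, errors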
_recv() Retrieve results from slave processes. Description: Retrieve results from all slave processes that were successfully sent a package. If an error occurs while receiving from one of the slaves, the error is noted and the results from the other slaves are still retrieved. A tuple of the results from all workers is returned by _recv; an entry of None is placed in the tuple for any worker that had an error. The recv is done synchronously, waiting for slave0 to return its results before moving on to receive the results from slave1.
def _recv(self):
    self.had_recv_error = []
    self.recv_exc = {}
    results = []
    import sys;
    #only listen on workers involved in calculation.
    for worker in self.workers[:self.Nsent]:
        if worker in self.had_send_error:
            results.append(None)
        else:
            try:
                sys.stdout.flush()
                results.append(worker.recv())
            except sync_cluster.RemoteError:
                import sys
                err = sys.exc_info()[1]
                # Force the err msg (err[1]) to be a string.
                # This dimishes info content, but makes sure
                # that the sames errors are hashed correctly
                # in the dictionary. (does it?)
                err_type,err_msg, err_traceback = err
                err = err_type,str(err_msg), err_traceback
                self.had_recv_error.append(worker)
                try: self.recv_exc[err].append(worker.id)
                except: self.recv_exc[err] = [worker.id]
                results.append(None)
            except sync_cluster.RemoteCrashError:
                # Gotta be more intelligent here...
                msg = 'Error! Remote worker %d appears to have crashed.' \
                      % worker.id
                raise sync_cluster.RemoteCrashError,msg
            # else handle other errors
    #print
    return tuple(results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recv(self, unitindices=None): # ,discard = None, keep = None):\n if unitindices is None:\n unitindices = xrange(self.workers)\n result = []\n i = 0\n# discarded = []\n# kept = []\n# log_debug((unitindices, self.parent_conns))\n while i < len(unitindices):\n ui = unitindices[i]\n r = self.parent_conns[ui].recv()\n# log_debug('recv <%s, %s>' % (str(r), str(type(r))))\n# if (discard is not None) and (r == discard):\n# discarded.append(ui)\n# continue\n# if (keep is not None) and (r != keep):\n# kept.append(ui)\n# continue\n result.append(r)\n i += 1\n# if discard is not None:\n# return result, discarded\n# elif kept is not None:\n# return result, kept\n# else:\n return result", "def _send_recv(self,package,addendum=None):\n self._send(package,addendum)\n self.last_results = self._recv()\n if(len(self.send_exc) or len(self.recv_exc)):\n self.handle_error()\n return self.last_results", "def _recv(self) -> List[np.ndarray]:", "def _recv_step(self):\n return self._step_out([conn.recv() for conn in self._conns])", "def recv(communicator, rank, delegate_variable=None, tag=0, force_tuple=False):\n chainer.utils.experimental('chainermn.functions.recv')\n\n if rank == communicator.rank:\n raise ValueError(\n 'rank must be different from communicator rank, '\n 'otherwise deadlock occurs')\n\n if delegate_variable is None:\n res = Recv(\n communicator,\n peer_rank=rank,\n peer_tag=tag)()\n else:\n delegate_variable.name = 'delegate_variable'\n res = Recv(\n communicator,\n peer_rank=rank,\n peer_tag=tag)(delegate_variable)\n\n if force_tuple and not isinstance(res, tuple):\n return tuple([res])\n else:\n return res", "def _wait_for_results(self) -> RemoteCallableResult:\n if (\n self.subscriber is None or\n self.started is None or\n self.process is None\n ):\n raise dbt.exceptions.InternalException(\n '_wait_for_results() called before handle()'\n )\n\n try:\n msg = self.subscriber.dispatch_until_exit(\n started=self.started,\n timeout=self.timeout,\n )\n except dbt.exceptions.Exception as exc:\n raise dbt_error(exc)\n except Exception as exc:\n raise server_error(exc)\n if isinstance(msg, QueueErrorMessage):\n raise RPCException.from_error(msg.error)\n elif isinstance(msg, QueueTimeoutMessage):\n if not self._single_threaded:\n self.process.terminate()\n raise timeout_error(self.timeout)\n elif isinstance(msg, QueueResultMessage):\n return msg.result\n else:\n raise dbt.exceptions.InternalException(\n 'Invalid message type {} (result={})'.format(msg)\n )", "def _receive(self):\n # initialize sockets map\n r, w, x = [self.socket], [], []\n r, w, x = select.select(r, w, x, self.sessiondata.timeout)\n if r:\n return self.socket.recv(4096)\n # return nothing on timeout\n return None", "def receive_data(self):\n chunks = []\n bytes_recd = 0\n while bytes_recd < 8:\n #I'm reading my data in byte chunks\n try:\n chunk = self.sockfd.recv(min(8 - bytes_recd, 4))\n chunks.append(chunk)\n bytes_recd = bytes_recd + len(chunk)\n except:\n print(f'{self.ip} socket failed')\n break\n # if chunk == '':\n # raise RuntimeError(\"Socket connection broken\")\n\n stat_tuple = struct.unpack('L', chunks[0])\n data_tuple = struct.unpack('L', chunks[1])\n stat = stat_tuple[0]\n data = data_tuple[0]\n return stat, chunks[1]", "def _recv(self):\n result = self._con.receive()\n if result.startswith(Parser.NOT_OK_MSG) or len(result) == 0:\n return result\n while not result.endswith(Parser.OK_MSG + '\\n') and not result.startswith(Parser.OK_MSG):\n result += self._con.receive()\n return result", "def recv(self) -> 
Optional[bytes]:\n ready, _, _ = select.select([self.socket], [], [], 0)\n if len(ready) != 0:\n new_bytes = self.socket.recv(self.BUFFER_SIZE)\n self.__recv_buffer = self.__recv_buffer + new_bytes\n return self.__parse_one_message()", "def receive(self):\n if self.sock is not None:\n return recv_msg(self.sock)\n return None", "def recv(self) -> Tuple[int, bytes]:\n seq_idx: int = 0\n self.device.set_nonblocking(False)\n data_chunk: bytes = bytes(self.device.read(64 + 1))\n self.device.set_nonblocking(True)\n\n assert data_chunk[:2] == b\"\\x01\\x01\"\n assert data_chunk[2] == 5\n assert data_chunk[3:5] == seq_idx.to_bytes(2, byteorder=\"big\")\n\n data_len: int = int.from_bytes(data_chunk[5:7], byteorder=\"big\")\n data: bytes = data_chunk[7:]\n\n while len(data) < data_len:\n read_bytes = bytes(self.device.read(64 + 1, timeout_ms=1000))\n data += read_bytes[5:]\n\n sw: int = int.from_bytes(data[data_len - 2:data_len], byteorder=\"big\")\n rdata: bytes = data[:data_len - 2]\n\n LOG.debug(\"<= %s %s\", rdata.hex(), hex(sw)[2:])\n\n return sw, rdata", "def recv_from_all_ml(to_wait = False):\n return base.recv_from_all_ml(to_wait)", "def get_results():\n result = self._recv_result() # blocks\n del self._tasks_in_progress[result.task_id]\n del self._task_results_waiting[result.task_id]\n yield result.value", "def baseRecv(self, buflen):\n\n # Attempts to retrieve a message from the queue initilized in bind, returns None if there are no messages\n data = r.popMessage(); \n\n # Checks to see if a message was retrieved\n if data is None:\n return None; # Not certain if this is the correct return for this...\n else:\n dest_mac = data[0];\n source_mac = data[1];\n length = data[2];\n payload = data[3];\n mac_header = dest_mac+source_mac+length;\n\n ip_header = payload[:7];\n udp_header = payload[7:9];\n message = payload[9:];\n\n # If the message is not addressed to this computer's MAC, discard the message\n if dest_mac != self.my_mac: return None;\n\n if (buflen<len(message)): return None;\n else: return message, mac_header, ip_header, udp_header;", "def twitch_receive_messages(self):\r\n self._push_from_buffer()\r\n result = []\r\n while True:\r\n # process the complete buffer, until no data is left no more\r\n try:\r\n time.sleep(.01)\r\n if self.s is None:\r\n raise Exception('socket is closed')\r\n msg = self.s.recv(4096).decode() # NON-BLOCKING RECEIVE!\r\n except socket.error as e:\r\n err = e.args[0]\r\n if err == errno.EAGAIN or err == errno.EWOULDBLOCK:\r\n # There is no more data available to read\r\n if len(result):\r\n self._maybe_print('returning with {}'.format(result))\r\n\r\n return result\r\n else:\r\n # a \"real\" error occurred\r\n # import traceback\r\n # import sys\r\n # print(traceback.format_exc())\r\n if not self.in_shutdown:\r\n print(\"Trying to recover...\")\r\n self.connect()\r\n return result\r\n else:\r\n self._maybe_print('twitch in: ' + msg)\r\n rec = [self._parse_message(line)\r\n for line in filter(None, msg.split('\\r\\n'))]\r\n rec = [r for r in rec if r] # remove Nones\r\n result.extend(rec)\r\n self._maybe_print(\"result length {} {}\".format(len(result), result))", "def runSlaveRun():\n\n np.set_printoptions(linewidth=1000)\n function = None\n options = None\n\n # print(\"Process {}/{} reporting for duty!\".format(rank, size))\n\n function = comm.bcast(function, root=0)\n arguments = comm.scatter(options, root=0)\n\n results = function(*arguments)\n\n comm.Barrier()\n comm.gather(results, root=0)\n comm.Disconnect()", "def get_result(self):\n utt_text, 
is_stable = self._parent_conn.recv()\n return utt_text, is_stable", "def run_master(self, master_msg):\n self._activated = True\n\n intermediates = [(0, master_msg)]\n for i in range(self.nr_slaves):\n intermediates.append(self._queue.get())\n\n results = self._master_callback(intermediates)\n assert results[0][0] == 0, 'The first result should belongs to the master.'\n\n for i, res in results:\n if i == 0:\n continue\n self._registry[i].result.put(res)\n\n for i in range(self.nr_slaves):\n assert self._queue.get() is True\n\n return results[0][1]", "def receive_data():\n\n # Receive the first message (the header),\n # which indicates the incoming data length\n data_length = int(pickle.loads(conn.recv(HEADER_SIZE)))\n \n if data_length:\n # Receive the data itself\n data = pickle.loads(conn.recv(data_length))\n\n return data", "def receive(self):\n events = self.poller.poll(self.timeout)\n\n # If there is control socket, he has the priority\n if len(events) == 2:\n return self._recv_serialized(self.control_socket)\n elif len(events) == 1:\n return self._recv_serialized(events[0][0])\n return None", "def mpi_recv(data, source, tag) -> None:\n data[...] = MPI.COMM_WORLD.recv(source=source, tag=tag)", "def _fetch(self):\n if self.closed():\n raise self.Error(\"Failed to read from closed connection {!r}\".format(self.server.address))\n if self.defunct():\n raise self.Error(\"Failed to read from defunct connection {!r}\".format(self.server.address))\n if not self.responses:\n return 0, 0\n\n self._receive()\n\n details, summary_signature, summary_metadata = self._unpack()\n\n if details:\n log_debug(\"S: RECORD * %d\", len(details)) # TODO\n self.responses[0].on_records(details)\n\n if summary_signature is None:\n return len(details), 0\n\n response = self.responses.popleft()\n response.complete = True\n if summary_signature == SUCCESS:\n log_debug(\"S: SUCCESS (%r)\", summary_metadata)\n response.on_success(summary_metadata or {})\n elif summary_signature == IGNORED:\n self._last_run_statement = None\n log_debug(\"S: IGNORED (%r)\", summary_metadata)\n response.on_ignored(summary_metadata or {})\n elif summary_signature == FAILURE:\n self._last_run_statement = None\n log_debug(\"S: FAILURE (%r)\", summary_metadata)\n response.on_failure(summary_metadata or {})\n else:\n self._last_run_statement = None\n raise ProtocolError(\"Unexpected response message with signature %02X\" % summary_signature)\n\n return len(details), 1", "def _receive(self, what, address='localhost:502', **kwargs):\n colon_index = address.find(':')\n IP = '-i {} '.format(address[:colon_index])\n PORT = '-p {} '.format(address[colon_index+1:])\n # NOTE: following data is validated by client script\n MODE = '-m {} '.format('r')\n TYPE = '-t {} '.format(what[0])\n OFFSET = '-o {} '.format(what[1]) # NOTE: 0-based\n\n # NOTE: kwargs\n if 'count' in kwargs:\n count = kwargs['count']\n COUNT = '--count {} '.format(kwargs['count'])\n else:\n count = 1\n COUNT = '--count {} '.format(1)\n\n cmd = shlex.split(\n self._client_cmd +\n IP +\n PORT +\n MODE +\n TYPE +\n OFFSET +\n COUNT\n )\n # print 'DEBUG modbus_receive cmd shlex list: ', cmd\n\n try:\n client = subprocess.Popen(cmd, shell=False,\n stdout=subprocess.PIPE)\n\n # client.communicate is blocking\n raw_out = client.communicate()\n # print 'DEBUG modbus _receive raw_out: ', raw_out\n\n # value is stored as first tuple element\n # between a pair of square brackets\n raw_string = raw_out[0].strip()\n\n # NOTE: registers store int\n if what[0] == 'HR' or what[0] == 'IR':\n\n # 
NOTE: single read returns an int\n if count == 1:\n out = int(raw_string[1:-1])\n\n # NOTE: multiple reads returns a list of ints\n else:\n out = []\n hrs = raw_string[1:-1].split(',')\n for hr in hrs:\n out.append(int(hr))\n if len(out) != count:\n raise ValueError('Wrong number of values in the response.')\n\n # NOTE: coils and discrete inputs store 8 bools\n elif what[0] == 'CO' or what[0] == 'DI':\n # print 'DEBUG modbus _receive bools: ', bools\n\n # NOTE: pymodbus always returns at least a list of 8 bools\n bools = raw_string[1:-1].split(',')\n\n # NOTE: single read returns a bool\n if count == 1:\n if bools[0] == 'False':\n out = False\n elif bools[0] == 'True':\n out = True\n else:\n raise TypeError('CO or DI values must be bool.')\n\n # NOTE: multiple reads returns a list of bools\n else:\n out = []\n i = 0\n for b in bools:\n if i >= count:\n break\n elif b.strip() == 'False':\n out.append(False)\n elif b.strip() == 'True':\n out.append(True)\n else:\n raise TypeError('CO or DI values must be bool.')\n i += 1\n\n return out\n\n except Exception as error:\n print('ERROR modbus _receive: ', error)", "def receive(self):\n\t\ttry:\n\t\t\tdata = self.protocol[0].readTcpSocket(self.socket)\n\t\t\tif len(self.protocol) > 1:\n\t\t\t\tfor protocol in self.protocol[1:]:\n\t\t\t\t\tdata = protocol.decode(data)\n\t\t\t\n\t\t\treturn data\n\t\texcept protocols.ProtocolIsNotRespectedError as e:\n\t\t\traise ClientIsNotFinishedSendingError(\"The client is not finished sending it's message.\", e)\n\t\texcept protocols.DataCouldNotBeReadError as e:\n\t\t\traise SocketError(\"Client socket died\", e)", "def recv(self):\n\n return self._default_receiver()", "def recv(recv_buffer):\n\n request_len = SentmanRequest.MESSAGE_LEN\n\n msgs = []\n while True:\n if len(recv_buffer) < request_len:\n return (msgs, recv_buffer)\n\n first = recv_buffer[:request_len]\n recv_buffer = recv_buffer[request_len:]\n\n (version, op_type, msg_len) = struct.unpack(\n SentmanRequest.FORMAT, first)\n\n SentmanRequest.LOGGER.debug(\n 'version %u; op_type %u; msg_len: %u' %\n (version, op_type, msg_len))\n\n # Now that we have the entire message, we can decide whether it's\n # valid.\n #\n if version != SentmanRequest.PROTOCOL_VERSION:\n err_str = 'bad version (%u)' % (version,)\n SentmanRequest.LOGGER.warn(err_str)\n raise SentmanRequestUnpackError(err_str)\n\n if not op_type in SentmanRequest.MSG_TYPES:\n err_str = 'bad op_type (%u)' % (op_type,)\n SentmanRequest.LOGGER.warn(err_str)\n raise SentmanRequestUnpackError(err_str)\n\n if msg_len != SentmanRequest.MESSAGE_LEN:\n err_str = 'bad msg_len (%u)' % (msg_len,)\n SentmanRequest.LOGGER.warn(err_str)\n raise SentmanRequestUnpackError(err_str)\n\n # The message is complete and valid. 
Append it to the list of\n # messages we've received.\n #\n msgs.append(SentmanRequest(op_type))", "def receive_workers_output(node_request_map, results_list, free_nodes, command, idle_nodes):\n\n if dist.get_backend() == \"nccl\": # Async\n for node, req in node_request_map:\n if req.is_completed():\n result = build_metrics_dict(node) if command == COMMAND_TESTVAL else build_grads_dict(node)\n results_list.append(result)\n free_nodes.append(node)\n node_request_map.remove((node,req))\n print_rank(f\"Finished releasing the nodes {free_nodes}\", loglevel=logging.DEBUG)\n else: # Sync\n print_rank(f\"Waiting for a workers\", loglevel=logging.DEBUG)\n gather_objects = [(None,None,None) for i in range(size())]\n output = [None for _ in gather_objects]\n dist.all_gather_object(output, gather_objects[rank()])\n print_rank(f\" All workers have finished ... taking the remaining clients {len(output)}\", loglevel=logging.DEBUG)\n output = [e for i,e in enumerate(output) if i not in idle_nodes ] # Cleanup for idle workers\n results_list = results_list + output[1:]\n free_nodes = list(range(1, size()))\n \n return node_request_map, results_list, free_nodes", "def receive_msg_from_players():\n msg = []\n for i, player in enumerate(players):\n single_msg = player.player_socket.recv(32768)\n print \"received %s\" % single_msg\n msg.append(single_msg)\n return msg", "def alltoall_recvbuffer(\n self, obj: torch.Tensor\n ) -> List[Union[MPI.memory, Tuple[int, int], MPI.Datatype]]:\n mpi_type, _ = self.__mpi_type_mappings[obj.dtype], torch.numel(obj)\n\n nproc = self.size\n shape = obj.shape[1:]\n strides = [1] * len(shape)\n strides[0] = obj.stride()[-1]\n strides = strides[::-1]\n offsets = [obj.element_size() * stride for stride in obj.stride()[:-1]]\n\n # Step 1: Wrap along axes > 0 (all axes except recv_axis)\n for i in range(len(shape) - 1, -1, -1):\n mpi_type = mpi_type.Create_vector(shape[i], 1, strides[i]).Create_resized(0, offsets[i])\n mpi_type.Commit()\n\n # Step 2: Receive blocks along the recv axis\n # Prepare recvcount, senddispls and sendtypes for alltoallw\n recvcount = np.full((nproc,), obj.shape[0] // nproc)\n recvcount[: obj.shape[0] % nproc] += 1\n # size/extent of mpitype = offsets[0]\n tmp_displs = [0] * nproc\n tmp_displs[1:] = np.cumsum(recvcount[:-1])\n recvdispls = [offsets[0] * d for d in tmp_displs]\n recvtypes = [mpi_type] * nproc\n\n return self.as_mpi_memory(obj), (recvcount, recvdispls), recvtypes" ]
[ "0.62762433", "0.61129236", "0.56425345", "0.56387585", "0.5613972", "0.55644256", "0.5537771", "0.55095595", "0.54262745", "0.5399296", "0.53790534", "0.5372627", "0.53541785", "0.53327423", "0.53177756", "0.5311333", "0.52756435", "0.5241029", "0.5229062", "0.5167441", "0.5158276", "0.5141062", "0.51304454", "0.51268834", "0.512013", "0.51013744", "0.5096989", "0.5095809", "0.5082545", "0.5074766" ]
0.76762754
0
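_recv() above is the mirror image of _send(): only the workers that were actually sent a package are polled, workers that already failed on send are skipped outright, and every failure turns into a None placeholder so the result tuple keeps its positional meaning. A small sketch of that collect-with-placeholders pattern follows; collect and the caller-supplied recv(worker) read function are assumptions for illustration.

def collect(workers, sent_count, failed_on_send, recv):
    # Gather results in worker order, substituting None for any failure.
    # failed_on_send is the set of workers that errored during the send phase.
    results = []
    errors = {}
    for worker in workers[:sent_count]:
        if worker in failed_on_send:
            results.append(None)
            continue
        try:
            results.append(recv(worker))
        except Exception as err:   # remote error reported back by the slave
            errors.setdefault(repr(err), []).append(worker)
            results.append(None)
    return tuple(results), errors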
_send_recv(package, addendum=None) -> results Description: Send a message to each worker in turn and then immediately begin listening for the results. All sends are done before listening for results from any of the slave processes. See _send and _recv for more information. If an error occurs during either the send or recv phase, the handle_error() method is called. If no errors occur, a tuple containing the results from each slave is returned. If an error does occur and an exception is raised, it is still possible to retrieve the set of results that executed correctly from the last_results attribute.
def _send_recv(self,package,addendum=None):
    self._send(package,addendum)
    self.last_results = self._recv()
    if(len(self.send_exc) or len(self.recv_exc)):
        self.handle_error()
    return self.last_results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _recv(self):\n\n self.had_recv_error = []\n self.recv_exc = {}\n results = []\n import sys;\n #only listen on workers involved in calculation.\n for worker in self.workers[:self.Nsent]:\n if worker in self.had_send_error:\n results.append(None)\n else:\n try:\n sys.stdout.flush()\n results.append(worker.recv())\n except sync_cluster.RemoteError:\n import sys\n err = sys.exc_info()[1]\n # Force the err msg (err[1]) to be a string.\n # This dimishes info content, but makes sure\n # that the sames errors are hashed correctly\n # in the dictionary. (does it?)\n err_type,err_msg, err_traceback = err\n err = err_type,str(err_msg), err_traceback\n self.had_recv_error.append(worker)\n try: self.recv_exc[err].append(worker.id)\n except: self.recv_exc[err] = [worker.id]\n results.append(None)\n except sync_cluster.RemoteCrashError:\n # Gotta be more intelligent here...\n msg = 'Error! Remote worker %d appears to have crashed.' \\\n % worker.id\n raise sync_cluster.RemoteCrashError,msg\n # else handle other errors\n #print\n return tuple(results)", "def _send(self,package,addendum=None):\n if addendum:\n N = len(addendum)\n assert(N <= len(self.workers))\n else:\n N = len(self.workers)\n self.send_exc = {}\n self.had_send_error = []\n for i in range(N):\n try:\n if not addendum:\n self.workers[i].send(package)\n else:\n self.workers[i].send(package,addendum[i])\n except socket.error, msg:\n import sys\n err_type, err_msg = str,sys.exc_info()[:2]\n self.had_send_error.append(self.workers[i])\n key = (err_type,err_msg)\n try:\n self.send_exc[key].append(self.workers[i].id)\n except:\n self.send_exc[key] = [self.workers[i].id]\n # else - handle other errors?\n self.Nsent = N", "def test_rpcSendRecv(self):\n cli_send = self.client_msg\n srv_send = self.server_msg\n # Send message to driver\n flag = self.client_comm.send(cli_send)\n assert(flag)\n flag, msg_recv = self.server_comm.recv(timeout=self.timeout)\n assert(flag)\n nt.assert_equal(msg_recv, srv_send)\n # Send response back to instance\n flag = self.server_comm.send(srv_send)\n assert(flag)\n # self.driver.sleep(1)\n flag, msg_recv = self.client_comm.recv(timeout=self.timeout)\n assert(flag)\n nt.assert_equal(msg_recv, cli_send)", "def sendReceive(s, port_num, question, server_list, root_servers):\n sock = s\n port = port_num\n query = question\n new_server_list = []\n for ip_address in server_list:\n try:\n DNS_IP = ip_address\n sock.sendto(query, (DNS_IP, port))\n print(\"Querying server\", ip_address, file=open('iter.txt', 'a+'))\n print(\"Querying server\", ip_address, file=open('tmp.txt', 'a+'))\n message = sock.recvfrom(1024)\n new_server_list, flag = decodeMes(message)\n\n # If an answer is received, return the IP Address associated with the query (base case)\n # If a list of IP addresses is returned, recursively call sendReceive using new IP addresses\n # If the type of request is MX, use the returned mail exchange as the new query\n # If the new_server_list is a list of Authoritative name servers\n # get the IP addresses of the returned name servers and send the query to the new IP Addresses\n if flag == 2:\n return new_server_list[0]\n\n elif flag == 1:\n return sendReceive(sock, port, query, new_server_list, root_servers)\n\n elif flag == 3:\n print(\"SOA: No such domain name\", file=open('iter.txt', 'a+'))\n print(\"SOA: No such domain name\", file=open('tmp.txt', 'a+'))\n exit(1)\n\n elif flag == 4:\n specs = []\n specs.append(new_server_list[0])\n query3 = makeQuery(specs)\n return sendReceive(sock, port, query3, root_servers, 
root_servers)\n\n else:\n new_server_list2 = []\n for i in range(len(new_server_list)):\n specs = []\n specs.append(new_server_list[i])\n query2 = makeQuery(specs)\n\n new_server = sendReceive(sock, port, query2, root_servers, root_servers)\n new_server_list2.append(new_server)\n\n return sendReceive(sock, port, query, new_server_list2, root_servers)\n\n except socket.timeout as e:\n print('Exception:' + str(e), file=open('iter.txt', 'a+'))\n print('Exception:' + str(e), file=open('tmp.txt', 'a+'))\n except socket.gaierror:\n pass", "def _recv(self) -> List[np.ndarray]:", "def recv(self, unitindices=None): # ,discard = None, keep = None):\n if unitindices is None:\n unitindices = xrange(self.workers)\n result = []\n i = 0\n# discarded = []\n# kept = []\n# log_debug((unitindices, self.parent_conns))\n while i < len(unitindices):\n ui = unitindices[i]\n r = self.parent_conns[ui].recv()\n# log_debug('recv <%s, %s>' % (str(r), str(type(r))))\n# if (discard is not None) and (r == discard):\n# discarded.append(ui)\n# continue\n# if (keep is not None) and (r != keep):\n# kept.append(ui)\n# continue\n result.append(r)\n i += 1\n# if discard is not None:\n# return result, discarded\n# elif kept is not None:\n# return result, kept\n# else:\n return result", "def _recv_step(self):\n return self._step_out([conn.recv() for conn in self._conns])", "def notest_send_recv_network(self) :\n\n # Demarrage du serveur\n symbol = 'S' \n oProtocol_server = Protocol(symbol,mode=\"server\",debug=self.debug)\n # tcpHandlerMethods est definie dans le module test.util.test_util\n tcpHandlerMethods[\"process\"] = test_Protocol_process\n oProtocol_server.handlerRegister(tcpHandlerMethods)\n oProtocol_server.start()\n \n # Attente de l'etat actif du serveur.\n while oProtocol_server.isActivated is not True :\n time.sleep(1)\n\n # Toutes les commandes du protocole sont testees\n symbol = 'X'\n oProtocol_client = Protocol(symbol,mode=\"client\", debug=self.debug)\n \n status = True\n # Les commandes entrees par le joueur sont simulees \n for index, command in enumerate(self.commandList) :\n command = self.commandList[index]\n message = oProtocol_client.send(command)\n # print(\"\\n*** Received message= {}\".format(message))\n status = status and message['status']\n if message['status'] is False :\n print(\"\\n*** test_send_recv_network() : {}\\n\".format(message['notify']))\n\n # Le serveur est arrete\n oProtocol_server.shutdown()\n\n # Attend la terminaison des threads\n oProtocol_server.join()\n \n self.assertTrue( status )", "def test_send_recv(self):\n channel_layer.send(\"sr_test\", {\"value\": \"blue\"})\n channel_layer.send(\"sr_test\", {\"value\": \"green\"})\n channel_layer.send(\"sr_test2\", {\"value\": \"red\"})\n # Get just one first\n channel, message = channel_layer.receive_many([\"sr_test\"])\n self.assertEqual(channel, \"sr_test\")\n self.assertEqual(message, {\"value\": \"blue\"})\n # And the second\n channel, message = channel_layer.receive_many([\"sr_test\"])\n self.assertEqual(channel, \"sr_test\")\n self.assertEqual(message, {\"value\": \"green\"})\n # And the other channel with multi select\n channel, message = channel_layer.receive_many([\"sr_test\", \"sr_test2\"])\n self.assertEqual(channel, \"sr_test2\")\n self.assertEqual(message, {\"value\": \"red\"})", "def _chunk_send(self, metrics):\n messages = self._create_messages(metrics)\n request = self._create_request(messages)\n packet = self._create_packet(request)\n\n response = None\n\n for host_addr in self.zabbix_uri:\n logger.debug('Sending data 
to %s', host_addr)\n\n # create socket object\n connection_ = socket.socket()\n if self.socket_wrapper:\n connection = self.socket_wrapper(connection_)\n else:\n connection = connection_\n\n connection.settimeout(self.timeout)\n\n try:\n # server and port must be tuple\n connection.connect(host_addr)\n connection.sendall(packet)\n except socket.timeout:\n logger.error('Sending failed: Connection to %s timed out after %d seconds', host_addr, self.timeout)\n connection.close()\n continue\n except socket.error as err:\n # In case of error we should close connection, otherwise\n # we will close it after data will be received.\n logger.warning('Sending failed: %s', getattr(err, 'msg', str(err)))\n connection.close()\n continue\n\n try:\n response = self._get_response(connection)\n\n logger.debug('%s response: %s', host_addr, response)\n except socket.error as err:\n logger.error('Sending failed: %s', getattr(err, 'msg', str(err)))\n raise socket.error(response)\n\n break\n\n if response is None:\n logger.error('Sending failed: no servers available')\n raise socket.error()\n\n if response and (\"response\" not in response or response.get('response') != 'success'):\n logger.debug('Response error: %s}', response)\n raise socket.error(response)\n\n return response", "def process_by_client(service, results):\n\n client = Producer(\n queue_host=Config.get('queue', 'host'),\n queue_port=Config.getint('queue', 'port'),\n queue_name=service.iden)\n for result in results:\n client.send(result)\n for obj in client.receive():\n yield json.dumps(obj)\n if 'error' in obj:\n # abort as soon as there is an error\n return", "def send_rpc_result(req, result):", "def _wait_for_results(self) -> RemoteCallableResult:\n if (\n self.subscriber is None or\n self.started is None or\n self.process is None\n ):\n raise dbt.exceptions.InternalException(\n '_wait_for_results() called before handle()'\n )\n\n try:\n msg = self.subscriber.dispatch_until_exit(\n started=self.started,\n timeout=self.timeout,\n )\n except dbt.exceptions.Exception as exc:\n raise dbt_error(exc)\n except Exception as exc:\n raise server_error(exc)\n if isinstance(msg, QueueErrorMessage):\n raise RPCException.from_error(msg.error)\n elif isinstance(msg, QueueTimeoutMessage):\n if not self._single_threaded:\n self.process.terminate()\n raise timeout_error(self.timeout)\n elif isinstance(msg, QueueResultMessage):\n return msg.result\n else:\n raise dbt.exceptions.InternalException(\n 'Invalid message type {} (result={})'.format(msg)\n )", "def __reduce_like(\n self,\n func: Callable,\n sendbuf: Union[DNDarray, torch.Tensor, Any],\n recvbuf: Union[DNDarray, torch.Tensor, Any],\n *args,\n **kwargs,\n ) -> Tuple[Optional[DNDarray, torch.Tensor]]:\n sbuf = None\n rbuf = None\n buf = None\n # unpack the send buffer if it is a HeAT tensor\n if isinstance(sendbuf, DNDarray):\n sendbuf = sendbuf.larray\n # unpack the receive buffer if it is a HeAT tensor\n if isinstance(recvbuf, DNDarray):\n recvbuf = recvbuf.larray\n\n # harmonize the input and output buffers\n # MPI requires send and receive buffers to be of same type and length. 
If the torch tensors are either not both\n # contiguous or differently strided, they have to be made matching (if possible) first.\n if isinstance(sendbuf, torch.Tensor):\n # convert the send buffer to a pointer, number of elements and type are identical to the receive buffer\n dummy = (\n sendbuf.contiguous()\n ) # make a contiguous copy and reassign the storage, old will be collected\n sendbuf.set_(\n dummy.storage(), dummy.storage_offset(), size=dummy.shape, stride=dummy.stride()\n )\n sbuf = sendbuf if CUDA_AWARE_MPI else sendbuf.cpu()\n sendbuf = self.as_buffer(sbuf)\n if isinstance(recvbuf, torch.Tensor):\n buf = recvbuf\n # nothing matches, the buffers have to be made contiguous\n dummy = recvbuf.contiguous()\n recvbuf.set_(\n dummy.storage(), dummy.storage_offset(), size=dummy.shape, stride=dummy.stride()\n )\n rbuf = recvbuf if CUDA_AWARE_MPI else recvbuf.cpu()\n if sendbuf is MPI.IN_PLACE:\n recvbuf = self.as_buffer(rbuf)\n else:\n recvbuf = (self.as_mpi_memory(rbuf), sendbuf[1], sendbuf[2])\n\n # perform the actual reduction operation\n return func(sendbuf, recvbuf, *args, **kwargs), sbuf, rbuf, buf", "def server_side_call(self, msg_sent):\n flag, msg_recv = self.server_comm.recv(timeout=self.timeout)\n assert(flag)\n nt.assert_equal(msg_recv, msg_sent)\n flag = self.server_comm.send(msg_sent)\n assert(flag)", "def run(self):\n super().run()\n echo = self.echo\n local = self.local\n remote = self.remote\n transport = Transceiver(local)\n transport.set_timeout(0.5)\n self.__result: list[Entry] = []\n\n while True:\n try:\n packet = transport.recv(None)\n params = frame.deserialize(packet)\n seq = params[\"seq\"]\n total = params[\"total\"]\n t_master = params[\"t_master\"]\n infinite = params[\"infinite\"]\n payload = params[\"payload\"]\n\n t_slave = time.time()\n if echo:\n data_send = frame.serialize(infinite, seq, total, t_master, t_slave, payload)\n transport.send(remote, data_send)\n t_ul = (t_slave - t_master) * 1000\n self.add_result(Entry(seq, total, t_ul, 0))\n print(f\"seq = {seq}, ul = {t_ul:.2f} ms, payload: {hex_str(payload)}\")\n if frame.is_end(params):\n print(f\"receive last packet!\")\n break\n except socket.timeout:\n continue\n except KeyboardInterrupt:\n break", "def Allreduce(\n self,\n sendbuf: Union[DNDarray, torch.Tensor, Any],\n recvbuf: Union[DNDarray, torch.Tensor, Any],\n op: MPI.Op = MPI.SUM,\n ):\n ret, sbuf, rbuf, buf = self.__reduce_like(self.handle.Allreduce, sendbuf, recvbuf, op)\n if buf is not None and isinstance(buf, torch.Tensor) and buf.is_cuda and not CUDA_AWARE_MPI:\n buf.copy_(rbuf)\n return ret", "def receive_workers_output(node_request_map, results_list, free_nodes, command, idle_nodes):\n\n if dist.get_backend() == \"nccl\": # Async\n for node, req in node_request_map:\n if req.is_completed():\n result = build_metrics_dict(node) if command == COMMAND_TESTVAL else build_grads_dict(node)\n results_list.append(result)\n free_nodes.append(node)\n node_request_map.remove((node,req))\n print_rank(f\"Finished releasing the nodes {free_nodes}\", loglevel=logging.DEBUG)\n else: # Sync\n print_rank(f\"Waiting for a workers\", loglevel=logging.DEBUG)\n gather_objects = [(None,None,None) for i in range(size())]\n output = [None for _ in gather_objects]\n dist.all_gather_object(output, gather_objects[rank()])\n print_rank(f\" All workers have finished ... 
taking the remaining clients {len(output)}\", loglevel=logging.DEBUG)\n output = [e for i,e in enumerate(output) if i not in idle_nodes ] # Cleanup for idle workers\n results_list = results_list + output[1:]\n free_nodes = list(range(1, size()))\n \n return node_request_map, results_list, free_nodes", "def send(self,data,address,isBinary=False):\n if DEBUG: print \"In class Server, function, send\"\n #dest = self.resolve_address(address)\n peer_to_send_message = None\n #or uid in self.clientManager.peer_servers:\n #pdb.set_trace()\n #peer_server = self.clientManager.connections[uid]\n #if peer_server.ip == address:\n #peer_to_send_message = peer_server\n #pdb.set_trace()\n return self.connection_manager.send(data,address,isBinary)\n \n #for client in self.clientManager.connections.keys():\n #pdb.set_trace()\n #self.clientManager.connections[client].sendMessage(\"------From RBAnalysis---Hi\")", "def _r_send_result(self, response, protocol):\n #print(\"Send result: %s\" % result)\n protocol.send_message(response)", "def send(self, data, unitindices=None, confirm=False):\n\n # TEST\n confirm = False\n\n if unitindices is None:\n unitindices = xrange(self.workers)\n if not confirm:\n [self.parent_conns[i].send(data) for i in unitindices]\n else:\n while True:\n [self.parent_conns[i].send(data) for i in unitindices]\n # if send OK, terminate, otherwise, send again to the units\n # which returned False\n unitindices = nonzero(array([not self.parent_conns[i].recv()\n for i in unitindices]))[0]\n if len(unitindices) == 0:\n break\n log_debug(\"pickling error, sending data again\")", "def runSlaveRun():\n\n np.set_printoptions(linewidth=1000)\n function = None\n options = None\n\n # print(\"Process {}/{} reporting for duty!\".format(rank, size))\n\n function = comm.bcast(function, root=0)\n arguments = comm.scatter(options, root=0)\n\n results = function(*arguments)\n\n comm.Barrier()\n comm.gather(results, root=0)\n comm.Disconnect()", "def run(self, session):\n rpc = None\n if session['client']['event'] == 'CONNECTION_REQUEST':\n self.add_nb_queue_to_session_queue(session)\n\n while rpc is None and session['queue']:\n try:\n # Loop through queue until there is an RPC to send, or until\n # there are no more RPCs queued, or until an error occurs\n session['rpc']['method'] = session['queue'].pop(0)\n rpc = session['rpc']['method'].send_request(session)\n except ClientMethodException:\n # Failed to send this RPC, move on to the next\n LOG.debug(\"Error during preparation of client method: %s\" % str(session['rpc']['method']))\n continue\n except Exception:\n traceback.print_exc()\n LOG.debug(\"Unexpected error during preparation of client method: %s\" % str(session['rpc']['method']))\n return RPCS.SendingRpc, None\n\n if rpc is not None:\n # RPC ready: Send it and ExpectResponse\n return RPCS.ExpectResponse, rpc\n else:\n # If there are no (more) RPCs to send, log ok\n # and send done, indicating communication is complete\n session['log'] = {'rc': 'ok', 'msg': ''}\n session['db'].clear_dirtyflag(session['client']['cid'])\n return RPCS.Listening, {'method': 'done'}", "def __call__(self):\n dv = None\n #Push as many queued calls as the self.max_batch_size and the max number of paralel HTTPS sessions allow for.\n while self.active_call_count < self.parallel and self.queue:\n #Get a chunk of entries from the command queue so we can make a batch.\n subqueue = self.queue[:self.max_batch_size]\n self.queue = self.queue[self.max_batch_size:]\n #Send a single batch to the currently selected RPC node.\n dv = 
self._process_batch(subqueue)\n #If there is nothing left to do, there is nothing left to do\n if not self.queue and self.active_call_count == 0:\n self.log.error(\"Queue is empty and no active HTTPS-POSTs remaining.\")\n if self.stop_when_empty:\n #On request, stop reactor when queue empty while no active queries remain.\n self.reactor.stop() \n return dv", "def _process_results(self, *args, **kwargs): # noqa: E501\n # Lock before processing results to prevent conflicts\n if not self._acquire_pr_lock():\n return\n\n # Get the future instance\n future = self.future\n\n # Skip if no Future\n if not future:\n return\n\n # Skip processing results if forget\n if self.forget:\n # Clean up client\n self.client.close()\n return\n\n try:\n # Get results using the client\n result = self.client.gather(future)\n except Exception as e:\n # Tell scheduler to stop sending updates about this key\n self.client.set_metadata(self.key, False)\n # Clean up client\n self.client.close()\n result = e\n log.warning(\n 'Exception encountered when retrieving results: \"{}\"'.format(str(e))\n )\n\n # Tell scheduler to stop sending updates about this key\n self.client.set_metadata(self.key, False)\n\n # Handle custom process results function\n if self.process_results_function:\n # Get the process_results_function in TethysJob and call it with the result retrived\n try:\n result = self.process_results_function(result)\n except Exception as e:\n log.exception(\"Process Results Function Error\")\n self._status = \"ERR\"\n result = str(e)\n\n # Serialize the result\n try:\n self.result = result\n except Exception:\n log.exception(\"Results Serialization Error\")\n self._status = \"ERR\"\n else:\n self._status = \"COM\" if self._status != \"ERR\" else \"ERR\"\n\n # Erase the key to avoid problem with dask recycle key\n self.key = \"\"\n\n # save the results or status in the database\n self.save()\n\n # Clean up client\n self.client.close()\n\n if client_fire_forget:\n client_fire_forget.close()\n\n self._release_pr_lock()", "def _process_messages(self):\r\n \r\n self._print(\"%s: Starting _process messages, looking out for special messages:\" \\\r\n % (self._clientnr))\r\n \r\n # Set some expected messages.\r\n expected = {}\r\n expected['clientconfirm'] = cb.CLIENTCONFIRM[:cb.CLIENTCONFIRM.find('_')]\r\n expected['waitwhat'] = cb.WAITWHATCLIENT[:cb.WAITWHATCLIENT.find('_')]\r\n \r\n for key in expected.keys():\r\n self._print(\"%s: Special message '%s': '%s'\" % \\\r\n (self._clientnr, key, expected[key]))\r\n \r\n # Run idefinitively\r\n while True:\r\n \r\n # Get new incoming commands.\r\n cmds = self.udp.getCommands()\r\n self._print(\"%s: Found %d new UDP commands.\" % \\\r\n (self._clientnr, len(cmds)))\r\n # Add new commands to the queue.\r\n for c in cmds:\r\n # Parse the message.\r\n target, message, clienttime = c.text.split('|')\r\n self._print(\"%s: Found message (%s to %s, t=%s) '%s'\" % \\\r\n (self._clientnr, c.ip, target, clienttime, message))\r\n # Only process messages from the server.\r\n if c.ip == self._servernr:\r\n # Check if this is a client confirmation message.\r\n if expected['clientconfirm'] in message:\r\n self._print(\"%s: Adding message '%s' (t=%s) to the incoming queue\" \\\r\n % (self._clientnr, message, clienttime))\r\n self._incominglock.acquire()\r\n self._incoming.append(message)\r\n self._incominglock.release()\r\n # Only process the messages that were directed at this client.\r\n elif target in ['None', str(self._clientnr)]:\r\n # Check if this is a confused message to find out 
what\r\n # the client is waiting for.\r\n if expected['waitwhat'] in message:\r\n self._print(\"%s: Received '%s' from server\" % \\\r\n (self._clientnr, message))\r\n # Parse the waitwhat message, which looks like this:\r\n # 'waitwhatclient_expected=%s'\r\n msg, xpctd = message.split('_')\r\n xpctd = xpctd[xpctd.find('=')+1:]\r\n # Re-send the last version of the expected message.\r\n if xpctd in self._lastmessage.keys():\r\n self._outgoing.append(self._lastmessage[xpctd])\r\n self._print(\"%s: Resending the last version of expected message '%s': '%s'\" % \\\r\n (self._clientnr, xpctd, self._lastmessage[xpctd]))\r\n else:\r\n self._print(\"%s: Do not have a last version of expected message '%s'\" % \\\r\n (self._clientnr, xpctd))\r\n else:\r\n # Add the message to the queue.\r\n self._print(\"%s: Adding message '%s' (t=%s) to the incoming queue\" \\\r\n % (self._clientnr, message, clienttime))\r\n self._incominglock.acquire()\r\n self._incoming.append(message)\r\n self._incominglock.release()\r\n # Chuck a message out if the queue is getting too long.\r\n if len(self._incoming) > self._maxincominglen:\r\n self._incominglock.acquire()\r\n delmsg = self._incoming.pop(0)\r\n self._incominglock.release()\r\n self._print(\"%s: Removed message '%s' from the incoming queue\" \\\r\n % (self._clientnr, delmsg))\r\n else:\r\n self._print(\"%s: Ignoring message '%s', as it wasn't for me (%s)\" \\\r\n % (self._clientnr, message, self._clientnr))\r\n else:\r\n self._print(\"%s: Ignoring message '%s', as it wasn't from the server (%s)\" \\\r\n % (self._clientnr, message, self._servernr))\r\n \r\n # Process outgoing commands.\r\n while len(self._outgoing) > 0:\r\n # Send a message to the server.\r\n self._outgoinglock.acquire()\r\n message = self._outgoing.pop(0)\r\n self._outgoinglock.release()\r\n self._print(\"%s: Sending '%s' to %s\" % \\\r\n (self._clientnr, message, self._servernr))\r\n msg = 'cmd,%s|%s' % (self._servernr, message)\r\n self.udp.sendWithTimeStamp(msg, '|')\r\n for i in range(self._message_reps):\r\n self.udp.sendWithTimeStamp(msg, '|')\r\n # Store the message in the 'last sent' dict.\r\n if '_' in message:\r\n m = message[:message.find('_')]\r\n else:\r\n m = message\r\n self._lastmessage[m] = message", "def run(self):\n for data in self.__iter_data():\n for client in self.clients:\n gevent.spawn(self.send, client, data)", "def message_test_case(message_list, expected_results, info_string):\n global passed_count, failed_count\n\n print(\"\\n\\nTEST: {}\".format(info_string))\n print(\"\\nMessage sequence: {}\".format(message_list))\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.connect((HOST, PORT))\n\n for message in message_list:\n sock.send(message)\n\n for expected in expected_results:\n response = sock.recv(4) # standard length of each message\n if response != expected:\n print(\"\\n\\tResult: FAILED.\")\n print(\"Expected server response {}, received {}.\".format(\n expected, response))\n failed_count += 1\n return\n\n print(\"\\n\\tResult: PASSED.\\n\")\n passed_count += 1", "def Reduce(\n self,\n sendbuf: Union[DNDarray, torch.Tensor, Any],\n recvbuf: Union[DNDarray, torch.Tensor, Any],\n op: MPI.Op = MPI.SUM,\n root: int = 0,\n ):\n ret, sbuf, rbuf, buf = self.__reduce_like(self.handle.Reduce, sendbuf, recvbuf, op, root)\n if buf is not None and isinstance(buf, torch.Tensor) and buf.is_cuda and not CUDA_AWARE_MPI:\n buf.copy_(rbuf)\n return ret", "def run_test(self, sender_addr, 
receiver_addr, queue_name_fragment, jms_message_type, test_values, msg_hdrs,\n msg_props, send_shim, receive_shim, timeout):\n queue_name = 'qit.jms_hdrs_props_test.%s' % queue_name_fragment\n\n # First create a map containing the numbers of expected mesasges for each JMS message type\n num_test_values_map = {}\n if test_values: # len > 0\n for index in test_values.keys():\n num_test_values_map[index] = len(test_values[index])\n # Create a map of flags which indicate to the receiver the details of some of the messages so that it can\n # be correctly handled (as these require some prior knowledge)\n flags_map = {}\n if msg_hdrs is not None:\n if 'JMS_CORRELATIONID_HEADER' in msg_hdrs and 'bytes' in msg_hdrs['JMS_CORRELATIONID_HEADER']:\n flags_map['JMS_CORRELATIONID_AS_BYTES'] = True\n if msg_props is not None:\n if 'JMS_REPLYTO_HEADER' in msg_hdrs and 'topic' in msg_hdrs['JMS_REPLYTO_HEADER']:\n flags_map['JMS_REPLYTO_AS_TOPIC'] = True\n if send_shim.JMS_CLIENT:\n flags_map['JMS_CLIENT_CHECKS'] = True\n # Start the receiver shim\n receiver = receive_shim.create_receiver(receiver_addr, queue_name, jms_message_type,\n dumps([num_test_values_map, flags_map]))\n\n # Start the send shim\n sender = send_shim.create_sender(sender_addr, queue_name, jms_message_type,\n dumps([test_values, msg_hdrs, msg_props]))\n\n # Wait for sender, process return string\n try:\n send_obj = sender.wait_for_completion(timeout)\n except (KeyboardInterrupt, InteropTestTimeout):\n receiver.send_signal(signal.SIGINT)\n raise\n if send_obj is not None:\n if isinstance(send_obj, str):\n if send_obj: # len > 0\n receiver.send_signal(signal.SIGINT)\n raise InteropTestError('Send shim \\'%s\\':\\n%s' % (send_shim.NAME, send_obj))\n else:\n receiver.send_signal(signal.SIGINT)\n raise InteropTestError('Send shim \\'%s\\':\\n%s' % (send_shim.NAME, send_obj))\n\n # Wait for receiver, process return string\n receive_obj = receiver.wait_for_completion(timeout)\n if isinstance(receive_obj, tuple):\n if len(receive_obj) == 2:\n return_jms_message_type, return_list = receive_obj\n if len(return_list) == 3:\n return_test_values = return_list[0]\n return_msg_hdrs = return_list[1]\n return_msg_props = return_list[2]\n self.assertEqual(return_jms_message_type, jms_message_type,\n msg='JMS message type error:\\n\\n sent:%s\\n\\n received:%s' % \\\n (jms_message_type, return_jms_message_type))\n self.assertEqual(return_test_values, test_values,\n msg='JMS message body error:\\n\\n sent:%s\\n\\n received:%s' % \\\n (test_values, return_test_values))\n self.assertEqual(return_msg_hdrs, msg_hdrs,\n msg='JMS message headers error:\\n\\n sent:%s\\n\\n received:%s' % \\\n (msg_hdrs, return_msg_hdrs))\n self.assertEqual(return_msg_props, msg_props,\n msg='JMS message properties error:\\n\\n sent:%s\\n\\n received:%s' % \\\n (msg_props, return_msg_props))\n else:\n raise InteropTestError('Receive shim \\'%s\\':\\n' \\\n 'Return value list needs 3 items, found %d items: %s' % \\\n (receive_shim.NAME, len(return_list), str(return_list)))\n else:\n raise InteropTestError('Receive shim \\'%s\\':\\n%s' % (receive_shim.NAME, receive_obj))\n else:\n raise InteropTestError('Receive shim \\'%s\\':\\n%s' % (receive_shim.NAME, receive_obj))" ]
[ "0.70221955", "0.6246846", "0.5740144", "0.5653335", "0.5544397", "0.54993975", "0.5496557", "0.54669386", "0.5269493", "0.525346", "0.52460265", "0.5244592", "0.5143056", "0.51325536", "0.51310486", "0.51261157", "0.5124277", "0.5101583", "0.5087944", "0.50807655", "0.5049678", "0.5046716", "0.50465596", "0.5042607", "0.5029889", "0.50210387", "0.502036", "0.50123554", "0.50051165", "0.49985608" ]
0.7980371
0
handle_error() make sense of send and recv errors Description Error handling attempts to examine the errors that occur during remote execution and report them in the least verbose manner. If the same error occurs on all slaves, it tries to report it only once. Otherwise it reports all the errors that occur on slaves and prints each slave's traceback. Currently error handling is pretty simplistic. It would be nice if socket errors were treated as severe, with the offending slave either restarted or marked as dead and its work redistributed among the other workers.
def handle_error(self): # perhaps do some nifty stuff here to # mark bad workers, try to restart, etc. msg = '' Nworkers = len(self.workers) Nsend_errors = len(self.had_send_error) Nsend_error_types = len(self.send_exc.keys()) Nrecv_errors = len(self.had_recv_error) Nrecv_error_types = len(self.recv_exc.keys()) if (Nsend_errors == Nworkers and Nsend_error_types == 1): sock_err_type,err_msg = self.send_exc.keys()[0] if sock_err_type == 111: # An attempt at helpful info for a common problem. msg = '\n\nConnection refused on all workers.\n' msg = msg + ' Perhaps restarting the cluster would help.\n' msg = msg + ' Use Your_Clusters_Name_Here.restart()' else: msg = 'A Socket error occured sending to all workers.\n\t' msg = msg + str(sock_err_type) + ': ' + str(err_msg) elif Nsend_errors: msg = '\n\nThe following errors occured when sending data:\n\t' for err,guilty_workers in self.send_exc.items(): msg = msg + str(err) + '\n\t' msg = msg + 'Guilty workers: ' + str(guilty_workers) + '\n' if (Nrecv_errors == Nworkers and Nrecv_error_types == 1): err,dummy = self.recv_exc.items()[0] err_type, err_msg, err_traceback = err msg = '\n\nThe same error occured on all workers:\n\t' msg = msg + str(err_type) + ': ' + str(err_msg) msg = msg + err_traceback elif Nrecv_errors: msg = '\n\nThe following errors occured on workers:\n\t' for err,guilty_workers in self.recv_exc.items(): err_type, err_msg, err_traceback = err msg = msg + str(err_type) + ': ' + str(err_msg) + '\n' msg = msg + 'Guilty workers: ' + str(guilty_workers) + '\n' msg = msg + err_traceback raise ClusterError, msg
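The docstring above describes collapsing duplicate worker failures into a single report; the stand-alone sketch below illustrates that aggregation idea in modern Python. The `ClusterError` name is kept from the entry above, while `summarize_worker_errors` and the shape of the `send_exc`/`recv_exc` mappings (error key -> list of guilty worker indices) are assumptions for illustration, not the actual scipy.cow implementation.

# Illustrative sketch only -- not the original implementation.
class ClusterError(Exception):
    pass


def summarize_worker_errors(n_workers, send_exc, recv_exc):
    """Raise one ClusterError describing every distinct worker failure.

    send_exc / recv_exc map an error description to the list of worker
    indices that raised it, mirroring the bookkeeping in the method above.
    """
    lines = []

    # Collapse the common case: every worker failed the same way on send.
    if len(send_exc) == 1:
        err, workers = next(iter(send_exc.items()))
        if len(workers) == n_workers:
            lines.append("The same send error occurred on all workers: %r" % (err,))
        else:
            lines.append("Send error %r on workers %s" % (err, workers))
    else:
        for err, workers in send_exc.items():
            lines.append("Send error %r on workers %s" % (err, workers))

    # Same collapsing rule for errors raised while receiving results.
    for err, workers in recv_exc.items():
        if len(recv_exc) == 1 and len(workers) == n_workers:
            lines.append("The same error occurred on all workers: %r" % (err,))
        else:
            lines.append("Error %r on workers %s" % (err, workers))

    if lines:
        raise ClusterError("\n".join(lines))


if __name__ == "__main__":
    try:
        summarize_worker_errors(
            n_workers=4,
            send_exc={(111, "Connection refused"): [0, 1, 2, 3]},
            recv_exc={},
        )
    except ClusterError as exc:
        print(exc)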
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def error_handler(msg):\n print(\"Server Error: %s\" % msg)", "def error_handler(msg):\n print \"Server Error: %s\" % msg", "def error_handler(msg):\n print \"Server Error: %s\" % msg", "def handle_error(self):\n self.cmd_channel.debug(\"DTPHandler.handle_error()\")\n try:\n raise\n # if error is connection related we provide a detailed\n # information about it\n except socket.error, err:\n if err[0] in errno.errorcode:\n error = err[1]\n else:\n error = \"Unknown connection error\"\n # an error could occur in case we fail reading / writing\n # from / to file (e.g. file system gets full)\n except EnvironmentError, err:\n error = _strerror(err)\n except:\n # some other exception occurred; we don't want to provide\n # confidential error messages to user so we return a\n # generic \"unknown error\" response.\n logerror(traceback.format_exc()) \n error = \"Unknown error\"\n self.cmd_channel.respond(\"426 %s; transfer aborted.\" %error)\n self.close()", "def _handle_error(self, errno, msg):\n if self.error_callback != None:\n #Call the error callback but expect failure.\n try:\n self.error_callback(errno, msg, self.rpcclient)\n except Exception as ex:\n self.log.failure(\"Error in error handler for '{cmd!r}'.\",cmd=self.command)\n else:\n #If no handler is set, all we do is log.\n self.log.error(\"Notice: no on_error defined for '{cmd!r}, command result: {msg!r}\",cmd=self.command,msg=msg)", "def handle_error():\n print \"An error occurred. Trace:\\n\"\n traceback.print_exc()", "def processError(self, error):\n logging.debug(\"Error %i\", error)\n if error == 0:\n # Remote Host can't be resolved\n self.close()\n elif error == 1:\n # Remote Host closed the connection.\n self.close()\n else:\n # TODO: somehow handle errors\n # Code 7 is detected far too late, it waits till the socket itself times out\n errortxt = \"\"\n for k in dir(QAbstractSocket):\n v = getattr(QAbstractSocket, k)\n if type(v) is type(error) and str(v) is str(error):\n errortxt = k\n\n sys.stderr.write(\"QTcpSocket@Transaction Error Code: \" + str(error) + \" \" + errortxt + \"\\n\")\n\n self.close()\n self._isConnected = False\n self._hasError = True\n self._stopWaiting.emit()", "def error_handler(e):\n logging.error('error_handler for socketio. 
An error has occurred: ' + str(e))", "def _handle_error(self, soc):\n err_string = \"socket error\"\n if soc in self._reading:\n err_string += (\" with '%s' read\" % self._reading[soc])\n if soc in self._writing:\n err_string += (\" with '%s' still to write\" % self._writing[soc])\n self._log_error(err_string)\n self._cleanup(soc)", "def handle_exception(e):\n print(e)\n return error()", "def sm_error_handler(self, errors):\n try:\n yield\n except Exception as e:\n if issubclass(e.__class__, ManagerError) or \\\n issubclass(e.__class__, ManagerFatalError) or \\\n isinstance(e, ConnectionError) or \\\n xmlrpclib.ProtocolError or \\\n xmlrpclib.Fault:\n\n errors.append(repr(e))\n elif isinstance(e, socket.error):\n errors.append(repr(e))\n errors.append(\"Please make sure the server port is open.\")\n else:\n raise e", "def test_internal_server_error_beomces_remote_initiated_server_error(self):\n msg = \"The server has encountered an error\"\n error = ErrorParser().process_all(msg)\n assert isinstance(error, RemoteInitiatedServerError)\n eq_(BibliothecaAPI.SERVICE_NAME, error.service_name)\n eq_(502, error.status_code)\n eq_(msg, error.message)\n doc = error.as_problem_detail_document()\n eq_(502, doc.status_code)\n eq_(\"Integration error communicating with 3M\", doc.detail)", "def _r_send_error(self, result, protocol):\n error = result.value\n if not isinstance(error, MessageHandleError):\n raise error\n print(\"Error occurred: %s\" % result)\n msgid = None\n if error.original_message is not None:\n msgid = error.original_message.id\n msg = ResponseMessage(result_code=error.error_code, response_to=msgid, result=error.error_details)\n protocol.send_message(create_message_string(msg))", "async def on_command_error(self, ctx, error):\n\n # This prevents any commands with local handlers being handled here in on_command_error.\n if hasattr(ctx.command, 'on_error'):\n return\n\n ignored = (commands.CommandNotFound, commands.UserInputError)\n\n # Allows us to check for original exceptions raised and sent to CommandInvokeError.\n # If nothing is found. 
We keep the exception passed to on_command_error.\n error = getattr(error, 'original', error)\n\n # Anything in ignored will return and prevent anything happening.\n if isinstance(error, ignored):\n return\n\n elif isinstance(error, commands.DisabledCommand):\n await ctx.send(f'{self.bot.settings.prefix}{ctx.command} has been disabled.')\n return\n\n elif isinstance(error, commands.NoPrivateMessage):\n try:\n await ctx.channel.send(f'{self.bot.settings.prefix}{ctx.command} can not be used in Private Messages.')\n except:\n pass\n return\n\n elif isinstance(error, commands.BadArgument):\n await ctx.send(f'Refer to.{self.bot.settings.prefix}help {ctx.command}')\n return\n\n elif isinstance(error, commands.BotMissingPermissions):\n missing = [perm.replace('_', ' ').replace('guild', 'server').title() for perm in error.missing_perms]\n if len(missing) > 2:\n fmt = '{}, and {}'.format(\"**, **\".join(missing[:-1]), missing[-1])\n else:\n fmt = ' and '.join(missing)\n await ctx.send(f'I need the **{fmt}** permission(s) to run this command.')\n return\n\n if isinstance(error, commands.MissingPermissions):\n missing = [perm.replace('_', ' ').replace('guild', 'server').title() for perm in error.missing_perms]\n if len(missing) > 2:\n fmt = '{}, and {}'.format(\"**, **\".join(missing[:-1]), missing[-1])\n else:\n fmt = ' and '.join(missing)\n await ctx.send(f'You need the **{fmt}** permission(s) to use this command.')\n return\n\n # All other Errors not returned come here... And we can just print the default TraceBack.\n print('Ignoring exception in command {}:'.format(ctx.command), file=sys.stderr)\n traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)", "def handle_error(self):\n trace = traceback.format_exc()\n stderr(trace)\n LOGGER.error('Fatal error in core, please review exception log')\n # TODO: make not hardcoded\n logfile = codecs.open(\n os.path.join(self.config.core.logdir, 'exceptions.log'),\n 'a',\n encoding='utf-8'\n )\n logfile.write('Fatal error in core, handle_error() was called\\n')\n logfile.write('last raw line was %s' % self.raw)\n logfile.write(trace)\n logfile.write('Buffer:\\n')\n logfile.write(self.buffer)\n logfile.write('----------------------------------------\\n\\n')\n logfile.close()\n if self.error_count > 10:\n if (datetime.now() - self.last_error_timestamp).seconds < 5:\n stderr(\"Too many errors, can't continue\")\n os._exit(1)\n self.last_error_timestamp = datetime.now()\n self.error_count = self.error_count + 1", "def handle_error(self):\n debug(\"FTPServer.handle_error()\")\n logerror(traceback.format_exc())\n self.close()", "def handle_error(self):\n self.cmd_channel.debug(\"PassiveDTP.handle_error()\")\n logerror(traceback.format_exc())\n self.close()", "def handle_error(self):\n self.cmd_channel.debug(\"ActiveDTP.handle_error()\")\n logerror(traceback.format_exc())\n self.close()", "def _handle_rpc_error(self, rpc_error: grpc.RpcError) -> NoReturn:\n logger.exception(\"GRPC Error received\")\n # We have to cast the value here because, a RpcError is a Call as well.\n # https://grpc.github.io/grpc/python/grpc.html#grpc.UnaryUnaryMultiCallable.__call__\n status = rpc_status.from_call(cast(grpc.Call, rpc_error))\n if status:\n for d in status.details:\n if d.Is(error_details_pb2.ErrorInfo.DESCRIPTOR):\n info = error_details_pb2.ErrorInfo()\n d.Unpack(info)\n raise convert_exception(info, status.message) from None\n\n raise SparkConnectGrpcException(status.message) from None\n else:\n raise SparkConnectGrpcException(str(rpc_error)) 
from None", "def handle_err(self):\n pass", "def send_error(msg):\n\n print(msg)", "def _handle_error(self, error: Exception) -> NoReturn:\n if isinstance(error, grpc.RpcError):\n self._handle_rpc_error(error)\n elif isinstance(error, ValueError):\n if \"Cannot invoke RPC\" in str(error) and \"closed\" in str(error):\n raise SparkConnectException(\n error_class=\"NO_ACTIVE_SESSION\", message_parameters=dict()\n ) from None\n raise error", "def handle_bui_server_exception(error):\n bui.logger.error(error)\n return {\"message\": error.description}, error.code", "def bcp_receive_error(self, client, **kwargs):\n self.log.warning('Received Error command from host with parameters: %s',\n kwargs)", "def log_error(customized_msg, logfile_handle):\r\n tb = sys.exc_info()[2]\r\n tbinfo = traceback.format_tb(tb)[0]\r\n pymsg = \"PYTHON ERRORS:\\nTraceback Info:\\n\" + tbinfo + \"\\nError Info:\\n \" + str(\r\n sys.exc_type) + \": \" + str(sys.exc_value) + \"\\n\"\r\n msgs = \"ARCPY ERRORS:\\n\" + arcpy.GetMessages(2) + \"\\n\"\r\n logfile_handle.writelines(customized_msg + str(msgs) + \"\\n\" + pymsg + \"\\n\")", "def recvError(self, errorMessage):\n LOG_ERROR(\"Client.recvError: \" + errorMessage, \"EDEN\")\n # default implementation: disconnect from server\n self.disconnectFromServer()", "def handle_error(self, err): # pragma: no cover\n # log every exception raised in the application\n print('we ended up in the API handle_error()', err, err.__class__)\n\n # catch other HTTP errors\n if isinstance(err, HTTPException):\n original = getattr(err, \"original_exception\", None)\n return jsonify({\n 'success': False,\n 'error': err.code,\n \"message\": getattr(err.error, 'message')\n }), err.code\n\n # if 'message' attribute isn't set, assume it's a core Python exception\n if not getattr(err, 'message', None):\n original = getattr(err, \"original_exception\", None)\n return jsonify({\n 'message': 'Server has encountered an unknown error'\n }), 500\n\n # Handle application-specific custom exceptions\n return jsonify(**err.kwargs), err.http_status_code", "def patch_broken_pipe_error():\n from socketserver import BaseServer\n\n handle_error = BaseServer.handle_error\n\n def my_handle_error(self, request, client_address):\n type, err, tb = sys.exc_info()\n # there might be better ways to detect the specific erro\n if repr(err) == \"error(32, 'Broken pipe')\":\n pass\n else:\n handle_error(self, request, client_address)\n\n BaseServer.handle_error = my_handle_error", "async def on_handle_message_error(self, message: andesite.ReceiveOperation, exc: Exception) -> None:\n log.error(f\"uncaught error {exc} in {self} when handling message {message}\")", "def handle_error(self, params):\n\n # Run the error handler if needed.\n if (self.must_handle_error()):\n log.warning(\"Running On Error error handler...\")\n self.got_error = False\n self.error_handler.eval(context=self, params=params)\n\n # The error has now been cleared.\n self.got_error = False" ]
[ "0.6415015", "0.6402821", "0.6402821", "0.63423985", "0.6236461", "0.6134316", "0.60176396", "0.59943914", "0.593466", "0.5901477", "0.5868321", "0.58462125", "0.58436084", "0.58391523", "0.58321005", "0.5793094", "0.5737886", "0.5730986", "0.57273734", "0.5706913", "0.57000864", "0.5699289", "0.56692636", "0.56685036", "0.5667612", "0.5666155", "0.5665412", "0.5660542", "0.5655951", "0.5628158" ]
0.70273113
0
load() print human readable load information for slave hosts Description The load value printed is the 1 minute load average that is commonly printed by uptime on Unix machines. load depends on the implementation of numpy_proc on each slave's host OS. It will not work for Windows slave processes. However, if you are using a Windows master to control a Linux cluster of slaves, it should work fine.
def load(self): import string import numpy.distutils.proc as numpy_proc results = self.load_list() for i in range(len(self.workers)): name = string.split(self.workers[i].host,'.')[0] res = results[i] s = "%6s: %1.2f," % (name[-6:], res['load_1']) print s, if not ((i+1) % 5): print
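A minimal local sketch of the same reporting layout, assuming the 1-minute load averages have already been gathered from each slave (here they are faked with the local `os.getloadavg()`, which, like the original numpy_proc call, is Unix-only); `format_loads` is a hypothetical helper that mirrors the five-entries-per-row formatting used above.

# Illustrative sketch only -- formats 1-minute load averages per host.
import os


def format_loads(host_loads, per_row=5):
    """host_loads: iterable of (hostname, 1-minute load average) pairs."""
    cells = []
    for host, load_1 in host_loads:
        short = host.split('.')[0][-6:]          # short host name, as above
        cells.append("%6s: %1.2f," % (short, load_1))
    rows = [" ".join(cells[i:i + per_row])
            for i in range(0, len(cells), per_row)]
    return "\n".join(rows)


if __name__ == "__main__":
    load_1 = os.getloadavg()[0]                  # 1-minute average, Unix only
    hosts = [("node%02d.example.org" % i, load_1) for i in range(7)]
    print(format_loads(hosts))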
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_load_data():\n proc_stat = open(\"/proc/stat\", \"r\")\n ret = []\n #times_since_startup = proc_stat.readline().strip().split()[1:]\n for line in proc_stat:\n line_split = line.strip().split()\n if(not (\"cpu\" in line_split[0])): #we have gone past the CPU lines\n break\n else:\n #everything but the label since we know [0] is overall and after that is per core by index\n ret.append(line_split[1:]) \n proc_stat.close()\n return ret", "def get_avg_load(verbose=False):\n output = run(\"top -d0.5 -n4 | grep Cpu\", quiet=True)\n\n # Strip formatting control characters (top output can have a lot of these)\n output = (output.replace('\\x1b(B','')\n .replace('\\x1b[m','')\n .replace('\\x1b[K','')\n .replace('\\x1b[39;49m',''))\n\n output = output.splitlines()\n\n loads = []\n for i in xrange(len(output)):\n # Top output tends to look like\n # Cpu(s): 2.9%us, 0.0%sy, 0.0%ni, ... OR\n # Cpu(s): 2.9% us, 0.0% sy, 0.0% ni, ... OR\n # %Cpu(s): 2.9 us, 0.0 sy, 0.0 ni, ...\n # We use a regex to match the floating point value for percentage load\n regex = re.compile(\n \"\"\"\n .*Cpu\\(s\\): # any chars before \"Cpu(s):\"\n \\s* # any amount of whitespace\n (\\d*.?\\d*) # any digits, <= 1 period, any digits (i.e. any positive float)\n \\s* # any amount of whitespace\n %? # <= 1 percent symbol (some versions of top just have one \"%\" on this line, before \"Cpu(s)\"\n \\s* # any amount of whitespace\n us # total system load appears to be marked \"us\"\n \"\"\", re.VERBOSE)\n\n matches = regex.findall(output[i])\n #print(repr(output[i]))\n if (len(matches) == 1):\n load = float(matches[0])\n loads.append(load)\n else:\n print(\"Error: On host = {Host}, unable to match total cpu load in string\\n{Output}\"\n .format(Host = env.host, Output = output[i]))\n\n # Throw out the first record of CPU load because it always seems to spike\n # briefly after the command is issued.\n loads = loads[1:]\n avg_load = None\n if len(loads) != 0:\n avg_load = sum(loads)/float(len(loads))\n else:\n print(\"Error: On host = {Host}, len(loads) == 0\"\n .format(Host = env.host))\n\n if (verbose):\n print(\"{Host:4} | Average load: {Load:3.2f}%\".format(Host=env.host, Load=avg_load))\n\n return avg_load", "def load_stat():\n loadavg = {}\n f = open(\"/proc/loadavg\")\n con = f.read().split()\n f.close()\n loadavg['lavg_1'] = con[0]\n loadavg['lavg_5'] = con[1]\n loadavg['lavg_15'] = con[2]\n loadavg['nr'] = con[3]\n loadavg['last_pid'] = con[4]\n return loadavg", "def cn_loads():\n\n # Change the definition of this list to reflect the available machines\n cn_machines = ['cn'+str(i) for i in xrange(1,25)] # if i not in [0]]\n\n # Try to connect to all known machines to see which ones respond.\n # If a machine is down for maintenence, or so overloaded that it\n # can't even respond right away, we don't want to query it further.\n with fabric.context_managers.hide(\"running\"):\n connectivity = execute(test_connectivity, hosts=cn_machines)\n for host in connectivity:\n if connectivity[host] == False:\n cn_machines.remove(host)\n\n # Get CPU load, count, and clock of the machines that did respond\n avg_loads, cpu_counts, cpu_clocks = {}, {}, {}\n with fabric.context_managers.hide(\"running\"):\n avg_loads = execute(get_avg_load, hosts=cn_machines)\n #cpu_counts = execute(get_cpu_count, hosts=cn_machines)\n #cpu_clocks = execute(get_cpu_clock, hosts=cn_machines)\n\n # Read local DB instead of querying remote machines, when possible\n cpu_counts, cpu_clocks = get_cpu_counts_and_clocks(hosts=cn_machines)\n\n # Calculate 
available computing power of each machine:\n # (1.0-(avg_load/100.0)) * num_processors * clockrate\n power = {}\n for host in avg_loads:\n power[host] = ((1.0-(avg_loads[host]/100.0))\n * cpu_counts[host]\n * cpu_clocks[host])\n\n for host in sorted(power, key=power.get, reverse=False):\n print(\"{Host:4s} | load: {Load: >4.1f}% | cpus: {CPUs: >2d} @ {Clock: >4.2f} GHz | power: {Power: >5.2f}\"\n .format(Host = host,\n Load = avg_loads[host],\n CPUs = cpu_counts[host],\n Clock = cpu_clocks[host],\n Power = power[host]))", "def loadavg():\n sin = psutil.getloadavg()\n return [\n round(sin[0], 3),\n round(sin[1], 3),\n round(sin[2], 3)\n ]", "def load_list(self):\n import numpy.distutils.proc as numpy_proc\n res = self.apply(numpy_proc.load_avg,())\n return res", "def get_cpu_load (processor_number=0):\n\ttry:\n\t\tf = open(\"/proc/stat\", \"r\")\n\t\ttmp = f.readlines(2000)\n\t\tf.close()\n\texcept:\n\t\tprint _(\"Failed to open /proc/stat\")\n\t\treturn None\n\tif processor_number == 0 : sufix = ''\n\telse: sufix = str(processor_number -1)\n\tline = tmp[processor_number]\n\n\tif line.startswith(\"cpu%s\"% (sufix)):\n\t\tcuse = float( line.split()[1] )\n\t\tcn = float( line.split()[2] )\n\t\tcsys = float( line.split()[3])\n\t\tif sufix == '':\n\t\t\tload = cuse + cn\n\t\telse:\n\t\t\tload = cuse + csys + cn\n\t\t#load = int(load / .update_interval)\n\t\treturn load\n\treturn None", "def test_cpuload_get_sensordef(self):\n test_sensordef = {\n \"kind\": self.test_cpuload.get_kind(),\n \"name\": \"CPU Load\",\n \"description\": \"Monitors CPU load avg on the system the mini probe is running on\",\n \"default\": \"yes\",\n \"help\": \"Monitors CPU load avg on the system the mini probe is running on\",\n \"tag\": \"mpcpuloadsensor\",\n \"fields\": [],\n \"groups\": []\n }\n assert_equal(self.test_cpuload.get_sensordef(), test_sensordef)", "def get_cpu_load(last_cpu_times):\n\n new_cpu_times = get_load_data()\n overall = None\n per_core = []\n # To get loads, find the difference between the current CPU times since startup and the old ones, then find what\n # percent those times overall and per core as spent not idle, i.e. under load. This could be done faster with \n # something like numpy, but that would add a (debatably) unnecessary dependency.\n for line_index in range(0, len(last_cpu_times)):\n difference = []\n for i in range(0, len(last_cpu_times[0])):\n difference.append(int(new_cpu_times[line_index][i]) - int(last_cpu_times[line_index][i]))\n idle = float(difference[3]) / float(sum(difference))\n load = 1 - idle # %/100 of time since the last data was collected spent not idle, i.e. 
CPU load.\n if(line_index == 0):\n #note: rounding removed because it seemed that a lot of error would arise when actually sending and \n # receiving the messages, so doing so was basically pointless.\n overall = load\n else:\n per_core.append(load)\n #if block added to prevent issues with serializing None as a float\n if(overall != None):\n return (overall, per_core, new_cpu_times)\n else:\n return (float(\"NaN\"), [], new_cpu_times)", "def _Load(self, vm, **kwargs):\n kwargs.setdefault('threads', self._default_preload_threads)\n if FLAGS.ycsb_record_count:\n kwargs.setdefault('recordcount', FLAGS.ycsb_record_count)\n for pv in FLAGS.ycsb_load_parameters:\n param, value = pv.split('=', 1)\n kwargs[param] = value\n command = self._BuildCommand('load', **kwargs)\n stdout, stderr = vm.RobustRemoteCommand(command)\n return ycsb_stats.ParseResults(\n str(stderr + stdout), self.measurement_type, _ERROR_RATE_THRESHOLD.value\n )", "def serverinfo( self, mess, args):\n version = open('/proc/version').read().strip()\n loadavg = open('/proc/loadavg').read().strip()\n\n return '%s\\n\\n%s' % ( version, loadavg, )", "def test_cpu_limitation(self):\n expected_dict = self.calculate_expected_values(\n load_dict=self.load_dict\n )\n assert sla_helpers.load_vm_and_check_the_load(\n load_dict=self.load_dict, expected_values=expected_dict\n )", "def test_loader_metric():\n clb = pt_clb.LoaderMetrics(TEST_METRIC)\n runner = Runner(model=TEST_MODEL, optimizer=TEST_OPTIMIZER, criterion=TEST_CRITERION, callbacks=clb)\n runner.fit(TEST_LOADER, epochs=2)\n assert clb.target[0].grad_fn is None\n assert clb.output[0].grad_fn is None\n assert clb.target[0].device == torch.device(\"cpu\")\n assert clb.output[0].device == torch.device(\"cpu\")", "def getLoad():\n ina = INA219(address=int('0x40', 16))\n load_bus_v = ina.getBusVoltage_V()\n load_shunt_mv = ina.getShuntVoltage_mV()\n load_curr_ma = ina.getCurrent_mA()\n load_volt_v = (ina.getBusVoltage_V() + ina.getShuntVoltage_mV() / 1000)\n load_power_mw = ina.getPower_mW()\n return load_volt_v, load_curr_ma", "def service_load_metrics(self) -> pulumi.Output[Optional[Sequence['outputs.ServiceLoadMetricDescriptionResponse']]]:\n return pulumi.get(self, \"service_load_metrics\")", "def help_load(self):\n print(LOAD)", "def _load_cluster(self):", "def test_vm_cpu_limitation(self):\n expected_dict = self.calculate_expected_values(\n load_dict=self.load_dict\n )\n assert sla_helpers.load_vm_and_check_the_load(\n load_dict=self.load_dict,\n expected_values=expected_dict\n )", "def getloadavg():\n global _loadavg_inititialized\n\n if not _loadavg_inititialized:\n cext.init_loadavg_counter()\n _loadavg_inititialized = True\n\n # Drop to 2 decimal points which is what Linux does\n raw_loads = cext.getloadavg()\n return tuple([round(load, 2) for load in raw_loads])", "def test_cpu_limitation_without_guest_agent(self):\n expected_dict = self.calculate_expected_values(\n load_dict=self.load_dict\n )\n assert sla_helpers.load_vm_and_check_the_load(\n load_dict=self.load_dict, expected_values=expected_dict\n )", "def main(path):\r\n whdload_slave = WHDLoadSlaveFile.from_path(path)\r\n whdload_slave.read()\r\n display = WHDLoadDisplay(whdload_slave)\r\n\r\n click.echo(click.style('WHDLoad Slave Reader v0.1.0', fg='green'))\r\n click.echo('')\r\n for key, value in display.display_properties():\r\n click.echo(click.style(key, fg='yellow'), nl=False)\r\n click.echo(\": {}\".format(value))", "def serverinfo( self, mess, args):\n version = open('/proc/version').read().strip()\n loadavg = 
open('/proc/loadavg').read().strip()\n\n return '%snn%s' % ( version, loadavg, )", "def get_loadavg(cls):\n\n with open(\"/proc/loadavg\") as loadavg:\n loadavg = loadavg.read().split()\n kernel_entities = loadavg[3].split(\"/\")\n loadavg_stat = { StatsKeys.LOADAVG :\n {\n StatsKeys.LAST_1_MIN : float(loadavg[0]),\n StatsKeys.LAST_5_MIN : float(loadavg[1]),\n StatsKeys.LAST_15_MIN : float(loadavg[2]),\n StatsKeys.RUNNABLE_ENTITIES : int(kernel_entities[0]),\n StatsKeys.SCHEDULING_ENTITIES : int(kernel_entities[1])\n }\n }\n logger.debug(\"Loadavg stats: {}\".format(' '.join(loadavg)))\n\n return loadavg_stat", "def get_random_cpu_load():\n load = random.gauss(55, 10)\n if load < 0:\n return 0.0\n elif load > 100:\n return 100.0\n else:\n return round(load, 1)", "def setLoad(self, new_load: float) -> None:\n self.load = new_load", "def present_load(self):\n return self._read(MX_PRESENT_LOAD)", "def output_load_versus_launch_time(self):\r\n results_dirname = get_param(\"results_dir\")\r\n per_task_filename = os.path.join(results_dirname,\r\n \"%s_task_load_vs_wait\" %\r\n get_param(\"file_prefix\"))\r\n per_task_file = open(per_task_filename, \"w\")\r\n per_task_file.write(\"load\\twait_time\\n\")\r\n \r\n per_job_filename = os.path.join(results_dirname,\r\n \"%s_job_load_vs_wait\" %\r\n get_param(\"file_prefix\"))\r\n per_job_file = open(per_job_filename, \"w\")\r\n per_job_file.write(\"load\\twait_time\\n\")\r\n for job in self.completed_jobs:\r\n # Launch time and expected load for the last task to launch.\r\n longest_task_wait = -1\r\n longest_task_load = -1\r\n for task_id in range(job.num_tasks):\r\n load = job.probe_results[task_id]\r\n wait = job.wait_times[task_id]\r\n if wait > longest_task_wait:\r\n longest_task_wait = wait\r\n longest_task_load = load\r\n per_task_file.write(\"%f\\t%f\\n\" % (load, wait))\r\n \r\n per_job_file.write(\"%f\\t%f\\n\" % (longest_task_load,\r\n longest_task_wait))\r\n per_job_file.close()\r\n per_task_file.close()", "def set_load(self, load):\n self.load= load", "def info(self):\n import string\n results = self.info_list()\n labels = \"%-8s %-9s %-4s %-8s %-8s %-4s\" % \\\n ('MACHINE','CPU','GHZ','MB TOTAL',\n 'MB FREE','LOAD')\n print labels\n for i in range(len(self.workers)):\n name = string.split(self.workers[i].host,'.')[0]\n res = results[i]\n s = \"%-8s %2dx%-6s %4.1f %8.1f %8.1f %4.2f\" % \\\n (name[-8:], res['cpu_count'],res['cpu_type'][-6:], \\\n res['cpu_speed'],res['mem_total'],res['mem_free'],\\\n res['load_1'])\n print s", "def current_load(self):\n nn = [p.n for p in self._procs]\n return sum(nn), min(nn)" ]
[ "0.63688195", "0.62896323", "0.6152832", "0.5916168", "0.554287", "0.5534526", "0.5520359", "0.5466271", "0.5461817", "0.5458563", "0.54556566", "0.54010874", "0.53958553", "0.5371696", "0.53630465", "0.5356972", "0.53519404", "0.53291065", "0.5318139", "0.52881193", "0.5263834", "0.52017003", "0.5185466", "0.5159709", "0.51566607", "0.5133536", "0.5127305", "0.5120654", "0.5112173", "0.51093906" ]
0.7384944
0
info() print human readable info about the slave hosts Description Print out each slave interpreter's host name, number and type of processors, memory usage, and current load information in human readable form. info depends on the implementation of numpy_proc on each slave's host OS. It will not work for Windows slave processes. However, if you are using a Windows master to control a Linux cluster of slaves, it should work fine.
def info(self): import string results = self.info_list() labels = "%-8s %-9s %-4s %-8s %-8s %-4s" % \ ('MACHINE','CPU','GHZ','MB TOTAL', 'MB FREE','LOAD') print labels for i in range(len(self.workers)): name = string.split(self.workers[i].host,'.')[0] res = results[i] s = "%-8s %2dx%-6s %4.1f %8.1f %8.1f %4.2f" % \ (name[-8:], res['cpu_count'],res['cpu_type'][-6:], \ res['cpu_speed'],res['mem_total'],res['mem_free'],\ res['load_1']) print s
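A minimal sketch of gathering the same fields for a single Linux host with only the standard library, as a stand-in for the per-slave `numpy_proc.machine_info` call; the field names mirror the keys used above, while the `/proc/meminfo` parsing and the use of `platform.machine()` for the CPU type are assumptions for illustration (the CPU speed column is omitted because the standard library does not expose it portably).

# Illustrative sketch only -- local host info roughly matching the keys above.
import os
import platform


def local_machine_info():
    mem_total = mem_free = 0.0
    with open("/proc/meminfo") as fh:            # Linux-specific
        for line in fh:
            key, value = line.split(":", 1)
            kb = float(value.split()[0])
            if key == "MemTotal":
                mem_total = kb / 1024.0          # MB
            elif key == "MemAvailable":
                mem_free = kb / 1024.0           # MB
    return {
        "host": platform.node(),
        "cpu_count": os.cpu_count(),
        "cpu_type": platform.machine(),
        "mem_total": mem_total,
        "mem_free": mem_free,
        "load_1": os.getloadavg()[0],            # 1-minute average, Unix only
    }


if __name__ == "__main__":
    print("%-8s %-9s %-8s %-8s %-4s" %
          ("MACHINE", "CPU", "MB TOTAL", "MB FREE", "LOAD"))
    res = local_machine_info()
    print("%-8s %2dx%-6s %8.1f %8.1f %4.2f" %
          (res["host"][-8:], res["cpu_count"], res["cpu_type"][-6:],
           res["mem_total"], res["mem_free"], res["load_1"]))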
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def info_list(self):\n import numpy.distutils.proc as numpy_proc\n res = self.apply(numpy_proc.machine_info,())\n return res", "def run_info(self):\n return \"MPI: %d, OMP: %d\" % (self.mpi_procs, self.omp_threads)", "def _get_core_membind_info():\n args = [\"lscpu\", \"--parse=CPU,Core,Socket,Node\"]\n process_lscpu = subprocess.check_output(args, universal_newlines=True).split(\"\\n\")\n\n # Get information about core, node, socket and cpu. On a machine with no NUMA nodes, the last column is empty\n # so regex also check for empty string on the last column\n bind_info = []\n for line in process_lscpu:\n pattern = r\"^([\\d]+,[\\d]+,[\\d]+,([\\d]+|$))\"\n regex_out = re.search(pattern, line)\n if regex_out:\n bind_info.append(regex_out.group(1).strip().split(\",\"))\n\n return bind_info", "def serverinfo( self, mess, args):\n version = open('/proc/version').read().strip()\n loadavg = open('/proc/loadavg').read().strip()\n\n return '%snn%s' % ( version, loadavg, )", "def serverinfo( self, mess, args):\n version = open('/proc/version').read().strip()\n loadavg = open('/proc/loadavg').read().strip()\n\n return '%s\\n\\n%s' % ( version, loadavg, )", "def getSlaveNames():", "def remote_info():\n run('uname -a')", "def print_host_info(total_cores, used_cores, total_nodes, empty_nodes, desired_host, \n disabled_cores, disabled_nodes):\n \n print(str(desired_host).ljust(TERMWIDTH))\n print('-'.center(60, '-'))\n print('Total Cores:'.ljust(int(TERMWIDTH/2)) + str(total_cores).ljust(int(TERMWIDTH/2)))\n print('Used Cores:'.ljust(int(TERMWIDTH/2)) + str(used_cores).ljust(int(TERMWIDTH/2)))\n print('Free Cores:'.ljust(int(TERMWIDTH/2)) + str(total_cores - used_cores - disabled_cores).ljust(int(TERMWIDTH/2)))\n print('Disabled/Error Cores:'.ljust(int(TERMWIDTH/2)) + str(disabled_cores).ljust(int(TERMWIDTH/2)))\n print(\"\")\n print('Total Nodes:'.ljust(int(TERMWIDTH/2)) + str(total_nodes).ljust(int(TERMWIDTH/2)))\n print('Used Nodes:'.ljust(int(TERMWIDTH/2)) + str(total_nodes - empty_nodes - disabled_nodes).ljust(int(TERMWIDTH/2)))\n print('Disabled/Error Nodes:'.ljust(int(TERMWIDTH/2)) + str(disabled_nodes).ljust(int(TERMWIDTH/2)))\n print('Empty Nodes:'.ljust(int(TERMWIDTH/2)) + str(empty_nodes).ljust(int(TERMWIDTH/2)))\n return", "def slave_status():\n run_mysql_command(\"SHOW SLAVE STATUS\\G;\")", "def sys_info(self):\n\n for i in self._nodes.items():\n print(\"\\n==============================\")\n name = i[0]\n node = i[1]\n\n print(\"NODE: {}\\n\".format(name))\n\n # CPU\n print(\"CPU:\")\n self.cpu_info(node)\n\n # Grub\n print(\"\\nGrub Command Line:\")\n if \"grub\" in node:\n print(\" Current: {}\".format(node[\"grub\"][\"current_cmdline\"]))\n print(\" Configured: {}\".format(node[\"grub\"][\"default_cmdline\"]))\n\n # Huge Pages\n print(\"\\nHuge Pages:\")\n self.hugepage_info(node)\n\n # Devices\n print(\"\\nDevices:\")\n self.device_info(node)\n\n # Status\n print(\"\\nVPP Service Status:\")\n state, errors = VPPUtil.status(node)\n print(\" {}\".format(state))\n for e in errors:\n print(\" {}\".format(e))\n\n # Minimum system resources\n self.min_system_resources(node)\n\n print(\"\\n==============================\")", "def print_detailed_host(total_cores, used_cores, total_nodes, empty_nodes, desired_host, \n disabled_cores, disabled_nodes, node_list):\n \n print('\\nDetailed info pertaining to: ' + desired_host)\n print('Total Nodes: {0}'.format(str(len(node_list)))) \n print('Total Cores : {0}'.format(total_cores) + PRINT_INDENT + 'Used Cores: {0}'.format(used_cores)\n + 
PRINT_INDENT + 'Free Cores: {0}'.format(str(total_cores - used_cores - disabled_cores)) \n + PRINT_INDENT + 'Disabled Cores: {0}'.format(disabled_cores))\n print('\\nThe following is a list of each node within {0}:\\n'.format(desired_host))\n print('Node name'.ljust(int(TERMWIDTH/2)) + 'Used Cores/Total Cores')\n for node in node_list:\n cores = str(node.get_used()) + '/' + str(node.get_total())\n if node.get_disabled_switch():\n disabled = 'Unavailable'\n else:\n disabled = ''\n print((PRINT_INDENT + node.get_name()).ljust(int(TERMWIDTH/2)) + PRINT_INDENT + (str(cores).rjust(5,' ') \\\n + PRINT_INDENT + disabled))\n return", "def getHostInfo():", "def process_info(process):\n\thelp(process)", "def mmo_cluster_hostInfo(self, mmo_connection, inc_mongos):\n return self.mmo_execute_on_cluster(mmo_connection, \"hostInfo\", inc_mongos)", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def cpuinfo(self):\n \n command = 'cat /proc/cpuinfo'\n\tpipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = pipe.communicate()\n\tinfo = stdout.strip()\n cpu_type = None\n\tn_proc = 0\n\tfor line in info.split('\\n'):\n if 'model name' in line:\n\t n_proc += 1\n if cpu_type 
is None:\n\t\t cpu_type = ' '.join(line.split(':')[-1].strip().split())\n\t\n\treturn (cpu_type, n_proc)", "def process_host(desired_host):\n \n node_list = []\n host_info_list = [] \n if desired_host == \"all\":\n desired_host_list = getAllMachines()\n else:\n desired_host_list = (subprocess.getoutput(\"qconf -shgrp_resolved \" + '@' + str(desired_host))).split()\n qstat = subprocess.getoutput('qstat -f')\n for host in desired_host_list:\n if qstat.find(host) != (-1):\n #Searches the long string for the index of the occurance of the specified host, then\n #parses it the string for just that one line with the host that we want.\n host_info_list.append((qstat[qstat.find(host):].split('\\n'))[0])\n #Start at with everything at 0, and will count up as encountered.\n total_nodes = 0\n total_cores = 0\n disabled_cores = 0\n used_cores = 0\n free_cores = 0\n empty_nodes = 0\n disabled_nodes = 0\n for host in host_info_list:\n #simply gathering info qstat spat out for us\n temp_node = Node((host.split()[0]))\n cores = host.split()[2].replace('/', ' ').split()\n host_used_cores = cores[1]\n host_total_cores = cores[2]\n if len(host.split()) == 6 and (host.split()[5] == 'd' or host.split()[5] == 'E' or \\\n host.split()[5] == 'au' or host.split()[5] == 'Eau' or host.split()[5] == 'Eqw' \\\n or host.split()[5] == 'adu'):\n temp_node.set_disabled_switch(True)\n disabled_cores += int(host_total_cores)\n total_cores += int(host_total_cores)\n disabled_nodes += 1\n else: \n temp_node.set_disabled_switch(False)\n used_cores += int(host_used_cores)\n total_cores += int(host_total_cores)\n free_cores += int(host_total_cores) - int(host_used_cores)\n if int(host_used_cores) == 0:\n empty_nodes += 1\n temp_node.set_cores(host_total_cores, host_used_cores)\n total_nodes += 1\n node_list.append(temp_node) \n \n if len(sys.argv) == 3:\n if sys.argv[2] == '--details':\n print_detailed_host(total_cores, used_cores, total_nodes, empty_nodes, desired_host, \n disabled_cores, disabled_nodes, node_list)\n elif sys.argv[2] == '-v' or sys.argv[2] == '--visual':\n draw_queue(total_nodes, total_cores, used_cores, empty_nodes, desired_host, disabled_cores, \n disabled_nodes, node_list, free_cores)\n else:\n print('Error: Arg syntax error with: ' + sys.argv[2])\n show_usage(23)\n elif sys.argv[1] == \"-qlong\":\n # Returning values from this host group to the qlong function\n return(total_cores, used_cores, total_nodes, empty_nodes, disabled_cores,disabled_nodes, node_list)\n elif len(sys.argv) < 3:\n print_host_info(total_cores, used_cores, total_nodes, empty_nodes, desired_host, disabled_cores, \n disabled_nodes)\n else:\n print('Error: Too many args')\n show_usage(23)\n return", "def computer_info():\n return {\n 'system': platform.system(),\n 'architecture': platform.architecture(),\n 'name': platform.node(),\n 'release': platform.release(),\n 'version': platform.version(),\n 'machine': platform.machine(),\n 'processor': platform.processor(),\n 'virtual CPUs': mproc.cpu_count(),\n 'total RAM': _get_ram(),\n }", "def slave_hosts(self) -> 'List[str]':\n raise NotImplementedError", "def _proc_info(self):\n ret = cext.proc_info(self.pid)\n assert len(ret) == len(pinfo_map)\n return ret", "def get_system_information(self):\n\t\tsys = platform.uname()\n\t\treturn {\n\t\t\t'hostname': sys.node,\n\t\t\t'operating_system': sys.system,\n\t\t\t'version': sys.version,\n\t\t\t'release': sys.release,\n\t\t\t'processor' : sys.processor,\n\t\t\t'processor_type': sys.machine,\n\t\t}", "def getSlave(name):", "def info(self, handle):\n\n 
# Each process group gathers their output\n\n groupstr = \"\"\n procstr = \"\"\n\n gcomm = self._comm.comm_group\n wcomm = self._comm.comm_world\n rcomm = self._comm.comm_rank\n\n if wcomm.rank == 0:\n handle.write(\"Data distributed over {} processes in {} groups\\n\".format(self._comm.world_size, self._comm.ngroups))\n\n for ob in self.obs:\n id = ob['id']\n tod = ob['tod']\n base = ob['baselines']\n nse = ob['noise']\n intrvl = ob['intervals']\n\n if gcomm.rank == 0:\n groupstr = \"observation {}:\\n\".format(id)\n groupstr = \"{} {} total samples, {} detectors\\n\".format(groupstr, tod.total_samples, len(tod.detectors))\n if intrvl is not None:\n groupstr = \"{} {} intervals:\\n\".format(groupstr, len(intrvl))\n for it in intrvl:\n groupstr = \"{} {} --> {} ({} --> {})\\n\".format(groupstr, it.first, it.last, it.start, it.stop)\n\n # rank zero of the group will print general information,\n # and each process will get its statistics.\n\n nsamp = tod.local_samples[1]\n dets = tod.local_dets\n\n procstr = \" proc {}\\n\".format(gcomm.rank)\n my_chunks = 1\n if tod.local_chunks is not None:\n my_chunks = tod.local_chunks[1]\n procstr = \"{} sample range {} --> {} in {} chunks:\\n\".format(procstr, tod.local_samples[0], (tod.local_samples[0] + nsamp - 1), my_chunks)\n \n if tod.local_chunks is not None:\n chkoff = tod.local_samples[0]\n for chk in range(tod.local_chunks[1]):\n abschk = tod.local_chunks[0] + chk\n chkstart = chkoff\n chkstop = chkstart + tod.total_chunks[abschk] - 1\n procstr = \"{} {} --> {}\\n\".format(procstr, chkstart, chkstop)\n chkoff += tod.total_chunks[abschk]\n\n if nsamp > 0:\n \n stamps = tod.read_times(local_start=0, n=nsamp)\n\n procstr = \"{} timestamps {} --> {}\\n\".format(procstr, stamps[0], stamps[-1])\n\n for dt in dets:\n procstr = \"{} det {}:\\n\".format(procstr, dt)\n\n pdata = tod.read_pntg(detector=dt, local_start=0, n=nsamp)\n\n procstr = \"{} pntg [{:.3e} {:.3e} {:.3e} {:.3e}] --> [{:.3e} {:.3e} {:.3e} {:.3e}]\\n\".format(procstr, pdata[0,0], pdata[0,1], pdata[0,2], pdata[0,3], pdata[-1,0], pdata[-1,1], pdata[-1,2], pdata[-1,3])\n\n data = tod.read(detector=dt, local_start=0, n=nsamp)\n flags, common = tod.read_flags(detector=dt, local_start=0, n=nsamp)\n procstr = \"{} {:.3e} ({}) --> {:.3e} ({})\\n\".format(procstr, data[0], flags[0], data[-1], flags[-1])\n good = np.where((flags | common) == 0)[0]\n procstr = \"{} {} good samples\\n\".format(procstr, len(good))\n min = np.min(data[good])\n max = np.max(data[good])\n mean = np.mean(data[good])\n rms = np.std(data[good])\n procstr = \"{} min = {:.4e}, max = {:.4e}, mean = {:.4e}, rms = {:.4e}\\n\".format(procstr, min, max, mean, rms)\n\n for cname in tod.cache.keys():\n procstr = \"{} cache {}:\\n\".format(procstr, cname)\n ref = tod.cache.reference(cname)\n min = np.min(ref)\n max = np.max(ref)\n mean = np.mean(ref)\n rms = np.std(ref)\n procstr = \"{} min = {:.4e}, max = {:.4e}, mean = {:.4e}, rms = {:.4e}\\n\".format(procstr, min, max, mean, rms)\n\n recvstr = \"\"\n if gcomm.rank == 0:\n groupstr = \"{}{}\".format(groupstr, procstr)\n for p in range(1, gcomm.size):\n if gcomm.rank == 0:\n recvstr = gcomm.recv(source=p, tag=p)\n groupstr = \"{}{}\".format(groupstr, recvstr)\n elif p == gcomm.rank:\n gcomm.send(procstr, dest=0, tag=p)\n gcomm.barrier()\n\n # the world rank 0 process collects output from all groups and\n # writes to the handle\n\n recvgrp = \"\"\n if wcomm.rank == 0:\n handle.write(groupstr)\n for g in range(1, self._comm.ngroups):\n if wcomm.rank == 0:\n recvgrp = 
rcomm.recv(source=g, tag=g)\n handle.write(recvgrp)\n elif g == self._comm.group:\n if gcomm.rank == 0:\n rcomm.send(groupstr, dest=0, tag=g)\n wcomm.barrier()\n\n return", "def runSlaveRun():\n\n np.set_printoptions(linewidth=1000)\n function = None\n options = None\n\n # print(\"Process {}/{} reporting for duty!\".format(rank, size))\n\n function = comm.bcast(function, root=0)\n arguments = comm.scatter(options, root=0)\n\n results = function(*arguments)\n\n comm.Barrier()\n comm.gather(results, root=0)\n comm.Disconnect()", "def cpu_info(node):\n\n cpu = CpuUtils.get_cpu_info_per_node(node)\n\n item = \"Model name\"\n if item in cpu:\n print(\"{:>20}: {}\".format(item, cpu[item]))\n item = \"CPU(s)\"\n if item in cpu:\n print(\"{:>20}: {}\".format(item, cpu[item]))\n item = \"Thread(s) per core\"\n if item in cpu:\n print(\"{:>20}: {}\".format(item, cpu[item]))\n item = \"Core(s) per socket\"\n if item in cpu:\n print(\"{:>20}: {}\".format(item, cpu[item]))\n item = \"Socket(s)\"\n if item in cpu:\n print(\"{:>20}: {}\".format(item, cpu[item]))\n item = \"NUMA node(s)\"\n numa_nodes = 0\n if item in cpu:\n numa_nodes = int(cpu[item])\n for i in range(0, numa_nodes):\n item = \"NUMA node{} CPU(s)\".format(i)\n print(\"{:>20}: {}\".format(item, cpu[item]))\n item = \"CPU max MHz\"\n if item in cpu:\n print(\"{:>20}: {}\".format(item, cpu[item]))\n item = \"CPU min MHz\"\n if item in cpu:\n print(\"{:>20}: {}\".format(item, cpu[item]))\n\n if node[\"cpu\"][\"smt_enabled\"]:\n smt = \"Enabled\"\n else:\n smt = \"Disabled\"\n print(\"{:>20}: {}\".format(\"SMT\", smt))\n\n # VPP Threads\n print(\"\\nVPP Threads: (Name: Cpu Number)\")\n vpp_processes = cpu[\"vpp_processes\"]\n for i in vpp_processes.items():\n print(\" {:10}: {:4}\".format(i[0], i[1]))", "def __init__(self, is_master, track_processes, write_profile,\n verbose_cluster_stats):\n my_ip = appscale_info.get_private_ip()\n lb_ips = appscale_info.get_load_balancer_ips()\n\n self._is_lb = my_ip in lb_ips\n if is_master is not None:\n self._is_master = is_master\n else:\n self._is_master = my_ip == appscale_info.get_headnode_ip()\n self._track_processes = track_processes\n self._write_profile = write_profile\n\n # There are 3 kinds of local stats (node/processes/proxies)\n self._local_node_stats = LocalStats(\n cache_size=NODE_STATS_CACHE_SIZE,\n update_interval=UPDATE_NODE_STATS_INTERVAL)\n self._local_processes_stats = LocalStats(\n cache_size=PROCESSES_STATS_CACHE_SIZE,\n update_interval=UPDATE_PROCESSES_STATS_INTERVAL)\n self._local_proxies_stats = LocalStats(\n cache_size=PROXIES_STATS_CACHE_SIZE,\n update_interval=UPDATE_PROXIES_STATS_INTERVAL)\n\n if self._is_master:\n # And 3 same kinds of cluster stats\n self._cluster_nodes_stats = ClusterStats(\n cache_size=CLUSTER_NODES_STATS_CACHE_SIZE,\n update_interval=UPDATE_CLUSTER_NODES_STATS_INTERVAL)\n self._cluster_processes_stats = ClusterStats(\n cache_size=CLUSTER_PROCESSES_STATS_CACHE_SIZE,\n update_interval=UPDATE_CLUSTER_PROCESSES_STATS_INTERVAL)\n self._cluster_proxies_stats = ClusterStats(\n cache_size=CLUSTER_PROXIES_STATS_CACHE_SIZE,\n update_interval=UPDATE_CLUSTER_PROXIES_STATS_INTERVAL)\n\n if not verbose_cluster_stats:\n # To reduce slave-to-master traffic and verbosity of cluster stats\n # you can select which fields of stats to collect on master\n self._cluster_nodes_stats.included_field_lists = {\n 'node': ['cpu', 'memory', 'partitions_dict', 'loadavg'],\n 'node.cpu': ['percent', 'count'],\n 'node.memory': ['available'],\n 'node.partition': ['free', 'used'],\n 
'node.loadavg': ['last_5min'],\n }\n self._cluster_processes_stats.included_field_lists = {\n 'process': ['monit_name', 'unified_service_name', 'application_id',\n 'port', 'cpu', 'memory', 'children_stats_sum'],\n 'process.cpu': ['user', 'system', 'percent'],\n 'process.memory': ['resident', 'virtual', 'unique'],\n 'process.children_stats_sum': ['cpu', 'memory'],\n }\n self._cluster_proxies_stats.included_field_lists = {\n 'proxy': ['name', 'unified_service_name', 'application_id',\n 'frontend', 'backend'],\n 'proxy.frontend': ['scur', 'smax', 'rate', 'req_rate', 'req_tot'],\n 'proxy.backend': ['qcur', 'scur', 'hrsp_5xx', 'qtime', 'rtime'],\n }\n\n # All routes (handlers will be assigned during configuration)\n self._routes = {\n '/stats/local/node/cache': None,\n '/stats/local/node/current': None,\n '/stats/local/processes/cache': None,\n '/stats/local/processes/current': None,\n '/stats/local/proxies/cache': None,\n '/stats/local/proxies/current': None,\n '/stats/cluster/nodes': None,\n '/stats/cluster/processes': None,\n '/stats/cluster/proxies': None,\n }\n self._publishers = []", "async def sysinfo(self, ctx: Context):\n\t\tstart = time.perf_counter()\n\t\tend = time.perf_counter()\n\t\tduration = (end - start) * 1000\n\t\tcpuavg = psutil.cpu_percent(interval=None)\n\t\tmem = psutil.virtual_memory()[2]\n\t\tdurround = round(duration, 3)\n\t\tosun = os.uname()\n\t\tawait self.send(f\"System Info | CPU: {cpuavg}% | RAM: {mem}% | Latency: {durround * 1000}ms | OS: {sys.platform}\", whisper=[ctx.author.id])", "def load(self):\n import string\n import numpy.distutils.proc as numpy_proc\n results = self.load_list()\n for i in range(len(self.workers)):\n name = string.split(self.workers[i].host,'.')[0]\n res = results[i]\n s = \"%6s: %1.2f,\" % (name[-6:], res['load_1'])\n print s,\n if not ((i+1) % 5):\n print", "def procinfo() -> None:\n if pwndbg.gdblib.qemu.is_qemu():\n print(\n message.error(\n \"QEMU target detected: showing result for the qemu process\"\n \" - so it will be a bit inaccurate (excessive for the parts\"\n \" used directly by the qemu process)\"\n )\n )\n exe = pwndbg.auxv.get()[\"AT_EXECFN\"]\n print(\"%-10s %r\" % (\"exe\", exe))\n\n proc = Process()\n\n # qemu-usermode fail!\n if not proc.status:\n return\n\n print(\"%-10s %s\" % (\"cmdline\", proc.cmdline))\n\n print(\"%-10s %s\" % (\"cwd\", proc.cwd))\n\n files = dict(proc.open_files)\n\n for c in proc.connections:\n files[c.fd] = str(c)\n\n print(\"%-10s %s\" % (\"pid\", proc.pid))\n print(\"%-10s %s\" % (\"tid\", proc.tid))\n\n if proc.selinux != \"unconfined\":\n print(\"%-10s %s\" % (\"selinux\", proc.selinux))\n\n print(\"%-10s %s\" % (\"ppid\", proc.ppid))\n\n if not pwndbg.gdblib.android.is_android():\n print(\"%-10s %s\" % (\"uid\", proc.uid))\n print(\"%-10s %s\" % (\"gid\", proc.gid))\n print(\"%-10s %s\" % (\"groups\", proc.groups))\n else:\n print(\"%-10s %s\" % (\"uid\", list(map(pwndbg.lib.android.aid_name, proc.uid))))\n print(\"%-10s %s\" % (\"gid\", list(map(pwndbg.lib.android.aid_name, proc.gid))))\n print(\"%-10s %s\" % (\"groups\", list(map(pwndbg.lib.android.aid_name, proc.groups))))\n\n for fd, path in files.items():\n if not set(path) < set(string.printable):\n path = repr(path)\n\n print(\"%-10s %s\" % (\"fd[%i]\" % fd, path))\n\n return", "def do_serverinfo(self, server):\n print('QManager server:', self._server)\n server_info = self._qm.get_server_info()\n for k, v in server_info.items():\n print(' %s: %s' % (k, v))" ]
[ "0.6628139", "0.6029541", "0.59994787", "0.59888726", "0.59654504", "0.5912724", "0.5873242", "0.584339", "0.57711625", "0.57645833", "0.5538665", "0.54950523", "0.5493533", "0.5452326", "0.54289675", "0.5418666", "0.54113436", "0.53924394", "0.53721255", "0.5360203", "0.53551906", "0.53155845", "0.5313745", "0.52923983", "0.52888745", "0.52756554", "0.525145", "0.52490675", "0.52465695", "0.5178963" ]
0.63965917
1
load_list() Return a list of slave load information dictionaries Description Retrieve a dictionary with information about the load on each host processor. The dictionaries have three keys, load_1, load_5, and load_15, indicating the 1, 5, and 15 minute load averages for the processor. These could be useful for (as yet unimplemented) load balancing schemes. load_list depends on the implementation of numpy_proc on each slave's host OS. It will not work for Windows slave processes. However, if you are using a Windows master to control a Linux cluster of slaves, it should work fine.
def load_list(self):
    import numpy.distutils.proc as numpy_proc
    res = self.apply(numpy_proc.load_avg,())
    return res
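A minimal usage sketch (editorial addition, not part of the dataset row): cluster stands for an already-started cow-style cluster object whose construction is not shown here; only the load_list() call, the workers attribute, and the 'load_1'/'load_5'/'load_15' keys are taken from the surrounding code and docstring.

# Hypothetical usage -- cluster is an assumed, already-running slave cluster.
for worker, host_load in zip(cluster.workers, cluster.load_list()):
    # one dictionary of 1/5/15 minute load averages per slave host
    print('%s: %.2f %.2f %.2f' % (worker.host,
                                  host_load['load_1'],
                                  host_load['load_5'],
                                  host_load['load_15']))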
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_load_data():\n proc_stat = open(\"/proc/stat\", \"r\")\n ret = []\n #times_since_startup = proc_stat.readline().strip().split()[1:]\n for line in proc_stat:\n line_split = line.strip().split()\n if(not (\"cpu\" in line_split[0])): #we have gone past the CPU lines\n break\n else:\n #everything but the label since we know [0] is overall and after that is per core by index\n ret.append(line_split[1:]) \n proc_stat.close()\n return ret", "def load(self):\n import string\n import numpy.distutils.proc as numpy_proc\n results = self.load_list()\n for i in range(len(self.workers)):\n name = string.split(self.workers[i].host,'.')[0]\n res = results[i]\n s = \"%6s: %1.2f,\" % (name[-6:], res['load_1'])\n print s,\n if not ((i+1) % 5):\n print", "def load_stat():\n loadavg = {}\n f = open(\"/proc/loadavg\")\n con = f.read().split()\n f.close()\n loadavg['lavg_1'] = con[0]\n loadavg['lavg_5'] = con[1]\n loadavg['lavg_15'] = con[2]\n loadavg['nr'] = con[3]\n loadavg['last_pid'] = con[4]\n return loadavg", "def cn_loads():\n\n # Change the definition of this list to reflect the available machines\n cn_machines = ['cn'+str(i) for i in xrange(1,25)] # if i not in [0]]\n\n # Try to connect to all known machines to see which ones respond.\n # If a machine is down for maintenence, or so overloaded that it\n # can't even respond right away, we don't want to query it further.\n with fabric.context_managers.hide(\"running\"):\n connectivity = execute(test_connectivity, hosts=cn_machines)\n for host in connectivity:\n if connectivity[host] == False:\n cn_machines.remove(host)\n\n # Get CPU load, count, and clock of the machines that did respond\n avg_loads, cpu_counts, cpu_clocks = {}, {}, {}\n with fabric.context_managers.hide(\"running\"):\n avg_loads = execute(get_avg_load, hosts=cn_machines)\n #cpu_counts = execute(get_cpu_count, hosts=cn_machines)\n #cpu_clocks = execute(get_cpu_clock, hosts=cn_machines)\n\n # Read local DB instead of querying remote machines, when possible\n cpu_counts, cpu_clocks = get_cpu_counts_and_clocks(hosts=cn_machines)\n\n # Calculate available computing power of each machine:\n # (1.0-(avg_load/100.0)) * num_processors * clockrate\n power = {}\n for host in avg_loads:\n power[host] = ((1.0-(avg_loads[host]/100.0))\n * cpu_counts[host]\n * cpu_clocks[host])\n\n for host in sorted(power, key=power.get, reverse=False):\n print(\"{Host:4s} | load: {Load: >4.1f}% | cpus: {CPUs: >2d} @ {Clock: >4.2f} GHz | power: {Power: >5.2f}\"\n .format(Host = host,\n Load = avg_loads[host],\n CPUs = cpu_counts[host],\n Clock = cpu_clocks[host],\n Power = power[host]))", "def loadbyproc(path, parameter, nproc):\n vals = []\n for iproc in range(nproc):\n vals += [loadbin(path, iproc, parameter)]\n return vals", "def loadavg():\n sin = psutil.getloadavg()\n return [\n round(sin[0], 3),\n round(sin[1], 3),\n round(sin[2], 3)\n ]", "def get_cpu_load(last_cpu_times):\n\n new_cpu_times = get_load_data()\n overall = None\n per_core = []\n # To get loads, find the difference between the current CPU times since startup and the old ones, then find what\n # percent those times overall and per core as spent not idle, i.e. under load. 
This could be done faster with \n # something like numpy, but that would add a (debatably) unnecessary dependency.\n for line_index in range(0, len(last_cpu_times)):\n difference = []\n for i in range(0, len(last_cpu_times[0])):\n difference.append(int(new_cpu_times[line_index][i]) - int(last_cpu_times[line_index][i]))\n idle = float(difference[3]) / float(sum(difference))\n load = 1 - idle # %/100 of time since the last data was collected spent not idle, i.e. CPU load.\n if(line_index == 0):\n #note: rounding removed because it seemed that a lot of error would arise when actually sending and \n # receiving the messages, so doing so was basically pointless.\n overall = load\n else:\n per_core.append(load)\n #if block added to prevent issues with serializing None as a float\n if(overall != None):\n return (overall, per_core, new_cpu_times)\n else:\n return (float(\"NaN\"), [], new_cpu_times)", "def _get_core_membind_info():\n args = [\"lscpu\", \"--parse=CPU,Core,Socket,Node\"]\n process_lscpu = subprocess.check_output(args, universal_newlines=True).split(\"\\n\")\n\n # Get information about core, node, socket and cpu. On a machine with no NUMA nodes, the last column is empty\n # so regex also check for empty string on the last column\n bind_info = []\n for line in process_lscpu:\n pattern = r\"^([\\d]+,[\\d]+,[\\d]+,([\\d]+|$))\"\n regex_out = re.search(pattern, line)\n if regex_out:\n bind_info.append(regex_out.group(1).strip().split(\",\"))\n\n return bind_info", "def conf_load_par_list(par_def):\n par_def = par_def[1:-1].split(',')\n par_list = list()\n for p in par_def:\n par_list.append(p.strip())\n return par_list", "def get_avg_load(verbose=False):\n output = run(\"top -d0.5 -n4 | grep Cpu\", quiet=True)\n\n # Strip formatting control characters (top output can have a lot of these)\n output = (output.replace('\\x1b(B','')\n .replace('\\x1b[m','')\n .replace('\\x1b[K','')\n .replace('\\x1b[39;49m',''))\n\n output = output.splitlines()\n\n loads = []\n for i in xrange(len(output)):\n # Top output tends to look like\n # Cpu(s): 2.9%us, 0.0%sy, 0.0%ni, ... OR\n # Cpu(s): 2.9% us, 0.0% sy, 0.0% ni, ... OR\n # %Cpu(s): 2.9 us, 0.0 sy, 0.0 ni, ...\n # We use a regex to match the floating point value for percentage load\n regex = re.compile(\n \"\"\"\n .*Cpu\\(s\\): # any chars before \"Cpu(s):\"\n \\s* # any amount of whitespace\n (\\d*.?\\d*) # any digits, <= 1 period, any digits (i.e. any positive float)\n \\s* # any amount of whitespace\n %? 
# <= 1 percent symbol (some versions of top just have one \"%\" on this line, before \"Cpu(s)\"\n \\s* # any amount of whitespace\n us # total system load appears to be marked \"us\"\n \"\"\", re.VERBOSE)\n\n matches = regex.findall(output[i])\n #print(repr(output[i]))\n if (len(matches) == 1):\n load = float(matches[0])\n loads.append(load)\n else:\n print(\"Error: On host = {Host}, unable to match total cpu load in string\\n{Output}\"\n .format(Host = env.host, Output = output[i]))\n\n # Throw out the first record of CPU load because it always seems to spike\n # briefly after the command is issued.\n loads = loads[1:]\n avg_load = None\n if len(loads) != 0:\n avg_load = sum(loads)/float(len(loads))\n else:\n print(\"Error: On host = {Host}, len(loads) == 0\"\n .format(Host = env.host))\n\n if (verbose):\n print(\"{Host:4} | Average load: {Load:3.2f}%\".format(Host=env.host, Load=avg_load))\n\n return avg_load", "def load_partitions(partition_list, pickle_base_name=DEFAULT_REVIEWS_PICKLE + '.'):\n\n num_partition = 1\n result = []\n for partition in partition_list:\n print 'Reading partition %d of %d' % (num_partition, len(partition_list))\n with open(pickle_base_name + str(partition)) as file:\n loaded_element = pickle.load(file)\n result.extend(loaded_element)\n\n num_partition += 1\n\n print \"Read a total of %d partitions for a total of %d objects\" % (num_partition - 1, len(result))\n return result", "def get_win_cpu(parent, host, port, community):\n oid = (1, 3, 6, 1, 2, 1, 25, 3, 3, 1, 2) # HOST-RESOURCES-MIB::hrProcessorLoad\n data = parent.get_snmp_data(host, port, community, oid, 1)\n if data:\n return tuple([int(load) for (oid, num, load) in data])", "def get_cpu_load (processor_number=0):\n\ttry:\n\t\tf = open(\"/proc/stat\", \"r\")\n\t\ttmp = f.readlines(2000)\n\t\tf.close()\n\texcept:\n\t\tprint _(\"Failed to open /proc/stat\")\n\t\treturn None\n\tif processor_number == 0 : sufix = ''\n\telse: sufix = str(processor_number -1)\n\tline = tmp[processor_number]\n\n\tif line.startswith(\"cpu%s\"% (sufix)):\n\t\tcuse = float( line.split()[1] )\n\t\tcn = float( line.split()[2] )\n\t\tcsys = float( line.split()[3])\n\t\tif sufix == '':\n\t\t\tload = cuse + cn\n\t\telse:\n\t\t\tload = cuse + csys + cn\n\t\t#load = int(load / .update_interval)\n\t\treturn load\n\treturn None", "def _LoadThreaded(self, vms, workload_file, **kwargs):\n results = []\n\n kwargs.setdefault('threads', self._default_preload_threads)\n if FLAGS.ycsb_record_count:\n kwargs.setdefault('recordcount', FLAGS.ycsb_record_count)\n if FLAGS.ycsb_field_count:\n kwargs.setdefault('fieldcount', FLAGS.ycsb_field_count)\n if FLAGS.ycsb_field_length:\n kwargs.setdefault('fieldlength', FLAGS.ycsb_field_length)\n\n with open(workload_file) as fp:\n workload_meta = ParseWorkload(fp.read())\n workload_meta.update(kwargs)\n workload_meta.update(\n stage='load',\n clients=len(vms) * kwargs['threads'],\n threads_per_client_vm=kwargs['threads'],\n workload_name=os.path.basename(workload_file),\n )\n self.workload_meta = workload_meta\n record_count = int(workload_meta.get('recordcount', '1000'))\n n_per_client = int(record_count) // len(vms)\n loader_counts = [\n n_per_client + (1 if i < (record_count % len(vms)) else 0)\n for i in range(len(vms))\n ]\n\n remote_path = posixpath.join(\n linux_packages.INSTALL_DIR, os.path.basename(workload_file)\n )\n\n args = [((vm, workload_file, remote_path), {}) for vm in dict.fromkeys(vms)]\n background_tasks.RunThreaded(PushWorkload, args)\n\n kwargs['parameter_files'] = [remote_path]\n\n 
def _Load(loader_index):\n start = sum(loader_counts[:loader_index])\n kw = copy.deepcopy(kwargs)\n kw.update(insertstart=start, insertcount=loader_counts[loader_index])\n if self.perclientparam is not None:\n kw.update(self.perclientparam[loader_index])\n results.append(self._Load(vms[loader_index], **kw))\n logging.info('VM %d (%s) finished', loader_index, vms[loader_index])\n\n start = time.time()\n background_tasks.RunThreaded(_Load, list(range(len(vms))))\n events.record_event.send(\n type(self).__name__,\n event='load',\n start_timestamp=start,\n end_timestamp=time.time(),\n metadata=copy.deepcopy(kwargs),\n )\n\n if len(results) != len(vms):\n raise IOError(\n 'Missing results: only {0}/{1} reported\\n{2}'.format(\n len(results), len(vms), results\n )\n )\n\n samples = []\n if FLAGS.ycsb_include_individual_results and len(results) > 1:\n for i, result in enumerate(results):\n samples.extend(\n ycsb_stats.CreateSamples(\n ycsb_result=result,\n ycsb_version=FLAGS.ycsb_version,\n include_command_line=_SHOULD_RECORD_COMMAND_LINE.value,\n result_type='individual',\n result_index=i,\n **workload_meta,\n )\n )\n\n # hdr histograms not collected upon load, only upon run\n combined = ycsb_stats.CombineResults(results, self.measurement_type, {})\n samples.extend(\n ycsb_stats.CreateSamples(\n ycsb_result=combined,\n ycsb_version=FLAGS.ycsb_version,\n include_histogram=FLAGS.ycsb_histogram,\n include_command_line=_SHOULD_RECORD_COMMAND_LINE.value,\n result_type='combined',\n **workload_meta,\n )\n )\n\n return samples", "def get_load_avg():\n \n with open('/proc/loadavg') as f:\n line = f.readline()\n \n return [float(x) for x in line.split()[:3]]", "def load_avg():\n \n with open(Path.proc_loadavg()) as f:\n line = f.readline()\n \n load_avgs = [float(x) for x in line.split()[:3]]\n \n return load_avgs", "def getloadavg():\n global _loadavg_inititialized\n\n if not _loadavg_inititialized:\n cext.init_loadavg_counter()\n _loadavg_inititialized = True\n\n # Drop to 2 decimal points which is what Linux does\n raw_loads = cext.getloadavg()\n return tuple([round(load, 2) for load in raw_loads])", "def test_cpuload_get_sensordef(self):\n test_sensordef = {\n \"kind\": self.test_cpuload.get_kind(),\n \"name\": \"CPU Load\",\n \"description\": \"Monitors CPU load avg on the system the mini probe is running on\",\n \"default\": \"yes\",\n \"help\": \"Monitors CPU load avg on the system the mini probe is running on\",\n \"tag\": \"mpcpuloadsensor\",\n \"fields\": [],\n \"groups\": []\n }\n assert_equal(self.test_cpuload.get_sensordef(), test_sensordef)", "def getHourlyLoads(self):\n\n\t\tloads_data = self.getDataForLoadComparisons()\n\t\tload_values = [] # Array that will contain all the load data\n\t\tload_data = {} # Dictionary of load data\n\t\thour = 0 # Counter that determines the 24 hours in a day\n\n\t\t# Parsing load data\n\t\ttoday = self.helper.getMonth() + \"/\" + self.helper.getDay() + \"/\" + self.helper.getYear()\n\t\tfor data in loads_data[0]['values']:\t\t\t\n\t\t\tif data[\"label\"] == \"12:00 AM\":\n\t\t\t\tdata[\"label\"] = \" 00:00\"\n\t\t\telif data[\"label\"].split(\" \")[1] == \"AM\":\n\n\t\t\t\thour = int(data[\"label\"].split(\":\")[0])\n\t\t\t\tif hour < 10:\n\t\t\t\t\tdata[\"label\"] = \" 0\" + str(hour) + \":00\"\n\t\t\t\telse:\n\t\t\t\t\tdata[\"label\"] = str(hour) + \":00\"\n\t\t\telif data[\"label\"].split(\" \")[1] == \"PM\":\n\t\t\t\tif data[\"label\"] == \"12:00 PM\":\n\t\t\t\t\tdata[\"label\"] = \" 12:00\"\n\t\t\t\telse:\n\t\t\t\t\thour = 
int(data[\"label\"].split(\":\")[0])\n\t\t\t\t\thour += 12\n\t\t\t\t\tdata[\"label\"] = \" \" + str(hour) + \":00\"\n\t\t\tload_data[\"x\"] = self.helper.getDateInEpoch(today + \" \" + data[\"label\"])\n\t\t\tload_data[\"y\"] = float(data[\"value\"])\n\t\t\tload_values.append(load_data)\n\t\t\tload_data = {}\n\n\t\treturn load_values", "def get_loadavg(cls):\n\n with open(\"/proc/loadavg\") as loadavg:\n loadavg = loadavg.read().split()\n kernel_entities = loadavg[3].split(\"/\")\n loadavg_stat = { StatsKeys.LOADAVG :\n {\n StatsKeys.LAST_1_MIN : float(loadavg[0]),\n StatsKeys.LAST_5_MIN : float(loadavg[1]),\n StatsKeys.LAST_15_MIN : float(loadavg[2]),\n StatsKeys.RUNNABLE_ENTITIES : int(kernel_entities[0]),\n StatsKeys.SCHEDULING_ENTITIES : int(kernel_entities[1])\n }\n }\n logger.debug(\"Loadavg stats: {}\".format(' '.join(loadavg)))\n\n return loadavg_stat", "def generate_load_parallel(clients = List[str],\n command: str = DEFAULT_CHAOS_LOAD_COMMAND,\n timeout: Union[str,int] = DEFAULT_CHAOS_LOAD_TIMEOUT,\n ssh_config_file: str = DEFAULT_CHAOS_SSH_CONFIG_FILE) -> bool:\n message = \"\"\"Generating load from clients %s using command >%s< and timeout\n >%s seconds<\"\"\"\n logger.info(message, clients, command, timeout)\n try:\n client_list = json.loads(clients)\n except Exception as e:\n message = \"\"\"Failed to parse JSON clients list. The list of clients on\n which to generate load, must be a valid JSON list of node\n aliases found in your ssh config file %s\"\"\"\n logger.error(message, ssh_config_file)\n logger.exception(e)\n return False\n ssh_config_file=expanduser(ssh_config_file)\n executor = ParallelFabricExecutor(ssh_config_file=ssh_config_file)\n result = executor.execute(client_list, command, as_sudo=True,\n timeout=int(timeout))\n\n logger.debug(\"result: %s\", json.dumps(result))\n for client in client_list:\n if result[client]['return_code'] != 0:\n logger.error(\"Failed to generate load from client %s\", client)\n return False\n return True", "def conf_load_par_range(par_def):\n try:\n s,e,n = [float(i) for i in par_def.split(':')]\n except ValueError, e:\n raise ValueError(\n \"Excpected float1:float2:float3 for the range defiction. 
{}\".format(e)\n )\n par_list = list(np.arange(s,e,n))\n if len(par_list) == 0:\n raise ValueError(\"No parameter values generated.\")\n return par_list", "def do_list(self,line):\n # app_logger = self.construct_logger(rta_constants.PROPERTIES_LOG_FILE)\n obj = ProcessInfo('jobs')\n process_list = obj.handle_parameter()\n\n if process_list:\n # get the hostname\n hostname = process_list[0]\n del process_list[0]\n process_list = obj.extract_process(process_list)\n # print 'dict is here$$$$$'\n # sys.exit(1)\n dict_processor = []\n for proc_val in process_list:\n if proc_val.search_result ==0:\n dict_processor.append({'processor':proc_val.name,'status':'Stopped','PID':str(proc_val.pid)})\n\n elif proc_val.search_result >=1:\n dict_processor.append({'processor':proc_val.name,'status':'Running','PID':str(proc_val.pid)})\n # dict_processor[proc_val.name] = 'Running'\n # print (\"|%-20s|%-5s|\"%(proc_val.name,proc_val.search_result))\n # print dict_processor\n print('##############################################')\n print('PID #'+' Processor #'+' Status')\n print('##############################################')\n spark_ls = []\n for processor in dict_processor:\n if processor.get('processor') == 'spark<spark_worker>' or processor.get('processor') == 'spark<spark_master>':\n spark_ls.append(processor)\n del dict_processor[dict_processor.index(processor)]\n # print dict_processor\n for processor in dict_processor:\n space_pid = 7 - len(processor.get('PID'))\n space_name = 30 - len(processor.get('processor'))\n if processor.get('status') == 'Running':\n print str(processor.get('PID'))+space_pid*' '+processor.get('processor') + space_name*' '+ '\\33[32m' +processor.get('status')+ '\\33[0m'\n else:\n print str(processor.get('PID'))+space_pid*' '+processor.get('processor') + space_name*' '+ '\\33[33m' +processor.get('status')+ '\\33[0m'\n # space_num = 30 - len(k)\n # print k + space_num*' '+v\n print 7*' '+'spark'\n for item in spark_ls:\n space_pid = 8 - len(item.get('PID'))\n space_name = 29 - len(item.get('processor').split('<')[1].split('>')[0])\n if item.get('status')=='Running':\n print str(item.get('PID'))+space_pid*' '+item.get('processor').split('<')[1].split('>')[0] + space_name*' '+ '\\33[32m'+item.get('status')+'\\33[0m'\n else:\n print str(item.get('PID'))+space_pid*' '+item.get('processor').split('<')[1].split('>')[0] + space_name*' '+ '\\33[33m'+item.get('status')+'\\33[0m'\n print('##############################################')\n else:\n print(\"cmd is not support from this host\")", "def get_cpu_info(vars = {}, log = sys.stderr):\n\n try:\n cpuinfo_file= file(PROC_CPUINFO_PATH,\"r\")\n except IOError, e:\n return\n\n cpu_info = {}\n count = 0\n\n for line in cpuinfo_file:\n\n try:\n (fieldname,value)= string.split(line,\":\")\n except ValueError, e:\n # this will happen for lines that don't have two values\n # (like the first line on 2.4 kernels)\n continue\n\n fieldname= string.strip(fieldname)\n value= string.strip(value)\n\n if fieldname == 'processor' or fieldname == 'cpu cores' or fieldname == 'model name' :\n count += 1\n cpu_to_dict(cpu_info, fieldname, value, count)\n\n\n cpuinfo_file.close()\n return cpu_info", "def _load_cluster(self):", "def test_cpu_limitation(self):\n expected_dict = self.calculate_expected_values(\n load_dict=self.load_dict\n )\n assert sla_helpers.load_vm_and_check_the_load(\n load_dict=self.load_dict, expected_values=expected_dict\n )", "def calc(self):\n self.proc_blocks = [cluster.cells for cluster in self.clusters]\n self.cell_loads = 
[sum([len(cell) for cell in self.proc_blocks])]\n self.particle_loads = [cluster.np for cluster in self.clusters]\n self.imbalance = LoadBalancer.get_load_imbalance(self.particle_loads)", "def cpu_online_map():\r\n cpuinfo = get_cpuinfo()\r\n cpus = []\r\n for cpu in cpuinfo:\r\n cpus.append(cpu['processor']) # grab cpu number\r\n return cpus", "def get_loadings(self):\n return super().get_loadings()", "def _Load(self, vm, **kwargs):\n kwargs.setdefault('threads', self._default_preload_threads)\n if FLAGS.ycsb_record_count:\n kwargs.setdefault('recordcount', FLAGS.ycsb_record_count)\n for pv in FLAGS.ycsb_load_parameters:\n param, value = pv.split('=', 1)\n kwargs[param] = value\n command = self._BuildCommand('load', **kwargs)\n stdout, stderr = vm.RobustRemoteCommand(command)\n return ycsb_stats.ParseResults(\n str(stderr + stdout), self.measurement_type, _ERROR_RATE_THRESHOLD.value\n )" ]
[ "0.65984374", "0.65468526", "0.62084115", "0.6062331", "0.57889664", "0.5767226", "0.55753046", "0.5543519", "0.55034167", "0.5493241", "0.5443097", "0.535275", "0.5327818", "0.53258514", "0.53239393", "0.5265224", "0.5229792", "0.5228068", "0.5143339", "0.5137199", "0.51035786", "0.5076805", "0.50659025", "0.5043142", "0.5040668", "0.5039071", "0.5031946", "0.5026143", "0.50183916", "0.49973089" ]
0.6565332
1
info() print human readable info about the slave hosts Description Print out each slave interpreter's host name, number and type of processors, memory usage, and current load information in human readable form. info depends on the implementation of numpy_proc on each slave's host OS. It will not work for Windows slave processes. However, if you are using a Windows master to control a Linux cluster of slaves, it should work fine.
def info_list(self):
    import numpy.distutils.proc as numpy_proc
    res = self.apply(numpy_proc.machine_info,())
    return res
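A usage sketch for info_list() (editorial addition). The cluster object is assumed; the dictionary keys mirror the ones consumed by the info() printer that appears among the negatives below (cpu_count, cpu_type, mem_free, load_1).

# Hypothetical usage -- cluster is an assumed cow-style cluster object.
for worker, host_info in zip(cluster.workers, cluster.info_list()):
    # one machine_info dictionary per slave host
    print('%-8s %2dx%-6s %8.1f MB free, load %4.2f' %
          (worker.host, host_info['cpu_count'], host_info['cpu_type'],
           host_info['mem_free'], host_info['load_1']))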
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def info(self):\n import string\n results = self.info_list()\n labels = \"%-8s %-9s %-4s %-8s %-8s %-4s\" % \\\n ('MACHINE','CPU','GHZ','MB TOTAL',\n 'MB FREE','LOAD')\n print labels\n for i in range(len(self.workers)):\n name = string.split(self.workers[i].host,'.')[0]\n res = results[i]\n s = \"%-8s %2dx%-6s %4.1f %8.1f %8.1f %4.2f\" % \\\n (name[-8:], res['cpu_count'],res['cpu_type'][-6:], \\\n res['cpu_speed'],res['mem_total'],res['mem_free'],\\\n res['load_1'])\n print s", "def run_info(self):\n return \"MPI: %d, OMP: %d\" % (self.mpi_procs, self.omp_threads)", "def _get_core_membind_info():\n args = [\"lscpu\", \"--parse=CPU,Core,Socket,Node\"]\n process_lscpu = subprocess.check_output(args, universal_newlines=True).split(\"\\n\")\n\n # Get information about core, node, socket and cpu. On a machine with no NUMA nodes, the last column is empty\n # so regex also check for empty string on the last column\n bind_info = []\n for line in process_lscpu:\n pattern = r\"^([\\d]+,[\\d]+,[\\d]+,([\\d]+|$))\"\n regex_out = re.search(pattern, line)\n if regex_out:\n bind_info.append(regex_out.group(1).strip().split(\",\"))\n\n return bind_info", "def serverinfo( self, mess, args):\n version = open('/proc/version').read().strip()\n loadavg = open('/proc/loadavg').read().strip()\n\n return '%snn%s' % ( version, loadavg, )", "def serverinfo( self, mess, args):\n version = open('/proc/version').read().strip()\n loadavg = open('/proc/loadavg').read().strip()\n\n return '%s\\n\\n%s' % ( version, loadavg, )", "def getSlaveNames():", "def remote_info():\n run('uname -a')", "def print_host_info(total_cores, used_cores, total_nodes, empty_nodes, desired_host, \n disabled_cores, disabled_nodes):\n \n print(str(desired_host).ljust(TERMWIDTH))\n print('-'.center(60, '-'))\n print('Total Cores:'.ljust(int(TERMWIDTH/2)) + str(total_cores).ljust(int(TERMWIDTH/2)))\n print('Used Cores:'.ljust(int(TERMWIDTH/2)) + str(used_cores).ljust(int(TERMWIDTH/2)))\n print('Free Cores:'.ljust(int(TERMWIDTH/2)) + str(total_cores - used_cores - disabled_cores).ljust(int(TERMWIDTH/2)))\n print('Disabled/Error Cores:'.ljust(int(TERMWIDTH/2)) + str(disabled_cores).ljust(int(TERMWIDTH/2)))\n print(\"\")\n print('Total Nodes:'.ljust(int(TERMWIDTH/2)) + str(total_nodes).ljust(int(TERMWIDTH/2)))\n print('Used Nodes:'.ljust(int(TERMWIDTH/2)) + str(total_nodes - empty_nodes - disabled_nodes).ljust(int(TERMWIDTH/2)))\n print('Disabled/Error Nodes:'.ljust(int(TERMWIDTH/2)) + str(disabled_nodes).ljust(int(TERMWIDTH/2)))\n print('Empty Nodes:'.ljust(int(TERMWIDTH/2)) + str(empty_nodes).ljust(int(TERMWIDTH/2)))\n return", "def slave_status():\n run_mysql_command(\"SHOW SLAVE STATUS\\G;\")", "def sys_info(self):\n\n for i in self._nodes.items():\n print(\"\\n==============================\")\n name = i[0]\n node = i[1]\n\n print(\"NODE: {}\\n\".format(name))\n\n # CPU\n print(\"CPU:\")\n self.cpu_info(node)\n\n # Grub\n print(\"\\nGrub Command Line:\")\n if \"grub\" in node:\n print(\" Current: {}\".format(node[\"grub\"][\"current_cmdline\"]))\n print(\" Configured: {}\".format(node[\"grub\"][\"default_cmdline\"]))\n\n # Huge Pages\n print(\"\\nHuge Pages:\")\n self.hugepage_info(node)\n\n # Devices\n print(\"\\nDevices:\")\n self.device_info(node)\n\n # Status\n print(\"\\nVPP Service Status:\")\n state, errors = VPPUtil.status(node)\n print(\" {}\".format(state))\n for e in errors:\n print(\" {}\".format(e))\n\n # Minimum system resources\n self.min_system_resources(node)\n\n print(\"\\n==============================\")", "def 
print_detailed_host(total_cores, used_cores, total_nodes, empty_nodes, desired_host, \n disabled_cores, disabled_nodes, node_list):\n \n print('\\nDetailed info pertaining to: ' + desired_host)\n print('Total Nodes: {0}'.format(str(len(node_list)))) \n print('Total Cores : {0}'.format(total_cores) + PRINT_INDENT + 'Used Cores: {0}'.format(used_cores)\n + PRINT_INDENT + 'Free Cores: {0}'.format(str(total_cores - used_cores - disabled_cores)) \n + PRINT_INDENT + 'Disabled Cores: {0}'.format(disabled_cores))\n print('\\nThe following is a list of each node within {0}:\\n'.format(desired_host))\n print('Node name'.ljust(int(TERMWIDTH/2)) + 'Used Cores/Total Cores')\n for node in node_list:\n cores = str(node.get_used()) + '/' + str(node.get_total())\n if node.get_disabled_switch():\n disabled = 'Unavailable'\n else:\n disabled = ''\n print((PRINT_INDENT + node.get_name()).ljust(int(TERMWIDTH/2)) + PRINT_INDENT + (str(cores).rjust(5,' ') \\\n + PRINT_INDENT + disabled))\n return", "def getHostInfo():", "def process_info(process):\n\thelp(process)", "def mmo_cluster_hostInfo(self, mmo_connection, inc_mongos):\n return self.mmo_execute_on_cluster(mmo_connection, \"hostInfo\", inc_mongos)", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n 
print('\\n'.join(all_stats))", "def cpuinfo(self):\n \n command = 'cat /proc/cpuinfo'\n\tpipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = pipe.communicate()\n\tinfo = stdout.strip()\n cpu_type = None\n\tn_proc = 0\n\tfor line in info.split('\\n'):\n if 'model name' in line:\n\t n_proc += 1\n if cpu_type is None:\n\t\t cpu_type = ' '.join(line.split(':')[-1].strip().split())\n\t\n\treturn (cpu_type, n_proc)", "def process_host(desired_host):\n \n node_list = []\n host_info_list = [] \n if desired_host == \"all\":\n desired_host_list = getAllMachines()\n else:\n desired_host_list = (subprocess.getoutput(\"qconf -shgrp_resolved \" + '@' + str(desired_host))).split()\n qstat = subprocess.getoutput('qstat -f')\n for host in desired_host_list:\n if qstat.find(host) != (-1):\n #Searches the long string for the index of the occurance of the specified host, then\n #parses it the string for just that one line with the host that we want.\n host_info_list.append((qstat[qstat.find(host):].split('\\n'))[0])\n #Start at with everything at 0, and will count up as encountered.\n total_nodes = 0\n total_cores = 0\n disabled_cores = 0\n used_cores = 0\n free_cores = 0\n empty_nodes = 0\n disabled_nodes = 0\n for host in host_info_list:\n #simply gathering info qstat spat out for us\n temp_node = Node((host.split()[0]))\n cores = host.split()[2].replace('/', ' ').split()\n host_used_cores = cores[1]\n host_total_cores = cores[2]\n if len(host.split()) == 6 and (host.split()[5] == 'd' or host.split()[5] == 'E' or \\\n host.split()[5] == 'au' or host.split()[5] == 'Eau' or host.split()[5] == 'Eqw' \\\n or host.split()[5] == 'adu'):\n temp_node.set_disabled_switch(True)\n disabled_cores += int(host_total_cores)\n total_cores += int(host_total_cores)\n disabled_nodes += 1\n else: \n temp_node.set_disabled_switch(False)\n used_cores += int(host_used_cores)\n total_cores += int(host_total_cores)\n free_cores += int(host_total_cores) - int(host_used_cores)\n if int(host_used_cores) == 0:\n empty_nodes += 1\n temp_node.set_cores(host_total_cores, host_used_cores)\n total_nodes += 1\n node_list.append(temp_node) \n \n if len(sys.argv) == 3:\n if sys.argv[2] == '--details':\n print_detailed_host(total_cores, used_cores, total_nodes, empty_nodes, desired_host, \n disabled_cores, disabled_nodes, node_list)\n elif sys.argv[2] == '-v' or sys.argv[2] == '--visual':\n draw_queue(total_nodes, total_cores, used_cores, empty_nodes, desired_host, disabled_cores, \n disabled_nodes, node_list, free_cores)\n else:\n print('Error: Arg syntax error with: ' + sys.argv[2])\n show_usage(23)\n elif sys.argv[1] == \"-qlong\":\n # Returning values from this host group to the qlong function\n return(total_cores, used_cores, total_nodes, empty_nodes, disabled_cores,disabled_nodes, node_list)\n elif len(sys.argv) < 3:\n print_host_info(total_cores, used_cores, total_nodes, empty_nodes, desired_host, disabled_cores, \n disabled_nodes)\n else:\n print('Error: Too many args')\n show_usage(23)\n return", "def computer_info():\n return {\n 'system': platform.system(),\n 'architecture': platform.architecture(),\n 'name': platform.node(),\n 'release': platform.release(),\n 'version': platform.version(),\n 'machine': platform.machine(),\n 'processor': platform.processor(),\n 'virtual CPUs': mproc.cpu_count(),\n 'total RAM': _get_ram(),\n }", "def slave_hosts(self) -> 'List[str]':\n raise NotImplementedError", "def _proc_info(self):\n ret = cext.proc_info(self.pid)\n assert len(ret) == 
len(pinfo_map)\n return ret", "def get_system_information(self):\n\t\tsys = platform.uname()\n\t\treturn {\n\t\t\t'hostname': sys.node,\n\t\t\t'operating_system': sys.system,\n\t\t\t'version': sys.version,\n\t\t\t'release': sys.release,\n\t\t\t'processor' : sys.processor,\n\t\t\t'processor_type': sys.machine,\n\t\t}", "def info(self, handle):\n\n # Each process group gathers their output\n\n groupstr = \"\"\n procstr = \"\"\n\n gcomm = self._comm.comm_group\n wcomm = self._comm.comm_world\n rcomm = self._comm.comm_rank\n\n if wcomm.rank == 0:\n handle.write(\"Data distributed over {} processes in {} groups\\n\".format(self._comm.world_size, self._comm.ngroups))\n\n for ob in self.obs:\n id = ob['id']\n tod = ob['tod']\n base = ob['baselines']\n nse = ob['noise']\n intrvl = ob['intervals']\n\n if gcomm.rank == 0:\n groupstr = \"observation {}:\\n\".format(id)\n groupstr = \"{} {} total samples, {} detectors\\n\".format(groupstr, tod.total_samples, len(tod.detectors))\n if intrvl is not None:\n groupstr = \"{} {} intervals:\\n\".format(groupstr, len(intrvl))\n for it in intrvl:\n groupstr = \"{} {} --> {} ({} --> {})\\n\".format(groupstr, it.first, it.last, it.start, it.stop)\n\n # rank zero of the group will print general information,\n # and each process will get its statistics.\n\n nsamp = tod.local_samples[1]\n dets = tod.local_dets\n\n procstr = \" proc {}\\n\".format(gcomm.rank)\n my_chunks = 1\n if tod.local_chunks is not None:\n my_chunks = tod.local_chunks[1]\n procstr = \"{} sample range {} --> {} in {} chunks:\\n\".format(procstr, tod.local_samples[0], (tod.local_samples[0] + nsamp - 1), my_chunks)\n \n if tod.local_chunks is not None:\n chkoff = tod.local_samples[0]\n for chk in range(tod.local_chunks[1]):\n abschk = tod.local_chunks[0] + chk\n chkstart = chkoff\n chkstop = chkstart + tod.total_chunks[abschk] - 1\n procstr = \"{} {} --> {}\\n\".format(procstr, chkstart, chkstop)\n chkoff += tod.total_chunks[abschk]\n\n if nsamp > 0:\n \n stamps = tod.read_times(local_start=0, n=nsamp)\n\n procstr = \"{} timestamps {} --> {}\\n\".format(procstr, stamps[0], stamps[-1])\n\n for dt in dets:\n procstr = \"{} det {}:\\n\".format(procstr, dt)\n\n pdata = tod.read_pntg(detector=dt, local_start=0, n=nsamp)\n\n procstr = \"{} pntg [{:.3e} {:.3e} {:.3e} {:.3e}] --> [{:.3e} {:.3e} {:.3e} {:.3e}]\\n\".format(procstr, pdata[0,0], pdata[0,1], pdata[0,2], pdata[0,3], pdata[-1,0], pdata[-1,1], pdata[-1,2], pdata[-1,3])\n\n data = tod.read(detector=dt, local_start=0, n=nsamp)\n flags, common = tod.read_flags(detector=dt, local_start=0, n=nsamp)\n procstr = \"{} {:.3e} ({}) --> {:.3e} ({})\\n\".format(procstr, data[0], flags[0], data[-1], flags[-1])\n good = np.where((flags | common) == 0)[0]\n procstr = \"{} {} good samples\\n\".format(procstr, len(good))\n min = np.min(data[good])\n max = np.max(data[good])\n mean = np.mean(data[good])\n rms = np.std(data[good])\n procstr = \"{} min = {:.4e}, max = {:.4e}, mean = {:.4e}, rms = {:.4e}\\n\".format(procstr, min, max, mean, rms)\n\n for cname in tod.cache.keys():\n procstr = \"{} cache {}:\\n\".format(procstr, cname)\n ref = tod.cache.reference(cname)\n min = np.min(ref)\n max = np.max(ref)\n mean = np.mean(ref)\n rms = np.std(ref)\n procstr = \"{} min = {:.4e}, max = {:.4e}, mean = {:.4e}, rms = {:.4e}\\n\".format(procstr, min, max, mean, rms)\n\n recvstr = \"\"\n if gcomm.rank == 0:\n groupstr = \"{}{}\".format(groupstr, procstr)\n for p in range(1, gcomm.size):\n if gcomm.rank == 0:\n recvstr = gcomm.recv(source=p, tag=p)\n groupstr = 
\"{}{}\".format(groupstr, recvstr)\n elif p == gcomm.rank:\n gcomm.send(procstr, dest=0, tag=p)\n gcomm.barrier()\n\n # the world rank 0 process collects output from all groups and\n # writes to the handle\n\n recvgrp = \"\"\n if wcomm.rank == 0:\n handle.write(groupstr)\n for g in range(1, self._comm.ngroups):\n if wcomm.rank == 0:\n recvgrp = rcomm.recv(source=g, tag=g)\n handle.write(recvgrp)\n elif g == self._comm.group:\n if gcomm.rank == 0:\n rcomm.send(groupstr, dest=0, tag=g)\n wcomm.barrier()\n\n return", "def getSlave(name):", "def cpu_info(node):\n\n cpu = CpuUtils.get_cpu_info_per_node(node)\n\n item = \"Model name\"\n if item in cpu:\n print(\"{:>20}: {}\".format(item, cpu[item]))\n item = \"CPU(s)\"\n if item in cpu:\n print(\"{:>20}: {}\".format(item, cpu[item]))\n item = \"Thread(s) per core\"\n if item in cpu:\n print(\"{:>20}: {}\".format(item, cpu[item]))\n item = \"Core(s) per socket\"\n if item in cpu:\n print(\"{:>20}: {}\".format(item, cpu[item]))\n item = \"Socket(s)\"\n if item in cpu:\n print(\"{:>20}: {}\".format(item, cpu[item]))\n item = \"NUMA node(s)\"\n numa_nodes = 0\n if item in cpu:\n numa_nodes = int(cpu[item])\n for i in range(0, numa_nodes):\n item = \"NUMA node{} CPU(s)\".format(i)\n print(\"{:>20}: {}\".format(item, cpu[item]))\n item = \"CPU max MHz\"\n if item in cpu:\n print(\"{:>20}: {}\".format(item, cpu[item]))\n item = \"CPU min MHz\"\n if item in cpu:\n print(\"{:>20}: {}\".format(item, cpu[item]))\n\n if node[\"cpu\"][\"smt_enabled\"]:\n smt = \"Enabled\"\n else:\n smt = \"Disabled\"\n print(\"{:>20}: {}\".format(\"SMT\", smt))\n\n # VPP Threads\n print(\"\\nVPP Threads: (Name: Cpu Number)\")\n vpp_processes = cpu[\"vpp_processes\"]\n for i in vpp_processes.items():\n print(\" {:10}: {:4}\".format(i[0], i[1]))", "def runSlaveRun():\n\n np.set_printoptions(linewidth=1000)\n function = None\n options = None\n\n # print(\"Process {}/{} reporting for duty!\".format(rank, size))\n\n function = comm.bcast(function, root=0)\n arguments = comm.scatter(options, root=0)\n\n results = function(*arguments)\n\n comm.Barrier()\n comm.gather(results, root=0)\n comm.Disconnect()", "def __init__(self, is_master, track_processes, write_profile,\n verbose_cluster_stats):\n my_ip = appscale_info.get_private_ip()\n lb_ips = appscale_info.get_load_balancer_ips()\n\n self._is_lb = my_ip in lb_ips\n if is_master is not None:\n self._is_master = is_master\n else:\n self._is_master = my_ip == appscale_info.get_headnode_ip()\n self._track_processes = track_processes\n self._write_profile = write_profile\n\n # There are 3 kinds of local stats (node/processes/proxies)\n self._local_node_stats = LocalStats(\n cache_size=NODE_STATS_CACHE_SIZE,\n update_interval=UPDATE_NODE_STATS_INTERVAL)\n self._local_processes_stats = LocalStats(\n cache_size=PROCESSES_STATS_CACHE_SIZE,\n update_interval=UPDATE_PROCESSES_STATS_INTERVAL)\n self._local_proxies_stats = LocalStats(\n cache_size=PROXIES_STATS_CACHE_SIZE,\n update_interval=UPDATE_PROXIES_STATS_INTERVAL)\n\n if self._is_master:\n # And 3 same kinds of cluster stats\n self._cluster_nodes_stats = ClusterStats(\n cache_size=CLUSTER_NODES_STATS_CACHE_SIZE,\n update_interval=UPDATE_CLUSTER_NODES_STATS_INTERVAL)\n self._cluster_processes_stats = ClusterStats(\n cache_size=CLUSTER_PROCESSES_STATS_CACHE_SIZE,\n update_interval=UPDATE_CLUSTER_PROCESSES_STATS_INTERVAL)\n self._cluster_proxies_stats = ClusterStats(\n cache_size=CLUSTER_PROXIES_STATS_CACHE_SIZE,\n update_interval=UPDATE_CLUSTER_PROXIES_STATS_INTERVAL)\n\n if not 
verbose_cluster_stats:\n # To reduce slave-to-master traffic and verbosity of cluster stats\n # you can select which fields of stats to collect on master\n self._cluster_nodes_stats.included_field_lists = {\n 'node': ['cpu', 'memory', 'partitions_dict', 'loadavg'],\n 'node.cpu': ['percent', 'count'],\n 'node.memory': ['available'],\n 'node.partition': ['free', 'used'],\n 'node.loadavg': ['last_5min'],\n }\n self._cluster_processes_stats.included_field_lists = {\n 'process': ['monit_name', 'unified_service_name', 'application_id',\n 'port', 'cpu', 'memory', 'children_stats_sum'],\n 'process.cpu': ['user', 'system', 'percent'],\n 'process.memory': ['resident', 'virtual', 'unique'],\n 'process.children_stats_sum': ['cpu', 'memory'],\n }\n self._cluster_proxies_stats.included_field_lists = {\n 'proxy': ['name', 'unified_service_name', 'application_id',\n 'frontend', 'backend'],\n 'proxy.frontend': ['scur', 'smax', 'rate', 'req_rate', 'req_tot'],\n 'proxy.backend': ['qcur', 'scur', 'hrsp_5xx', 'qtime', 'rtime'],\n }\n\n # All routes (handlers will be assigned during configuration)\n self._routes = {\n '/stats/local/node/cache': None,\n '/stats/local/node/current': None,\n '/stats/local/processes/cache': None,\n '/stats/local/processes/current': None,\n '/stats/local/proxies/cache': None,\n '/stats/local/proxies/current': None,\n '/stats/cluster/nodes': None,\n '/stats/cluster/processes': None,\n '/stats/cluster/proxies': None,\n }\n self._publishers = []", "async def sysinfo(self, ctx: Context):\n\t\tstart = time.perf_counter()\n\t\tend = time.perf_counter()\n\t\tduration = (end - start) * 1000\n\t\tcpuavg = psutil.cpu_percent(interval=None)\n\t\tmem = psutil.virtual_memory()[2]\n\t\tdurround = round(duration, 3)\n\t\tosun = os.uname()\n\t\tawait self.send(f\"System Info | CPU: {cpuavg}% | RAM: {mem}% | Latency: {durround * 1000}ms | OS: {sys.platform}\", whisper=[ctx.author.id])", "def procinfo() -> None:\n if pwndbg.gdblib.qemu.is_qemu():\n print(\n message.error(\n \"QEMU target detected: showing result for the qemu process\"\n \" - so it will be a bit inaccurate (excessive for the parts\"\n \" used directly by the qemu process)\"\n )\n )\n exe = pwndbg.auxv.get()[\"AT_EXECFN\"]\n print(\"%-10s %r\" % (\"exe\", exe))\n\n proc = Process()\n\n # qemu-usermode fail!\n if not proc.status:\n return\n\n print(\"%-10s %s\" % (\"cmdline\", proc.cmdline))\n\n print(\"%-10s %s\" % (\"cwd\", proc.cwd))\n\n files = dict(proc.open_files)\n\n for c in proc.connections:\n files[c.fd] = str(c)\n\n print(\"%-10s %s\" % (\"pid\", proc.pid))\n print(\"%-10s %s\" % (\"tid\", proc.tid))\n\n if proc.selinux != \"unconfined\":\n print(\"%-10s %s\" % (\"selinux\", proc.selinux))\n\n print(\"%-10s %s\" % (\"ppid\", proc.ppid))\n\n if not pwndbg.gdblib.android.is_android():\n print(\"%-10s %s\" % (\"uid\", proc.uid))\n print(\"%-10s %s\" % (\"gid\", proc.gid))\n print(\"%-10s %s\" % (\"groups\", proc.groups))\n else:\n print(\"%-10s %s\" % (\"uid\", list(map(pwndbg.lib.android.aid_name, proc.uid))))\n print(\"%-10s %s\" % (\"gid\", list(map(pwndbg.lib.android.aid_name, proc.gid))))\n print(\"%-10s %s\" % (\"groups\", list(map(pwndbg.lib.android.aid_name, proc.groups))))\n\n for fd, path in files.items():\n if not set(path) < set(string.printable):\n path = repr(path)\n\n print(\"%-10s %s\" % (\"fd[%i]\" % fd, path))\n\n return", "def load(self):\n import string\n import numpy.distutils.proc as numpy_proc\n results = self.load_list()\n for i in range(len(self.workers)):\n name = 
string.split(self.workers[i].host,'.')[0]\n res = results[i]\n s = \"%6s: %1.2f,\" % (name[-6:], res['load_1'])\n print s,\n if not ((i+1) % 5):\n print", "def do_serverinfo(self, server):\n print('QManager server:', self._server)\n server_info = self._qm.get_server_info()\n for k, v in server_info.items():\n print(' %s: %s' % (k, v))" ]
[ "0.6398009", "0.6030625", "0.6001482", "0.5990546", "0.5967216", "0.59123135", "0.5875806", "0.5846", "0.57706267", "0.5767146", "0.5541231", "0.5497143", "0.54949677", "0.5453637", "0.5431881", "0.5421146", "0.54144233", "0.5394256", "0.53710485", "0.5361586", "0.53574234", "0.5315297", "0.5313989", "0.5291599", "0.5291586", "0.5275746", "0.5253754", "0.52497363", "0.5248493", "0.5179227" ]
0.6628613
0
ps(sort_by='cpu', **filters) list processes on slave machines. Description List all the processes on all remote slave machines. This is like a clusterwide Unix ps command and is output in a similar human readable form. The sort_by argument allows you to sort the process list by various fields including pid, cpu, user, machine, memory, state, and command. Keyword arguments are used as filters to limit the number of processes displayed. For example, the keyword user='ej' will only list processes for user ej, and cpu='>10' will only list processes using more than 10% of the cpu cycles. ps depends on the implementation of numpy_proc on each slave's host OS. It will not work for Windows slave processes. However, if you are using a Windows master to control a Linux cluster of slaves, it should work fine.
def ps(self,sort_by='cpu',**filters):
    psl = self.ps_list(sort_by,**filters)
    if len(psl):
        print psl[0].labels_with_name()
    for i in psl:
        print i.str_with_name()
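Illustrative calls (editorial addition); cluster is an assumed cow-style cluster object, and the filter semantics follow the docstring above.

# Hypothetical usage of the clusterwide ps command.
cluster.ps()                       # all slave processes, sorted by cpu usage
cluster.ps(sort_by='memory')       # sort by memory consumption instead
cluster.ps(user='ej', cpu='>10')   # only user ej's processes using more than 10% cpu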
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ps_list(self,sort_by='cpu',**filters):\n import operator\n import numpy.distutils.proc as numpy_proc\n res = self.apply(numpy_proc.ps_list,())\n psl = reduce(operator.add,res)\n psl = numpy_proc.ps_sort(psl,sort_by,**filters)\n return psl", "def ListProcesses(self):\n stdout, stderr = self.RunCmdOnDevice(\n [\n '/bin/ps', '--no-headers', '-A', '-o', 'pid,ppid,args:4096,state'\n ],\n quiet=True)\n assert stderr == '', stderr\n procs = []\n for l in stdout.split('\\n'):\n if l == '':\n continue\n m = re.match(r'^\\s*(\\d+)\\s+(\\d+)\\s+(.+)\\s+(.+)', l, re.DOTALL)\n assert m\n procs.append((int(m.group(1)), m.group(3).rstrip(), int(m.group(2)),\n m.group(4)))\n logging.debug(\"ListProcesses(<predicate>)->[%i processes]\" % len(procs))\n return procs", "def ListProcesses(self):\n stdout, stderr = self.RunCmdOnDevice(\n ['/bin/ps', '--no-headers', '-A', '-o', 'pid,ppid,args:4096,state'],\n quiet=True)\n assert stderr == '', stderr\n procs = []\n for l in stdout.split('\\n'):\n if l == '':\n continue\n m = re.match(r'^\\s*(\\d+)\\s+(\\d+)\\s+(.+)\\s+(.+)', l, re.DOTALL)\n assert m\n procs.append(\n (int(m.group(1)), m.group(3).rstrip(), int(m.group(2)), m.group(4)))\n logging.debug(\"ListProcesses(<predicate>)->[%i processes]\" % len(procs))\n return procs", "def get_processes_running():\r\n p = [] #array of processes\r\n if platform == \"linux\" or platform == \"linux2\":\r\n for proc in psutil.process_iter():\r\n try:\r\n tmp=Process(proc.name(),int(proc.pid),proc.username(),int(0),int(0))\r\n p.append(tmp)\r\n except:\r\n continue\r\n return (p)\r\n\t\t\t\r\n tasks = check_output(['tasklist']).decode('cp866', 'ignore').split(\"\\r\\n\")\r\n for task in tasks:\r\n m = re.match(b'(.*?)\\\\s+(\\\\d+)\\\\s+(\\\\w+)\\\\s+(\\\\w+)\\\\s+(.*?)\\\\s.*', task.encode())\r\n if m is not None:\r\n tmp=Process(m.group(1).decode(),int(m.group(2).decode()),m.group(3).decode(),int(m.group(4).decode()),int(m.group(5).decode('ascii', 'ignore')))\r\n p.append(tmp)\r\n #m.group(1).decode() image name\r\n #m.group(2).decode() process id\r\n #m.group(3).decode() session_name\r\n #m.group(4).decode() session_num\r\n #m.group(5).decode('ascii', 'ignore') memory usage\r\n return(p)", "def list_processes(pid, name):\n \n if not pid and not name:\n rc, out, err = j.sal.process.execute(\"ps ax\")\n click.echo(out)\n elif name:\n click.echo(j.sal.process.psfind(name))\n elif pid:\n click.echo(j.sal.process.getProcessPid(pid))", "def get_processes():\n cmd = 'ps -do pid:1,cmd' # linux command to run\n processes = {}\n\n with os.popen(cmd) as out:\n # see https://stackoverflow.com/questions/24362007/\n next(out.__iter__()) # skip header (first line)\n\n for line in out:\n # sepate pid and command in a tuple\n p = line.rstrip('\\n').split(' ', 2)\n\n # skip kernel threads\n if p[1][0] == '[':\n continue\n\n processes[p[0]] = p[1]\n\n return processes", "def process():\n interesting_procs = set(INTERESTING_PROCESSES)\n\n pids = psutil.pids()\n info = {\n \"stats_type\": \"process\",\n \"proc\": {\n \"count\": len(pids),\n }\n }\n proc_root = os.environ.get(\"PROC_ROOT\", \"/proc\")\n for pid in pids:\n proc_info = proc.core.Process.from_path(\n os.path.join(proc_root, str(pid)))\n\n proc_name = get_proc_name(proc_info, interesting_procs)\n if not proc_name:\n continue\n\n if 'sshd' in proc_name and ':' in proc_info.cmdline:\n continue\n\n if proc_name not in info['proc']:\n info['proc'][proc_name] = {\n 'running': proc_info.state in ('R', 'S', 'D', 'T', 'W'),\n 'pid': proc_info.pid,\n 'ppid': proc_info.ppid,\n 
'user_time': int(proc_info.stat_fields[16]), # cutime\n 'sys_time': int(proc_info.stat_fields[17]), # cstime\n 'vsize': proc_info.vsize,\n 'rss': proc_info.rss,\n 'voluntary_ctxt_switches': int(proc_info.status_fields[\n 'voluntary_ctxt_switches']),\n 'nonvoluntary_ctxt_switches': int(proc_info.status_fields[\n 'nonvoluntary_ctxt_switches']),\n 'age': proc_info.runtime,\n 'count': 1\n }\n else:\n pinfo = info['proc'][proc_name]\n pinfo['count'] += 1\n\n def append(dest, field, value):\n \"\"\"Append values for an existing process.\"\"\"\n if isinstance(dest[field], list):\n dest[field].append(value)\n else:\n dest[field] = [dest[field], value]\n\n # append('state', proc_info.state)\n append(pinfo, 'pid', proc_info.pid)\n append(pinfo, 'ppid', proc_info.ppid)\n pinfo['user_time'] += int(proc_info.stat_fields[16]) # cutime\n pinfo['sys_time'] += int(proc_info.stat_fields[17]) # cstime\n pinfo['vsize'] += proc_info.vsize\n pinfo['rss'] += proc_info.rss\n pinfo['voluntary_ctxt_switches'] = \\\n int(proc_info.status_fields['voluntary_ctxt_switches'])\n pinfo['nonvoluntary_ctxt_switches'] = \\\n int(proc_info.status_fields['nonvoluntary_ctxt_switches'])\n append(pinfo, 'age', proc_info.runtime)\n\n return info", "def pids():\n stream = os.popen(\"ps aux | grep '[m]itm' | awk '{print $2}'\")\n return stream.read()", "def get_processes(sort_by_name=True):\r\n if sort_by_name:\r\n return sorted(_list_processes(), key=cmp_to_key(\r\n lambda p1, p2: (cmp(p1.name, p2.name) or cmp(p1.pid, p2.pid))\r\n ))\r\n else:\r\n return sorted(_list_processes(), key=cmp_to_key(\r\n lambda p1, p2: (cmp(p1.pid, p2.pid) or cmp(p1.name, p2.name))\r\n ))", "def getProcs(**options):\n procSeq = search.ProcSearch.byOptions(**options).procs\n return [Proc(p) for p in procSeq.procs]", "def ps(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, self.user, self.password)\n print(vmrun.listProcessesInGuest())", "def get_processes_info():\n processes_list = []\n for proc in get_processes():\n try:\n # Fetch process details as dict\n pinfo = proc.as_dict(attrs=[\"pid\", \"name\", \"username\"])\n pinfo[\"rss\"] = proc.memory_info().rss / (1024 * 1024)\n pinfo[\"ports\"] = []\n try:\n connections = proc.connections()\n except psutil.Error:\n continue\n if connections:\n for conn in connections:\n pinfo[\"ports\"].append({\"port\": conn.laddr.port, \"status\": conn.status})\n # Append dict to list\n processes_list.append(pinfo)\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n processes_list = sorted(processes_list, key=lambda procObj: procObj[\"rss\"], reverse=True)\n return processes_list[:25]", "def get_processes_list_within_container(self):\n items_list = []\n proc_item = []\n procs_dict = {}\n\n try:\n p = Popen(DOCKER_TOP_CMD.format(self.container_id), shell=True, stdout=PIPE, stderr=PIPE)\n stdout_dump, stderr_data = p.communicate()\n except Exception as e:\n log.debug('{}[*]{} {}'.format(DFbase.LOG_ERROR_COLOR,\n DFbase.LOG_INFO_COLOR,e))\n return False\n\n procs_lines = stdout_dump.decode('utf-8')\n procs_lines = procs_lines.split(\"\\n\")\n\n for procs_item in procs_lines:\n if 'USER' in procs_item:\n continue\n elif len(procs_item):\n proc_item.append(procs_item)\n\n for item in proc_item:\n x = item.split(None, 4)\n log.debug('{}[*]{} PID:{}, {}, {}, {}, {}'.format(DFbase.LOG_DEBUG_COLOR, \n DFbase.LOG_INFO_COLOR, x[0], x[1],x[2],x[3],x[4]))\n procs_dict['USER'] = x[0]\n procs_dict['PID'] = x[1]\n 
procs_dict['PPID'] = x[2]\n procs_dict['STIME'] = x[3]\n procs_dict['CMD'] = x[4]\n\n items_list.append(procs_dict.copy())\n\n procs_path = self.artifacts_path + '/' + 'top_command.json'\n with open(procs_path, 'w') as f:\n json.dump(items_list, f, indent=4)\n\n self.copy_executable(items_list)\n\n return True", "def list_java_processes():\n for line in shell_command_output('jps -l').splitlines():\n line = line.strip()\n if len(line) == 0:\n continue\n (pid, class_name) = line.split()\n yield (int(pid), class_name)", "def pids(node, java_class):\n cmd = \"ps -C java -wwo pid,args | grep '%s' | awk -F' ' '{print $1}'\" % java_class\n\n return [int(pid) for pid in node.account.ssh_capture(cmd, allow_fail=True)]", "def list(self):\n return self._list('/os-psvm', 'psvms')", "def get_processes(self):\n processes = {}\n # Get ps output\n cmd = [\"ps\", \"-Z\"]\n # Split by newlines and remove first line (\"LABEL USER PID PPID NAME\")\n # TODO: surround with try/except?\n psz = subprocess.check_output(self.shell + cmd).decode().split('\\n')[1:]\n for line in psz:\n line = line.strip(\"\\r\")\n if line:\n try:\n p = Process(line, self.android_version)\n except ValueError as e:\n self.log.warning(e)\n else:\n processes[p.pid] = p\n return processes", "def _select_processes(self):\n\n # check if at least one process is running\n is_running = False\n for pid in self.__pids:\n if ProcessMonitor.__is_running(pid):\n is_running = True\n break # at least one process is running\n\n if is_running:\n if not self.__aggregate_multiple_processes:\n return self.__pids\n\n # aggregate metrics, check the last discovered time\n if (\n self.__last_discovered\n and time.time() * 1000 - self.__last_discovered\n < self.__process_discovery_interval * 1000\n ):\n return self.__pids\n\n ps = ProcessList()\n if self.__commandline_matcher:\n self.__last_discovered = time.time() * 1000\n if self.__include_child_processes:\n matched_processes = ps.get_matches_commandline_with_children(\n self.__commandline_matcher\n )\n else:\n matched_processes = ps.get_matches_commandline(\n self.__commandline_matcher\n )\n self.__pids = matched_processes\n\n if not self.__aggregate_multiple_processes and len(self.__pids) > 1:\n # old behaviour where multiple processes were not supported for aggregation\n self._logger.warning(\n \"Multiple processes match the command '%s'. Returning existing pid. \"\n \"You can turn on the multi process aggregation support by adding the \"\n \"aggregate_multiple_processes configuration to true\"\n % self.__commandline_matcher,\n limit_once_per_x_secs=300,\n limit_key=\"linux-process-monitor-existing-pid\",\n )\n self.__pids = [self.__pids[0]]\n else:\n # See if the specified target pid is running. 
If so, then return it.\n # Special cases:\n # '$$' mean this process.\n # '$$TBD' mean that the PID of the target process has not been determined yet and it will be set later.\n pids = []\n if self.__target_pids:\n for t_pid in self.__target_pids:\n if t_pid == \"$$\":\n t_pid = int(os.getpid())\n\n # skip this until it will be replaced with a real PID.\n elif t_pid == \"$$TBD\":\n continue\n else:\n t_pid = int(t_pid)\n pids.append(t_pid)\n self.__pids = pids\n return self.__pids", "def ps():\n for p in psutil.process_iter():\n try:\n pid = p.pid\n name = p.name()\n cmdline = p.cmdline()\n except psutil.AccessDenied:\n continue\n\n print(\"%5d %10s %s\" % (pid, name, cmdline))", "def do_list(self,line):\n # app_logger = self.construct_logger(rta_constants.PROPERTIES_LOG_FILE)\n obj = ProcessInfo('jobs')\n process_list = obj.handle_parameter()\n\n if process_list:\n # get the hostname\n hostname = process_list[0]\n del process_list[0]\n process_list = obj.extract_process(process_list)\n # print 'dict is here$$$$$'\n # sys.exit(1)\n dict_processor = []\n for proc_val in process_list:\n if proc_val.search_result ==0:\n dict_processor.append({'processor':proc_val.name,'status':'Stopped','PID':str(proc_val.pid)})\n\n elif proc_val.search_result >=1:\n dict_processor.append({'processor':proc_val.name,'status':'Running','PID':str(proc_val.pid)})\n # dict_processor[proc_val.name] = 'Running'\n # print (\"|%-20s|%-5s|\"%(proc_val.name,proc_val.search_result))\n # print dict_processor\n print('##############################################')\n print('PID #'+' Processor #'+' Status')\n print('##############################################')\n spark_ls = []\n for processor in dict_processor:\n if processor.get('processor') == 'spark<spark_worker>' or processor.get('processor') == 'spark<spark_master>':\n spark_ls.append(processor)\n del dict_processor[dict_processor.index(processor)]\n # print dict_processor\n for processor in dict_processor:\n space_pid = 7 - len(processor.get('PID'))\n space_name = 30 - len(processor.get('processor'))\n if processor.get('status') == 'Running':\n print str(processor.get('PID'))+space_pid*' '+processor.get('processor') + space_name*' '+ '\\33[32m' +processor.get('status')+ '\\33[0m'\n else:\n print str(processor.get('PID'))+space_pid*' '+processor.get('processor') + space_name*' '+ '\\33[33m' +processor.get('status')+ '\\33[0m'\n # space_num = 30 - len(k)\n # print k + space_num*' '+v\n print 7*' '+'spark'\n for item in spark_ls:\n space_pid = 8 - len(item.get('PID'))\n space_name = 29 - len(item.get('processor').split('<')[1].split('>')[0])\n if item.get('status')=='Running':\n print str(item.get('PID'))+space_pid*' '+item.get('processor').split('<')[1].split('>')[0] + space_name*' '+ '\\33[32m'+item.get('status')+'\\33[0m'\n else:\n print str(item.get('PID'))+space_pid*' '+item.get('processor').split('<')[1].split('>')[0] + space_name*' '+ '\\33[33m'+item.get('status')+'\\33[0m'\n print('##############################################')\n else:\n print(\"cmd is not support from this host\")", "def pids(self, node):\n try:\n cmd = \"ps ax | grep -i 'redpanda\\|node' | grep -v grep | awk '{print $1}'\"\n pid_arr = [\n pid for pid in node.account.ssh_capture(\n cmd, allow_fail=True, callback=int)\n ]\n return pid_arr\n except (RemoteCommandError, ValueError):\n return []", "def cmd_ps(args):\n\n remote.show_status(_get_current_project_name())", "def _showProcessList(self, procs):\n device_name = self._devId\n proc_list = ['%s %s %s' % (pid, name, args) for pid, (name, 
args) in sorted(procs)]\n proc_list.append('')\n log.info(\"#===== Processes on %s:\\n%s\", device_name, '\\n'.join(proc_list))", "def do_command(self, args):\n hostops = dbops.Hosts()\n listing = hostops.list(args)\n ordering = ['host_name', 'host_memory', 'host_cores',\n 'is_64bit', 'is_enabled']\n do_list(listing, ordering)", "def sub_processor(lock, pid, video_list):\r\n text = 'processor %d' % pid\r\n with lock:\r\n progress = tqdm.tqdm(\r\n total=len(video_list),\r\n position=pid,\r\n desc=text\r\n )\r\n for i in range(len(video_list)):\r\n video_name = video_list[i]\r\n \"\"\" Read result csv file \"\"\"\r\n df = pd.read_csv(os.path.join(config.post_csv_load_dir, video_name + \".csv\"))\r\n \"\"\" Calculate final score of proposals \"\"\"\r\n df['score'] = df.iou.values[:] * df.start.values[:] * df.end.values[:]\r\n if len(df) > 1:\r\n df = softNMS(df)\r\n df = df.sort_values(by=\"score\", ascending=False)\r\n video_info = video_dict[video_name]\r\n video_duration = video_info[\"duration_second\"]\r\n proposal_list = []\r\n\r\n for j in range(min(top_number, len(df))):\r\n tmp_proposal = {}\r\n tmp_proposal[\"score\"] = df.score.values[j]\r\n tmp_proposal[\"segment\"] = [max(0, df.xmin.values[j]) * video_duration,\r\n min(1, df.xmax.values[j]) * video_duration]\r\n tmp_proposal[\"label\"] = \"行走\"\r\n # tmp_proposal[\"label\"] = \"Fun sliding down\"\r\n proposal_list.append(tmp_proposal)\r\n result_dict[video_name] = proposal_list\r\n with lock:\r\n progress.update(1)\r\n\r\n with lock:\r\n progress.close()", "def eval_cpuset():\n\tnum_cpu = run('grep -c ^processor /proc/cpuinfo',quiet=True,warn_only=True)\n\tprint(red('Number of cpus : \\t'+num_cpu))", "def get_pid_of_all_slaves(containers):\n res = []\n for i in containers:\n if \"mongo\" not in i.name and \"slave\" in i.name:\n print(i.name, file=sys.stdout)\n pid = i.attrs[\"State\"][\"Pid\"]\n res.append(pid)\n return res", "def get_process_list() -> Dict:\n return {proc.pid: proc.name() for proc in psutil.process_iter()}", "def getProcessInfo():\n \n blacklist = [\"_Total\",\"Idle\"] #processes we don't care about\n \n #execute wmic command and capture output\n temp = subprocess.check_output([\"wmic\", \"path\", \"Win32_PerfRawData_PerfProc_Process\", \"get\", \n \"Name,PercentProcessorTime\"]) \n \n #iterate over processes and split into lists\n firstline = True\n result = [] #list of lists to contain the final result\n \n for line in temp.splitlines():\n if(firstline):\n firstline = False\n continue\n elif not line: #skip empty lines\n continue\n \n proclist = line.split() #split on whitespace to return a 2 element list\n \n if (proclist[0] not in blacklist ):\n result.append([proclist[0], int(proclist[1])/(10**7)]) #convert times to ints, percent processor time is in 100 nanosecond intervals\n \n \n #sort list on processor time, highest first\n result.sort(key=lambda x: x[1])\n result.reverse()\n \n # narrow process list down\n times = [x[1] for x in result]\n\n nonzero = [x for x in times if x]\n \n ind = min(int(math.ceil(len(times)/5)),len(nonzero)) #reduce processes to top 20% (atleast 1) or to all with nonzero cpu time\n cutoff = max(times[ind],1)\n \n return [x for x in result if x[1] >= cutoff]", "def get_filtered_pids(filterstr, excludes=None):\n excludes = excludes or []\n cmd = \"ps ax | grep '%s'\" % filterstr\n rc, out, err = j.core.executors.run_local(cmd)\n # print out\n found = []\n\n def checkexclude(c, excludes):\n for item in excludes:\n c = c.lower()\n if c.find(item.lower()) != -1:\n return True\n return 
False\n\n for line in out.split(\"\\n\"):\n if line.find(\"grep\") != -1 or line.strip() == \"\":\n continue\n if line.strip() != \"\":\n if line.find(filterstr) != -1:\n line = line.strip()\n if not checkexclude(line, excludes):\n # print \"found pidline:%s\"%line\n found.append(int(line.split(\" \")[0]))\n return found" ]
[ "0.66693133", "0.6300955", "0.62944216", "0.57963485", "0.5615728", "0.55979764", "0.55165416", "0.54790705", "0.54333967", "0.5377084", "0.5361904", "0.53616923", "0.5353906", "0.53328574", "0.5328634", "0.5282079", "0.52597374", "0.52357227", "0.5193442", "0.5173996", "0.5159371", "0.50896984", "0.5088218", "0.5072473", "0.50606745", "0.5051397", "0.504782", "0.5013412", "0.5012731", "0.4995018" ]
0.6801216
0
ps_list(self,sort_by='cpu',**filters) get cluster processes. Description: Return a list containing one numpy_proc.process object for each process running on the cluster host machines. process objects contain a ton of information about cpu, memory, etc. used by the process. See ps for more information.
def ps_list(self,sort_by='cpu',**filters):
        import operator
        import numpy.distutils.proc as numpy_proc
        res = self.apply(numpy_proc.ps_list,())
        psl = reduce(operator.add,res)
        psl = numpy_proc.ps_sort(psl,sort_by,**filters)
        return psl
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ps(self,sort_by='cpu',**filters):\n psl = self.ps_list(sort_by,**filters)\n if len(psl):\n print psl[0].labels_with_name()\n for i in psl: print i.str_with_name()", "def ListProcesses(self):\n stdout, stderr = self.RunCmdOnDevice(\n ['/bin/ps', '--no-headers', '-A', '-o', 'pid,ppid,args:4096,state'],\n quiet=True)\n assert stderr == '', stderr\n procs = []\n for l in stdout.split('\\n'):\n if l == '':\n continue\n m = re.match(r'^\\s*(\\d+)\\s+(\\d+)\\s+(.+)\\s+(.+)', l, re.DOTALL)\n assert m\n procs.append(\n (int(m.group(1)), m.group(3).rstrip(), int(m.group(2)), m.group(4)))\n logging.debug(\"ListProcesses(<predicate>)->[%i processes]\" % len(procs))\n return procs", "def ListProcesses(self):\n stdout, stderr = self.RunCmdOnDevice(\n [\n '/bin/ps', '--no-headers', '-A', '-o', 'pid,ppid,args:4096,state'\n ],\n quiet=True)\n assert stderr == '', stderr\n procs = []\n for l in stdout.split('\\n'):\n if l == '':\n continue\n m = re.match(r'^\\s*(\\d+)\\s+(\\d+)\\s+(.+)\\s+(.+)', l, re.DOTALL)\n assert m\n procs.append((int(m.group(1)), m.group(3).rstrip(), int(m.group(2)),\n m.group(4)))\n logging.debug(\"ListProcesses(<predicate>)->[%i processes]\" % len(procs))\n return procs", "def get_processes(sort_by_name=True):\r\n if sort_by_name:\r\n return sorted(_list_processes(), key=cmp_to_key(\r\n lambda p1, p2: (cmp(p1.name, p2.name) or cmp(p1.pid, p2.pid))\r\n ))\r\n else:\r\n return sorted(_list_processes(), key=cmp_to_key(\r\n lambda p1, p2: (cmp(p1.pid, p2.pid) or cmp(p1.name, p2.name))\r\n ))", "def get_processes_running():\r\n p = [] #array of processes\r\n if platform == \"linux\" or platform == \"linux2\":\r\n for proc in psutil.process_iter():\r\n try:\r\n tmp=Process(proc.name(),int(proc.pid),proc.username(),int(0),int(0))\r\n p.append(tmp)\r\n except:\r\n continue\r\n return (p)\r\n\t\t\t\r\n tasks = check_output(['tasklist']).decode('cp866', 'ignore').split(\"\\r\\n\")\r\n for task in tasks:\r\n m = re.match(b'(.*?)\\\\s+(\\\\d+)\\\\s+(\\\\w+)\\\\s+(\\\\w+)\\\\s+(.*?)\\\\s.*', task.encode())\r\n if m is not None:\r\n tmp=Process(m.group(1).decode(),int(m.group(2).decode()),m.group(3).decode(),int(m.group(4).decode()),int(m.group(5).decode('ascii', 'ignore')))\r\n p.append(tmp)\r\n #m.group(1).decode() image name\r\n #m.group(2).decode() process id\r\n #m.group(3).decode() session_name\r\n #m.group(4).decode() session_num\r\n #m.group(5).decode('ascii', 'ignore') memory usage\r\n return(p)", "def get_processes_info():\n processes_list = []\n for proc in get_processes():\n try:\n # Fetch process details as dict\n pinfo = proc.as_dict(attrs=[\"pid\", \"name\", \"username\"])\n pinfo[\"rss\"] = proc.memory_info().rss / (1024 * 1024)\n pinfo[\"ports\"] = []\n try:\n connections = proc.connections()\n except psutil.Error:\n continue\n if connections:\n for conn in connections:\n pinfo[\"ports\"].append({\"port\": conn.laddr.port, \"status\": conn.status})\n # Append dict to list\n processes_list.append(pinfo)\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n processes_list = sorted(processes_list, key=lambda procObj: procObj[\"rss\"], reverse=True)\n return processes_list[:25]", "def processes(self):\n # MODIFIED 11/1/16 OLD:\n return list(item.process for item in self.process_tuples)\n # # MODIFIED 11/1/16 NEW:\n # return sorted(list(item.process for item in self.process_tuples), key=lambda process: process.name)\n # MODIFIED 11/1/16 END", "def getProcs(**options):\n procSeq = search.ProcSearch.byOptions(**options).procs\n return [Proc(p) for p 
in procSeq.procs]", "def do_list(self,line):\n # app_logger = self.construct_logger(rta_constants.PROPERTIES_LOG_FILE)\n obj = ProcessInfo('jobs')\n process_list = obj.handle_parameter()\n\n if process_list:\n # get the hostname\n hostname = process_list[0]\n del process_list[0]\n process_list = obj.extract_process(process_list)\n # print 'dict is here$$$$$'\n # sys.exit(1)\n dict_processor = []\n for proc_val in process_list:\n if proc_val.search_result ==0:\n dict_processor.append({'processor':proc_val.name,'status':'Stopped','PID':str(proc_val.pid)})\n\n elif proc_val.search_result >=1:\n dict_processor.append({'processor':proc_val.name,'status':'Running','PID':str(proc_val.pid)})\n # dict_processor[proc_val.name] = 'Running'\n # print (\"|%-20s|%-5s|\"%(proc_val.name,proc_val.search_result))\n # print dict_processor\n print('##############################################')\n print('PID #'+' Processor #'+' Status')\n print('##############################################')\n spark_ls = []\n for processor in dict_processor:\n if processor.get('processor') == 'spark<spark_worker>' or processor.get('processor') == 'spark<spark_master>':\n spark_ls.append(processor)\n del dict_processor[dict_processor.index(processor)]\n # print dict_processor\n for processor in dict_processor:\n space_pid = 7 - len(processor.get('PID'))\n space_name = 30 - len(processor.get('processor'))\n if processor.get('status') == 'Running':\n print str(processor.get('PID'))+space_pid*' '+processor.get('processor') + space_name*' '+ '\\33[32m' +processor.get('status')+ '\\33[0m'\n else:\n print str(processor.get('PID'))+space_pid*' '+processor.get('processor') + space_name*' '+ '\\33[33m' +processor.get('status')+ '\\33[0m'\n # space_num = 30 - len(k)\n # print k + space_num*' '+v\n print 7*' '+'spark'\n for item in spark_ls:\n space_pid = 8 - len(item.get('PID'))\n space_name = 29 - len(item.get('processor').split('<')[1].split('>')[0])\n if item.get('status')=='Running':\n print str(item.get('PID'))+space_pid*' '+item.get('processor').split('<')[1].split('>')[0] + space_name*' '+ '\\33[32m'+item.get('status')+'\\33[0m'\n else:\n print str(item.get('PID'))+space_pid*' '+item.get('processor').split('<')[1].split('>')[0] + space_name*' '+ '\\33[33m'+item.get('status')+'\\33[0m'\n print('##############################################')\n else:\n print(\"cmd is not support from this host\")", "def get_processes(self):\n processes = {}\n # Get ps output\n cmd = [\"ps\", \"-Z\"]\n # Split by newlines and remove first line (\"LABEL USER PID PPID NAME\")\n # TODO: surround with try/except?\n psz = subprocess.check_output(self.shell + cmd).decode().split('\\n')[1:]\n for line in psz:\n line = line.strip(\"\\r\")\n if line:\n try:\n p = Process(line, self.android_version)\n except ValueError as e:\n self.log.warning(e)\n else:\n processes[p.pid] = p\n return processes", "def _showProcessList(self, procs):\n device_name = self._devId\n proc_list = ['%s %s %s' % (pid, name, args) for pid, (name, args) in sorted(procs)]\n proc_list.append('')\n log.info(\"#===== Processes on %s:\\n%s\", device_name, '\\n'.join(proc_list))", "def get_process_list() -> Dict:\n return {proc.pid: proc.name() for proc in psutil.process_iter()}", "def allprocs(self):\n\n processes = self.getHash( 'nameHash' ) # safely get copy of process name dictionary\n\n allprocs = {}\n\n for p in processes.keys():\n allprocs[p] = processes[p].procinfo()\n\n return allprocs", "def allprocs(self):\n\n processes = self.getHash( 'nameHash' ) # safely get copy of process 
name dictionary\n\n allprocs = {}\n\n for p in processes.keys():\n allprocs[p] = processes[p].procinfo()\n\n return allprocs", "def get_process_list(config):\n # get list of processes\n process_list = getlist(config.getstr('config', 'PROCESS_LIST'))\n\n out_process_list = []\n # for each item remove dashes, underscores, and cast to lower-case\n for process in process_list:\n # if instance is specified, extract the text inside parenthesis\n match = re.match(r'(.*)\\((.*)\\)', process)\n if match:\n instance = match.group(2)\n process_name = match.group(1)\n else:\n instance = None\n process_name = process\n\n wrapper_name = get_wrapper_name(process_name)\n if wrapper_name is None:\n config.logger.warning(f\"PROCESS_LIST item {process_name} \"\n \"may be invalid.\")\n wrapper_name = process_name\n\n # if MakePlots is in process list, remove it because\n # it will be called directly from StatAnalysis\n if wrapper_name == 'MakePlots':\n continue\n\n out_process_list.append((wrapper_name, instance))\n\n return out_process_list", "def get_processes_list_within_container(self):\n items_list = []\n proc_item = []\n procs_dict = {}\n\n try:\n p = Popen(DOCKER_TOP_CMD.format(self.container_id), shell=True, stdout=PIPE, stderr=PIPE)\n stdout_dump, stderr_data = p.communicate()\n except Exception as e:\n log.debug('{}[*]{} {}'.format(DFbase.LOG_ERROR_COLOR,\n DFbase.LOG_INFO_COLOR,e))\n return False\n\n procs_lines = stdout_dump.decode('utf-8')\n procs_lines = procs_lines.split(\"\\n\")\n\n for procs_item in procs_lines:\n if 'USER' in procs_item:\n continue\n elif len(procs_item):\n proc_item.append(procs_item)\n\n for item in proc_item:\n x = item.split(None, 4)\n log.debug('{}[*]{} PID:{}, {}, {}, {}, {}'.format(DFbase.LOG_DEBUG_COLOR, \n DFbase.LOG_INFO_COLOR, x[0], x[1],x[2],x[3],x[4]))\n procs_dict['USER'] = x[0]\n procs_dict['PID'] = x[1]\n procs_dict['PPID'] = x[2]\n procs_dict['STIME'] = x[3]\n procs_dict['CMD'] = x[4]\n\n items_list.append(procs_dict.copy())\n\n procs_path = self.artifacts_path + '/' + 'top_command.json'\n with open(procs_path, 'w') as f:\n json.dump(items_list, f, indent=4)\n\n self.copy_executable(items_list)\n\n return True", "def get_processes():\n cmd = 'ps -do pid:1,cmd' # linux command to run\n processes = {}\n\n with os.popen(cmd) as out:\n # see https://stackoverflow.com/questions/24362007/\n next(out.__iter__()) # skip header (first line)\n\n for line in out:\n # sepate pid and command in a tuple\n p = line.rstrip('\\n').split(' ', 2)\n\n # skip kernel threads\n if p[1][0] == '[':\n continue\n\n processes[p[0]] = p[1]\n\n return processes", "def get_cluster_parcels(self, cluster_name, view='summary'):\n return self.api_client.get_cluster_parcels(cluster_name=cluster_name,\n view=view)['items']", "def getProcessInfo():\n \n blacklist = [\"_Total\",\"Idle\"] #processes we don't care about\n \n #execute wmic command and capture output\n temp = subprocess.check_output([\"wmic\", \"path\", \"Win32_PerfRawData_PerfProc_Process\", \"get\", \n \"Name,PercentProcessorTime\"]) \n \n #iterate over processes and split into lists\n firstline = True\n result = [] #list of lists to contain the final result\n \n for line in temp.splitlines():\n if(firstline):\n firstline = False\n continue\n elif not line: #skip empty lines\n continue\n \n proclist = line.split() #split on whitespace to return a 2 element list\n \n if (proclist[0] not in blacklist ):\n result.append([proclist[0], int(proclist[1])/(10**7)]) #convert times to ints, percent processor time is in 100 nanosecond 
intervals\n \n \n #sort list on processor time, highest first\n result.sort(key=lambda x: x[1])\n result.reverse()\n \n # narrow process list down\n times = [x[1] for x in result]\n\n nonzero = [x for x in times if x]\n \n ind = min(int(math.ceil(len(times)/5)),len(nonzero)) #reduce processes to top 20% (atleast 1) or to all with nonzero cpu time\n cutoff = max(times[ind],1)\n \n return [x for x in result if x[1] >= cutoff]", "def process_query(self) -> List[ParticipatoryProcess]:\n\n component_filter: ParticipatoryProcessFilter = ParticipatoryProcessFilter()\n component_sort: ParticipatoryProcessSort = ParticipatoryProcessSort()\n response: dict = super().process_query_from_file({'filter': component_filter, 'order': component_sort})\n\n participatory_processes: List[ParticipatoryProcess] = []\n for participatory_process_dict in response['participatoryProcesses']:\n id: str = participatory_process_dict['id']\n translations: [] = participatory_process_dict['title']['translations']\n title: TranslatedField = TranslatedField.parse_from_gql(translations)\n participatory_process: ParticipatoryProcess = ParticipatoryProcess(id=id, title=title)\n participatory_processes.append(participatory_process)\n return participatory_processes", "def list(self):\n return self._list('/os-psvm', 'psvms')", "def list_java_processes():\n for line in shell_command_output('jps -l').splitlines():\n line = line.strip()\n if len(line) == 0:\n continue\n (pid, class_name) = line.split()\n yield (int(pid), class_name)", "def getAllProcessInfo(self):\r\n self._update('getAllProcessInfo')\r\n\r\n all_processes = self._getAllProcesses(lexical=True)\r\n\r\n output = []\r\n for group, process in all_processes:\r\n name = make_namespec(group.config.name, process.config.name)\r\n output.append(self.getProcessInfo(name))\r\n return output", "def get_cluster_parcels(self, cluster_name, view='summary'):\n return self._get(endpoint='{}/clusters/{}/parcels'.format(self.api_version,\n cluster_name),\n params={'view': view}).json()", "def List(cls):\n\t\tres = {}\n\t\tfor p in glob.glob(\"/proc/*/cmdline\"):\n\t\t\tprocess = p.split(\"/\")[2]\n\t\t\tif cls.RE_PID.match(process):\n\t\t\t\tres[int(process)] = cat(p).replace(\"\\x00\", \" \")\n\t\treturn res", "def process():\n interesting_procs = set(INTERESTING_PROCESSES)\n\n pids = psutil.pids()\n info = {\n \"stats_type\": \"process\",\n \"proc\": {\n \"count\": len(pids),\n }\n }\n proc_root = os.environ.get(\"PROC_ROOT\", \"/proc\")\n for pid in pids:\n proc_info = proc.core.Process.from_path(\n os.path.join(proc_root, str(pid)))\n\n proc_name = get_proc_name(proc_info, interesting_procs)\n if not proc_name:\n continue\n\n if 'sshd' in proc_name and ':' in proc_info.cmdline:\n continue\n\n if proc_name not in info['proc']:\n info['proc'][proc_name] = {\n 'running': proc_info.state in ('R', 'S', 'D', 'T', 'W'),\n 'pid': proc_info.pid,\n 'ppid': proc_info.ppid,\n 'user_time': int(proc_info.stat_fields[16]), # cutime\n 'sys_time': int(proc_info.stat_fields[17]), # cstime\n 'vsize': proc_info.vsize,\n 'rss': proc_info.rss,\n 'voluntary_ctxt_switches': int(proc_info.status_fields[\n 'voluntary_ctxt_switches']),\n 'nonvoluntary_ctxt_switches': int(proc_info.status_fields[\n 'nonvoluntary_ctxt_switches']),\n 'age': proc_info.runtime,\n 'count': 1\n }\n else:\n pinfo = info['proc'][proc_name]\n pinfo['count'] += 1\n\n def append(dest, field, value):\n \"\"\"Append values for an existing process.\"\"\"\n if isinstance(dest[field], list):\n dest[field].append(value)\n else:\n dest[field] = 
[dest[field], value]\n\n # append('state', proc_info.state)\n append(pinfo, 'pid', proc_info.pid)\n append(pinfo, 'ppid', proc_info.ppid)\n pinfo['user_time'] += int(proc_info.stat_fields[16]) # cutime\n pinfo['sys_time'] += int(proc_info.stat_fields[17]) # cstime\n pinfo['vsize'] += proc_info.vsize\n pinfo['rss'] += proc_info.rss\n pinfo['voluntary_ctxt_switches'] = \\\n int(proc_info.status_fields['voluntary_ctxt_switches'])\n pinfo['nonvoluntary_ctxt_switches'] = \\\n int(proc_info.status_fields['nonvoluntary_ctxt_switches'])\n append(pinfo, 'age', proc_info.runtime)\n\n return info", "def list_processes(pid, name):\n \n if not pid and not name:\n rc, out, err = j.sal.process.execute(\"ps ax\")\n click.echo(out)\n elif name:\n click.echo(j.sal.process.psfind(name))\n elif pid:\n click.echo(j.sal.process.getProcessPid(pid))", "def list_cluster(self, ip, x_api_session):\n log.log_debug(\"cluster object list is started\")\n list_object = ListModule.ListModule()\n object_list = list_object.listing(\"uom\", ip,\n self.root, self.content_type,\n \"Cluster\", x_api_session)\n log.log_debug(\"cluster object list is returned\")\n return object_list", "def get_running_processes(self, dev_handler):\n # Get the list of running processes on each device\n running_processes = NvmlHandler.exec_nvml_function(nvmlDeviceGetComputeRunningProcesses,dev_handler)\n\n # Turns these process objects into dicts\n running_processes_dicts = [obj.__dict__ for obj in running_processes if obj]\n\n # Enhance these dicts with information from psutil\n new_dicts = []\n for running_processes_dict in running_processes_dicts:\n\n # Init the new dict with the current information\n more_ps_infos = {}\n more_ps_infos.update(running_processes_dict)\n\n # Rename the usedGpuMemory key, if any\n if 'usedGpuMemory' in more_ps_infos:\n more_ps_infos['gpu_memory_used'] = utils.psutil_parse_readable_bytes(\n more_ps_infos.get('usedGpuMemory')\n )\n del more_ps_infos['usedGpuMemory']\n\n # Try to retreive info about the process using psutil\n try:\n pid = running_processes_dict.get('pid')\n more_ps_infos.update(utils.psutil_snapshot_process(pid))\n except Exception as e:\n logger.warning('Cannot gather info from process {}'.format(pid))\n\n new_dicts.append(more_ps_infos)\n\n return new_dicts", "def list_local_processes(self, process_type=''):\n if not process_type:\n return self.procs.values()\n\n return [p for p in self.procs.itervalues() if p.process_type == process_type]" ]
[ "0.7186785", "0.6677296", "0.6657441", "0.61431795", "0.6073727", "0.59592265", "0.58759093", "0.5791517", "0.5747416", "0.57362986", "0.5698524", "0.56719214", "0.5671053", "0.5671053", "0.5669222", "0.56527954", "0.5619255", "0.5599501", "0.5578112", "0.556991", "0.5557092", "0.5549396", "0.5481311", "0.5463704", "0.5441462", "0.54405785", "0.53560764", "0.5355282", "0.53191906", "0.53024566" ]
0.7327976
0
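A usage sketch for the ps_list record above (illustrative only: the already-started `cluster` object and the `user` filter keyword are assumptions not taken from the record; `str_with_name()` is borrowed from the related ps() snippet in the negatives):

    # Sketch: `cluster` is assumed to be a started instance of the class these methods belong to.
    top = cluster.ps_list(sort_by='cpu', user='guido')[:5]   # keyword filters narrow the listing
    for proc in top:
        print(proc.str_with_name())                          # one line per heaviest-CPU process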
nice(increment=10) > success_list. Increment all slave interpreters' nice values by increment. Note: this does not appear to work; see os.nice().
def nice(self,increment=10):
        res = self.apply(os.nice,(increment,))
        return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def num_processes(self, new_value):", "def update_cpu_through_stress(ssh_client, num_stress=0):\n if num_stress < 0:\n invalid_resource_parameter(\"CPU Allocation\", limit)\n return\n \n #Separate CPU limit into several smaller increments of 10 instead of one larger one\n for x in range(num_stress):\n cmd = \"stress -c 1 &> /dev/null & cpulimit -p $( pidof -o $! stress ) -l {} &> /dev/null &\".format(10)\n ssh_exec(ssh_client, cmd)", "def test_redis_increase_replica_count_usual_case():", "def progress(i, my_list, message=\"\"):\n\tmy_progress = (i / len(my_list)) * 100\n\tmy_progress = str(round(my_progress, 1)) + \"% \" + message\n\tsys.stdout.write('\\r')\n\tsys.stdout.write(my_progress)\n\tsys.stdout.flush()", "def incr(n=1):\n for i in xrange(n):\n pulse_hi(INCR)", "async def maintain_SCII_count(count: int, controllers: List[Controller], proc_args: List[Dict] = None):\n # kill unhealthy ones.\n if controllers:\n to_remove = []\n alive = await asyncio.wait_for(\n asyncio.gather(*(c.ping() for c in controllers if not c._ws.closed), return_exceptions=True), timeout=20\n )\n i = 0 # for alive\n for controller in controllers:\n if controller._ws.closed:\n if not controller._process._session.closed:\n await controller._process._session.close()\n to_remove.append(controller)\n else:\n if not isinstance(alive[i], sc_pb.Response):\n try:\n await controller._process._close_connection()\n finally:\n to_remove.append(controller)\n i += 1\n for c in to_remove:\n c._process._clean(verbose=False)\n if c._process in kill_switch._to_kill:\n kill_switch._to_kill.remove(c._process)\n controllers.remove(c)\n\n # spawn more\n if len(controllers) < count:\n needed = count - len(controllers)\n if proc_args:\n index = len(controllers) % len(proc_args)\n else:\n proc_args = [{} for _ in range(needed)]\n index = 0\n extra = [SC2Process(**proc_args[(index + _) % len(proc_args)]) for _ in range(needed)]\n logger.info(f\"Creating {needed} more SC2 Processes\")\n for _ in range(3):\n if platform.system() == \"Linux\":\n # Works on linux: start one client after the other\n # pylint: disable=C2801\n new_controllers = [await asyncio.wait_for(sc.__aenter__(), timeout=50) for sc in extra]\n else:\n # Doesnt seem to work on linux: starting 2 clients nearly at the same time\n new_controllers = await asyncio.wait_for(\n # pylint: disable=C2801\n asyncio.gather(*[sc.__aenter__() for sc in extra], return_exceptions=True),\n timeout=50\n )\n\n controllers.extend(c for c in new_controllers if isinstance(c, Controller))\n if len(controllers) == count:\n await asyncio.wait_for(asyncio.gather(*(c.ping() for c in controllers)), timeout=20)\n break\n extra = [\n extra[i] for i, result in enumerate(new_controllers) if not isinstance(new_controllers, Controller)\n ]\n else:\n logger.critical(\"Could not launch sufficient SC2\")\n raise RuntimeError\n\n # kill excess\n while len(controllers) > count:\n proc = controllers.pop()\n proc = proc._process\n logger.info(f\"Removing SCII listening to {proc._port}\")\n await proc._close_connection()\n proc._clean(verbose=False)\n if proc in kill_switch._to_kill:\n kill_switch._to_kill.remove(proc)", "def add_succeed(self, value: int = None):\n\n if value == 0:\n raise ValueError(\"You really want to increment of 0?\")\n\n if value is None:\n self.test_run.actual += 1\n self.test_run.succeed += 1\n else:\n self.test_run.actual += value\n self.test_run.succeed += value\n\n self.__send_update()", "def setNice(self, nice=0):\n self.nice = nice", "def increase_progress(self, value):\r\n\r\n 
pass", "def slow_sum( nsecs, x, y ):\n print(\"Process %s going to sleep for %d second(s)\" \\\n % (current_process().pid,nsecs))\n\n time.sleep(nsecs)\n\n print(\"Process %s waking up\" % current_process().pid)\n\n return x+y", "def increment(self, inc):\n self.done += inc", "def increment_counter(self) -> None:", "def num_processes():\n return 1", "def enable_nice(node):\n Helpers.__warn_if_not_empty(node)\n node.prefix = \"nice\"", "def increment(self, succeeded=0, failed=0, skipped=0, retried_nomax=0, retried_withmax=0, state=None):\r\n self.attempted += (succeeded + failed)\r\n self.succeeded += succeeded\r\n self.failed += failed\r\n self.skipped += skipped\r\n self.retried_nomax += retried_nomax\r\n self.retried_withmax += retried_withmax\r\n if state is not None:\r\n self.state = state", "def test_it(self):\n self.n += 1\n if self.n >= 5:\n self.fail(\"eventually failing\")", "def renice(self,process_list,level):\n res = []\n pids = {}\n for process in process_list:\n if hasattr(process,'machine'):\n try:\n worker = self.worker_by_name[process.machine]\n except KeyError:\n worker = self.worker_by_name[process.long_machine]\n pid = process.pid\n else:\n worker = self.workers[process[0]]\n pid = process[1]\n try:\n pids[worker] = pids[worker] + ' ' + str(pid)\n except:\n pids[worker] = str(pid)\n for worker,value in pids.items():\n arg = 'renice %d -p %s' % (level,value)\n res.append(worker.apply(os.system,(arg,)))\n return res", "def _tally(self, user_gpio, level, tick):\n self.count += 1", "def pairTest(clients, servers):\n results = []\n #initOutput( opts.outfile )\n # 9 categories in linux 2.6+\n cpuHeader = ( 'cpu(start,stop,user%,nice%,sys%,idle%,iowait%,'\n 'irq%,sirq%,steal%,guest%)' )\n for pairs in [1]:\n #net.start()\n intervals, cpuEntries = iperfPairs(clients, servers )\n #net.stop()\n # Write output incrementally in case of failure\n result = { 'pairs': pairs, 'results': intervals,\n cpuHeader: cpuEntries }\n #appendOutput( opts, [ result ] )\n results += [ result ]\n return results", "def progress_table(machine_list):\n # go into a loop\n # print machine_list\n finished = 0\n # calculate max name length\n cs_dict = {}\n # welp, maybe benerate a new list of servers? 
referencing problems seem\n # to be causing the progress to not update\n \n for machine in machine_list:\n try:\n cs_dict[machine] = cs.servers.find(id=machine_list[machine].id)\n except novaclient.exceptions.NotFound:\n cs_dict[machine] = None\n \n cs_dict = machine_list\n \n while True:\n name_len = 0\n # machines = cs.list()\n # machines = machine_list\n # format the columns for the longest names\n for machine in cs_dict:\n m = cs_dict[machine]\n if len(m.machine_name) > name_len:\n name_len = len(m.machine_name)\n tmpl = \"%%-%ds:[%%s][%%4s%%%%]\\n\" % name_len\n term_width = 80\n hash_len = term_width - 2 - name_len \n # lines, cols = struct.unpack('hh', fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, '1234'))\n \n # print \"-\" * cols\n \n # create a hash of cloud_servers\n \n \n \n # create the table\n finished = 0\n num_servs = len(cs_dict)\n for machine in cs_dict:\n # states[i] = states[i] + random.random()\n m = cs_dict[machine]\n progress = m.progress\n if progress >= 100:\n finished += 1\n num_hashes = int(round(progress/100.0 * hash_len))\n num_blanks = hash_len - num_hashes\n pstring = int(num_hashes) * \"#\" + int(num_blanks) * \" \"\n sys.stdout.write(tmpl % (m.machine_name, pstring, progress))\n sys.stdout.flush()\n if finished == len(cs_dict):\n return time.time() - start_time\n sys.stdout.write(\"%d seconds\\n\" % (time.time() - start_time))\n sys.stdout.write(\"%c[%dA\" % (27, num_servs+1))\n sys.stdout.flush()\n time.sleep(5)\n #\n # move the cursor up len(states.keys()) rows", "def status_callback(val):\n global CUMRETVAL\n CUMRETVAL += val", "def increase_count(self, number=1):\n self.count += number", "def partitions(value, show_progress=False):\n p = {(1,): 1}\n for num in range(2, value+1):\n p = add_number(p, num)\n if show_progress:\n print(('%d' % num) + '.'*num)\n return p", "def test_rebuilt_server_vcpus(self):\n\n remote_client = self.server_behaviors.get_remote_instance_client(\n self.server, self.servers_config)\n server_actual_vcpus = remote_client.get_number_of_cpus()\n self.assertEqual(server_actual_vcpus, self.expected_vcpus)", "def counter(self, value: int, /) -> None:", "def increment(self):\r\n return self.add(1)", "def numpsus():\n click.echo(_wrapper_get_num_psus())", "def update_progress(i, n):\n global _current_line\n prog = progress_string(i, n)\n if _current_line is not None:\n _current_line.update(prog)", "def test_priority_add_many_ok(self):\n test_name = sys._getframe().f_code.co_name\n for i in xrange(11):\n self._execute('priority add p%s' % i)\n rv, output = self._execute('priority list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def update(self):\n self.cpus_current = psutil.cpu_percent(percpu=True)\n assert len(self.cpus_current) == len(self.cpus)\n return self.cpus_current" ]
[ "0.6036316", "0.5600162", "0.5561521", "0.5558175", "0.5384459", "0.53523105", "0.52948165", "0.5276893", "0.51638585", "0.5109805", "0.51070994", "0.50984627", "0.5059864", "0.50574064", "0.5056935", "0.5005578", "0.49772373", "0.49721122", "0.4970361", "0.4964796", "0.49635303", "0.4963451", "0.49459553", "0.49224573", "0.49065465", "0.4904466", "0.48937473", "0.48908", "0.48708743", "0.48526862" ]
0.72512513
0
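A corresponding sketch for the nice() record above (again assuming a started `cluster` object; the return value is simply the per-slave result of os.nice()):

    # Sketch: raise the nice value of every slave interpreter by 10.
    results = cluster.nice(10)      # one os.nice() result per slave
    # As the record notes, the effect may not show up as expected; see os.nice().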
renice(process_list,level) set nice value for multiple processes. Description: Change the nice level of multiple remote processes. process_list is a list of numpy_proc.process objects. level is the new nice value for the listed processes. Caveats: Once niced down, a process cannot be reniced back up. This is a Linux issue.
def renice(self,process_list,level):
        res = []
        pids = {}
        for process in process_list:
            if hasattr(process,'machine'):
                try:
                    worker = self.worker_by_name[process.machine]
                except KeyError:
                    worker = self.worker_by_name[process.long_machine]
                pid = process.pid
            else:
                worker = self.workers[process[0]]
                pid = process[1]
            try:
                pids[worker] = pids[worker] + ' ' + str(pid)
            except:
                pids[worker] = str(pid)
        for worker,value in pids.items():
            arg = 'renice %d -p %s' % (level,value)
            res.append(worker.apply(os.system,(arg,)))
        return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nice(self,increment=10):\n res = self.apply(os.nice,(increment,))\n return res", "def setNice(self, nice=0):\n self.nice = nice", "def _set_processes(self, processes: int = 1):\n self.__processes = processes", "def num_processes(self, new_value):", "def pim_vrrp_updown(self):\n print \"Changing PIM DR and VRRP priority on {0}\".format(self.device_name)\n _pim_vrrp_config_template=[\"interface \",\"ip pim dr-priority \", \"vrrp \", \"priority \"]\n try:\n _vrrp_interfaces=self.show_command(\"show vrrp\")[\"TABLE_vrrp_group\"]\n except Exception as err:\n print err\n return\n if type(_vrrp_interfaces) == dict:\n # If there is only one VRRP interface, NXAPI returns dict instead of list of dict.\n _vrrp_interfaces=[_vrrp_interfaces]\n for _entry in _vrrp_interfaces:\n _pim_vrrp_config_command=[]\n time.sleep(5)\n if self.resume:\n if _entry[\"ROW_vrrp_group\"][\"sh_priority\"] == 10:\n _pim_vrrp_config_command.append(_pim_vrrp_config_template[0]+_entry[\"ROW_vrrp_group\"][\"sh_if_index\"])\n _pim_vrrp_config_command.append(_pim_vrrp_config_template[1]+str(250))\n _pim_vrrp_config_command.append(_pim_vrrp_config_template[2]+str(_entry[\"ROW_vrrp_group\"][\"sh_group_id\"]))\n _pim_vrrp_config_command.append(_pim_vrrp_config_template[3]+str(250))\n print \"Restoring VRRP and PIM DR priority on {0}\\n\".format(_entry[\"ROW_vrrp_group\"][\"sh_if_index\"])\n else:\n if _entry[\"ROW_vrrp_group\"][\"sh_group_state\"] == \"Master\":\n _pim_vrrp_config_command.append(_pim_vrrp_config_template[0]+_entry[\"ROW_vrrp_group\"][\"sh_if_index\"])\n _pim_vrrp_config_command.append(_pim_vrrp_config_template[1]+str(10))\n _pim_vrrp_config_command.append(_pim_vrrp_config_template[2]+str(_entry[\"ROW_vrrp_group\"][\"sh_group_id\"]))\n _pim_vrrp_config_command.append(_pim_vrrp_config_template[3]+str(10))\n print \"Lowering VRRP and PIM DR priority on {0}\\n\".format(_entry[\"ROW_vrrp_group\"][\"sh_if_index\"])\n try:\n self.conf_command(_pim_vrrp_config_command)\n except Exception as err:\n print err\n pass", "def update_processes_config():\n\n config.set(\"ProcessDisplay\", \"processes\", str(PROCESSES))\n with open(CONFIG_PATH, 'w', encoding='utf-8') as configfile:\n config.write(configfile)", "def enable_nice(node):\n Helpers.__warn_if_not_empty(node)\n node.prefix = \"nice\"", "def process():\n interesting_procs = set(INTERESTING_PROCESSES)\n\n pids = psutil.pids()\n info = {\n \"stats_type\": \"process\",\n \"proc\": {\n \"count\": len(pids),\n }\n }\n proc_root = os.environ.get(\"PROC_ROOT\", \"/proc\")\n for pid in pids:\n proc_info = proc.core.Process.from_path(\n os.path.join(proc_root, str(pid)))\n\n proc_name = get_proc_name(proc_info, interesting_procs)\n if not proc_name:\n continue\n\n if 'sshd' in proc_name and ':' in proc_info.cmdline:\n continue\n\n if proc_name not in info['proc']:\n info['proc'][proc_name] = {\n 'running': proc_info.state in ('R', 'S', 'D', 'T', 'W'),\n 'pid': proc_info.pid,\n 'ppid': proc_info.ppid,\n 'user_time': int(proc_info.stat_fields[16]), # cutime\n 'sys_time': int(proc_info.stat_fields[17]), # cstime\n 'vsize': proc_info.vsize,\n 'rss': proc_info.rss,\n 'voluntary_ctxt_switches': int(proc_info.status_fields[\n 'voluntary_ctxt_switches']),\n 'nonvoluntary_ctxt_switches': int(proc_info.status_fields[\n 'nonvoluntary_ctxt_switches']),\n 'age': proc_info.runtime,\n 'count': 1\n }\n else:\n pinfo = info['proc'][proc_name]\n pinfo['count'] += 1\n\n def append(dest, field, value):\n \"\"\"Append values for an existing process.\"\"\"\n if isinstance(dest[field], list):\n 
dest[field].append(value)\n else:\n dest[field] = [dest[field], value]\n\n # append('state', proc_info.state)\n append(pinfo, 'pid', proc_info.pid)\n append(pinfo, 'ppid', proc_info.ppid)\n pinfo['user_time'] += int(proc_info.stat_fields[16]) # cutime\n pinfo['sys_time'] += int(proc_info.stat_fields[17]) # cstime\n pinfo['vsize'] += proc_info.vsize\n pinfo['rss'] += proc_info.rss\n pinfo['voluntary_ctxt_switches'] = \\\n int(proc_info.status_fields['voluntary_ctxt_switches'])\n pinfo['nonvoluntary_ctxt_switches'] = \\\n int(proc_info.status_fields['nonvoluntary_ctxt_switches'])\n append(pinfo, 'age', proc_info.runtime)\n\n return info", "def num_processes():\n return 1", "def print_detailed_user(node_list, pending_list, user_name, user_jobs, num_cores):\n \n user_pend = []\n if len(pending_list): \n for j in range(1, len(pending_list)):\n user_pend.append(pending_list[j])\n \n print(\"=\".center(TERMWIDTH,\"=\"))\n print(\"=\".ljust(TERMWIDTH - 1) + \"=\")\n print(\"=\" + (\"Detailed Process information for {0}.\"\\\n .format(user_name)).center(TERMWIDTH - 2) + \"=\")\n print(\"=\".ljust(TERMWIDTH - 1) + \"=\")\n print(\"=\".center(TERMWIDTH, '=') + \"\\n\")\n\n print(\"{0}'s total number of running jobs on UGE: {1}\\n\".format(user_name, user_jobs))\n # Getting every process of the user to print\n for node in node_list:\n user_proc_list = []\n cleanName = str(node).replace('long@','').replace('debug@','').replace('.crc.nd.edu','').replace('gpu@','').replace('gpu-debug@','')\n full_page = urllib.request.urlopen(\"https://mon.crc.nd.edu/xymon-cgi/svcstatus.sh?HOST={0}.crc.nd.edu&SERVICE=cpu\".format(cleanName))\n mybytes = full_page.read() # getting all html into a byte-list\n pageStr = mybytes.decode(\"utf8\") # Now the html is in a string\n full_page.close()\n del mybytes #releasing these\n del full_page\n # Each line below will be a line in Top for processes\n userNodeMem = [] # List to hold the different amounts of memory a user is using on this node!\n for line in pageStr.split('\\n'):\n if user_name in line:\n lineSplit = line.split()\n memCheck = lineSplit[5] # used to check quality of memory string (check for m's, t's, or g's)\n tmp_user_list = {}\n lineSplit = line.split()\n tmp_user_list[\"PID\"] = lineSplit[0]\n tmp_user_list[\"RESMEM\"] = cleanMem(memCheck)\n tmp_user_list[\"CPU%\"] = lineSplit[8]\n tmp_user_list[\"TIME\"] = lineSplit[10]\n tmp_user_list[\"PNAME\"] = lineSplit[11]\n user_proc_list.append(tmp_user_list)\n if ('t' in memCheck) or ('g' in memCheck) or ('m' in memCheck): # this is what contains the amount of resident memory\n userNodeMem.append(toKB(memCheck))\n else:\n userNodeMem.append(memCheck) # we want it in KB to add up after finished running through node\n\n\n # Printing process information that pertains to the current user only.\n print(cleanName + (\"Cores Used / Total Cores : \" + str(node.used_cores) + \"/\" + str(node.total_cores)).rjust(int(TERMWIDTH/2)))\n print('-'.center(TERMWIDTH,\"-\"))\n print('PID'.center(10, ' ') + 'ProcName'.center(20, ' ') + 'Memory Used'.center(20) + 'CPU%'.center(10) + 'TIME'.center(16))\n for proc in user_proc_list:\n print(proc['PID'].center(10) + proc['PNAME'].center(20) + proc['RESMEM'].center(20) + proc['CPU%'].center(10) + proc['TIME'].center(16))\n userTotalMem = 0\n for mem in userNodeMem:\n userTotalMem += int(mem) \n print(\"User's total memory usage on Node: {0}\".format(cleanMem(str(userTotalMem))))\n print(\"Total number of processes owned by user on Node: {0}\".format(str(len(user_proc_list))))\n \n print('') 
# Simple newline\n\n if len(user_pend):\n print('\\n' + '#'.center(TERMWIDTH, '#'))\n print(\"{0}'s pending jobs:\".format(user_name).center(TERMWIDTH))\n print('#'.center(TERMWIDTH, '#'))\n for job in user_pend:\n print(job)\n sys.exit()", "def setpriority(self, pid=None, priority=5):\n\t \n\t import win32api,win32process,win32con\n\t \n\t priorityclasses = [win32process.IDLE_PRIORITY_CLASS,\n\t win32process.BELOW_NORMAL_PRIORITY_CLASS,\n\t win32process.NORMAL_PRIORITY_CLASS,\n\t win32process.ABOVE_NORMAL_PRIORITY_CLASS,\n\t win32process.HIGH_PRIORITY_CLASS,\n\t win32process.REALTIME_PRIORITY_CLASS]\n\t if pid == None:\n\t pid = win32api.GetCurrentProcessId()\n\t handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)\n\t win32process.SetPriorityClass(handle, priorityclasses[priority])", "def set_process_name_and_cpu_priority(name):\n try:\n os.nice(19) # smooth cpu priority\n libc = cdll.LoadLibrary(\"libc.so.6\") # set process name\n buff = create_string_buffer(len(name.lower().strip()) + 1)\n buff.value = bytes(name.lower().strip().encode(\"utf-8\"))\n libc.prctl(15, byref(buff), 0, 0, 0)\n except Exception:\n return False # this may fail on windows and its normal, so be silent.\n else:\n log.debug(\"Process Name set to: {0}.\".format(name))\n return True", "def setpriority(pid=None, priority=1):\n\n #import win32api,win32process,win32con\n from ctypes import windll\n\n priorityclasses = [0x40, # IDLE_PRIORITY_CLASS,\n 0x4000, # BELOW_NORMAL_PRIORITY_CLASS,\n 0x20, # NORMAL_PRIORITY_CLASS,\n 0x8000, # ABOVE_NORMAL_PRIORITY_CLASS,\n 0x80, # HIGH_PRIORITY_CLASS,\n 0x100, # REALTIME_PRIORITY_CLASS\n ]\n if pid is None:\n pid = windll.kernel32.GetCurrentProcessId()\n handle = windll.kernel32.OpenProcess(PROCESS_ALL_ACCESS, True, pid)\n windll.kernel32.SetPriorityClass(handle, priorityclasses[priority])", "def _showProcessList(self, procs):\n device_name = self._devId\n proc_list = ['%s %s %s' % (pid, name, args) for pid, (name, args) in sorted(procs)]\n proc_list.append('')\n log.info(\"#===== Processes on %s:\\n%s\", device_name, '\\n'.join(proc_list))", "def _UpdateProcessingStatus(self, pid, process_status, used_memory):", "def setup_ec2_launch_override_to_emulate_ice(\n cluster, single_instance_type_ice_cr=\"\", multi_instance_types_ice_cr=\"\", multi_instance_types_exp_cr=\"\"\n):\n remote_command_executor = RemoteCommandExecutor(cluster)\n\n # fmt: off\n remote_command_executor.run_remote_script(\n script_file=str(SCALING_COMMON_DATADIR / \"overrides.sh\"),\n args=[\n f\"--single-instance-type-ice-cr \\\"{single_instance_type_ice_cr}\\\"\",\n f\"--multi-instance-types-ice-cr \\\"{multi_instance_types_ice_cr}\\\"\",\n f\"--multi-instance-types-exp-cr \\\"{multi_instance_types_exp_cr}\\\"\",\n ],\n run_as_root=True,\n )\n # fmt: on", "def samtools_change_multi(region_tag):\n sams = cleaner.listsams(region_tag)\n pool = Pool(21)\n pool.map(changer, sams)\n pool.close()\n pool.join()", "def chk_proccess_status(inst, hlist):\n\n if inst == 'i':\n data_dir = '/data/hrc/i/'\n else:\n data_dir = '/data/hrc/s/'\n\n cmd = 'ls ' + data_dir + '* > ' + zspace\n data = mcf.read_data_file(zspace, remove=1)\n d_list = []\n for ent in data:\n if mcf.is_neumeric(ent):\n d_list.append(int(float(ent)))\n\n done = []\n for obsid in hlist:\n if obsid in d_list:\n done.append(obsid)\n\n if len(done) > 0:\n line = 'Following obsids are processed for hrc-' + str(inst) + ':\\n'\n for obsid in done:\n line = line + '\\t' + str(obsid) + '\\n'\n#\n#--- change the status of processed data\n#\n cmd = 
'chgrp -R hat /data/hrc/i/' + str(obsid)\n os.system(cmd)\n cmd = 'find /data/hrc/i/ -type d -user isobe -exec chmod a+rx,ug+w,o-w {}'\n os.system(cmd)\n cmd = 'chmod -fR a+r,g+w,o-w /data/hrc/i/' + str(obsid)\n os.system(cmd)\n\n\n with opne(zspace, 'w') as fo:\n fo.write(line)\n\n cmd = 'cat ' + zspace + ' |mailx -s \"Subject: HRC Obs Re-processed\" [email protected]'\n os.system(cmd)\n cmd = 'cat ' + zspace + ' |mailx -s \"Subject: HRC Obs Re-processed\" [email protected]'\n os.system(cmd)\n\n mcf.rm_files(zspace)", "def update_cpu_through_stress(ssh_client, num_stress=0):\n if num_stress < 0:\n invalid_resource_parameter(\"CPU Allocation\", limit)\n return\n \n #Separate CPU limit into several smaller increments of 10 instead of one larger one\n for x in range(num_stress):\n cmd = \"stress -c 1 &> /dev/null & cpulimit -p $( pidof -o $! stress ) -l {} &> /dev/null &\".format(10)\n ssh_exec(ssh_client, cmd)", "def suspend_processes(self, scaling_processes=None):\r\n return self.connection.suspend_processes(self.name, scaling_processes)", "def create_and_launch_subprocesses(num_cpu, classify_warnings_fn, arg_groups,\n group_results):\n pool = multiprocessing.Pool(num_cpu)\n for cpu in range(num_cpu):\n proc_result = pool.map(classify_warnings_fn, arg_groups[cpu])\n if proc_result is not None:\n group_results.append(proc_result)\n return group_results", "async def maintain_SCII_count(count: int, controllers: List[Controller], proc_args: List[Dict] = None):\n # kill unhealthy ones.\n if controllers:\n to_remove = []\n alive = await asyncio.wait_for(\n asyncio.gather(*(c.ping() for c in controllers if not c._ws.closed), return_exceptions=True), timeout=20\n )\n i = 0 # for alive\n for controller in controllers:\n if controller._ws.closed:\n if not controller._process._session.closed:\n await controller._process._session.close()\n to_remove.append(controller)\n else:\n if not isinstance(alive[i], sc_pb.Response):\n try:\n await controller._process._close_connection()\n finally:\n to_remove.append(controller)\n i += 1\n for c in to_remove:\n c._process._clean(verbose=False)\n if c._process in kill_switch._to_kill:\n kill_switch._to_kill.remove(c._process)\n controllers.remove(c)\n\n # spawn more\n if len(controllers) < count:\n needed = count - len(controllers)\n if proc_args:\n index = len(controllers) % len(proc_args)\n else:\n proc_args = [{} for _ in range(needed)]\n index = 0\n extra = [SC2Process(**proc_args[(index + _) % len(proc_args)]) for _ in range(needed)]\n logger.info(f\"Creating {needed} more SC2 Processes\")\n for _ in range(3):\n if platform.system() == \"Linux\":\n # Works on linux: start one client after the other\n # pylint: disable=C2801\n new_controllers = [await asyncio.wait_for(sc.__aenter__(), timeout=50) for sc in extra]\n else:\n # Doesnt seem to work on linux: starting 2 clients nearly at the same time\n new_controllers = await asyncio.wait_for(\n # pylint: disable=C2801\n asyncio.gather(*[sc.__aenter__() for sc in extra], return_exceptions=True),\n timeout=50\n )\n\n controllers.extend(c for c in new_controllers if isinstance(c, Controller))\n if len(controllers) == count:\n await asyncio.wait_for(asyncio.gather(*(c.ping() for c in controllers)), timeout=20)\n break\n extra = [\n extra[i] for i, result in enumerate(new_controllers) if not isinstance(new_controllers, Controller)\n ]\n else:\n logger.critical(\"Could not launch sufficient SC2\")\n raise RuntimeError\n\n # kill excess\n while len(controllers) > count:\n proc = controllers.pop()\n proc = 
proc._process\n logger.info(f\"Removing SCII listening to {proc._port}\")\n await proc._close_connection()\n proc._clean(verbose=False)\n if proc in kill_switch._to_kill:\n kill_switch._to_kill.remove(proc)", "def worker(nums, outdict):\n print(threading.current_thread().name)\n print (\"pid:\", os.getpid())\n for n in nums:\n outdict[n] = factorize_naive(n)", "def set_priority(priority=2, pid=None):\n print \"TODO: add os independent support\"\n priorityclasses = [win32process.IDLE_PRIORITY_CLASS,\n win32process.BELOW_NORMAL_PRIORITY_CLASS,\n win32process.NORMAL_PRIORITY_CLASS,\n win32process.ABOVE_NORMAL_PRIORITY_CLASS,\n win32process.HIGH_PRIORITY_CLASS,\n win32process.REALTIME_PRIORITY_CLASS]\n if pid == None:\n pid = win32api.GetCurrentProcessId()\n handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)\n win32process.SetPriorityClass(handle, priorityclasses[priority])", "def test_n_minus_f_pool_processes_attrib(looper, nodeSet,\n sdk_pool_handle,\n sdk_wallet_steward):\n make_pool_n_minus_f_nodes(looper, nodeSet)\n\n sdk_add_raw_attribute(looper, sdk_pool_handle, sdk_wallet_steward, 'foo', 'bar')", "def resume_processes(self, scaling_processes=None):\r\n return self.connection.resume_processes(self.name, scaling_processes)", "async def find_processes(self, msg):\n running_processes = []\n new_embed = DEFAULT_EMBED.copy()\n\n for proc in psutil.process_iter():\n if proc.name() in PROCESSES.keys():\n running_processes.append(proc.name())\n elif proc.name() in [\"java.exe\", \"javaw.exe\"] and proc.cwd() in PROCESSES.keys():\n running_processes.append(proc.cwd())\n\n for process in PROCESSES:\n try:\n if process in running_processes:\n new_embed.add_field(name=PROCESSES.get(process),\n value=\"Online <:GreenTick:592083498534174721>\", inline=self.inline)\n else:\n new_embed.add_field(name=PROCESSES.get(process),\n value=\"Offline <:RedCross:592082557961633877>\", inline=self.inline)\n except PermissionError:\n new_embed.add_field(name=PROCESSES.get(process),\n value=\"Admin Required <:OrangeUnknown:592082676891123722>\", inline=self.inline)\n await msg.edit(content=\"\", embed=new_embed)", "def assignTentativeSpinSystemResidues(spinSystem, residues, weight=1.0, doWarnings=False):\n\n if spinSystem.residue:\n spinSystem.setResidue(None)\n \n if doWarnings:\n msg = 'Remove sequential spin system links?'\n\n if getSeqSpinSystemLinks(spinSystem) and showYesNo('Query',msg):\n clearSeqSpinSystemLinks(spinSystem)\n\n for resonance2 in spinSystem.resonances:\n resonanceSet = resonance2.resonanceSet\n if resonanceSet:\n if len(resonanceSet.resonances) == 1:\n resonanceSet.delete()\n else:\n resonanceSet.removeResonance(resonance2)\n \n residueProbs = []\n for residue in residues:\n residueProb = spinSystem.findFirstResidueProb(possibility=residue) or \\\n spinSystem.newResidueProb(possibility=residue)\n residueProb.weight = weight\n residueProbs.append(residueProb)\n \n return residueProbs", "def create_groups():\n groups = [\"iDRAC-Administrators\", \"iDRAC-Operators\", \"iDRAC-Readonly\"]\n group_priviledges = [\"0x000001ff\", \"0x000000f9\", \"0x00000001\"]\n for host in online_hosts:\n for index in [1,2,3]:\n print index,\" \", groups[index-1]\n with settings(warn_only=True):\n\n result1 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupName \"+groups[index-1])\n if result1.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupName failed \")\n\n result2 = sudo(\"racadm5 -r 
\"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupDomain corp.inmobi.com\")\n if result2.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupDomain failed \")\n\n result3 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupPrivilege \"+ group_priviledges[index-1])\n if result3.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupPriviledge failed \")", "def get_processes():\n yield from psutil.process_iter()" ]
[ "0.50437987", "0.4948403", "0.48628172", "0.48540446", "0.48317713", "0.4823909", "0.4634812", "0.4625937", "0.4610073", "0.45091096", "0.45026562", "0.44959944", "0.4490443", "0.44796947", "0.445953", "0.44437", "0.44412768", "0.44145095", "0.44009617", "0.4400679", "0.43849185", "0.43784514", "0.437233", "0.4341311", "0.43107834", "0.43069437", "0.43010634", "0.42883706", "0.42858267", "0.42740458" ]
0.74223495
0
kill(self,process_list,signal = 'TERM') Signal process list. Description: Send a signal to all of the numpy_proc.process objects in the process_list. This is usually used to kill the processes. The signal may be given as a signal name or number.
def kill(self,process_list,signal = 'TERM'):
    res = []
    pids = {}
    for process in process_list:
        if hasattr(process,'machine'):
            try:
                worker = self.worker_by_name[process.machine]
            except KeyError:
                worker = self.worker_by_name[process.long_machine]
            pid = process.pid
        else:
            worker = self.workers[process[0]]
            pid = process[1]
        # collect pids as a space separated string, grouped by owning worker
        try:
            pids[worker] = pids[worker] + ' ' + str(pid)
        except:
            pids[worker] = str(pid)
    for worker,value in pids.items():
        # one remote kill command per worker, covering all of its pids
        arg = 'kill -s ' + signal + ' %s' % value
        res.append(worker.apply(os.system,(arg,)))
    return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kill_all_processes(self, signal=signal.SIGINT) -> None:\n for task_name, sp in self.process_queue:\n sp.send_signal(signal)", "def stop_all(self, signal, frame):\n for event in self.event_list:\n event.set()\n for process in self.process_list:\n process.join()\n sys.exit()", "def kill_processes(self):\n for proc in self.processes:\n if proc['proc'].poll() is not None:\n proc['proc'].terminate()", "def terminate_all_processes(processes):\n for process in processes:\n process.terminate()", "def _kill_launchfile(self):\r\n if len(self.process_list) is 0:\r\n print(\"[ToyCarEpisodeMonitor._terminate()]: no process to terminate\")\r\n else:\r\n for p in self.process_list:\r\n p.send_signal(signal.SIGINT)\r\n while p.poll() is None:\r\n print (\r\n \"[SimulatorEpisodeMonitor._terminate()]: \"\r\n \"simulator process {} termination in progress...\"\r\n ).format(p.pid)\r\n time.sleep(1.0)\r\n print (\r\n \"[ToyCarEpisodeMonitor._terminate()]: \"\r\n \"simulator proc {} terminated with exit code {}\"\r\n ).format(p.pid, p.returncode)\r\n self.process_list = []\r\n print(\"[ToyCarEpisodeMonitor]: termination done!\")\r\n\r\n return", "def kill_all(name, sig=signal.SIGKILL):\n sig = int(sig)\n for proc in psutil.process_iter():\n if proc.name() == name:\n kill(proc.pid, sig)", "def stopAllProcesses(self, wait=True):\r\n self._update('stopAllProcesses')\r\n\r\n processes = self._getAllProcesses()\r\n\r\n killall = make_allfunc(processes, isRunning, self.stopProcess,\r\n wait=wait)\r\n\r\n killall.delay = 0.05\r\n killall.rpcinterface = self\r\n return killall # deferred\r", "def stop(self, *args):\n if args[0] == 'all':\n for k, v in self.processers.items():\n if v:\n try:\n v.terminate()\n except:\n pass\n print 'Killed %s.' % k\n\n self.processers = dict.fromkeys(self.processers.keys())\n else:\n seq = args[0]\n try:\n self.processers['process%s' % seq].terminate()\n self.processers['process%s' % seq] = None\n print 'Killed process%s.' % seq\n except:\n print 'Have no process%s.' 
% seq", "def kill_all(self, procname):\n procs = self.find_processes_by_name(procname)\n for proc in procs:\n result = self.kill_process(proc['PID'])\n if not result['HasExited']:\n for i in xrange(3):\n result = self.kill_process(result['PID'], False)\n if result['HasExited']:\n break\n else:\n raise MicroManagerError(\"Process with name'{}' and PID '{}' would not exit on machine '{}'.\".format(procname, proc['PID'], self.hostname))", "def kill_child_processes(parent_pid, sig=signal.SIGTERM):\n try:\n parent = psutil.Process(parent_pid)\n except psutil.NoSuchProcess:\n return\n children = parent.children(recursive=True)\n for process in children:\n try:\n process.send_signal(sig)\n except psutil.NoSuchProcess:\n return", "def kill(name, signal=9, exact=False):\n for pid in find(name, exact):\n run(\"kill -s {0} {1}\".format(signal, pid))", "def kill_all(self):\n self._stop_all('kill')", "def stopProcesses(*args):\n _stopProcessSet(_running)", "def _killForks(forkList):\n for pid in forkList:\n try:\n os.kill(pid, signal.SIGTERM)\n except OSError, e:\n # 3 means No such process\n if e.errno != 3:\n raise\n # Wait on our children to actually die, so that we don't look like we've\n # exited but are still using the port (matters for people using pidfiles).\n for pid in forkList:\n while _checkAlive(pid):\n time.sleep(0.1)", "def do_kill(cs, args):\n for container in args.containers:\n opts = {}\n opts['id'] = container\n opts['signal'] = args.signal\n opts = zun_utils.remove_null_parms(**opts)\n try:\n cs.containers.kill(**opts)\n print(\n \"Request to kill signal to container %s has been accepted.\" %\n container)\n except Exception as e:\n print(\n \"kill signal for container %(container)s failed: %(e)s\" %\n {'container': container, 'e': e})", "def kill_process(proc):\r\n p1_group = psutil.Process(proc.pid)\r\n\r\n child_pids = p1_group.get_children(recursive=True)\r\n\r\n for child_pid in child_pids:\r\n os.kill(child_pid.pid, signal.SIGKILL)", "def kill(self):\n kill_cmds = [\n \"sudo pkill '(daos_server|daos_io_server)' --signal INT\",\n \"sleep 5\",\n \"pkill '(daos_server|daos_io_server)' --signal KILL\",\n ]\n self.log.info(\"Killing any server processes\")\n pcmd(self._hosts, \"; \".join(kill_cmds), False, None, None)", "def kill(pids):\n for pid in pids:\n process = psutil.Process(pid)\n for proc in process.children(recursive=True):\n proc.kill()\n process.kill()\n return", "def killAll(controller=False):", "def kill_manager(self) -> None:\n\n for p in self.process_list:\n p.terminate()\n # NOTE: Seems Python does not appreciate if close is called too quickly.\n sleep(0.5)\n # Release the resources held by the Proess (Python 3.7 and up)\n p.close()", "def _stopProcessSet(procSet):\n # Send a SIGTERM to all (still running) processes.\n finished = {}\n needToWait = False\n for i, p in enumerate(procSet):\n if p.poll() is not None:\n finished[p] = None\n continue\n\n needToWait = True\n try:\n if platformType == \"windows\":\n win32process.TerminateProcess(p._handle, 0)\n else:\n os.kill(p.pid, signal.SIGTERM)\n if i == 0:\n children = getattr(p, \"children\", [])\n for cpid in children:\n os.kill(cpid, signal.SIGTERM)\n except OSError:\n # This can happen if the process has died before the call to kill, so\n # we ignore it.\n pass\n\n if needToWait:\n # At least one process has been signalled, so wait for about\n # _stopProcessTimeout * 0.1 seconds or until all the processes have\n # died.\n for i in range(_stopProcessTimeout):\n done = True\n for p in procSet:\n # print(\">>\", 
p.poll())\n if p.poll() is not None:\n finished[p] = None\n continue\n done = False\n\n if done:\n break\n else:\n time.sleep(0.1)\n\n # Now use SIGKILL on any processes still running.\n for p in procSet:\n if p not in finished:\n try:\n if platformType == \"windows\":\n win32process.TerminateProcess(p._handle, 0)\n else:\n os.kill(p.pid, signal.SIGKILL)\n except OSError:\n # Process may have died before the call to kill.\n pass\n\n # Wait again for all the processes to die. If they do not then\n # something really horrid has happened.\n for i in range(_stopProcessTimeout):\n done = True\n for p in procSet:\n if p.poll() is not None:\n finished[p] = None\n continue\n done = False\n\n if done:\n break\n else:\n time.sleep(0.1)\n\n for p in procSet:\n if p.poll() is None:\n print(\"Heck! Could not stop process with ID = %d\" % p.pid)\n\n # Clear the list of processes.\n procSet[:] = []", "def kill_all():\n compose_kill_all()", "def run_multi_processes(cmd_list, out_log=None, err_log=None):\r\n kwargs = {'shell': True, 'cwd': None}\r\n pids = []\r\n\r\n if out_log:\r\n out_log_file = open(out_log, 'w')\r\n kwargs['stdout'] = out_log_file\r\n\r\n if err_log:\r\n err_log_file = open(err_log, 'w')\r\n kwargs['stderr'] = err_log_file\r\n\r\n try:\r\n for cmd in cmd_list:\r\n pids.extend([subprocess.Popen(cmd, **kwargs)])\r\n\r\n def _signal_handler(*args):\r\n print(\"\\nEnding...\")\r\n\r\n signal.signal(signal.SIGINT, _signal_handler)\r\n print(\"Enter CTL-C to end\")\r\n signal.pause()\r\n print(\"Processes ending\")\r\n\r\n except Exception as err:\r\n print(\"Error running process {}\".format(err), file=sys.stderr)\r\n\r\n finally:\r\n for pid in pids:\r\n kill_process(pid)", "def killMongosProc():\n cmd = [\"pgrep -f \\\"\" + MONGOS_KSTR + \"\\\" | xargs kill -9\"]\n executeCommand(cmd)", "def kill(self):\n self._stop_proc(signal.SIGKILL)", "def kill_processes(self) -> None:\n for process in [p for p in self.processes if p.is_running()]:\n for child in process.children(recursive=True):\n if child.is_running():\n child.kill()\n\n process.kill()", "def kill_svc(self, svc_names: List[str]):\n self._send_signals(svc_names, \"SIGKILL\")", "def _kill_running_processes(self):\n # Kill any rouge processes that are still running.\n with _thread_lock:\n killed = []\n for pid in self._pids:\n try:\n os.kill(pid, _KILLED_BY_ANYPYTOOLS)\n killed.append(str(pid))\n except:\n pass\n self._pids.clear()", "def killall(self):\n\n for job_id, job in self.jobs:\n backend.kill( job )", "def terminate(self):\n for processor in self._processors.values():\n Stats.decr(\n \"dag_processing.processes\", tags={\"file_path\": processor.file_path, \"action\": \"terminate\"}\n )\n processor.terminate()" ]
[ "0.6993041", "0.6301664", "0.6288038", "0.6048694", "0.59778625", "0.5973349", "0.5903882", "0.5890457", "0.5890336", "0.58836174", "0.57966644", "0.57964116", "0.5794382", "0.5788191", "0.5737244", "0.57180685", "0.56969357", "0.569088", "0.5687089", "0.56694955", "0.5639071", "0.5592052", "0.55672586", "0.5538126", "0.5504865", "0.549662", "0.54789805", "0.54672384", "0.54274374", "0.5416775" ]
0.8355109
0
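
As an illustration of the kill record above, here is a minimal standalone sketch of the same idea on a single POSIX machine: translate a signal name or number and deliver it to a list of process ids. The helper name send_signal_to_pids and the throwaway sleep child are assumptions for the demo, not part of the original cluster class.

import os
import signal
import subprocess
import time

def send_signal_to_pids(pids, sig='TERM'):
    # Accept either a signal name such as 'TERM' or a raw signal number.
    signum = sig if isinstance(sig, int) else getattr(signal, 'SIG' + sig)
    for pid in pids:
        os.kill(pid, signum)

if __name__ == '__main__':
    # Spawn a disposable child process, signal it by pid, then check its exit code.
    child = subprocess.Popen(['sleep', '30'])
    send_signal_to_pids([child.pid], 'TERM')
    time.sleep(0.2)
    print('child exit code:', child.poll())
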
system(cmd) execute cmd on all remote machines. A list of all the remote responses is returned. Unlike os.system, which returns the exit value of the cmd string, this function returns the text output by the command.
def system(self,cmd):
    code = 'import os;f=os.popen("%s");res = f.read(-1);f.close();' % cmd
    return self.exec_code(code,returns=['res'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def system(cmds):\n if isinstance(cmds, six.string_types):\n cmds = [cmds]\n\n output = None\n if isinstance(cmds, (tuple, list)):\n for cmd in cmds:\n logger.debug(cmd)\n\n try:\n output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)\n if output:\n logger.info(output.decode('utf-8'))\n\n\n except subprocess.CalledProcessError as e:\n if e.returncode != 2:\n msg = \"Command failed: \\n {} \\n \\n Return code: {} \".format(cmd, e.returncode)\n logger.error(msg)\n logger.error(e.output.decode(\"utf-8\"))\n\n sys.exit(1)\n\n else:\n raise TypeError(\"cmd argument is wrong type\")\n\n return output", "def get_output(cmd=['echo', 'NO COMMAND SPECIFIED'],\n cwd=os.getcwd(),\n stderr=subprocess.STDOUT,\n shell=False): # noqa\n\n output = subprocess.check_output( # nosec\n cmd,\n cwd=cwd,\n stderr=stderr,\n shell=shell\n ).decode('UTF-8').splitlines()\n return output", "def output(cmd):\n return subprocess.check_output(cmd, shell=True)", "def sys_cmd(cmd: list) -> str:\n\n out, err = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()\n # Checking return code\n if err != b\"\":\n log.error(err.decode())\n notify_owner(f\"Exited(1) for: {err.decode()}\")\n exit(1)\n else:\n return out.decode()", "def check_output(cmd):\n logger.info(cmd)\n args = shlex.split(cmd)\n\n p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out = [ii.decode() for ii in p.communicate()]\n\n return out", "def system(command):\n print('[system] {}'.format(command))\n p = subprocess.Popen(command, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, shell=True)\n output, err = p.communicate()\n rc = p.returncode\n if PY3:\n output = output.decode(\"ascii\")\n err = err.decode(\"ascii\")\n return rc, output, err", "def systemCommand(command):\n\n commStatus, commOut = commands.getstatusoutput(command)\n # If our command fails, abort entirely and notify CloudKick\n if commStatus != 0:\n sys.stderr.write('Error: Failure when executing the following ')\n sys.stderr.write(\"command: '%s'\\n\" % (command,))\n sys.stderr.write(\"Exit status: %d\\n\" % (commStatus,))\n sys.stderr.write(\"Output: %s\\n\\n\" % (commOut,))\n sys.stderr.write('status err System command failure: ')\n sys.stderr.write('%s\\n' % (command,))\n sys.exit(1)\n # If we get a 0 exit code, all is well. 
Return the data.\n else:\n return commOut", "def shell_output(cmd):\n return subprocess.check_output(cmd.split()).decode().strip()", "def getoutputs(cmd, check_privileges=True):\n\n if check_privileges:\n check_user_permissions()\n\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n stdout, stderr = proc.communicate()\n ret = proc.returncode\n if ret:\n raise OSError(\"%s exited with returncode %d: stderr %s stdout: %s\" %\n (str(cmd), ret, stderr, stdout))\n return stdout", "def get_shell_cmd_output(cmd):\n output = subprocess.check_output(cmd, shell=True, universal_newlines=True)\n return output", "def result_of(cmd):\n cmd_list_arr = cmd.split(\" \")\n result = check_output(cmd_list_arr).decode(\"utf-8\")\n return result", "def executeCommand(cmd,loopsleep):\n\tsleep(loopsleep)\n\tresult = subprocess.getoutput(cmd)\n\treturn(result.split(\"\\n\"))", "def collect_output_from_command(cmd):\n \n try:\n # print \"Trying %s\" % ' '.join(sys.argv[1:])\n p = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n output, err_msgs = p.communicate()\n except OSError as err:\n print(\"Failed running '%s' (%d - %s)\" %\n (sys.argv, err.errno, err.strerror))\n raise\n else:\n return output, err_msgs", "def _runsystem(self, cmd):\n try:\n import subprocess\n except ImportError:\n pass\n try:\n pout = subprocess.check_output(\n cmd,\n stderr=subprocess.STDOUT,\n shell=True)\n except subprocess.CalledProcessError as called_err:\n raise ModulecmdRuntimeError(str(called_err))\n except Exception:\n try:\n sub_p = subprocess.Popen(\n cmd,\n shell=True,\n stdout=subprocess.PIPE)\n pout = sub_p.stdout.read()\n except Exception:\n try:\n proc_h = os.popen(cmd)\n pout = proc_h.read()\n except Exception:\n if self.verbose:\n print(\"Could not read output from '%s'\" % cmd)\n traceback.print_exc()\n if pout:\n pout = pout.strip()\n if isinstance(pout, str):\n pout = pout.decode('utf-8')\n return pout", "def shell ( cmd ):\n p = subprocess.Popen( cmd, shell=True, stderr=subprocess.PIPE,\n stdout=subprocess.PIPE )\n x = p.communicate()\n p.stdout.close()\n p.stderr.close()\n if x[1] == '':\n status = True\n else:\n status = False\n \n return [ status, x[0].split( '\\n' ), x[1].split( '\\n' ) ]", "def run_command(cmd):\n\n return filter(lambda x: x, Popen(cmd.split(), stdout = PIPE).communicate()[0].split(\"\\n\"))", "def system_call(command):\n p = subprocess.Popen([command], stdout=subprocess.PIPE, shell=True)\n return p.stdout.read()", "def run(cmd: str, verbose: bool = False):\n\n if verbose:\n print(cmd)\n\n out = subprocess.check_output(cmd, shell=True).decode(\"utf-8\")\n\n if verbose:\n print(out)\n\n return out", "def ssh_exec_command(command):\n with _ssh_connect() as ssh:\n stdin, stdout, stderr = ssh.exec_command(command)\n\n if _ssh_error_report(stderr):\n sys.exit()\n\n rv = []\n for line in stdout.read().splitlines():\n rv.append(line.decode())\n\n return rv", "def cmd(self, cmd, verbose=None, timeout=120, listformat=False):\n \n if verbose is None:\n verbose = self.verbose\n \n cmd = str(cmd)\n t = None #used for timer \n start = time.time()\n output = []\n if verbose:\n self.debug( \"[\" + self.username +\"@\" + str(self.host) + \"]# \" + cmd)\n try:\n tran = self.connection.get_transport()\n chan = tran.open_session()\n chan.get_pty()\n f = chan.makefile()\n t = Timer(timeout, self.ssh_sys_timeout,[chan, start,cmd] )\n t.start()\n chan.exec_command(cmd)\n if ( listformat is True):\n #return output as list of lines\n output = f.readlines()\n else:\n #return output as 
single string buffer\n output = f.read()\n if verbose:\n self.debug(\"done with exec\")\n except CommandTimeoutException, cte: \n elapsed = str(time.time()-start).split('.')[0]\n self.debug(\"Command (\"+cmd+\") timed out after \" + str(elapsed) + \" seconds\\nException\") \n raise cte\n finally:\n if (t is not None):\n t.cancel() \n if verbose:\n elapsed = str(time.time()-start).split('.')[0]\n if (listformat is True):\n self.debug(\"\".join(output))\n else:\n self.debug(output)\n \n return output", "def send_command(self, command):\n stdin, stdout, stderr = self.ssh_client.exec_command(command)\n return stdout.readlines()", "def cluster_run(self, cmd):\n instances = self.service.get_instances()\n responses = []\n for instance in instances:\n success, output = self.run_remote_script(cmd, instance=instance)\n responses.append((success, output))\n return responses", "def old_get_status_output(cmd):\r\n if sys.platform[:3] != \"win\":\r\n cmd = \"{ \" + cmd + \"; }\"\r\n pipe = os.popen(cmd + \" 2>&1\", \"r\")\r\n text = list()\r\n for item in pipe:\r\n text.append(item.rstrip())\r\n try:\r\n sts = pipe.close()\r\n except IOError:\r\n sts = 1\r\n if sts is None: sts = 0\r\n return sts, text", "def subprocess_check_output(cmd):\n print(shlex.join(cmd))\n return subprocess.check_output(cmd, text=True, env=os.environ.copy())", "def native_cmd(cmd, whitespace=False):\n result = subprocess.check_output(cmd, shell=True).decode()\n\n result = subprocess.run(cmd, stdout=subprocess.PIPE).stdout.decode('utf-8')\n\n return result", "def cli(self, cmd):\n p1 = Popen(cmd,stdout=PIPE, shell=True)\n output = p1.communicate()\n if p1.returncode != 0 :\n print('error returned from shell command: %s was %s'%(cmd,output[0]))\n return output[0],p1.returncode", "def run(cmd: list) -> str:\n\n try:\n s = subprocess.run(cmd, shell=True, check=True, capture_output=True)\n except subprocess.CalledProcessError as error:\n out = error.stderr or error.stdout\n raise Failure(out.decode().strip())\n\n return s.stdout.decode(\"iso-8859-1\").strip()", "def execute(cmd) :\n return os.system( cmd )", "def ssh_command(client, command):\n _stdin, _stdout, _stderr = client.exec_command(command, get_pty=True, timeout=60)\n _stdout.channel.recv_exit_status()\n return _stdout.readlines()", "def run_cmd(cmd):\n return check_output(cmd, shell=True).decode('utf-8')" ]
[ "0.694567", "0.6511475", "0.63991964", "0.63601625", "0.6340061", "0.63375807", "0.629912", "0.6257301", "0.61772144", "0.6161609", "0.61456496", "0.6145014", "0.6136143", "0.6066929", "0.6061483", "0.605223", "0.6043965", "0.60113454", "0.6006606", "0.59515357", "0.59413016", "0.59106225", "0.5902474", "0.58701926", "0.5839901", "0.5809976", "0.58031666", "0.5793439", "0.5789109", "0.57868236" ]
0.66030544
1
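
A single-machine sketch of the behaviour the system record above describes: run a shell command and hand back the text it printed rather than the exit status that os.system reports. It uses subprocess instead of the os.popen call in the original, and the function name run_and_capture is an assumption.

import subprocess

def run_and_capture(cmd):
    # Run cmd through the shell and return its standard output as text.
    completed = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    return completed.stdout

if __name__ == '__main__':
    print(repr(run_and_capture('echo hello')))   # expected: 'hello\n'
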
reload(module) reload module on all remote interpreters. module can either be the name of a module or the actual module object.
def reload(self,module):
    try:
        code = 'import %s; reload(%s)' % ((module.__name__,)*2)
    except AttributeError:
        code = 'import %s; reload(%s)' % ((module,)*2)
    self.workers.exec_code(code)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _reload(mod,larch=None,**kw):\n\n if isinstance(mod, str):\n return larch.import_module(mod, do_reload=True)\n\n for k,v in chain(larch.symtable._sys.modules.iteritems(), sys.modules.iteritems()):\n if v == mod:\n modname = k\n break\n try:\n return larch.import_module(modname,do_reload=True)\n except NameError:\n pass", "def reloadModule(module):\n\ttry:\n\t\treload # Python 2.7\n\texcept NameError:\n\t\ttry:\n\t\t\tfrom importlib import reload # Python 3.4+\n\t\texcept ImportError:\n\t\t\tfrom imp import reload # Python 3.0 - 3.3\n\n\treload(module)", "def reload_module(module_name):\n try:\n reload(eval(module_name))\n except:\n pass", "def reload(*mods):\n for mod in mods:\n importlib.reload(importlib.import_module(mod))", "def reload(self):\n\n\t\tif self.module is None:\n\t\t\t# Do nothing, as the module will be imported on attribute access.\n\t\t\tpass\n\t\telse:\n\t\t\texec \"reload(\" + self.name + \")\"\n\t\t\t# The module object is still identical, only its code has been\n\t\t\t# replaced. Thus no eval(self.name) is necessary.", "def reload(self):\n self.rpc.call(MsfRpcMethod.CoreReloadModules)", "async def _reload(self, ctx, *, module: str=None):\n if module is None or module == \"all\":\n await ctx.message.add_reaction('\\N{HOURGLASS}')\n try:\n for extension in startup_extensions:\n self.bot.unload_extension(extension)\n self.bot.load_extension(extension)\n except Exception as e:\n await ctx.message.remove_reaction('\\N{HOURGLASS}', ctx.me)\n await ctx.message.add_reaction('\\N{CROSS MARK}')\n await ctx.send('{}: {}'.format(type(e).__name__, e))\n traceback.print_exc()\n else:\n await ctx.message.remove_reaction('\\N{HOURGLASS}', ctx.me)\n await ctx.message.add_reaction('\\N{WHITE HEAVY CHECK MARK}')\n else:\n await ctx.message.add_reaction('\\N{HOURGLASS}')\n try:\n self.bot.unload_extension(module)\n self.bot.load_extension(module)\n except Exception as e:\n await ctx.message.remove_reaction('\\N{HOURGLASS}', ctx.me)\n await ctx.message.add_reaction('\\N{CROSS MARK}')\n await ctx.send('{}: {}'.format(type(e).__name__, e))\n traceback.print_exc()\n else:\n await ctx.message.remove_reaction('\\N{HOURGLASS}', ctx.me)\n await ctx.message.add_reaction('\\N{WHITE HEAVY CHECK MARK}')", "def onReload(self, moduleName=\"NeedleFinder\"):\r\n if profiling : profbox()\r\n # framework\r\n globals()[moduleName] = slicer.util.reloadScriptedModule(moduleName)", "def reload_from_cwd(module, reloader=...):\n ...", "def transitive_reload(module, visited):\n if not module in visited:\n status(module)\n reload(module)\n visited[module] = None\n for attrobj in module.__dict__.values():\n if type(attrobj) is types.ModuleType:\n transitive_reload(attrobj, visited)", "async def reload_modules(self) -> bool:\n self.reloading_modules = True\n newmodules = await self.detect_modules()\n todrop = []\n toload = []\n\n # Logs!\n errors = False\n\n for name, module in self.modules.items():\n if module.loaded:\n if hasattr(module.module, \"unload\"):\n try:\n await module.module.unload(self.client.loop)\n except:\n LOGGER.exception(\n f\"Hit an exception while unloading module {name}.\")\n errors = True\n\n if name not in newmodules:\n LOGGER.debug(f\"Dropping removed module {name}.\")\n if hasattr(module.module, \"shutdown\"):\n try:\n await module.module.shutdown(self.client.loop)\n except:\n LOGGER.exception(\n f\"Hit an exception while shutting down module {name}.\")\n errors = True\n\n todrop.append(module)\n continue\n\n newmodules.remove(name)\n module.handlers = {}\n try:\n 
importlib.reload(module.module)\n\n except:\n LOGGER.exception(\n f\"Hit an exception while reloading module {name}.\")\n todrop.append(module)\n errors = True\n continue\n\n toload.append(module)\n module.loaded = True\n\n # Loops over NEW modules. Because we can't just reload them.\n for name in newmodules:\n newmod = MModule(name)\n self.modules[name] = newmod\n\n try:\n mod = importlib.import_module(name)\n except:\n LOGGER.exception(\n f\"Hit an exception while loading module {name}.\")\n # Alas it was not meant to be.\n del self.modules[name]\n errors = True\n continue\n\n newmod.module = mod\n toload.append(newmod)\n\n newmod.loaded = True\n for server in self.servers.values():\n server.modules[name] = newmod\n #LOGGER.info(f\"$BLUESuccessfully loaded module $WHITE{name}$BLUE.\")\n\n for module in toload:\n if hasattr(module.module, \"load\"):\n try:\n await module.module.load(self.client.loop)\n\n except:\n LOGGER.exception(\n f\"Hit an exception while load()ing module {module.name}.\")\n errors = True\n\n for module in todrop:\n for server in self.servers.values():\n if module.name in server.modules:\n del server.modules[module.name]\n\n del self.modules[module.name]\n\n self.reloading_modules = False\n\n for handler in self.temp_module_handlers:\n try:\n if handler.module in self.modules:\n self.register_handler(handler)\n\n else:\n LOGGER.warning(f\"Attempted to late-register for nonexistant module: {handler.module}/{handler.name}\")\n\n except:\n LOGGER.exception(\n f\"Exception while registering handler {handler.module}/{handler.name}!\")\n errors = True\n\n self.temp_module_handlers = []\n\n return errors", "def onReload(self,moduleName=\"NeedleFinder\"):\n if profiling : profbox()\n #framework\n globals()[moduleName] = slicer.util.reloadScriptedModule(moduleName)", "def reload_dependences(module):\n tree = get_reversed_tree()\n reload(module)\n for dependant in tree[module]:\n reload(dependant)", "def reload_import(path, hard = True):\r\n\r\n # in case the path is not present in the\r\n # system modules no need to reload\r\n if not path in sys.modules: return\r\n\r\n # in case the hard approach for reloading is\r\n # taken the system modules should be changed\r\n if hard:\r\n # retrieves the module for the given path from\r\n # system module and then removes it from the system\r\n # modules and then deletes it from the virtual\r\n # machine environment\r\n module = sys.modules[path]\r\n del sys.modules[path]\r\n del module\r\n # otherwise the \"soft\" reload provides the normal\r\n # module reload method\r\n else:\r\n # retrieves the module for the given path from\r\n # system module and then forces a reload on the\r\n # module (to flush the contents)\r\n module = sys.modules[path]\r\n legacy.reload(module)", "def reload_subs(verbose=True):\n if verbose:\n print('Reloading submodules')\n rrr(verbose=verbose)\n def wrap_fbrrr(mod):\n def fbrrr(*args, **kwargs):\n \"\"\" fallback reload \"\"\"\n if verbose:\n print('No fallback relaod for mod=%r' % (mod,))\n # Breaks ut.Pref (which should be depricated anyway)\n # import imp\n # imp.reload(mod)\n return fbrrr\n def get_rrr(mod):\n if hasattr(mod, 'rrr'):\n return mod.rrr\n else:\n return wrap_fbrrr(mod)\n def get_reload_subs(mod):\n return getattr(mod, 'reload_subs', wrap_fbrrr(mod))\n get_rrr(util_graph)(verbose=verbose)\n rrr(verbose=verbose)\n try:\n # hackish way of propogating up the new reloaded submodule attributes\n reassign_submodule_attributes(verbose=verbose)\n except Exception as ex:\n print(ex)", "async def 
reload_all(ctx):\n await ext_manager.reload_all()\n await ctx.send(\"Successfully reloaded.\")", "def onReload(self,moduleName=\"FlexCrop\"):\n globals()[moduleName] = slicer.util.reloadScriptedModule(moduleName)", "def command_reload(interface,command,args):\n command_unload(interface,command,args)\n command_load(interface,command,args)", "def update_module(conn, module, chunk_size = 16000):\n rmodule = conn.modules[module.__name__]\n lf = inspect.getsourcefile(module)\n rf = conn.modules.inspect.getsourcefile(rmodule)\n upload_file(conn, lf, rf, chunk_size = chunk_size)\n conn.modules.__builtin__.reload(rmodule)", "def onReload(self,moduleName=\"MarkupsInViewsSelfTest\"):\n globals()[moduleName] = slicer.util.reloadScriptedModule(moduleName)", "def reload(mod):\n import difflib, imp, logging\n # Set the logger\n logger = logging.getLogger(\"myfuncs.reload\")\n logger.addHandler( logging.NullHandler() )\n logger.setLevel( logging.DEBUG )\n #\n if mod.__file__[-1] in \"oc\":\n mod.__file__ = mod.__file__[:-1]\n # end if\n #\n if \"__track_source__\" in mod.__dict__:\n orig = mod.__track_source__\n else:\n orig = None\n # end if\n #\n # Read the source file in its current state.\n with open(mod.__file__, \"r\") as fid:\n mod.__track_source__ = fid.readlines()\n # end with\n #\n # Check for differences and report any changes.\n logger.debug(mod.__file__)\n if orig is None:\n for it in range(len(mod.__track_source__)):\n logger.debug(\"{:d} {:s}\".format( \\\n it+1, mod.__track_source__[it].rstrip() \\\n ) )\n # end for\n else:\n diffs = difflib.unified_diff( \\\n orig, mod.__track_source__, \\\n fromfile=\"Original\", tofile=\"Updated\" \\\n )\n for line in diffs:\n logger.debug(line.rstrip())\n # end for\n # end if\n return imp.reload(mod)", "async def reload(ctx, name):\n await unload_extension(name, channel=ctx.channel)\n await load_extension(name, channel=ctx.channel)", "def reload():\n import cubegame\n importlib.reload(cubegame)\n exec(\"from cubegame import *\")", "def reload_module_by_name(mod_name, var_name):\n for mod in list(sys.modules.keys()):\n if mod_name in mod:\n del sys.modules[mod]\n if var_name in globals():\n del globals()[var_name] # deletes the variable named <var_name>\n return importlib.__import__(mod_name)", "def exec_module(self, module):\n pass", "def reload_subs(verbose=True):\n import_subs()\n rrr(verbose=verbose)\n getattr(constants, 'rrr', lambda verbose: None)(verbose=verbose)\n getattr(entry_points, 'rrr', lambda verbose: None)(verbose=verbose)\n getattr(params, 'rrr', lambda verbose: None)(verbose=verbose)\n getattr(other, 'reload_subs', lambda verbose: None)(verbose=verbose)\n getattr(dbio, 'reload_subs', lambda verbose: None)(verbose=verbose)\n getattr(algo, 'reload_subs', lambda verbose: None)(verbose=verbose)\n getattr(control, 'reload_subs', lambda verbose: None)(verbose=verbose)\n getattr(viz, 'reload_subs', lambda: None)()\n\n getattr(gui, 'reload_subs', lambda verbose: None)(verbose=verbose)\n getattr(algo, 'reload_subs', lambda verbose: None)(verbose=verbose)\n getattr(viz, 'reload_subs', lambda verbose: None)(verbose=verbose)\n getattr(web, 'reload_subs', lambda verbose: None)(verbose=verbose)\n\n rrr(verbose=verbose)", "def force_reload(*include: str, modules: Optional[Dict[str, ModuleType]] = None):\n if modules is None:\n modules = sys.modules\n\n include_exact = set(include)\n include_prefixes = tuple(name + \".\" for name in include)\n\n to_delete = [\n name\n for name in modules\n if (name in include_exact or 
name.startswith(include_prefixes))\n ]\n\n for name in to_delete:\n modules.pop(name, None)", "async def on_reload(name: str):\n global started\n local_started = started\n\n await plugins.reload(name)\n\n started = local_started", "def comando_reload(self):\r\n\tif args.opcao == 'gne':\r\n configs = self.reload_gne_framework(args.file, args.loja, args.serie, args.nnf)\r\n return configs\r\n else:\r\n configs = self.reload_daruma_framework(args.file)\r\n return configs", "def reloadChangedConfigModules(modules, options, sender, tags):\n etcdir = os.path.join(options.cdir, 'etc')\n currentModules = set(listConfigModules(etcdir))\n currentPaths = set(os.path.join(etcdir, name)\n for name in currentModules)\n changed = False\n\n # Reload any midule that has changed\n for path, (module, timestamp) in modules.iteritems():\n if path not in currentPaths:\n continue\n curmtime = os.path.getmtime(path)\n if curmtime > timestamp:\n LOG.info('Reloading %s, file has changed', path)\n module = loadConfigModule(module, options, tags)\n modules[path] = (module, curmtime)\n changed = True\n\n # Remove any module that has been removed\n for path in set(modules).difference(currentPaths):\n LOG.info('%s has been removed, cmanager should be restarted', path)\n del modules[path]\n changed = True\n\n # Check for any added module\n for name in currentModules:\n path = os.path.join(etcdir, name)\n if path not in modules:\n module = loadConfigModule(name, options, tags)\n modules[path] = (module, os.path.getmtime(path))\n changed = True\n\n if changed:\n sender.tagstr = ' '.join('%s=%s' % (k, v) for k, v in tags.iteritems())\n sender.tagstr = ' ' + sender.tagstr.strip()\n return changed" ]
[ "0.74145204", "0.7367877", "0.7293504", "0.7069121", "0.6777144", "0.66111887", "0.63678116", "0.6366045", "0.63599694", "0.63358724", "0.63288987", "0.6298444", "0.62526506", "0.6154786", "0.60551083", "0.60262233", "0.6022249", "0.5863082", "0.5854398", "0.58133113", "0.5802653", "0.5741354", "0.57390285", "0.5673612", "0.5630636", "0.5616659", "0.5509139", "0.54500234", "0.54477966", "0.5435673" ]
0.75071913
0
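
A small local counterpart to the reload record above, written for Python 3 where reload lives in importlib rather than the builtins: it accepts either a module object or a module name, mirroring the try/except AttributeError trick in the original. The helper name reload_by_name_or_object is an assumption.

import importlib

def reload_by_name_or_object(module):
    try:
        name = module.__name__      # a module object was passed
    except AttributeError:
        name = module               # a plain module name was passed
    mod = importlib.import_module(name)
    return importlib.reload(mod)

if __name__ == '__main__':
    import json
    print(reload_by_name_or_object(json))
    print(reload_by_name_or_object('json'))
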
exec_code(code,inputs=None,returns=None) Similar to Python's exec statement. Execute the same code fragment on all remote interpreters. inputs is a dictionary of variable values to use when executing the code. returns is a list of variable names whose values should be returned after executing the code. If one name is specified, the value of that variable is returned. If multiple names are specified, a tuple is returned. exec_code returns a list of the requested values, one entry for each slave.
def exec_code(self,code,inputs=None,returns=None):
    #use the first worker to package up the cmd.
    package = self.workers[0].exec_code_pack(code,inputs,returns)
    return self._send_recv(package)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _exec(self, code):\n self._send_command('EXEC ' + code)", "def RunScript(code):\n with ScriptContext() as script_module:\n try:\n exec code in script_module.__dict__\n except:\n # Get exception output as close to exec as possible.\n # We don't take the first entry in the traceback because it just contains\n # \"exec\". Everything after that is the submitted code.\n try:\n etype, evalue, tb = sys.exc_info()\n traceback.print_exception(etype,\n evalue,\n tb.tb_next, # one frame up\n file=sys.stderr)\n finally:\n del tb # break circular references when using exc_info\n\n return sys.stdout.getvalue(), sys.stderr.getvalue()", "def exec_(_code_, _globs_=None, _locs_=None):\r\n if _globs_ is None:\r\n frame = sys._getframe(1)\r\n _globs_ = frame.f_globals\r\n if _locs_ is None:\r\n _locs_ = frame.f_locals\r\n del frame\r\n elif _locs_ is None:\r\n _locs_ = _globs_\r\n exec(\"\"\"exec _code_ in _globs_, _locs_\"\"\")", "def exec_(_code_, _globs_=None, _locs_=None):\r\n if _globs_ is None:\r\n frame = sys._getframe(1)\r\n _globs_ = frame.f_globals\r\n if _locs_ is None:\r\n _locs_ = frame.f_locals\r\n del frame\r\n elif _locs_ is None:\r\n _locs_ = _globs_\r\n exec(\"\"\"exec _code_ in _globs_, _locs_\"\"\")", "def exec_(_code_, _globs_=None, _locs_=None):\r\n if _globs_ is None:\r\n frame = sys._getframe(1)\r\n _globs_ = frame.f_globals\r\n if _locs_ is None:\r\n _locs_ = frame.f_locals\r\n del frame\r\n elif _locs_ is None:\r\n _locs_ = _globs_\r\n exec(\"\"\"exec _code_ in _globs_, _locs_\"\"\")", "def exec_(_code_, _globs_=None, _locs_=None):\n if _globs_ is None:\n frame = sys._getframe(1)\n _globs_ = frame.f_globals\n if _locs_ is None:\n _locs_ = frame.f_locals\n del frame\n elif _locs_ is None:\n _locs_ = _globs_\n exec(\"\"\"exec _code_ in _globs_, _locs_\"\"\")", "def exec_(_code_, _globs_=None, _locs_=None):\n if _globs_ is None:\n frame = sys._getframe(1)\n _globs_ = frame.f_globals\n if _locs_ is None:\n _locs_ = frame.f_locals\n del frame\n elif _locs_ is None:\n _locs_ = _globs_\n exec(\"\"\"exec _code_ in _globs_, _locs_\"\"\")", "def exec_(_code_, _globs_=None, _locs_=None):\n if _globs_ is None:\n frame = sys._getframe(1)\n _globs_ = frame.f_globals\n if _locs_ is None:\n _locs_ = frame.f_locals\n del frame\n elif _locs_ is None:\n _locs_ = _globs_\n exec(\"\"\"exec _code_ in _globs_, _locs_\"\"\")", "def exec(self, code, timeout=10):\n self.__exec_part_1(code)\n ret, ret_err = self.__exec_part_2(timeout)\n if ret_err:\n raise ReplError(ret_err)\n return ret", "def exec_code(code, db, write=True):\n evaler = Evaluator(db, write=write)\n glb = {}\n loc = ExecutionContext(evaler=evaler)\n exec(code, glb, loc)", "def loop_code(self,code,loop_var,inputs=None,returns=None):\n the_inputs = {}\n the_inputs.update(inputs)\n loop_data = the_inputs[loop_var]\n the_inputs[loop_var] = None #make it small for packing\n package = self.workers[0].loop_code_pack(code,loop_var,\n the_inputs,returns)\n return self.loop_send_recv(package,loop_data,loop_var)", "def Exec_Python(code):\n # pylint: disable=exec-used\n try:\n exec(code, globals())\n # pylint: disable=broad-except\n # pylint: disable=bare-except\n except:\n _LOGGER.error('Execution of following code has failed %s', code)\n return False\n return True", "def execute(self, code):\n code = code()\n\n # Build an AST tree from the Python code, to get the line number of each statement\n try:\n nodes = compiler.parse(code).getChildNodes()[0].getChildNodes()\n lines = [node.lineno - 1 for node in nodes]\n except:\n self.executions += '>>> ' 
+ code + '\\n' + ''.join(traceback.format_exception(*sys.exc_info())[4:])\n return\n\n code = code.splitlines()\n\n with IDEFrameContext.exec_lock:\n stdout = sys.stdout\n\n try:\n # Iterate over all the statements\n for (a, b) in zip(lines, lines[1:] + [None]):\n sys.stdout = StringIO()\n\n source = code[a:b]\n\n try:\n # Execute the statement using this local and global context\n frame = self.get_frame()\n exec compile('\\n'.join(source), '<web>', 'single', 0, 1) in frame.f_locals, frame.f_globals\n except:\n print ''.join(traceback.format_exception(*sys.exc_info())[2:]).rstrip()\n\n self.executions += '\\n'.join([('... ' if line.startswith(' ') else '>>> ') + line for line in source]) + '\\n' + sys.stdout.getvalue()\n finally:\n sys.stdout = stdout", "def exec_(code, globs=None, locs=None):\n if globs is None:\n frame = sys._getframe(1)\n globs = frame.f_globals\n if locs is None:\n locs = frame.f_locals\n del frame\n elif locs is None:\n locs = globs\n exec(\"\"\"exec code in globs, locs\"\"\")", "def exec_(code, globs=None, locs=None):\n if globs is None:\n frame = sys._getframe(1)\n globs = frame.f_globals\n if locs is None:\n locs = frame.f_locals\n del frame\n elif locs is None:\n locs = globs\n exec(\"\"\"exec code in globs, locs\"\"\")", "def run_code():\n\n output = None\n code = request.json['code']\n\n cmd = 'python -c \"' + code +'\"'\n p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE,\n stderr=STDOUT, close_fds=True)\n output = p.stdout.read()\n\n return jsonify(output.decode('utf-8'))", "def safe_exec(code, globals_dict, random_seed=None, python_path=None, cache=None, slug=None, unsafely=False):\r\n # Check the cache for a previous result.\r\n if cache:\r\n safe_globals = json_safe(globals_dict)\r\n md5er = hashlib.md5()\r\n md5er.update(repr(code))\r\n update_hash(md5er, safe_globals)\r\n key = \"safe_exec.%r.%s\" % (random_seed, md5er.hexdigest())\r\n cached = cache.get(key)\r\n if cached is not None:\r\n # We have a cached result. The result is a pair: the exception\r\n # message, if any, else None; and the resulting globals dictionary.\r\n emsg, cleaned_results = cached\r\n globals_dict.update(cleaned_results)\r\n if emsg:\r\n raise SafeExecException(emsg)\r\n return\r\n\r\n # Create the complete code we'll run.\r\n code_prolog = CODE_PROLOG % random_seed\r\n\r\n # Decide which code executor to use.\r\n if unsafely:\r\n exec_fn = codejail_not_safe_exec\r\n else:\r\n exec_fn = codejail_safe_exec\r\n\r\n # Run the code! Results are side effects in globals_dict.\r\n try:\r\n exec_fn(\r\n code_prolog + LAZY_IMPORTS + code, globals_dict,\r\n python_path=python_path, slug=slug,\r\n )\r\n except SafeExecException as e:\r\n emsg = e.message\r\n else:\r\n emsg = None\r\n\r\n # Put the result back in the cache. 
This is complicated by the fact that\r\n # the globals dict might not be entirely serializable.\r\n if cache:\r\n cleaned_results = json_safe(globals_dict)\r\n cache.set(key, (emsg, cleaned_results))\r\n\r\n # If an exception happened, raise it now.\r\n if emsg:\r\n raise e", "def execute(self, code, timeout=REQUEST_TIMEOUT):\n response = []\n try:\n msg_id = self._send_request(code)\n\n post_idle = False\n while True:\n response_message = self._get_response(msg_id, timeout, post_idle)\n if response_message:\n response_message_type = response_message['msg_type']\n\n if response_message_type == 'error' or \\\n (response_message_type == 'execute_reply' and\n response_message['content']['status'] == 'error'):\n response.append('{}:{}:{}'.format(response_message['content']['ename'],\n response_message['content']['evalue'],\n response_message['content']['traceback']))\n elif response_message_type == 'stream':\n response.append(KernelClient._convert_raw_response(response_message['content']['text']))\n\n elif response_message_type == 'execute_result' or response_message_type == 'display_data':\n if 'text/plain' in response_message['content']['data']:\n response.append(\n KernelClient._convert_raw_response(response_message['content']['data']['text/plain']))\n elif 'text/html' in response_message['content']['data']:\n response.append(\n KernelClient._convert_raw_response(response_message['content']['data']['text/html']))\n elif response_message_type == 'status':\n if response_message['content']['execution_state'] == 'idle':\n post_idle = True # indicate we're at the logical end and timeout poll for next message\n continue\n else:\n self.log.debug(\"Unhandled response for msg_id: {} of msg_type: {}\".\n format(msg_id, response_message_type))\n\n if response_message is None: # We timed out. If post idle, its ok, else make mention of it\n if not post_idle:\n self.log.warning(\"Unexpected timeout occurred for msg_id: {} - no 'idle' status received!\".\n format(msg_id))\n break\n\n except BaseException as b:\n self.log.debug(b)\n\n return ''.join(response)", "def py_exec(self, code_string):\n if not isinstance(code_string, str):\n print('py_exec: Error, the code must be a string `{}`!'.format(code_string))\n return False\n\n try: ret = eval(code_string, self.global_vars, self.global_vars)\n except Exception, e:\n print('py_exec: Error execution code `{}`! 
Exception `{}`!'.format(code_string, e))\n ret = False\n\n return ret", "def execute(self, code, environment = dict()):\r\n if not self.config.get('scripting', 'enable') and type(code) == str:\r\n self.send(code, log = False)\r\n else:\r\n if type(code) == str:\r\n c = compile(code, 'errors.log', 'exec')\r\n else:\r\n c = code\r\n eval(c, self.getEnvironment(environment))", "def execute(code=\"\", kc=None, **kwargs):\n from .test_message_spec import validate_message\n\n if kc is None:\n kc = KC # noqa\n msg_id = kc.execute(code=code, **kwargs)\n reply = kc.get_shell_msg(timeout=TIMEOUT) # noqa\n validate_message(reply, \"execute_reply\", msg_id)\n busy = kc.get_iopub_msg(timeout=TIMEOUT) # noqa\n validate_message(busy, \"status\", msg_id)\n assert busy[\"content\"][\"execution_state\"] == \"busy\"\n\n if not kwargs.get(\"silent\"):\n execute_input = kc.get_iopub_msg(timeout=TIMEOUT) # noqa\n validate_message(execute_input, \"execute_input\", msg_id)\n assert execute_input[\"content\"][\"code\"] == code\n\n return msg_id, reply[\"content\"]", "async def _eval(self, ctx, *, code):\r\n env = {\r\n 'self': self,\r\n 'bot': self.bot,\r\n 'ctx': ctx,\r\n 'message': ctx.message,\r\n 'guild': ctx.guild,\r\n 'channel': ctx.channel,\r\n 'author': ctx.author,\r\n 'me': ctx.me,\r\n 'that': self.last_result\r\n }\r\n env.update(globals())\r\n\r\n stdout = io.StringIO()\r\n\r\n toCompile = f'async def func():\\n{textwrap.indent(code, \" \")}'\r\n\r\n try:\r\n exec(toCompile, env)\r\n except Exception as e:\r\n em = discord.Embed(description=f\"Excecuted and errored: {e.__class__.__name__}: {e}\",\r\n color=0xff0000)\r\n em.set_author(name=\"Evaluated and errored\",\r\n icon_url=ctx.message.author.avatar_url.replace(\"?size=1024\", \"\"))\r\n em.set_footer(text=\"Executed by: \" + str(ctx.message.author))\r\n em.set_thumbnail(\r\n url='https://upload.wikimedia.org/wikipedia/commons/thumb/b/ba/Red_x.svg/480px-Red_x.svg.png')\r\n em.add_field(name=\"Code\", value=f\"[See here.]({hastebin.post(code.encode('utf-8'))}.py)\")\r\n return await ctx.send(embed=em)\r\n\r\n func = env['func']\r\n try:\r\n with redirect_stdout(stdout):\r\n ret = await func()\r\n except Exception as e:\r\n value = stdout.getvalue()\r\n em = discord.Embed(description=f\"Excecuted and errored: ```py\\n{value}{traceback.format_exc()}```\",\r\n color=0xff0000)\r\n em.set_author(name=\"Evaluated and errored\",\r\n icon_url=ctx.message.author.avatar_url.replace(\"?size=1024\", \"\"))\r\n em.set_footer(text=\"Executed by: \" + str(ctx.message.author))\r\n em.set_thumbnail(\r\n url='https://upload.wikimedia.org/wikipedia/commons/thumb/b/ba/Red_x.svg/480px-Red_x.svg.png')\r\n em.add_field(name=\"Code\", value=f\"[See here.]({hastebin.post(code.encode('utf-8'))}.py)\")\r\n await ctx.send(embed=em)\r\n else:\r\n value = stdout.getvalue()\r\n if ret is None or type(ret) is discord.Message:\r\n if value:\r\n x = f\"{value}\"\r\n self.last_result = value\r\n else:\r\n x = \"Executed successfully with no objects returned.\"\r\n else:\r\n x = f\"Executed successfully and returned: {value}{ret}\"\r\n self.last_result = f\"{value}{ret}\"\r\n em = discord.Embed(description=x, color=0x00ff00)\r\n em.set_author(name=\"Evaluated with success\",\r\n icon_url=ctx.message.author.avatar_url.replace(\"?size=1024\", \"\"))\r\n em.set_footer(text=\"Executed by: \" + str(ctx.message.author))\r\n em.set_thumbnail(url='http://www.iconsdb.com/icons/preview/green/checked-checkbox-xxl.png')\r\n em.add_field(name=\"Code\", value=f\"[See 
here.]({hastebin.post(code.encode('utf-8'))}.py)\")\r\n await ctx.send(embed=em)", "def exec_and_return(execargs):\n return subprocess.call(execargs)", "async def remote_eval(self, code):\n async with self.next_step_write:\n self.next_step_content = code\n self.next_step_has_data.set()\n\n await self.last_result_has_data.wait()\n return self.last_result_content", "def code_compile_and_run(code = '', gv = {}, lv = {}, return_keys = []):\n code_ = compile(code, \"<string>\", \"exec\")\n exec(code, gv, lv)\n # no keys given, return entire local variables dict\n if len(return_keys) < 1:\n return lv\n # single key given, return just the value of this entry\n elif len(return_keys) == 1:\n if return_keys[0] in lv:\n return lv[return_keys[0]]\n # several keys given, filter local variables dict by these keys and return\n else:\n return dict([(k, lv[k]) for k in return_keys if k in lv])", "def execute(code: str, context: dict, json_output: bool) -> None: # pylint: disable=g-bare-generic\n\n # On Python >= 3.8, where there is built-in support for top-level await, a\n # very simple implementation would be possible in terms of\n # `compile(mode='single')`, except that we would then have no way to customize\n # the printing of the output (in order to use `pprint`). Therefore we use the\n # following workaround even on Python 3.8:\n\n # On Python < 3.8, top-level await is not directly supported. As a\n # workaround, wrap `code` in an async function. To preserve the normal\n # interactive evaluation behavior, two additional hacks are needed:\n #\n # - For interactive evaluation, we need to print the value of the last\n # expression, if any, but normally that value would not be available. The\n # `_ast_asyncify` function modifies the AST after parsing to return the\n # value of the final expression, if any.\n #\n # - Variable assignments need to apply to the global context, but normal\n # variable assignments within a function body are treated as local variable\n # assignments. 
To workaround that, we compile the function twice: once to\n # determine the list of local variables that are referenced from the\n # resultant code object, then we compile a modified function with added\n # `global <name>` directives for each variable that is referenced.\n\n # First compile the code in order to determine the list of local variables\n # that are referenced.\n async_code = _compile_async_block(code)\n # Ensure all local variable are treated as global variables.\n preamble = ''\n for name in async_code.co_varnames:\n preamble += f'global {name}\\n'\n # Compile the modified code.\n async_code = _compile_async_block(preamble + code)\n # Start the coroutine.\n coroutine = eval(async_code, context) # pylint: disable=eval-used\n # Run it to completion.\n result = asyncio.get_event_loop().run_until_complete(coroutine)\n # Print the value of the last expression, if any.\n if result is not None:\n if json_output:\n print(json_pprint.pformat(result, indent=2))\n else:\n try:\n pprint.pprint(result)\n except: # pylint: disable=bare-except\n # pprint fails on some types.\n print(repr(result))", "def run_code(code: List) -> Tuple[int, int]:\n executed_lines = set()\n\n prv_ptr, ins_ptr, acc = -1, 0, 0\n\n while True:\n if ins_ptr in executed_lines:\n break\n\n executed_lines.add(ins_ptr)\n\n cmd, args = code[ins_ptr]\n\n if cmd == \"acc\":\n acc += int(args)\n\n elif cmd == \"nop\":\n pass\n\n elif cmd == \"jmp\":\n prv_ptr = ins_ptr\n ins_ptr += int(args)\n continue\n\n prv_ptr = ins_ptr\n ins_ptr += 1\n\n else:\n # No loop detected\n return acc, -1\n\n return acc, ins_ptr", "def part_1(code: List):\n acc, _ = run_code(code)\n\n return acc", "async def aexec(code, localz, **kwargs):\n # Restore globals later\n args = \", \".join(list(kwargs.keys()))\n code_from_action = code.replace(\"\\n\", \"\\n \")\n async_code = (f\"async def func({args}):\"\n f\"\\n {code_from_action}\"\n f\"\\n return ax\")\n exec(async_code, {}, localz) # pylint: disable=exec-used\n # Don't expect it to return from the coro.\n result = await localz[\"func\"](**kwargs)\n return result", "def eval(self, code):\n if self._ws is None:\n raise RuntimeError('App not connected')\n self._send_command('EVAL ' + code)" ]
[ "0.65443575", "0.6386612", "0.63011175", "0.63011175", "0.63011175", "0.62941974", "0.62941974", "0.62941974", "0.62929505", "0.62840676", "0.6053061", "0.60476625", "0.5998702", "0.59937024", "0.59937024", "0.5916869", "0.58860344", "0.58178943", "0.57701963", "0.572192", "0.5690588", "0.56760484", "0.5641173", "0.55908084", "0.5590762", "0.5489755", "0.54848284", "0.5467192", "0.54442334", "0.54038024" ]
0.777075
0
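
A single-interpreter sketch of the exec_code semantics described above: execute a code string against a dictionary of inputs and hand back the requested variables, a bare value for one name and a tuple for several. In the record the cluster gathers one such result per slave into a list; the helper name exec_code_locally is an assumption.

def exec_code_locally(code, inputs=None, returns=None):
    # Run the code string in a namespace seeded with the inputs.
    namespace = dict(inputs or {})
    exec(code, namespace)
    if not returns:
        return None
    if len(returns) == 1:
        return namespace[returns[0]]
    return tuple(namespace[name] for name in returns)

if __name__ == '__main__':
    result = exec_code_locally('c = a + b; d = a * b',
                               inputs={'a': 2, 'b': 3},
                               returns=['c', 'd'])
    print(result)   # expected: (5, 6)
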
apply(function,args=(),keywords=None) Similar to Python's builtin apply method. Execute the given function with the argument list, args, and keyword arguments, keywords, on each of the slave processes. apply returns a list of the results from calling function, one result for each slave.
def apply(self,function,args=(),keywords=None):
    package = self.workers[0].apply_pack(function,args,keywords)
    return self._send_recv(package)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply(self, func, *args):\n import ray\n done_ids, undone_ids = ray.wait([shard.apply.remote(func, *args)\n for shard in self.shard_list],\n num_returns=len(self.shard_list))\n assert len(undone_ids) == 0\n return self", "def multi_apply(func, *args, **kwargs):\n\n pfunc = partial(func, **kwargs) if kwargs else func\n map_results = map(pfunc, *args)\n return tuple(map(list, zip(*map_results)))", "def apply(self, func, *args, **kwargs):\n log = get_logger()\n self._is_debug(log) and log.debug(f\"ENTER::Partition.apply::{self._identity}\")\n call_queue = self.call_queue + [[func, args, kwargs]]\n if len(call_queue) > 1:\n self._is_debug(log) and log.debug(\n f\"SUBMIT::_apply_list_of_funcs::{self._identity}\"\n )\n futures = DaskWrapper.deploy(\n func=apply_list_of_funcs,\n f_args=(call_queue, self._data),\n num_returns=2,\n pure=False,\n )\n else:\n # We handle `len(call_queue) == 1` in a different way because\n # this improves performance a bit.\n func, f_args, f_kwargs = call_queue[0]\n futures = DaskWrapper.deploy(\n func=apply_func,\n f_args=(self._data, func, *f_args),\n f_kwargs=f_kwargs,\n num_returns=2,\n pure=False,\n )\n self._is_debug(log) and log.debug(f\"SUBMIT::_apply_func::{self._identity}\")\n self._is_debug(log) and log.debug(f\"EXIT::Partition.apply::{self._identity}\")\n return self.__constructor__(futures[0], ip=futures[1])", "def multiprocess_map(function, arguments, n_processes=1):\n from multiprocessing import Queue, Process\n\n # Initialize queues\n queue_in = Queue(1)\n queue_out = Queue()\n\n # Initialize processes and link to input and output queues\n processes = [Process(target=spawn(function), args=(queue_in, queue_out))\n for i in range(n_processes)]\n for p in processes:\n p.daemon = True\n p.start()\n\n # Construct input queue, including 'None' signals to terminate\n input = [queue_in.put((i, argument)) for i, argument in\n enumerate(arguments)]\n for i in range(n_processes):\n queue_in.put((None, None))\n\n # Retrieve output queue\n output = [queue_out.get() for i in range(len(input))]\n\n # Rejoin processes and return results\n for p in processes:\n p.join()\n return [x for i, x in sorted(output)]", "def loop_apply(self,function,loop_var,args=(),keywords=None):\n #----------------------------------------------------\n # Prepare main package for sending\n # almost verbatim from loop_apply_pack in sync_cluster\n #----------------------------------------------------\n #if type(loop_var) == type(1):\n # loop_var = function.func_code.co_varnames[loop_var]\n all_keywords = {}\n if keywords: all_keywords.update(keywords)\n #more_keywords = sync_cluster.args_to_keywords(function,args)\n #sync_cluster.catch_keyword_conflicts(more_keywords,all_keywords)\n #all_keywords.update(more_keywords)\n\n # pull out the loop variable.\n if type(loop_var) != type(''):\n loop_var = int(loop_var)\n loop_data = args[loop_var]\n # no need to pack and send since it'll be in the \"addendum\"\n args = list(args)\n args[loop_var] = None\n args = tuple(args)\n else:\n loop_data = all_keywords[loop_var]\n # no need to pack and send since it'll be in the \"addendum\"\n del all_keywords[loop_var]\n contents={'_command':sync_cluster.loop_func,'function':function,\n 'args':args,'keywords':all_keywords,'loop_var':loop_var}\n package = self.workers[0].packer.pack(contents)\n return self.loop_send_recv(package,loop_data,loop_var)", "def apply(self, func, *args):\n pass", "def parallel_control(target_function, list2process, fixed_args=None, return_results=True, num_threads=None):\n if 
num_threads is None:\n num_threads = mp.cpu_count()\n num_threads = min(num_threads, len(list2process))\n\n if fixed_args is None:\n fixed_args = ()\n\n # Start the Queue, this could be also a list, dict or a shared array.\n if return_results:\n mp_manager = mp.Manager()\n output_queue = mp_manager.Queue()\n else:\n output_queue = None\n\n processes = []\n for rank, batch in enumerate(batchify(list2process, num_threads)):\n p = mp.Process(target=paralll_worker,\n args=(rank, num_threads),\n kwargs=dict(target_function=target_function,\n batch=batch,\n fixed_args=fixed_args,\n output_queue=output_queue)\n )\n processes.append(p)\n\n # Run processes\n for p in processes:\n p.start()\n\n # Exit completed processes\n for p in processes:\n p.join()\n\n # Extract results\n if return_results:\n results = []\n while (not output_queue.empty()):\n results.append(output_queue.get())\n else:\n results = None\n\n return results", "def m_proc(dfs, func):\n pool = Pool(processes=cpu_count())\n results = [pool.apply_async(func, args=(df,)) for df in dfs]\n output = [p.get() for p in results]\n return output", "def apply(self, args, run):\n return self.apply_function(args, run)", "def _doMap(self, func, iterable):\n name = \"Mapper\"\n sys.stderr.write(\"Master[%s phase]: starting\\n\" % name)\n pipes = [mp.Pipe() for _ in range(self.num_workers)]\n proc = [mp.Process(target=spawn_mapper(func), name=name, args=(q,)) for q in pipes]\n for p in proc:\n p.daemon = True\n p.start()\n for output_p, input_p in pipes:\n input_p.close() # we don't need to read from the pipes\n qi = 0\n for item in iterable:\n pipes[qi][0].send(item)\n qi = (qi+1) % self.num_workers\n for q,_ in pipes:\n q.send(None) # add termination tokens\n q.close()\n for p in proc:\n p.join()\n sys.stderr.write(\"Master[%s phase]: ended..\\n\" % name)", "def basic_parallel_loop(func, *args, parallel=True):\n if parallel is True:\n results = Parallel(n_jobs=cpu_count())(delayed(func)(*a) for a in args[0])\n else:\n results = []\n for a in args[0]:\n results.append(func(*a))\n\n return results", "def apply(self, function, *args, **kwargs):\n pass", "def multiprocess_map(func, iterable, *worker_args, n_cores=None, mode=\"map\", **pool_kwargs):\n results = []\n\n with mp.Manager() as manager:\n shared_args_proxy = None\n if worker_args is not None:\n shared_args_proxy = manager.list(worker_args)\n\n with mp.Pool(processes=n_cores, initializer=init_worker,\n initargs=shared_args_proxy, **pool_kwargs) as pool:\n if mode == \"map\":\n results = pool.map(func, iterable)\n elif mode == \"starmap\":\n results = pool.starmap(func, iterable)\n elif mode == \"imap\":\n for result in pool.imap(func, iterable):\n results.append(result)\n\n return results", "def ApplyInParallel(function, work_list, on_failure=None):\n if not work_list:\n return\n\n try:\n # Note that this is speculatively halved as an attempt to fix\n # crbug.com/953365.\n cpu_count = multiprocessing.cpu_count() // 2\n if sys.platform == 'win32':\n # TODO(crbug.com/1190269) - we can't use more than 56\n # cores on Windows or Python3 may hang.\n cpu_count = min(cpu_count, 56)\n\n except NotImplementedError:\n # Some platforms can raise a NotImplementedError from cpu_count()\n logging.warning('cpu_count() not implemented.')\n cpu_count = 4\n pool = ThreadPool(min(cpu_count, len(work_list)))\n\n def function_with_try(arg):\n try:\n function(arg)\n except Exception: # pylint: disable=broad-except\n # logging exception here is the only way to get a stack trace since\n # multiprocessing's pool 
implementation does not save that data. See\n # crbug.com/953365.\n logging.exception('Exception while running %s' % function.__name__)\n if on_failure:\n on_failure(arg)\n\n try:\n pool.imap_unordered(function_with_try, work_list)\n pool.close()\n pool.join()\n finally:\n pool.terminate()", "def runSlaveRun():\n\n np.set_printoptions(linewidth=1000)\n function = None\n options = None\n\n # print(\"Process {}/{} reporting for duty!\".format(rank, size))\n\n function = comm.bcast(function, root=0)\n arguments = comm.scatter(options, root=0)\n\n results = function(*arguments)\n\n comm.Barrier()\n comm.gather(results, root=0)\n comm.Disconnect()", "def apply_parallel(df, func, **kwargs):\n num_workers = cpu_count()\n\n if (df.shape[0] == 1) or (num_workers == 1):\n return apply_df((df, func, kwargs))\n\n retLst = Parallel(n_jobs=num_workers)(delayed(apply_df)(\n input_args=(d, func, kwargs)) for d in np.array_split(df, num_workers))\n return pd.concat(retLst)", "def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)", "def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)", "def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)", "def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)", "def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)", "def __call__(self):\n # apply(self.func, self.args)\n self.func(*self.args)", "def apply(self, func, *args, **kwargs):\n pass", "def apply(df, f):\n return [f(row) for row in df]", "def apply(self, func, *args, **kwargs):\n\n def call_queue_closure(data, call_queue):\n \"\"\"\n Apply callables from `call_queue` on copy of the `data` and return the result.\n\n Parameters\n ----------\n data : pandas.DataFrame or pandas.Series\n Data to use for computations.\n call_queue : array-like\n Array with callables and it's kwargs to be applied to the `data`.\n\n Returns\n -------\n pandas.DataFrame or pandas.Series\n \"\"\"\n result = data.copy()\n for func, f_args, f_kwargs in call_queue:\n try:\n result = func(result, *f_args, **f_kwargs)\n except Exception as err:\n self.call_queue = []\n raise err\n return result\n\n self._data = call_queue_closure(self._data, self.call_queue)\n self.call_queue = []\n return self.__constructor__(func(self._data.copy(), *args, **kwargs))", "def apply_(self, function):\n self.sequences = [function(seq) for seq in self.sequences]\n return self", "def dask(function, argument_list):\n from dask import delayed, compute\n\n if _cluster_setup.dask._connection is None:\n error_message = (\n 'No connection was established to a Dask scheduler that distributes jobs to workers. '\n \"Please use unified_map.cluster_setup.dask and/or Dask's command line \"\n 'interface for\\n'\n ' 1. Starting a scheduler\\n'\n ' 2. Starting several workers\\n'\n ' 3. 
Connecting to the scheduler')\n raise ConnectionError(error_message)\n\n jobs = [delayed(function)(*args) for args in argument_list]\n result_tuple = compute(*jobs, get=_cluster_setup.dask._connection.get)\n result_list = list(result_tuple)\n return result_list", "def __multiprocess_wrapper(func, processes, on_complete):\n\n def wrapper(*args):\n print(\"Using {processes} CPU's\".format(processes=processes))\n pool = multiprocessing.Pool(processes)\n manager = multiprocessing.Manager()\n\n # a queue for STDOUT and STDERR output of sub-processes (see cea.utilities.workerstream.QueueWorkerStream)\n queue = manager.Queue()\n\n # make sure the first arg is a list (not a generator) since we need the length of the sequence\n args = [list(a) for a in args]\n n = len(args[0]) # the number of iterations to map\n\n # set up the list of i-values for on_complete\n i_queue = manager.Queue()\n for i in range(n):\n i_queue.put(i)\n\n args = [repeat(func, n),\n repeat(queue, n),\n repeat(on_complete, n),\n repeat(i_queue, n),\n repeat(n, n)] + args\n args = zip(*args)\n\n map_result = pool.map_async(__apply_func_with_worker_stream, args)\n\n while not map_result.ready():\n stream_from_queue(queue)\n result = map_result.get()\n\n pool.close()\n pool.join()\n\n # process the rest of the queue\n while not queue.empty():\n stream_from_queue(queue)\n return result\n\n return wrapper", "def map_multi_args(self, func, iterable, chunksize=None):\n assert self._state == RUN\n return self.map_async(one_to_many(func), iterable, chunksize).get()", "def map(self, target, *iterable: iter):\n for args in zip(*iterable):\n self.submit(target=target, args=args)" ]
[ "0.66074944", "0.60110694", "0.59976894", "0.5838005", "0.5826872", "0.58206975", "0.5797238", "0.57299274", "0.56924593", "0.5683949", "0.5676245", "0.56597686", "0.5618988", "0.5606794", "0.5596438", "0.5573493", "0.5522505", "0.5522505", "0.5522505", "0.5522505", "0.5522505", "0.551539", "0.5484323", "0.54219973", "0.54195267", "0.54170525", "0.5407388", "0.5406643", "0.5401179", "0.5399768" ]
0.6772759
0
loop_apply(function,loop_var, args=(),keywords=None) Description Call function with the given args and keywords. One of the arguments or keywords is actually a sequence of arguments. This sequence is looped over, calling function once for each value in the sequence. loop_var indicates which variable to loop over. If an integer, loop_var indexes the args list. If a string, it specifies a keyword variable. The loop sequence is divided as evenly as possible between the worker nodes and executed in parallel.
def loop_apply(self,function,loop_var,args=(),keywords=None): #---------------------------------------------------- # Prepare main package for sending # almost verbatim from loop_apply_pack in sync_cluster #---------------------------------------------------- #if type(loop_var) == type(1): # loop_var = function.func_code.co_varnames[loop_var] all_keywords = {} if keywords: all_keywords.update(keywords) #more_keywords = sync_cluster.args_to_keywords(function,args) #sync_cluster.catch_keyword_conflicts(more_keywords,all_keywords) #all_keywords.update(more_keywords) # pull out the loop variable. if type(loop_var) != type(''): loop_var = int(loop_var) loop_data = args[loop_var] # no need to pack and send since it'll be in the "addendum" args = list(args) args[loop_var] = None args = tuple(args) else: loop_data = all_keywords[loop_var] # no need to pack and send since it'll be in the "addendum" del all_keywords[loop_var] contents={'_command':sync_cluster.loop_func,'function':function, 'args':args,'keywords':all_keywords,'loop_var':loop_var} package = self.workers[0].packer.pack(contents) return self.loop_send_recv(package,loop_data,loop_var)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply(self,function,args=(),keywords=None):\n package = self.workers[0].apply_pack(function,args,keywords)\n return self._send_recv(package)", "def basic_parallel_loop(func, *args, parallel=True):\n if parallel is True:\n results = Parallel(n_jobs=cpu_count())(delayed(func)(*a) for a in args[0])\n else:\n results = []\n for a in args[0]:\n results.append(func(*a))\n\n return results", "def parallel(fn, args=None, kwargs=None, loop_kw=None, spare_cpu=1, num_processes=None,\n do_parallel=True):\n if num_processes is None:\n num_processes = os.cpu_count() - spare_cpu # Default to defining a number of spare CPUs\n if args is None:\n args = ()\n if kwargs is None:\n kwargs = {}\n\n # Set up wrapper for arguments for each iteration of the loop\n try:\n za = zip(*args)\n except TypeError:\n args = (args,)\n za = zip(*args)\n wrapper_arg = [(fn, a, kwargs.copy()) for a in za]\n if loop_kw is not None:\n # Deal with kwargs with different parameters for each loop\n if isinstance(loop_kw, str):\n loop_kw = [loop_kw]\n if len(kwargs[loop_kw[0]]) > len(wrapper_arg):\n # Args are non existent\n wrapper_arg = [(fn, (), kwargs.copy()) for _ in range(len(kwargs[loop_kw[0]]))]\n for idx, wa in enumerate(wrapper_arg):\n wa = list(wa)\n for kw in loop_kw:\n wa[2][kw] = kwargs[kw][idx]\n wrapper_arg[idx] = tuple(wa)\n\n # Perform actual loop\n do_parallel = do_parallel and num_processes > 1\n if do_parallel:\n with Pool(num_processes) as pool:\n return pool.map(_parallel_wrapper, wrapper_arg)\n else:\n return list(map(_parallel_wrapper, wrapper_arg))", "def apply(self, function, *args, **kwargs):\n pass", "def loop(func):\n def wrapper(*a, **b):\n while True:\n func(*a, **b)\n return wrapper", "def apply(self, func, *args):\n pass", "def apply(self, func, *args, **kwargs):\n pass", "def apply(self, args, run):\n return self.apply_function(args, run)", "def loop(func, n):\n for i in range(n):\n func()", "def ApplyInParallel(function, work_list, on_failure=None):\n if not work_list:\n return\n\n try:\n # Note that this is speculatively halved as an attempt to fix\n # crbug.com/953365.\n cpu_count = multiprocessing.cpu_count() // 2\n if sys.platform == 'win32':\n # TODO(crbug.com/1190269) - we can't use more than 56\n # cores on Windows or Python3 may hang.\n cpu_count = min(cpu_count, 56)\n\n except NotImplementedError:\n # Some platforms can raise a NotImplementedError from cpu_count()\n logging.warning('cpu_count() not implemented.')\n cpu_count = 4\n pool = ThreadPool(min(cpu_count, len(work_list)))\n\n def function_with_try(arg):\n try:\n function(arg)\n except Exception: # pylint: disable=broad-except\n # logging exception here is the only way to get a stack trace since\n # multiprocessing's pool implementation does not save that data. 
See\n # crbug.com/953365.\n logging.exception('Exception while running %s' % function.__name__)\n if on_failure:\n on_failure(arg)\n\n try:\n pool.imap_unordered(function_with_try, work_list)\n pool.close()\n pool.join()\n finally:\n pool.terminate()", "def multi_apply(func, *args, **kwargs):\n\n pfunc = partial(func, **kwargs) if kwargs else func\n map_results = map(pfunc, *args)\n return tuple(map(list, zip(*map_results)))", "def sequential(self, func, args_dict=None):\n for uri, cf in self._cfs.items():\n args = self._process_args_dict(cf, uri, args_dict)\n func(*args)", "def apply(self, func, *args):\n import ray\n done_ids, undone_ids = ray.wait([shard.apply.remote(func, *args)\n for shard in self.shard_list],\n num_returns=len(self.shard_list))\n assert len(undone_ids) == 0\n return self", "def apply_function(f, args):\n if len(signature(f).parameters) == len(args):\n func = curry(f)\n for arg_value in args:\n func = func(arg_value)\n return func()\n else:\n raise Exception(\"the number of function's parameter is not matched args, len(args): \", len(args))", "def receive_fn(fn: Callable):\n\n global __enveloop_number_of_loops__\n\n __enveloop_number_of_loops__[fn.__name__] = number_of_loops\n\n @functools.wraps(fn)\n def wrapper_fn(*args, **kwargs):\n \"\"\"Function that does the actual wrapping.\n :param args:\n :param kwargs:\n :return: function response\n \"\"\"\n\n if __enveloop_number_of_loops__[fn.__name__] > 0:\n __enveloop_number_of_loops__[fn.__name__] -= 1\n return fn(*args, **kwargs)\n else:\n del __enveloop_number_of_loops__[fn.__name__]\n if callback:\n return callback(*args, **kwargs)\n\n return wrapper_fn", "def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)", "def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)", "def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)", "def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)", "def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)", "def do(self, function, args):\n self.continue_event.clear()\n function(*args)\n self.continue_event.wait()", "def apply(self, *args):\n scalarmap=dict()\n if len(args) == 1:\n sequence = args[0]\n if isinstance(sequence, str):\n raise TypeError(\"Sequence cannot be str\")\n elif isinstance(sequence, Sequence):\n scalarmap = {tag: scalar for tag, scalar in sequence}\n elif isinstance(sequence, dict):\n scalarmap = sequence\n elif len(args) == 2:\n scalarmap = {args[0]: args[1]}\n else:\n return ValueError(\"apply() only accepts 1 or 2 arguments\")\n \n for tag in scalarmap:\n for cashflow in self._project[tag]:\n cashflow.amount *= scalarmap[tag]", "def parallel_control(target_function, list2process, fixed_args=None, return_results=True, num_threads=None):\n if num_threads is None:\n num_threads = mp.cpu_count()\n num_threads = min(num_threads, len(list2process))\n\n if fixed_args is None:\n fixed_args = ()\n\n # Start the Queue, this could be also a list, dict or a shared array.\n if return_results:\n mp_manager = mp.Manager()\n output_queue = mp_manager.Queue()\n else:\n output_queue = None\n\n processes = []\n for rank, batch in enumerate(batchify(list2process, num_threads)):\n p = mp.Process(target=paralll_worker,\n args=(rank, num_threads),\n kwargs=dict(target_function=target_function,\n batch=batch,\n fixed_args=fixed_args,\n output_queue=output_queue)\n )\n processes.append(p)\n\n # Run processes\n for p in processes:\n 
p.start()\n\n # Exit completed processes\n for p in processes:\n p.join()\n\n # Extract results\n if return_results:\n results = []\n while (not output_queue.empty()):\n results.append(output_queue.get())\n else:\n results = None\n\n return results", "def applyfunc(func, args, kwargs, pure=False):\n\n args, dasks = unzip(map(to_task_dasks, args), 2)\n if kwargs:\n dask_kwargs, dasks2 = to_task_dasks(kwargs)\n dasks = dasks + (dasks2,)\n task = (apply, func, list(args), dask_kwargs)\n else:\n task = (func,) + args\n name = funcname(func) + '-' + tokenize(*task, pure=pure)\n dasks = flat_unique(dasks)\n dasks.append({name: task})\n return Value(name, dasks)", "def get_train_loop_fun(param_dict, fun_dict):\n def train_loop_fun(x_pl, hid_pl, err_acc, count, f_state, eps_z, debug_tensors):\n return train_loop(x_pl, hid_pl, err_acc, count, f_state, eps_z, param_dict, fun_dict, debug_tensors)\n return train_loop_fun", "def get_gen_loop_fun(param_dict, fun_dict):\n def f(x_pl, hid_pl, count, f_state, eps_z, eps_x):\n return gen_loop(x_pl, hid_pl, count, f_state, eps_z, eps_x, param_dict, fun_dict)\n return f", "def parallel(self, func, args_dict=None):\n try:\n self.parallel_safe(func, args_dict)\n except Exception:\n pass", "def foreach(func, iterable):\n\n\tfor x in iterable:\n\t\tfunc(x)", "def __call__(self):\n # apply(self.func, self.args)\n self.func(*self.args)", "def run(self):\n self.fn(*self.args, **self.kwargs)" ]
[ "0.6428172", "0.6072475", "0.5980121", "0.59507483", "0.5832301", "0.56668514", "0.5642099", "0.5402423", "0.53344244", "0.52769375", "0.5213421", "0.51631296", "0.5139644", "0.5095581", "0.50520253", "0.5027378", "0.5027378", "0.5027378", "0.5027378", "0.5027378", "0.50005674", "0.49630684", "0.49570322", "0.49549004", "0.4942165", "0.49344423", "0.48753572", "0.48677886", "0.48656243", "0.48600733" ]
0.82264096
0
loop_code(code,loop_var,inputs=None,returns=None) Description Similar to exec_code and loop_apply. Here loop_var indicates the variable name in the inputs dictionary that is looped over.
def loop_code(self,code,loop_var,inputs=None,returns=None): the_inputs = {} the_inputs.update(inputs) loop_data = the_inputs[loop_var] the_inputs[loop_var] = None #make it small for packing package = self.workers[0].loop_code_pack(code,loop_var, the_inputs,returns) return self.loop_send_recv(package,loop_data,loop_var)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _eval_python(loop, context, params=None, add_boilerplate=False, namespace=None):\n params = params # pylint\n \n # Are we actually doing this?\n if (not context.do_jit):\n return False\n\n # Emulating full VB programs in Python is difficult, so for now skip loops\n # that Execute() dynamic VB.\n full_code_vba = safe_str_convert(loop).replace(\"\\n\", \"\\\\n\")\n code_vba = full_code_vba[:20]\n code_vba_lower = full_code_vba.lower()\n if (not context.throttle_logging):\n log.info(\"Starting JIT emulation of '\" + code_vba + \"...' ...\")\n if ((\"Execute(\".lower() in code_vba_lower) or\n (\"ExecuteGlobal(\".lower() in code_vba_lower) or\n (\"Eval(\".lower() in code_vba_lower)):\n log.warning(\"Loop Execute()s dynamic code. Not JIT emulating.\")\n return False\n if (\".Item(\".lower() in code_vba_lower):\n log.warning(\"Loop references forms with .Item(). Not JIT emulating.\")\n return False\n \n # Generate the Python code for the VB code and execute the generated Python code.\n # TODO: Remove dangerous functions from what can be exec'ed.\n code_python = \"\"\n try:\n\n # For JIT handling we modify the values of certain variables to\n # handle recursive python code generation, so make a copy of the\n # original context.\n tmp_context = Context(context=context, _locals=context.locals, copy_globals=True)\n \n # Get the Python code for the loop.\n if (not context.throttle_logging):\n log.info(\"Generating Python JIT code...\")\n code_python = to_python(loop, tmp_context)\n if add_boilerplate:\n var_inits, _ = _loop_vars_to_python(loop, tmp_context, 0)\n func_defns = _called_funcs_to_python(loop, tmp_context, 0)\n code_python = _boilerplate_to_python(0) + \"\\n\" + \\\n func_defns + \"\\n\" + \\\n var_inits + \"\\n\" + \\\n code_python + \"\\n\" + \\\n _check_for_iocs(loop, tmp_context, 0) + \"\\n\" + \\\n _updated_vars_to_python(loop, tmp_context, 0)\n if (log.getEffectiveLevel() == logging.DEBUG):\n safe_print(\"JIT CODE!!\")\n safe_print(code_python)\n #print \"REMOVE THIS!!!\"\n #sys.exit(0)\n if (not context.throttle_logging):\n log.info(\"Done generating Python JIT code.\")\n\n # Extended ASCII strings are handled differently in VBScript and VBA.\n # Punt if we are emulating VBA and we have what appears to be extended ASCII\n # strings. For performance we are not handling the MS VBA extended ASCII in the python\n # JIT code.\n if (not context.is_vbscript):\n \n # Look for non-ASCII strings.\n non_ascii_pat = r'\"[^\"]*[\\x7f-\\xff][^\"]*\"'\n non_ascii_pat1 = r'\"[^\"]*(?:\\\\x7f|\\\\x[89a-f][0-9a-f])[^\"]*\"'\n if ((re.search(non_ascii_pat1, code_python) is not None) or\n (re.search(non_ascii_pat, code_python) is not None)):\n log.warning(\"VBA code contains Microsoft specific extended ASCII strings. Not JIT emulating.\")\n return False\n\n # Check for dynamic code execution in called functions.\n if (('\"Execute\", ' in code_python) or\n ('\"ExecuteGlobal\", ' in code_python) or\n ('\"Eval\", ' in code_python)):\n log.warning(\"Functions called by loop Execute() dynamic code. Not JIT emulating.\")\n return False\n \n # Run the Python code.\n \n # Have we already run this exact loop?\n if (code_python in jit_cache):\n var_updates = jit_cache[code_python]\n if (not context.throttle_logging):\n log.info(\"Using cached JIT loop results.\")\n if (var_updates == \"ERROR\"):\n log.error(\"Previous run of Python JIT loop emulation failed. Using fallback emulation for loop.\")\n return False\n\n # No cached results. 
Run the loop.\n elif (namespace is None):\n\n # JIT code execution goes not involve emulating VB GOTOs.\n context.goto_executed = False\n \n # Magic. For some reason exec'ing in locals() makes the dynamically generated\n # code recognize functions defined in the dynamic code. I don't know why.\n if (not context.throttle_logging):\n log.info(\"Evaluating Python JIT code...\")\n exec code_python in locals()\n else:\n\n # JIT code execution goes not involve emulating VB GOTOs.\n context.goto_executed = False\n\n # Run the JIT code in the given namespace.\n exec(code_python, namespace)\n var_updates = namespace[\"var_updates\"]\n if (not context.throttle_logging):\n log.info(\"Done JIT emulation of '\" + code_vba + \"...' .\")\n\n # Cache the loop results.\n jit_cache[code_python] = var_updates\n \n # Update the context with the variable values from the JIT code execution.\n try:\n for updated_var in var_updates.keys():\n if (updated_var == \"__shell_code__\"):\n continue\n context.set(updated_var, var_updates[updated_var])\n except (NameError, UnboundLocalError):\n log.warning(\"No variables set by Python JIT code.\")\n\n # Update shellcode bytes from the JIT emulation.\n import vba_context\n vba_context.shellcode = var_updates[\"__shell_code__\"]\n\n except NotImplementedError as e:\n log.error(\"Python JIT emulation of loop failed. \" + safe_str_convert(e) + \". Using fallback emulation method for loop...\")\n #safe_print(\"REMOVE THIS!!\")\n #raise e\n return False\n\n except Exception as e:\n\n # Cache the error.\n jit_cache[code_python] = \"ERROR\"\n \n # If we bombed out due to a potential infinite loop we\n # are done.\n if (\"Infinite Loop\" in safe_str_convert(e)):\n log.warning(\"Detected infinite loop. Terminating loop.\")\n return True\n\n # We had some other error. Emulating the loop in Python failed.\n log.error(\"Python JIT emulation of loop failed. \" + safe_str_convert(e) + \". 
Using fallback emulation method for loop...\")\n if (log.getEffectiveLevel() == logging.DEBUG):\n traceback.print_exc(file=sys.stdout)\n safe_print(\"-*-*-*-*-\\n\" + code_python + \"\\n-*-*-*-*-\")\n return False\n\n # Done.\n return True", "def exec_code(self,code,inputs=None,returns=None):\n #use the first worker to package up the cmd.\n package = self.workers[0].exec_code_pack(code,inputs,returns)\n return self._send_recv(package)", "def code():", "def loop_apply(self,function,loop_var,args=(),keywords=None):\n #----------------------------------------------------\n # Prepare main package for sending\n # almost verbatim from loop_apply_pack in sync_cluster\n #----------------------------------------------------\n #if type(loop_var) == type(1):\n # loop_var = function.func_code.co_varnames[loop_var]\n all_keywords = {}\n if keywords: all_keywords.update(keywords)\n #more_keywords = sync_cluster.args_to_keywords(function,args)\n #sync_cluster.catch_keyword_conflicts(more_keywords,all_keywords)\n #all_keywords.update(more_keywords)\n\n # pull out the loop variable.\n if type(loop_var) != type(''):\n loop_var = int(loop_var)\n loop_data = args[loop_var]\n # no need to pack and send since it'll be in the \"addendum\"\n args = list(args)\n args[loop_var] = None\n args = tuple(args)\n else:\n loop_data = all_keywords[loop_var]\n # no need to pack and send since it'll be in the \"addendum\"\n del all_keywords[loop_var]\n contents={'_command':sync_cluster.loop_func,'function':function,\n 'args':args,'keywords':all_keywords,'loop_var':loop_var}\n package = self.workers[0].packer.pack(contents)\n return self.loop_send_recv(package,loop_data,loop_var)", "def run_code():\n\n output = None\n code = request.json['code']\n\n cmd = 'python -c \"' + code +'\"'\n p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE,\n stderr=STDOUT, close_fds=True)\n output = p.stdout.read()\n\n return jsonify(output.decode('utf-8'))", "def get_gen_loop_fun(param_dict, fun_dict):\n def f(x_pl, hid_pl, count, f_state, eps_z, eps_x):\n return gen_loop(x_pl, hid_pl, count, f_state, eps_z, eps_x, param_dict, fun_dict)\n return f", "def replace(code_block=None, local_variables=None):\n global CODE\n if not code_block:\n code_block = CODE\n if local_variables is not None:\n variables = {**globals(), **local_variables}\n else:\n variables = globals()\n matches = itertools.chain(re.finditer(\"%s\\\\s*%s\\\\s+(?P<code>(.|\\n)+?)%s\" % (ml_comment_symbol_start, pycodegen_cmd, ml_comment_symbol_end), code_block, flags=0),\n re.finditer(\"%s\\\\s+(?!block_start\\\\s+)(?!block_end\\\\s+)(?P<code>(.)+?)\\n\" % (pragma_cmd), code_block, flags=0))\n for res_ml in matches:\n res_ml_code = res_ml.group(0)\n try:\n evaluated = str(eval(res_ml.groupdict()[\"code\"], variables))\n code_block = code_block.replace(res_ml_code, evaluated)\n logging.debug(\"Evaluated '%s' to '%s'\" % (res_ml.groupdict()[\"code\"], evaluated))\n continue\n except Exception as e:\n logging.debug(\"Failed to evaluate inline code\")\n try:\n exec(res_ml.groupdict()[\"code\"], globals())\n code_block = code_block.replace(res_ml_code, \"\")\n logging.debug(\"Executed in global space: '%s'\" % res_ml.groupdict()[\"code\"])\n except Exception as e:\n logging.warning(\"Could not execute inline code:\\n\\tCommand: '''\\n%s\\n'''\\n\\tError: %s\" % (res_ml.groupdict()[\"code\"], e))\n return code_block", "def part_1(code: List):\n acc, _ = run_code(code)\n\n return acc", "def evaluateCode(lang, code):", "def main_code():\n pass", "async def _eval(self, ctx, *, code):\r\n env 
= {\r\n 'self': self,\r\n 'bot': self.bot,\r\n 'ctx': ctx,\r\n 'message': ctx.message,\r\n 'guild': ctx.guild,\r\n 'channel': ctx.channel,\r\n 'author': ctx.author,\r\n 'me': ctx.me,\r\n 'that': self.last_result\r\n }\r\n env.update(globals())\r\n\r\n stdout = io.StringIO()\r\n\r\n toCompile = f'async def func():\\n{textwrap.indent(code, \" \")}'\r\n\r\n try:\r\n exec(toCompile, env)\r\n except Exception as e:\r\n em = discord.Embed(description=f\"Excecuted and errored: {e.__class__.__name__}: {e}\",\r\n color=0xff0000)\r\n em.set_author(name=\"Evaluated and errored\",\r\n icon_url=ctx.message.author.avatar_url.replace(\"?size=1024\", \"\"))\r\n em.set_footer(text=\"Executed by: \" + str(ctx.message.author))\r\n em.set_thumbnail(\r\n url='https://upload.wikimedia.org/wikipedia/commons/thumb/b/ba/Red_x.svg/480px-Red_x.svg.png')\r\n em.add_field(name=\"Code\", value=f\"[See here.]({hastebin.post(code.encode('utf-8'))}.py)\")\r\n return await ctx.send(embed=em)\r\n\r\n func = env['func']\r\n try:\r\n with redirect_stdout(stdout):\r\n ret = await func()\r\n except Exception as e:\r\n value = stdout.getvalue()\r\n em = discord.Embed(description=f\"Excecuted and errored: ```py\\n{value}{traceback.format_exc()}```\",\r\n color=0xff0000)\r\n em.set_author(name=\"Evaluated and errored\",\r\n icon_url=ctx.message.author.avatar_url.replace(\"?size=1024\", \"\"))\r\n em.set_footer(text=\"Executed by: \" + str(ctx.message.author))\r\n em.set_thumbnail(\r\n url='https://upload.wikimedia.org/wikipedia/commons/thumb/b/ba/Red_x.svg/480px-Red_x.svg.png')\r\n em.add_field(name=\"Code\", value=f\"[See here.]({hastebin.post(code.encode('utf-8'))}.py)\")\r\n await ctx.send(embed=em)\r\n else:\r\n value = stdout.getvalue()\r\n if ret is None or type(ret) is discord.Message:\r\n if value:\r\n x = f\"{value}\"\r\n self.last_result = value\r\n else:\r\n x = \"Executed successfully with no objects returned.\"\r\n else:\r\n x = f\"Executed successfully and returned: {value}{ret}\"\r\n self.last_result = f\"{value}{ret}\"\r\n em = discord.Embed(description=x, color=0x00ff00)\r\n em.set_author(name=\"Evaluated with success\",\r\n icon_url=ctx.message.author.avatar_url.replace(\"?size=1024\", \"\"))\r\n em.set_footer(text=\"Executed by: \" + str(ctx.message.author))\r\n em.set_thumbnail(url='http://www.iconsdb.com/icons/preview/green/checked-checkbox-xxl.png')\r\n em.add_field(name=\"Code\", value=f\"[See here.]({hastebin.post(code.encode('utf-8'))}.py)\")\r\n await ctx.send(embed=em)", "def generate(code):\n name, traits = parseCode(code)\n return globals()[name](**traits)", "def run_code(code: List) -> Tuple[int, int]:\n executed_lines = set()\n\n prv_ptr, ins_ptr, acc = -1, 0, 0\n\n while True:\n if ins_ptr in executed_lines:\n break\n\n executed_lines.add(ins_ptr)\n\n cmd, args = code[ins_ptr]\n\n if cmd == \"acc\":\n acc += int(args)\n\n elif cmd == \"nop\":\n pass\n\n elif cmd == \"jmp\":\n prv_ptr = ins_ptr\n ins_ptr += int(args)\n continue\n\n prv_ptr = ins_ptr\n ins_ptr += 1\n\n else:\n # No loop detected\n return acc, -1\n\n return acc, ins_ptr", "def compile_code(name, code, context=None):\n if context is None:\n context = {} # pragma: no cover\n try:\n obj = compile(code, \"\", \"exec\")\n except SyntaxError as e: # pragma: no cover\n raise SyntaxError(f\"Unable to compile\\n{code}\") from e\n context_g = context.copy()\n context_l = context.copy()\n exec(obj, context_g, context_l) # pylint: disable=W0122\n return context_l[name]", "async def _compile(ctx, code: Option(str, \"Brainfuck code to compile into 
python\")):\n compiled = bot.brainfuck.compile(code)\n await send_code(ctx, compiled.code, lang=\"py\")", "def main():\n parser = ArgumentParser(description=__doc__,\n formatter_class=RawTextHelpFormatter)\n parser.add_argument('code', help='Python code to execute')\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-3', action='store_const', dest='python',\n const='python3', help='Explicitly use Python 3')\n group.add_argument('-2', action='store_const', dest='python',\n const='python2', help='Explicitly use Python 2')\n group.add_argument('-p', '--python', help='Specify python interpreter')\n args = parser.parse_args()\n if args.python is not None:\n call([args.python, __file__, args.code])\n else:\n InteractiveInterpreter(LocalsImportDict()).runsource(args.code)", "def code(self) -> pulumi.Input['CanaryCodeArgs']:\n return pulumi.get(self, \"code\")", "def get_code(self, images):\n return self.sess.run(self.z, feed_dict={self.input: images})", "async def cli(self, code, *m):\n if self.bot.check_code(code):\n p = subprocess.run(args=m, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n await self.bot.say(codify(p.stdout.decode('utf-8'), p.stderr.decode('utf-8'), language='DOS'))\n else:\n await self.bot.reply('Bad code!')", "def process_python(data, code):\n\tx=data\n\treturn eval(code)", "def runcode(self, code):\n if not self.locals.get('autocommit', None):\n return self.locals['db'].transact(code.InteractiveConsole.runcode, self, code)\n return code.InteractiveConsole.runcode(self, code)", "def _compile_async_block(code: str):\n code_ast = _ast_asyncify(code, 'async-def-wrapper')\n async_wrapper_code = compile(code_ast, filename='fakefile.py', mode='exec')\n context = {}\n exec(async_wrapper_code, context) # pylint: disable=exec-used\n return context.pop('async-def-wrapper').__code__ # type: ignore", "def RunScript(code):\n with ScriptContext() as script_module:\n try:\n exec code in script_module.__dict__\n except:\n # Get exception output as close to exec as possible.\n # We don't take the first entry in the traceback because it just contains\n # \"exec\". 
Everything after that is the submitted code.\n try:\n etype, evalue, tb = sys.exc_info()\n traceback.print_exception(etype,\n evalue,\n tb.tb_next, # one frame up\n file=sys.stderr)\n finally:\n del tb # break circular references when using exc_info\n\n return sys.stdout.getvalue(), sys.stderr.getvalue()", "def runner(code, out_stream):\n code_obj = compiler.compile_source(code)\n vm = virtual_machine.VirtualMachine(out_stream)\n vm.run_code(code_obj)", "def execute(self, code, environment = dict()):\r\n if not self.config.get('scripting', 'enable') and type(code) == str:\r\n self.send(code, log = False)\r\n else:\r\n if type(code) == str:\r\n c = compile(code, 'errors.log', 'exec')\r\n else:\r\n c = code\r\n eval(c, self.getEnvironment(environment))", "def generate_code_implementation(name, code, comment=None):\n k = code.dimension()\n cs, _p = code.standard_form()\n info = {\n 'name': name,\n 'n': code.length(),\n 'k': k,\n 'generator': [bools_to_binvec(row) for row in cs.systematic_generator_matrix()],\n 'generator_bools': cs.systematic_generator_matrix(),\n 'parity_matrix': [bools_to_binvec(row) for row in cs.parity_check_matrix()],\n \"comment\": comment,\n }\n\n max_error = code.decoder().maximum_error_weight()\n\n syndrome_map = {}\n for (he, error) in cs.decoder().syndrome_table().items():\n syndrome_map[ZZ(list(he), base=2)] = bools_to_binvec(error)\n\n info['syndrome_map'] = syndrome_map\n info['syndrome_map_itemlen'] = len(list(syndrome_map.values())[0])\n\n assert max(syndrome_map) < 2**64, \"sydrome map too big!\"\n \n info['info_set'] = cs.information_set()\n\n testcases = []\n if 'might-error' in cs.decoder().decoder_type():\n max_error -= 3\n for _ in range(20):\n randvec = random_vector(GF(2), code.length())\n codeword = cs.decode_to_code(randvec)\n testcase = {\n 'randvec': randvec,\n 'codeword': codeword,\n }\n testcases.append(testcase)\n\n info['testcases'] = testcases\n\n with open('syndrome_code_implementation.rs.j2', 'r') as templatefile:\n template = ENVIRONMENT.from_string(templatefile.read())\n with open('{name}/{name}_{n}_{k}.rs'.format(name=name.lower(),\n n=code.length(), k=k),\n 'w') as outputfile:\n outputfile.write(template.render(**info))\n\n rendered_codes[name].append((code.length(), code.dimension()))", "def code_mix(code, **changes):\n args = (changes.get(arg, getattr(code, arg)) for arg in code_mix.keys)\n return types.CodeType(*args)", "def runIntcode(program):\n\n pc = 0\n\n while program[pc] != 99:\n command = program[pc]\n reg1 = program[program[pc + 1]]\n reg2 = program[program[pc + 2]]\n dest = program[pc + 3]\n\n if command == 1:\n print (pc, \" (add) \", reg1, \" \", reg2, \" -> \", dest)\n program[dest] = reg1 + reg2\n\n if command == 2:\n print (pc, \" (mul) \", reg1, \" \", reg2, \" -> \", dest)\n program[dest] = reg1 * reg2\n\n pc = pc + 4\n\n return program", "def _score_code(self, code):\n # Get list of 2-tuples, each containing an input sequence and an output\n # sequence.\n io_seqs = self.task.make_io_set()\n terminal_reward = 0.0\n results = []\n reason = 'correct'\n for input_seq, output_seq in io_seqs:\n eval_result = bf.evaluate(\n code, input_buffer=input_seq, timeout=0.1,\n max_steps=self.max_execution_steps,\n base=self.task.base,\n require_correct_syntax=self.require_correct_syntax)\n result, success = eval_result.output, eval_result.success\n if not success:\n # Code execution timed out.\n terminal_reward = self.failure_reward\n results = []\n reason = eval_result.failure_reason\n break\n else:\n terminal_reward += 
self.reward_fn(result, output_seq, self.task.base)\n if result == output_seq:\n terminal_reward += self.correct_bonus # Bonus for correct answer.\n\n # Only add additional reward for shorter code. Subtracting reward\n # interferes with the main objective. Only optimize for length once\n # any solution is found.\n if self.min_code_length == self.max_code_length:\n terminal_reward += self.code_length_bonus\n else:\n terminal_reward += self.code_length_bonus * clipped_linear(\n x=len(code), x0=self.min_code_length, y0=1.0,\n slope=-self.time_penalty, y_range=(0.0, 1.0))\n\n # reason remains 'correct' if it is already\n elif reason == 'correct':\n reason = 'wrong'\n results.append(result)\n\n # Return list of rewards, one for each char in the code. All are 0 except\n # for the terminal reward.\n terminal_reward /= self.best_reward\n return misc.RewardInfo(\n episode_rewards=[0.0] * (len(code) - 1) + [terminal_reward],\n input_case=misc.IOTuple(i for i, o in io_seqs),\n correct_output=misc.IOTuple(o for i, o in io_seqs),\n code_output=misc.IOTuple(results),\n input_type=self.input_type,\n output_type=self.output_type,\n reason=reason)", "def code_compile_and_run(code = '', gv = {}, lv = {}, return_keys = []):\n code_ = compile(code, \"<string>\", \"exec\")\n exec(code, gv, lv)\n # no keys given, return entire local variables dict\n if len(return_keys) < 1:\n return lv\n # single key given, return just the value of this entry\n elif len(return_keys) == 1:\n if return_keys[0] in lv:\n return lv[return_keys[0]]\n # several keys given, filter local variables dict by these keys and return\n else:\n return dict([(k, lv[k]) for k in return_keys if k in lv])" ]
[ "0.6456923", "0.62488866", "0.6225537", "0.59651935", "0.5602554", "0.55333126", "0.5486385", "0.54698336", "0.5395104", "0.5374207", "0.53630006", "0.5345739", "0.5341821", "0.52655876", "0.52394456", "0.51990825", "0.5174573", "0.5164324", "0.5150377", "0.51400226", "0.5123509", "0.5119771", "0.50982213", "0.50865436", "0.50816864", "0.5078395", "0.5066511", "0.50641996", "0.5061308", "0.5060129" ]
0.837846
0
Split jobs into Nworkers equal groups. When an equal split is not possible, the larger groups occur at the front of the list.
def equal_balance(jobs,Nworkers): #no jobs to do - return empty group list. if not len(jobs): return () Ntotal_jobs = len(jobs) # find the number of jobs each wroker must do # for everyone to have equal work loads group_size = Ntotal_jobs / Nworkers # if there are jobs left over, some of the workers # will need to do 1 extra job. if Ntotal_jobs % Nworkers: group_size = group_size + 1 # after some algebra, we can solve for the # number, a, of workers that need to do extra work a = Ntotal_jobs + Nworkers - Nworkers*group_size if a*group_size < Ntotal_jobs: b = Nworkers - a else: b = 0 # a workers do an extra job, b workers do standard # number of jobs. group_sizes = a*[group_size] + b*[group_size-1] # now split the jobs up into groups for each of # the workers. last = 0 job_groups = [] for size in group_sizes: next = last+size job_groups.append(jobs[last:next]) last = next # sum = 0 # for grp in job_groups: # sum = sum + len(grp) # assert(sum,Ntotal_jobs) return tuple(job_groups)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_n_group_split(self):\n # Test 2 groups like HalfSplitter first\n hs = NGroupPartitioner(2)\n\n for isreversed, splitter in enumerate((hs, hs)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in hs.generate(self.data) ]\n self.assertTrue(len(splits) == 2)\n\n for i, p in enumerate(splits):\n self.assertTrue( len(p) == 2 )\n self.assertTrue( p[0].nsamples == 50 )\n self.assertTrue( p[1].nsamples == 50 )\n\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n\n # check if it works on pure odd and even chunk ids\n moresplits = [ list(spl.generate(p)) for p in hs.generate(splits[0][0])]\n\n for split in moresplits:\n self.assertTrue(split[0] != None)\n self.assertTrue(split[1] != None)\n\n # now test more groups\n s5 = NGroupPartitioner(5)\n\n # get the splits\n for isreversed, s5splitter in enumerate((s5, s5)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in s5splitter.generate(self.data) ]\n\n # must have 10 splits\n self.assertTrue(len(splits) == 5)\n\n # check split content\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [2, 3, 4, 5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [2, 3])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 4, 5, 6, 7, 8, 9])\n # ...\n assert_array_equal(splits[4][1-isreversed].sa['chunks'].unique,\n [8, 9])\n assert_array_equal(splits[4][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4, 5, 6, 7])\n\n\n # Test for too many groups\n def splitcall(spl, dat):\n return list(spl.generate(dat))\n s20 = NGroupPartitioner(20)\n self.assertRaises(ValueError,splitcall,s20,self.data)", "def split_large_groups(ctx):\n asyncio.run(split_large_groups_impl(ctx.obj[\"config\"]))", "def chunker(results, n):\n\n def grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)\n\n m = int(len(results) / n)\n return list(grouper(iterable=results, n=m, fillvalue=None))", "def test_chunk_size_priority_over_n_splits(self):\n with self.subTest(input='list', chunk_size=1, n_splits=6):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=1, n_splits=6, n_jobs=None), 13)\n with self.subTest(input='numpy', chunk_size=1, n_splits=6):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=1, n_splits=6,\n n_jobs=None), 100)\n\n with self.subTest(input='list', chunk_size=3, n_splits=3):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=3, n_splits=3, n_jobs=None), 5)\n with self.subTest(input='numpy', chunk_size=3, n_splits=3):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=3, n_splits=3,\n n_jobs=None), 34)", "def test_chunk_size_has_priority_over_n_splits(self):\n chunks = list(chunk_tasks(range(4), chunk_size=4, n_splits=4))\n self.assertEqual(len(chunks), 1)\n self.assertEqual(len(chunks[0]), 4)\n self.assertEqual(list(range(4)), list(chain.from_iterable(chunks)))", 
"def split(a, N):\n\n integ = int(len(a) / N)\n remain = int(len(a) % N)\n\n splitted = [a[i * integ + min(i, remain):(i + 1) * integ +\n min(i + 1, remain)] for i in range(N)]\n\n return splitted", "def getMyChunkSize(numJobs, numWorkers, chunkSize, rank):\n assert(numJobs >= numWorkers)\n allJobs = np.arange(numJobs)\n startInd = (np.arange(numWorkers)) * chunkSize\n endInd = (np.arange(numWorkers) + 1) * chunkSize\n endInd[-1] = numJobs\n myJobs = allJobs[startInd[rank]:endInd[rank]]\n return myJobs", "def split_into_chunks(x, n):\n csize = int(np.ceil(len(x) / n))\n out = list()\n \n i = 0\n while i * csize < len(x):\n out.append(x[(i * csize):(i * csize + csize)])\n i += 1\n\n return out", "def grouper(iterable, n):\n it = iter(iterable)\n while True:\n chunk = tuple(islice(it, n))\n if not chunk:\n return\n yield chunk", "def test_n_splits(self):\n for n_splits, n_jobs in product([1, 6], [None, 2, 8]):\n with self.subTest(input='list', n_splits=n_splits, n_jobs=n_jobs):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=None, n_splits=n_splits,\n n_jobs=n_jobs), n_splits)\n\n with self.subTest(input='numpy', n_splits=n_splits, n_jobs=n_jobs):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=None,\n n_splits=n_splits, n_jobs=n_jobs), n_splits)", "def chunks_threads(li, n):\n\tindex = int(len(li) / n + 0.5)\n\tfor i in range(n-1):\n\t\tyield li[i*index:i*index + index]\n\tyield li[n*index - index:]", "def grouper(n, iterable):\n\tit = iter(iterable)\n\twhile True:\n\t\tchunk = tuple(itertools.islice(it, n))\n\t\tif not chunk:\n\t\t\treturn\n\t\tyield chunk", "def grouper(n, iterable):\n it = iter(iterable)\n while True:\n chunk = tuple(itertools.islice(it, n))\n if not chunk:\n return\n yield chunk", "def grouped(iterable, n):\n batch_window = [None for _ in range(n)]\n cur_size = 0\n for item in iterable:\n batch_window[cur_size] = item\n cur_size += 1\n if cur_size >= n:\n batched = batch_window[:]\n batch_window = [None for _ in range(n)]\n cur_size = 0\n yield batched", "def getMyChunkSize(numJobs, numWorkers, chunkSize, rank):\n print \"numJobs, numWorkers: \", numJobs, numWorkers, chunkSize\n assert(numJobs >= numWorkers)\n allJobs = np.arange(numJobs)\n startInd = (np.arange(numWorkers)) * chunkSize\n endInd = (np.arange(numWorkers) + 1) * chunkSize\n endInd[-1] = numJobs\n myJobs = allJobs[startInd[rank]:endInd[rank]]\n return myJobs", "def naive_grouper(inputs, n):\n num_groups = len(inputs) // n\n return [tuple(inputs[i*n:(i+1)*n]) for i in range(num_groups)]", "def split(a, n):\n k, m = divmod(len(a), n)\n ret = [a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n)]\n return ret", "def split_groups(self, num_of_groups=None, verbose=False):\n if num_of_groups is None:\n num_of_groups = self.number_of_groups\n self.training_file.split_groups(num_of_groups=num_of_groups,\n verbose=verbose)", "def divide_chunks(a_list, n):\n return [a_list[i:i + n] for i in range(0, len(a_list), n)]", "def test_n_jobs(self):\n for n_jobs in [1, 6]:\n with self.subTest(input='list', n_jobs=n_jobs):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=None, n_splits=None,\n n_jobs=n_jobs), min(4 * n_jobs, len(self.test_data)))\n\n with self.subTest(input='numpy', n_jobs=n_jobs):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=None, n_splits=None,\n n_jobs=n_jobs), min(4 * n_jobs, len(self.test_data_numpy)))", "def getMyUnfairShare(numJobs,numWorkers,rank):\n if numJobs >= 
numWorkers:\n allJobs = np.arange(numJobs)\n jobChunks = np.array_split(allJobs,numWorkers)\n myChunk = jobChunks[rank]\n myJobs = allJobs[myChunk[0]:myChunk[-1]+1]\n else:\n if rank == 0:\n myJobs = np.arange(numJobs)\n else:\n myJobs = []\n return myJobs", "def get_n_splits(self, X=None, y=None, groups=None):\n\n return 2", "def split_to_batches(iterable, n=1):\n l = len(iterable)\n for ndx in range(0, l, n):\n yield iterable[ndx:min(ndx + n, l)]", "def greedy_split(arr, n, axis=0):\n length = arr.shape[axis]\n # compute the size of each of the first n-1 blocks\n block_size = int(np.ceil(length / float(n)))\n # the indices at which the splits will occur\n ix = np.arange(block_size, length, block_size)\n return np.array(np.split(arr, ix, axis))", "def split_chunk(chunk, sizes, max_iter=1000, rng=None):\n assert len(chunk) == sum(sizes), f\"{len(chunk)} != {sum(sizes)}\"\n if not isinstance(rng, random.Random):\n rng = random\n # Precompute neighbors for each cube in the chunk\n neighbors = dict()\n for c in chunk:\n neighbors[c] = set(c.neighbors()) & set(chunk)\n for i in range(max_iter):\n result = split_chunk_iter(chunk, sizes, neighbors, rng)\n if result != None:\n return result\n raise SplitChunkMaxIterationExceeded(\"Ran out of iterations trying to split chunk\")", "async def split_large_groups_impl(config):\n async with create_sessionmaker(config)() as dbsession:\n progress = ClickIndeterminate(\"Splitting large groups\")\n progress.start()\n splitting = True\n stmt = select(Group).options(selectinload(Group.items), selectinload(Group.children))\n while splitting:\n splitting = False\n result = await dbsession.execute(stmt)\n for group in result.scalars():\n if len(group.children) == 0:\n if len(group.items) > 120 and len(group.items) < 300: # noqa: PLR2004\n if split_by_year(config, dbsession, group):\n splitting = True\n else:\n split_by_similarity(dbsession, group)\n splitting = True\n elif len(group.items) >= 300: # noqa: PLR2004\n if split_by_attribute(dbsession, group, \"concepts\"):\n splitting = True\n elif split_by_attribute(dbsession, group, \"subjects\"):\n splitting = True\n elif split_by_attribute(dbsession, group, \"materials\"):\n splitting = True\n elif split_by_attribute(dbsession, group, \"techniques\"):\n splitting = True\n elif split_by_year(config, dbsession, group):\n splitting = True\n else:\n split_by_similarity(dbsession, group)\n splitting = True\n await dbsession.commit()\n progress.stop()", "def make_chunks(l, n):\n return [l[i:i+n] for i in range(0, len(l), n)]", "def get_chunks(lst, n):\n size = int(len(lst) / n)\n output_list = []\n for i in range(0, n):\n sub_list = lst[i*size:i*size + size]\n output_list.append(sub_list)\n if len(lst) % n != 0:\n for i in range((n-1)*size+1, len(lst)):\n output_list[-1].append(lst[i])\n return output_list", "def partition(data, n):\n splits = []\n remaining = data.copy(deep=True)\n for i in range(n):\n split = remaining.sample(frac=1/(n-i), random_state=10)\n splits.append(split)\n remaining = remaining.drop(split.index)\n return splits", "def split(a, n):\n n = min(n, len(a))\n k, m = divmod(len(a), n)\n return [a[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] for i in range(n)]" ]
[ "0.7199404", "0.6824129", "0.6798619", "0.67813647", "0.65168804", "0.63456434", "0.62896436", "0.6288251", "0.6274654", "0.6268003", "0.6224157", "0.62179065", "0.62064743", "0.6132379", "0.6130663", "0.61159384", "0.61095643", "0.6088751", "0.60833097", "0.60803914", "0.60730547", "0.60712105", "0.6059543", "0.60574526", "0.6052149", "0.5996415", "0.598823", "0.5980147", "0.5955996", "0.5946225" ]
0.6846773
1
Used if val is an instance of numpy.float32.
def ts_float32(val): return np.float64(val)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_float(self, val):\n try:\n float(val)\n return True\n except ValueError:\n return False", "def is_float(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_float)", "def floatval(space, w_obj):\n return space.newfloat(w_obj.float_w(space))", "def isFloat(val):\n\tvalFloat = True\n\ttry:\n\t\ttVal = float(val)\n\texcept ValueError:\n\t\tvalFloat = False\n\ttVal = None\n\tr = (valFloat, tVal)\n\treturn r", "def could_be_float(val):\n if val == None:\n return False\n\n if isinstance(val, float):\n return True\n\n # allow coercion from str\n if isinstance(val, (str, unicode)):\n try:\n f = float(val)\n if not isinstance(f, float):\n raise ValueError\n else:\n return True\n except:\n return False\n\n #otherwise\n return False", "def is_float(value):\n if isinstance(value, float):\n return True\n\n if isinstance(value, np.ndarray):\n return value.dtype == np.float64\n\n return False", "def is_float(val):\n try:\n float(val)\n return True\n except ValueError:\n return False", "def is_floatscalar(x: Any) -> bool:\r\n return isinstance(x, (\r\n float,\r\n np.float16,\r\n np.float32,\r\n np.float64,\r\n ))", "def check_for_float(check):", "def isFloat(data):\n\tif type(data) == list or type(data) == np.ndarray:\n\t\tcol = pd.Series(data)\n\telse:\n\t\tcol = data\n\treturn col.dtype == np.float32 or col.dtype == np.float64", "def _floatOrCall(val):\n try:\n return float(val)\n except TypeError:\n pass\n try:\n return float(val())\n except TypeError:\n pass\n try:\n return val.value\n except AttributeError:\n # likely a complex\n return val", "def test_float32(self):\r\n start, stop, step = fscalars('start', 'stop', 'step')\r\n out = arange(start, stop, step)\r\n f = function([start, stop, step], out)\r\n\r\n if config.cast_policy == 'custom':\r\n assert out.dtype == start.type.dtype\r\n elif config.cast_policy == 'numpy':\r\n numpy_dtype = numpy.arange(numpy.array(0, dtype=start.dtype),\r\n numpy.array(1, dtype=stop.dtype),\r\n numpy.array(1, dtype=step.dtype)).dtype\r\n assert out.dtype == numpy_dtype\r\n elif config.cast_policy == 'numpy+floatX':\r\n assert out.dtype == config.floatX\r\n else:\r\n raise NotImplementedError(config.cast_policy)\r\n arg_vals = [(0, 5, 1), (2, 11, 4), (-5, 1.1, 1.2), (1.3, 2,\r\n -2.1), (10, 2, 2)]\r\n for arg_v in arg_vals:\r\n start_v, stop_v, step_v = arg_v\r\n start_v_, stop_v_, step_v_ = numpy.asarray(arg_v,\r\n dtype=start.type.dtype)\r\n f_val = f(start_v_, stop_v_, step_v_)\r\n if config.cast_policy == 'custom':\r\n expected_val = numpy.arange(start_v, stop_v, step_v,\r\n dtype=start.type.dtype)\r\n elif config.cast_policy in ('numpy', 'numpy+floatX'):\r\n expected_val = numpy.arange(start_v_, stop_v_, step_v_,\r\n dtype=out.dtype)\r\n else:\r\n raise NotImplementedError(config.cast_policy)\r\n assert numpy.all(f_val == expected_val)", "def test_float_log(self):\n htype = h5t.py_create('f', logical=True)\n self.assertIsInstance(htype, h5t.TypeFloatID)", "def _validate_scalar(obj):\n if not isinstance(obj, float):\n raise TypeError(\"scalar must be real\")", "def is_float(self, size=None):\n return False", "def test_float(self):\n htype = h5t.py_create('f')\n self.assertIsInstance(htype, h5t.TypeFloatID)", "def _restricted_float(val: float):\n try:\n val = float(val)\n except ValueError:\n raise argparse.ArgumentTypeError(f\"{val} not a floating-point literal\")\n\n if 0.0 < val > 1.0:\n raise argparse.ArgumentTypeError(f\"{val} not in range [0.0, 1.0]\")\n return val", "def Float(val):\n try:\n return float(val)\n except ValueError:\n return ''", 
"def to_float(val):\n if val is None:\n return None\n if str(val) == \"null\":\n return None\n return float(val)", "def get_real(val, precision):\n if precision == \"single\":\n return numpy.float32(val)\n elif precision == \"double\":\n return numpy.float64(val)\n else:\n raise ValueError (\"precision %s not supported!\"%(precision))", "def is_float_like(val):\n try:\n return str(float(val)) == str(val)\n except Exception:\n return False", "def is_number(self,val):\n try:\n float(val)\n return True\n except ValueError:\n return False", "def isfloatarray(cell):\n try:\n cell.astype(float)\n return True\n except ValueError:\n return False", "def _assert_float32(tensors):\n if not isinstance(tensors, dict):\n tensors = [tensors]\n else:\n tensors = tensors.values()\n for tensor in tensors:\n if tensor.dtype.base_dtype != dtypes.float32:\n raise TypeError('Expected dtype=float32, %s.' % tensor)", "def data_convert2float32 (self, data):\r\n data = data.astype(np.float32)\r\n\r\n return data", "def read_float32(self):\n return self.read(BitTypes.FLOAT_LE_32.value)", "def to_float32(elem):\n return elem.astype(np.float32)", "def _is_real_like(input):\n if type(input) is float:\n return True\n if isinstance(input, _ScalarConstant):\n if input.dtype in _float_types:\n return True\n return False", "def test_float(self, env: yaenv.Env):\n _val = env.float('FLOAT_VAR')\n assert _val == 10.0 and type(_val) == float\n _val = env.float('MISSING', -3.1)\n assert _val == -3.1 and type(_val) == float\n with pytest.raises(yaenv.EnvError) as err:\n _ = env.float('LIST_VAR')\n assert 'Invalid numerical' in str(err.value)\n assert env.float('MISSING') is None", "def could_be_number(val):\n if val == None:\n return False\n\n if isinstance(val, (float, int, long)):\n return True\n\n # allow coercion from str\n if isinstance(val, (str, unicode)):\n try:\n n = float(val)\n if not isinstance(n, float):\n raise ValueError\n else:\n return True\n except:\n return False\n\n #otherwise\n return False" ]
[ "0.6748577", "0.64464486", "0.6323974", "0.6297695", "0.62333214", "0.61988634", "0.6185714", "0.617719", "0.6106048", "0.6100606", "0.6089156", "0.6089081", "0.605124", "0.6049143", "0.59900147", "0.5979166", "0.58923584", "0.5887647", "0.5885443", "0.586517", "0.5850768", "0.58373535", "0.58262914", "0.58079755", "0.58030736", "0.57926375", "0.5792075", "0.57764685", "0.5775463", "0.5709201" ]
0.71901006
0
Set the site to the current site when the record is first created, or the ``update_site`` argument is explicitly set to ``True``.
def save(self, update_site=False, *args, **kwargs): if update_site or not self.id: self.site_id = current_site_id() super(SiteRelated, self).save(*args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_site(self, site):\n assert self.default_model is not None\n self.default_model.add_site(site)", "def add_site_to_context(self):\n g.current_site = self", "def save_site(self, site):\n raise NotImplementedError('save_site')", "def _update_site_configuration(self):\n self.site.configuration.site_values = {'THIRD_PARTY_AUTH_ONLY_DOMAIN': self.email_domain_name}\n self.site.configuration.save()", "def set_site_id(self):\n self.site_id = entities.sites['next id']\n entities.sites['object list'].append(self)\n entities.sites['next id'] += 1", "def site_id(self, site_id):\n\n self._site_id = site_id", "def site_id(self, site_id):\n\n self._site_id = site_id", "def update_instance_url(setting):\n site_obj = Site.objects.all().order_by('id').first()\n site_obj.domain = setting.value\n site_obj.save()", "def add_site(self, site):\n assert isinstance(site, Site)\n self.site_list.append(site)\n site.model = self", "def put(cls, site):\n args = Site.parser.parse_args()\n _user_id = get_jwt_identity()\n\n # check if new site exists\n result = SiteModel.find_by_fullLink(args['site'])\n if result is not None:\n return {\"message\": \"Site already exists\"}, 409\n\n if not args['site']:\n return {\"message\": \"Cannot add an empty site\"}, 409\n\n item = SiteModel.find_by_fullLink(site)\n\n # edit existing item or enter new one\n if item is None:\n item = SiteModel(site, create_shortcut(), _user_id)\n else:\n item.full_link = args['site']\n\n item.save_to_db()\n return {\"message\": \"Item edited\"}, 200", "def load_site_if_needed(self):\n self.site.reload_if_needed()", "def add_site(self,site):\n if site.label==None:\n site.label = site_label_dir[str(self.Nsites)]\n if site.color==None:\n site.color = site_color_dir[str(self.Nsites)]\n self.sites.append(site)\n self.Nsites +=1", "def binding_site(self, binding_site):\n self._binding_site = binding_site", "def site(self):\n if not self.__site:\n self.__site = Site(self)\n return self.__site", "def site_name(self, site_name):\n\n self._site_name = site_name", "def save(self, *args, **kwargs):\n\n if self.id:\n firstcreation = False\n else:\n firstcreation = True\n\n #common save functionality for all models\n self._save_base()\n self.save_default(firstcreation)\n super(ComicSiteModel,self).save()", "def _addSite(self,site):\n self.sites.append(site)", "def wagtail_site():\n return Site.objects.get(is_default_site=True)", "def set_site_default_page(site):\n site.setDefaultPage('front-page')\n logger.info(u'Visão padrão do site estabelecida')", "def my_site(self):\n if \"mySite\" in self._prop_dict:\n return self._prop_dict[\"mySite\"]\n else:\n return None", "def setup_local_site(self):\n raise NotImplementedError", "def init(site, sites_path=None, new_site=False, force=False):\n\tlocal.touch = True\n\tif getattr(local, \"initialised\", None) and not force:\n\t\treturn\n\n\tif not sites_path:\n\t\tsites_path = '.'\n\n\tlocal.error_log = []\n\tlocal.message_log = []\n\tlocal.debug_log = []\n\tlocal.realtime_log = []\n\tlocal.flags = _dict({\n\t\t\"ran_schedulers\": [],\n\t\t\"currently_saving\": [],\n\t\t\"redirect_location\": \"\",\n\t\t\"in_install_db\": False,\n\t\t\"in_install_app\": False,\n\t\t\"in_import\": False,\n\t\t\"in_test\": False,\n\t\t\"mute_messages\": False,\n\t\t\"ignore_links\": False,\n\t\t\"mute_emails\": False,\n\t\t\"has_dataurl\": False,\n\t\t\"new_site\": new_site,\n\t\t\"log_identity\": \"\",\n\t})\n\tlocal.rollback_observers = []\n\tlocal.test_objects = {}\n\n\tlocal.site = site\n\tlocal.sites_path = 
sites_path\n\tlocal.site_path = os.path.join(sites_path, site)\n\n\tlocal.request_ip = None\n\tlocal.response = _dict({\"docs\":[]})\n\tlocal.task_id = None\n\n\tlocal.conf = get_site_config(force=force)\n\tlocal.lang = local.conf.lang or \"en\"\n\tlocal.lang_full_dict = None\n\n\tlocal.module_app = None\n\tlocal.app_modules = None\n\tlocal.system_settings = _dict()\n\n\tlocal.user = None\n\tlocal.user_perms = None\n\tlocal.session = None\n\tlocal.role_permissions = {}\n\tlocal.valid_columns = {}\n\tlocal.new_doc_templates = {}\n\tlocal.link_count = {}\n\n\tlocal.jenv = None\n\tlocal.jloader = None\n\tlocal.cache = {}\n\tlocal.latte_cache = {}\n\tlocal.document_cache = {}\n\tlocal.meta_cache = {}\n\tlocal.form_dict = _dict()\n\tlocal.session = _dict()\n\tlocal.sql_time = 0\n\tlocal.greenlet_start = perf_counter()\n\tlocal.greenlet_time = 0\n\tlocal.sql_logging_time = 0\n\tlocal.cache_access_time = 0\n\tlocal.cache_balancer_time = 0\n\tlocal.sql_selects = 0\n\tlocal.sql_updates = 0\n\tlocal.sql_deletes = 0\n\tlocal.sql_inserts = 0\n\tlocal.read_only_db_logs = []\n\tlocal.append_statement_to_query = ''\n\tlocal.sql_running = False\n\n\tsetup_module_map()\n\tlocal.initialised = True", "def setup_remote_site(self):\n raise NotImplementedError", "def persist_if_needed(self):\n if not self.id:\n super(ComicSiteModel,self).save()", "def _set_current_microsite(microsite_config_key, subdomain, domain):\r\n config = settings.MICROSITE_CONFIGURATION[microsite_config_key].copy()\r\n config['subdomain'] = subdomain\r\n config['site_domain'] = domain\r\n CURRENT_REQUEST_CONFIGURATION.data = config", "def post(self, request):\n site = models.SiteSettings.objects.get()\n form = forms.SiteForm(request.POST, request.FILES, instance=site)\n if not form.is_valid():\n data = {\"site_form\": form}\n return TemplateResponse(request, \"settings/site.html\", data)\n site = form.save(request)\n\n data = {\"site_form\": forms.SiteForm(instance=site), \"success\": True}\n return TemplateResponse(request, \"settings/site.html\", data)", "def site(self, obj):\n if obj.node.site:\n return obj.node.site\n return None", "def ftp_site(self, ftp_site: str):\n self._ftp_site = ftp_site", "def addSiteHandler(site, event):\n make_objectmanager_site(site)", "def assign_sites(self, action, site, paired_site=None):\n self._current_site += 1\n if paired_site:\n base_current, base_paired = self.action_to_pair[action]\n self._primary_list[site] = base_current\n self._primary_list[paired_site] = base_paired\n else:\n self._primary_list[site] = self.action_to_base[action]" ]
[ "0.7196496", "0.6944524", "0.6681965", "0.64669836", "0.6459213", "0.641109", "0.641109", "0.61772376", "0.61531913", "0.6105513", "0.6099537", "0.6084168", "0.6019811", "0.5998988", "0.59902525", "0.59790754", "0.594837", "0.59265506", "0.586496", "0.5850791", "0.5836316", "0.578422", "0.5670342", "0.5643416", "0.5611327", "0.56100154", "0.5583543", "0.55649376", "0.55621177", "0.5552993" ]
0.74763525
0
Initializes the node for the given word and creates an empty array for the children nodes.
def __init__(self, word): TreeNode.__init__(self) self.word = word self.children = []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self.isEnd = False # whether this node is an end of a word\n self.children = dict() # map a char to the child node", "def __init__(self):\n self.children = {}\n self.is_a_word = (False,None)\n self.index = []", "def __init__(self):\n self.children = dict()\n self.isWord = False", "def addWord(self, word):\n node = self.root\n for i in range(len(word)):\n if word[i] in node.children:\n node = node.children[word[i]]\n else:\n break\n\n for j in range(i, len(word)):\n node.children[word[j]] = Node()\n node = node.children[word[j]]\n node.val = word", "def addWord(self, word: str) -> None:\n trav = self.root\n \n for c in word:\n if c not in trav.childs:\n trav.childs[c] = self.Node()\n trav = trav.childs[c]\n \n trav.rec += 1", "def __init__(self):\n self.root = WordNode()", "def addWord(self, word):\n node = self.root\n for item in word:\n if item in node.childrenValue:\n node = node.children[node.childrenValue[item]]\n else:\n newNode = TreeNode(item)\n node.childrenValue[item] = len(node.children)\n node.children.append(newNode)\n node = newNode\n if \"\" not in node.childrenValue:\n newNode = TreeNode(\"\") # 用来表示一个string的结束\n node.childrenValue[\"\"] = len(node.children)\n node.children.append(newNode)", "def insert(self, word: str) -> None:\n node = self.root\n for char in word:\n if char not in node.child:\n #append the children\n node.child[char] = Node(char)\n #descend node to node.child that has the previous char, simlar to node = node.left\n node = node.child.get(char)\n node.isWord = True", "def __init__(self) -> None:\n self.children: Dict[str, Trie] = {} # mapping from character to Node\n self.end_of_word: bool = False", "def addWord(self, word):\n p = self.root\n for c in word:\n ind = ord(c) - ord('a')\n if (p.children[ind] == None):\n p.children[ind] = Node(c)\n p = p.children[ind]\n \n p.value = len(word)", "def __init__(self):\n self.children = {}\n self.val = 0\n self.is_word = False", "def add(self, word):\n current_node = self.root\n\n for char in word:\n if char not in current_node.children: # checks if that char does not already exists in the children Trie\n current_node.children[char] = TrieNode() # if it doesnt add it to the children dict\n\n current_node = current_node.children[char] # else loop through and go in the node\n\n current_node.is_word = True # complete node by making is_word TRUE", "def addWord(self, word: str) -> None:\n node = self.root\n \n for c in word:\n node = node.children[c]\n \n node.word = True", "def insert(self, word: str) -> None:\n node = self.root\n for w in word:\n child = node.children.get(w)\n if not child:\n node.children[w] = TreeNode(w)\n node = node.children[w]\n node.end = True", "def addWord(self, word):\n node=self.root\n for c in word:\n if c not in node.children: node.children[c]=TrieNode()\n node=node.children[c]\n node.isWord=True", "def __init__(self, key, word=None, parent=None, data=None):\n self.__key = key\n self.__word = word\n self.__parent = parent\n self.__children = []\n self.__found_words = 0\n self.__data = data", "def addWord(self, word: str) -> None:\n tmp = self.root\n for i, letter in enumerate(word):\n if letter not in tmp.seq:\n tmp.seq[letter] = Node()\n \n tmp = tmp.seq[letter]\n \n tmp.value = word", "def __init__(self):\n self.children = [None for i in range(ALPHABET_SIZE)]\n self.is_end_of_word = False", "def addWord(self, word):\n node = self.root\n for ch in word:\n node = node.children[ch]\n node.isWord = True", "def add_word(root, input_word):\n\n cNode = root\n\n for char 
in list(input_word):\n does_exist_in_cNode_children = False\n\n for node in cNode.nodes:\n if node.char == char:\n does_exist_in_cNode_children = True\n cNode.num_words_at_letter += 1\n cNode = node\n\n if not does_exist_in_cNode_children:\n new_node = Node(char)\n cNode.nodes.append(new_node)\n cNode.num_words_at_letter += 1\n cNode = new_node\n\n #cNode.num_words_at_letter += 1\n cNode.is_word = True", "def add(self, word: str):\n node = self\n for char in word:\n found_in_child = False\n # Search for the character in the children of the present `node`\n for child in node.children:\n if child.char == char:\n # We found it, increase the hit_counter by 1 to keep track that another\n # word has it as well\n child.hit_counter += 1\n # And point the node to the child that contains this char\n node = child\n found_in_child = True\n break\n # We did not find it so add a new child\n if not found_in_child:\n new_node = TrieNode(char)\n node.children.append(new_node)\n # And then point node to the new child\n node = new_node\n # Everything finished. Mark it as the end of a word.\n node.word_finished = True", "def insert(self, word: str) -> None:\n node = self.root\n for c in word:\n if node.children.get(c) is None:\n node.children[c] = TrieNode()\n node = node.children[c]\n\n node.is_word = True", "def insert(self, word: str) -> None:\n current = self.root\n for i, letter in enumerate(word): \n if current.children.get(letter):\n current = current.children.get(letter)\n else:\n current.children[letter] = Node(letter)\n current = current.children[letter]\n if i == len(word) - 1:\n current.is_word = True", "def insert(self, word: str) -> None:\n for letter in word:\n if letter not in self.children:\n trie = Trie()\n self.children[letter] = trie\n self = self.children[letter]\n self.end_of_word = True", "def insert(self, word):\n node = self.root\n for c in word:\n if c in node.children:\n node = node.children[c]\n else:\n new_node = self.Node(c)\n node.children[c] = new_node\n node = new_node\n node.word_end = True\n return", "def addWord(self, word):\n ptr = self.root\n for k in word:\n if k in ptr.child:\n ptr = ptr.child[k]\n else:\n ptr.child[k] = TrieNode()\n ptr = ptr.child[k]\n ptr.isEnd = True", "def addWord(self, word: str) -> None:\n root = self.root\n for w in word :\n if not w in root.children :\n root.children[w] = TrieNode()\n root = root.children[w]\n \n root.isEnd = True", "def addWord(self, word):\n node = self.root\n \n for char in word:\n if char not in node.children:\n node.children[char] = TrieNode()\n node = node.children[char]\n \n node.is_word = True", "def addWord(self, word):\n cur = self.root\n\n for c in word:\n if c not in cur.children:\n cur.children[c] = TrieNode()\n \n cur = cur.children[c]\n\n cur.isWord = True", "def addWord(self, word: str) -> None:\n current = self.root\n for letter in word:\n if letter not in current.children:\n current.children[letter] = TrieNode(letter)\n\n current = current.children[letter]\n\n current.is_word = True" ]
[ "0.7513591", "0.71971166", "0.7183556", "0.70607185", "0.7028913", "0.6879527", "0.68564075", "0.6843299", "0.6797169", "0.67595696", "0.67295927", "0.6657748", "0.66432583", "0.66254884", "0.65849876", "0.65723974", "0.6533535", "0.6515169", "0.64951223", "0.6384697", "0.63581336", "0.6330441", "0.6322626", "0.63136667", "0.63017595", "0.62977797", "0.62972736", "0.62850434", "0.62679434", "0.6242983" ]
0.7939854
0
Returns a string representation of the node (indented by n spaces on the left).
def get_string(self, n): pad = self.get_pad(n) string = pad + self.word string += "\n" + self.children[0].get_string(n + 1) string += "\n" + self.children[1].get_string(n + 1) return string
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __repr__(self):\n lines = []\n nodes = [(self.root, 0)]\n while nodes:\n node, indent = nodes.pop()\n name = str(node) if node else 'None'\n lines.append(' ' * indent + name)\n if node:\n nodes.append((node.child[True], indent + 1))\n nodes.append((node.child[False], indent + 1))\n\n return os.linesep.join(lines)", "def __str__(self):\n\t\tself._synchronize_attributes()\n\t\ts = \"\"\n\t\tqueue = c3.Queue()\n\t\tlevel = 0\n\t\tqueue.enqueue((1, self._root))\n\t\twhile queue.peek():\n\t\t\tnodelev, node = queue.dequeue()._data\n\t\t\tif (not node):\n\n\t\t\t\t#NODE IS NOT THERE - just a placeholder\n\t\t\t\t#print spacing and enqueue fake left and right children\n\t\t\t\t#but stops if they would be past the max depth of the tree\n\t\t\t\tif ((self._depth - nodelev + 1) <= 0):\n\t\t\t\t\tcontinue\n\n\t\t\t\tif (nodelev != level):\n\t\t\t\t\ts += \"\\n\"\n\t\t\t\t\t#PRINT THE INDENT\n\t\t\t\t\tindent = \" \"*int((self._max_chars)*(2**(self._depth - nodelev) - 1))\n\t\t\t\t\ts += indent\n\t\t\t\t\tlevel = nodelev\n\n\t\t\t\t#PRINT THE SPACING\n\t\t\t\ts += \" \"*(self._max_chars)*(2**(self._depth - nodelev + 1) - 1)\n\n\t\t\t\t#PRINT SPACES TO REPLACE DATA\n\t\t\t\ts += \" \"*self._max_chars\n\n\t\t\t\t#Enqueue fake children\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\t\t\tcontinue\n\n\t\t\tif (nodelev != level):\n\t\t\t\ts += \"\\n\"\n\t\t\t\t#PRINT THE INDENT\n\t\t\t\tindent = \" \"*(self._max_chars)*(2**(self._depth - nodelev) - 1)\n\t\t\t\ts += indent\n\t\t\t\tlevel = nodelev\n\n\t\t\t#adds preceding \"|\"s if the str length of the data is smaller than the max\n\t\t\tfor i in range(int(self._max_chars - len(str(node.value())))):\n\t\t\t\ts += \"|\"\n\t\t\ts += str(node.value()) \n\n\t\t\t#PRINT THE SPACING\n\t\t\tspacing = \" \"*(self._max_chars)*(2**(self._depth - nodelev + 1) - 1)\n\t\t\ts += spacing\n\n\t\t\t#Enqueues\n\t\t\tif node.lchild():\n\t\t\t\tqueue.enqueue((nodelev + 1, node.lchild()))\n\t\t\telse:\n\t\t\t\t#ENQUEUES A FAKE NODE IN ORDER TO FORMAT THE TREE FOR MISSING NODES\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\t\tif node.rchild():\n\t\t\t\tqueue.enqueue((nodelev + 1, node.rchild()))\n\t\t\telse:\n\t\t\t\t#ENQUEUES A FAKE NODE IN ORDER TO FORMAT THE TREE FOR MISSING NODES\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\ts += \"\\n\"\n\t\treturn s", "def node_s(self, lvl=0):\n s = \"\"\n for n in self.kids:\n s += \" \" * (lvl + 1) + n.node_s(lvl + 1) + \"\\n\\n\"\n return s", "def __str__(self):\n reprStr = ''\n currNode = self.head\n while currNode:\n reprStr = reprStr + str(currNode.count) + ' ' + str(currNode.data) + '\\n'\n currNode = currNode.next\n return reprStr", "def node_to_string(self, node, tab_count=0, is_add_children=False):\n tabs_str = ''\n for i in range(0, tab_count):\n tabs_str += '\\t'\n\n node_str = tabs_str + str(node.value) + ': ' + str(node.count)\n\n children_str = ''\n if is_add_children:\n for child_node in node.children:\n children_str += '\\n\\t' + tabs_str + self.node_to_string(child_node, tab_count+1, True)\n\n return node_str + children_str", "def __str__(self):\n return \"NODE: \" + str(self.num_childs) + \" \" + str(self.num_metadata)", "def __repr__(self):\n return '\\n~Node (' + str(self._val) + ') has ' + str(len(self._children)) + ' children: ' + str(sorted([val for val in self._children])) + '~'", "def __str__(self):\n current = self.root\n nodes = [self.root]\n final = str(self.root) + \"\\n\"\n count = 0\n while len(nodes) != 0:\n count += 1\n if count == 10:\n return 
\"\"\n temp = []\n for node in nodes:\n if node.left != None:\n temp.append(node.left)\n final += str(node.left) + \" \"\n else:\n final += \"_ \"\n if node.right != None:\n temp.append(node.right)\n final += str(node.right) + \" \"\n else:\n final += \"_ \"\n if temp == []:\n if node == nodes[len(nodes) - 1]:\n break\n final += \"\\n\"\n nodes = temp\n self.in_order_traversal()\n for item in self.traverse:\n final += str(item.key) + \" \"\n final += \"\\n\"\n return final", "def __str__(self):\n string = ''\n\n # gets the nodes at each level and puts the values into a string\n for i in range(self.get_height()+1):\n nodes = self.get_nodes_on_level(i)\n level = [str(node.value) if node else '-' for node in nodes]\n string += '{}\\n'.format(' '.join(level))\n\n return string", "def as_str(node):\n node_string = ' '.join(k for k, _ in node.leaves())\n return u' '.join(node_string.split())", "def __str__(self):\n def recurse(node, level):\n s = \"\"\n if type(node) == LeafNode:\n return (\"| \" * level) + str(node) + \"\\n\"\n if node != None:\n s += recurse(node.rightOperand, level + 1)\n s += \"| \" * level\n s += str(node.operator) + \"\\n\"\n s += recurse(node.leftOperand, level + 1)\n return s\n return recurse(self, 0)", "def __str__(self):\n s = \"--\\n\"\n for node in self:\n s += node.__str__() + \"\\n\"\n return s + \"--\"", "def node_repr(node):\n\n result = History.name(node)\n if History.children(node):\n result += ':[' + ', '.join(map(History.node_repr, History.children(node))) + ']'\n return result", "def tree_string(self, indent=0): # pragma: no cover\r\n return \"\"", "def format_node(self, node):\n if node is None:\n return \"None\"\n\n if isinstance(node, list):\n return \"; \".join(self.format_node(elem) for elem in node)\n\n s = RE_SPACE.sub(' ', astor.to_source(node)).strip()\n if len(s) > self.NODE_MAX_LENGTH - len(\"...\"):\n s = s[:self.NODE_MAX_LENGTH] + \"...\"\n return repr(s)", "def __str__(self):\n s = ''\n for node in self.nodes:\n s += '\\n\\n'+str(node)+'\\n\\t'\n edges = node.getChildren()\n keys = edges.keys()\n keys.sort()\n for key in keys:\n bounds = edges[key].getSuffix()\n s += str(edges[key])+' '\n for i in xrange(bounds[0], bounds[1]):\n s += self.target[i]\n s += '\\n\\t'\n return s", "def get_node_tree_print_string(node: Node) -> str:\n node_io = io.StringIO()\n pre_order_travel(node, PrintVisitor(\n node_io,\n show_trigger=True,\n show_event=True,\n show_limit=True,\n show_meter=True,\n show_repeat=True,\n show_parameter=True,\n ))\n node_text = node_io.getvalue()\n return node_text", "def __str__(self):\n\n pg_str = pformat(self.content)\n repr_str = \"Node ID: {} \\nNode Name: {} \\n{}\".format(self.id, self.name, pg_str)\n\n return repr_str", "def __str__(self):\n string = \"\"\n cur_node = self.head\n while cur_node is not None:\n string += cur_node.data.__str__()\n cur_node = cur_node.next\n return string", "def print_node(self):\n print('{:15}{:3}'.format(self.data, self.count))", "def __str__(self):\n s = \"\"\n current = self.__head\n while current:\n s += str(current.data) + \"\\n\"\n current = current.next_node\n return s[:-1]", "def __str__(self):\n string = \"\"\n cur = self.__head\n while cur is not None:\n string += str(cur.data)\n cur = cur.next_node\n if cur is not None:\n string += \"\\n\"\n return string", "def __str__(self):\n _str = \"\"\n current_node = self._head\n while(current_node != None):\n _str += str(current_node.value)\n _str += \" -> \"\n current_node = current_node.next\n _str += \"None\"\n return _str", "def 
__str__(self):\n return '<Node%d> TC: %d BP: %d DN: %s\\n' % (self.id,\n self.travelCount,\n self.botPresent,\n repr(self.dirNodes))", "def __repr__(self):\r\n \r\n txt = super(GrfNodeCore, self).__repr__()\r\n\r\n txt += '; size = {0}'.format(self.__size) # generate formatted text\r\n return txt", "def __str__(self):\n # string representation includes values of all inner fields\n return \\\n \"Node Name: \" + str(self.name) + \"\\n\" + \\\n \"Node Attributes: \" + str(self.attributes) + \"\\n\" + \\\n \"Incident Edges: \" + \"\\n\".join([edge.__str__() for edge in self.incident_edges]) + \"\\n\"", "def __str__(self):\n # string representation includes values of all inner fields\n return \\\n \"Node Name: \" + str(self.name) + \"\\n\" + \\\n \"Node Attributes: \" + str(self.attributes) + \"\\n\" + \\\n \"Incident Edges: \" + \"\\n\".join([edge.__str__() for edge in self.incident_edges]) + \"\\n\"", "def __str__(self):\n temp = self.__head\n ss = []\n while temp is not None:\n ss.append(str(temp.data))\n temp = temp.next_node\n return ('\\n'.join(ss))", "def serialize(node):\r\n serial = node.val \r\n\r\n if node.left or node.right:\r\n serial += r'('\r\n\r\n if node.left:\r\n serial += serialize(node.left)\r\n \r\n serial += r'|' \r\n \r\n if node.right:\r\n serial += serialize(node.right)\r\n \r\n serial += r')'\r\n\r\n return serial", "def debug(node, tabs_count = 0):\n\ttext = \"\"\n\ttabs = \" \"\n\tfor i in range(tabs_count):\n\t\ttabs = tabs + \"\\t\"\n\n\tfor n in node.get_children():\n\t\tif type(n) is Node:\n\t\t\ttext = text + tabs + debug(n, tabs_count + 1)\n\t\t\ttext = text + \"\\n\"\n\t\telse:\n\t\t\ttext = text + \"\\n\" + tabs + n\n\n\treturn text" ]
[ "0.7305593", "0.7293151", "0.7116349", "0.7059454", "0.70524126", "0.6998434", "0.6998368", "0.6985005", "0.6963303", "0.695356", "0.6935341", "0.69200206", "0.68946445", "0.6891254", "0.6884507", "0.6871171", "0.68657887", "0.68058807", "0.6788324", "0.67802215", "0.6766152", "0.6763802", "0.6745109", "0.66978765", "0.66818285", "0.6665891", "0.6665891", "0.6656161", "0.662899", "0.6603579" ]
0.7496617
0
Initializes the classifier given the documents; it obtains the vocabulary and generates the tree.
def __init__(self, documents): Classifier.__init__(self, documents) documents = set(documents) term_document_matrix = TermDocumentMatrix(documents, compute_word_vectors=False, compute_document_vectors=False) self.vocabulary = set(term_document_matrix.vocabulary) self.tree = self.get_tree(documents, self.vocabulary)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, documents_path):\n self.documents = []\n self.vocabulary = []\n self.likelihoods = []\n self.documents_path = documents_path\n self.term_doc_matrix = None \n self.document_topic_prob = None # P(z | d)\n self.topic_word_prob = None # P(w | z)\n self.topic_prob = None # P(z | d, w)\n\n self.number_of_documents = 0\n self.vocabulary_size = 0", "def build_model(self, documents):\n self.vectorizer = TfidfVectorizer(\n stop_words='english', lowercase=True).fit(documents)\n self.vectors = self.vectorizer.transform(documents)", "def __init__(self, corpus):\n self.train(corpus)", "def _initialize_corpus(self):\n vocab = self.vocab # vocab is the word vector\n theta = self.theta # theta is the model parameter\n corpus = self.corpus\n\n for line in corpus:\n for word in line:\n if word not in vocab:\n vocab[word] = init_vector(self.n)\n theta[word] = init_vector(self.n)\n\n if self.verbose:\n print(f\"{len(vocab)} words have been loaded\")", "def __init__(self, tokenizer=simple_tokenize):\n # Set tokenizer to use for tokenizing new documents\n self.tokenize = tokenizer\n # The term document matrix is a sparse matrix represented as a\n # list of dictionaries. Each dictionary contains the word\n # counts for a document.\n self.sparse = []\n # Keep track of the number of documents containing the word.\n self.doc_count = {}", "def __init__(self):\n # Initialise class attributes (visibility ease)\n self.__corpus__ = None\n self.__pron_det_pos_words__ = None\n self.__triples_corpus__ = None\n self.__entities_in_doc__ = None\n self.__wvmodel__ = None\n \n # For purpose of parsing relation triplets later\n # Load pretrained embedding model\n #plog('Loading pretrained word embeddings. This will take some time to load...')\n #self.__wvmodel__ = api.load('fasttext-wiki-news-subwords-300')\n #plog('Pretrained word embeddings loaded!')", "def __init__(self, documents_path, lambda_b, lambda_c):\n self.documents = []\n self.vocabulary = []\n self.likelihoods = []\n self.documents_path = documents_path\n self.term_doc_matrix = None\n self.document_topic_prob = [] # P(z | d), pi\n self.topic_word_prob = None # P(w | z)\n self.topic_word_prob_background = None # P(w | z)\n self.topic_word_prob_collection_specific = []\n self.topic_prob_j = None # P(z | d, w)\n self.topic_prob_B = None # P(z | d, w)\n self.topic_prob_C = None # P(z | d, w)\n self.lambda_B = lambda_b\n self.lambda_C = lambda_c\n\n self.number_of_collections = 0\n self.number_of_documents = 0\n self.vocabulary_size = 0", "def __init__(self, corpus):\n self.unigram_count = Counter()\n self.bigram_count = defaultdict(Counter)\n self.vocabulary_size = 0\n self.num_words = 0\n self.backoff_multiplier = 0.4\n self.train(corpus)", "def __init__(self, corpus):\n self.unigramCounts = collections.defaultdict(lambda: 0)\n self.bigramCounts = collections.defaultdict(lambda: 0)\n self.trigramCounts = collections.defaultdict(lambda: 0)\n self.followingWords = collections.defaultdict(lambda: set())\n self.precedingWords = collections.defaultdict(lambda: set())\n self.total = 0\n self.discount = 0.75\n self.train(corpus)", "def __init__(self, lower=True, num_norm=True,\n use_char=True, initial_vocab=None):\n self._num_norm = num_norm\n self._use_char = use_char\n \n # TODO: check how to use this\n self._node_vocab = Vocabulary(lower=False)\n self._word_vocab = Vocabulary(lower=lower)\n self._char_vocab = Vocabulary(lower=False)\n #TODO: check usability\n self._label_vocab = Vocabulary(lower=False, unk_token=False)\n\n if initial_vocab:\n 
self._word_vocab.add_documents([initial_vocab])\n self._char_vocab.add_documents(initial_vocab)", "def __init__(self):\n self.word_list.extend(self.load_corpus(\"reuters\"))\n self.corpus = \" \".join(self.word_list) # use spaces to join all the elements in the list\n # load the corpus to create the word list\n # note that the network is needed to download the corpus\n\n self.count_ngrams() # count the n-grams\n self.load_confusion_matrix() # read the confusion matrix from files\n self.load_vocabulary() # read the vocabulary from a file", "def __init__(self, root, which_set, vocab, transform=None):\n self.root = root\n self.img_root = os.path.join(root, 'Img')\n self.ann = json.load(open(os.path.join(root, '{}_labels.json'.format(which_set)),'r'))\n\n self.vocab = vocab\n self.transform = transform\n self.img_list = list(self.ann.keys())\n # transfer categories id to labels\n self.cat2label = {}\n for i, k in enumerate(label_corpus):\n self.cat2label[k] = i\n\n self.num_cats = len(self.cat2label) \n\n # vgnome has varied number of annotations [1, 20], average 5.73\n # we still choose five as the parameter. It can be adjusted later on\n self.num_ann_onebatch = 5\n self.ids = [a for a in range(len(self.ann))]\n\n print('\\t {} train samples from {} set'.format(len(self.ids), which_set ))\n print('\\t {} of categories'.format(self.num_cats))", "def initialize(self):\n self.n_words = len(self.vocab)\n self.n_docs = len(self.documents)\n\n # Initialize the three count matrices.\n # The (i,j) entry of self.nmz is the number of words in document i assigned to topic j.\n self.nmz = np.zeros((self.n_docs, self.n_topics))\n # The (i,j) entry of self.nzw is the number of times term j is assigned to topic i.\n self.nzw = np.zeros((self.n_topics, self.n_words))\n # The (i)-th entry is the number of times topic i is assigned in the corpus.\n self.nz = np.zeros(self.n_topics)\n\n # Initialize the topic assignment dictionary.\n self.topics = {} # key-value pairs of form (m,i):z\n\n for m in range(self.n_docs):\n for i in self.documents[m]:\n # Get random topic assignment, i.e. 
z is a random integer in the range of topics\n z = np.random.randint(self.n_topics)\n # Increment count matrices\n self.nmz[m,z] += 1\n self.nzw[z,self.documents[m][i]] += 1\n self.nz[z] += 1\n # Store topic assignment\n self.topics[(m,i)] = z", "def train(self, documents):\n ###DONE\n\n #entire vocab in document set D\n vocab_sod = set()\n vocab_pop = set()\n \n #Calcuates prior probabilities\n priorSOD = 0 #how many docs are spam\n priorPOP = 0 #how many docs are ham\n \n #Cacluates Tct\n term_freq_sod = {} #{term:occur, term:occur}\n term_freq_pop = {}\n \n #Tct'\n Tct_sod = 0 #Tct' = sum of (every term occurence in class c + 1)\n Tct_pop = 0\n \n for doc in documents: \n if 'sod' in doc.label:\n priorSOD += 1\n for token in doc.tokens:\n Tct_sod += 1\n if token in term_freq_sod.keys():\n term_freq_sod[token] = term_freq_sod[token] + 1\n else:\n term_freq_sod[token] = 1\n vocab_sod.add(token) \n else:\n priorPOP += 1\n for token in doc.tokens:\n Tct_pop += 1\n if token in term_freq_pop.keys():\n term_freq_pop[token] = term_freq_pop[token] + 1\n else:\n term_freq_pop[token] = 1\n vocab_pop.add(token)\n \n \n #endfor\n # | is for set join\n self.vocab = vocab_sod | vocab_pop #gets rid of duplicate words (those in both 'ham' and 'spam') \n \n #Tct Primes\n #tct' = term freq of all terms in class c + 1*(total terms)\n Tct_sod = Tct_sod + len(self.vocab) \n Tct_pop = Tct_pop + len(self.vocab) \n \n \n print(\"PriorSod: \" + str(priorSOD))\n print(\"PriorPop: \" + str(priorPOP))\n print(\"LEN Docum: \" + str(len(documents)))\n \n self.priorSOD = priorSOD / len(documents)\n self.priorPOP = priorPOP / len(documents)\n \n for term in self.vocab:\n if term in term_freq_pop.keys():\n self.cond_prob_pop[term] = (term_freq_pop[term] + 1) / Tct_pop\n else:\n self.cond_prob_pop[term] = 1 / Tct_pop\n \n if term in term_freq_sod.keys():\n self.cond_prob_sod[term] = (term_freq_sod[term] + 1) / Tct_sod\n else:\n self.cond_prob_sod[term] = 1 / Tct_sod\n \n \n pass", "def __init__(self, corpus):\n self.ntokens = 0\n self.counts = collections.defaultdict(lambda: 0)\n self.s = collections.defaultdict(lambda: 0.0)\n self.train(corpus)", "def build_vocabulary(self):\n \n for iCount in range(0,len(self.documents)):\n for jCount in range(iCount,len(self.documents[iCount])):\n self.vocabulary.append(self.documents[iCount][jCount])\n\n self.vocabulary = set(self.vocabulary)\n\t\t\n self.vocabulary = sorted(self.vocabulary)\n\t\t#print(\"Value of the vocabulary\")\n self.vocabulary_size = len(self.vocabulary)", "def _initialize_trees(self):", "def __init__(self, num_docs, upper, lower, stemmer=True):\n\n # load data from json file\n self.num_docs = num_docs\n self.data = pd.read_json('data/arxiv-metadata-oai-snapshot.json', lines=True,\n nrows=self.num_docs)\n\n # create a list of list of documents from the abstracts, lowercase words, remove non-alphanumerical words\n self.stemmer = PorterStemmer() if stemmer == True else None\n self.docs = [self.preprocess(doc) for doc in self.data['abstract'].tolist()]\n\n # count occurrences per word and prune words that occur above some threshold\n self.counts = self.get_counts()\n self.prune(upper=upper, lower=lower)\n\n # obtain 'final' vocabulary, convert to integer ids.\n self.vocab = self.get_vocab()\n self.docs2ids()", "def __init__(self, corpus):\n self.total = 0\n self.reverseBigramCount = defaultdict(lambda : defaultdict(lambda : 0))\n self.bigramCount = defaultdict(lambda : defaultdict(lambda : 0))\n self.unigramCount = defaultdict(lambda: 0)\n self.train(corpus)", 
"def __init__(self, docs, dict_path= 'wordindex.npy'):\n super(NNModel, self).__init__()\n self.stopwords += self.additional_stopwords\n self.words = set(['OOB', 'UNK']) # OOB for out of boundary, UNK for unknown words\n self.docs = []\n\n for doc in docs:\n datum = []\n for word in self.cut_words(doc):\n self.words.add(word)\n datum.append(word)\n self.docs.append(datum)\n\n self.words = list(self.words)\n self.word2idx = dict([(self.words[i], i) for i in range(len(self.words))])\n logging.info(f'{len(docs)} articles loaded, with word bag length: {len(self.words)}')\n if dict_path != '': # save dict\n np.save(DATA_DIR + dict_path, self.word2idx)", "def __init__(self, words, corpus):\n self.words = words\n self.vocab_size = len(words)\n self.corpus = corpus\n counter = Counter(corpus)\n self.counts = np.array([counter[i] for i in range(self.vocab_size)])", "def __init__(self, corpus_path, corpus_files):\n\n msg(\"Importing treebank...\")\n \n # get a corpus reader object for our corpus using NLTK\n treebank = TaggedCorpusReader(corpus_path, corpus_files, sep=\"|\")\n \n # get all sentences from corpus in a tagged format\n self.tagged_sents = treebank.tagged_sents()\n \n # get all sentences from corpus in an untagged format\n self.sents = treebank.sents()\n \n msg(\"done!\\n\")", "def __init__(self, test):\n self.all_grams = Ngram(self.START_OF_SENTENCE_TOKEN)\n for label in self.label_type_map:\n self.words_labels_counts[label] = {}\n self.words_labels_counts[label][self.UNKNOWN_TOKEN] = 0\n if test:\n self.train(\"train.txt\")\n self.test(\"test.txt\")\n else:\n self.train(\"train_partial.txt\")\n self.validate(\"validation_partial.txt\")", "def __init__(self, corpus):\n self.unigrams = defaultdict(int)\n self.f1 = defaultdict(float)\n self.total = 0\n self.train(corpus)", "def __init__(self, corpus):\n self.corpus = corpus\n self.corpus_seg = None\n self.bm25_instance = None\n self.tokenizer = Tokenizer()", "def __init__(self, corpus, epsilon=7):\n # TODO your code here'\n self.v = 0\n self.total=0\n self.epsilon=epsilon\n self.vocab = defaultdict(lambda:defaultdict(lambda:0))\n self.word_counts= defaultdict(lambda:0)\n self.train(corpus)", "def __init__(self, corpus):\n self.unigramCounts = collections.defaultdict(lambda: 0)\n self.totalCount = 0\n self.zeroCount = 0\n self.train(corpus)", "def __init__(self):\n self.kids = [{}]\n self.root = 0\n self.vocabular = set([])", "def __init__(self, corpus):\n self.unigramCounts = collections.defaultdict(lambda: 0)\n self.bigramCounts = collections.defaultdict(lambda: 0)\n self.total = 0\n self.train(corpus)", "def __init__(self, clean_text_util):\n self.clean_text_util = clean_text_util\n\n self.dictionary_db = kc.DB()\n self.dictionary_db.open(DICTIONARY_DB_FILENAME, \n kc.DB.OWRITER | kc.DB.OCREATE)\n \n self.vectors_db = kc.DB()\n self.vectors_db.open(VECTOR_DB_FILENAME, \n kc.DB.OWRITER | kc.DB.OCREATE)\n \n self.vectors_norm_db = kc.DB()\n self.vectors_norm_db.open(VECTORS_NORM_DB_FILENAME, \n kc.DB.OWRITER | kc.DB.OCREATE)\n\n self.classifier_state_db = kc.DB()\n self.classifier_state_db.open(CLASSIFIER_STATE_FILENAME, \n kc.DB.OWRITER | kc.DB.OCREATE)\n\n # set the total number of documents in the corpus\n if not self.classifier_state_db.get(\"text_nb\"): \n self.classifier_state_db.add(\"text_nb\", \"0\") \n\n # Current number of words in the dictionary\n self.word_index = len(self.dictionary_db)" ]
[ "0.70484084", "0.668634", "0.6647606", "0.6513024", "0.64845014", "0.64544755", "0.64532274", "0.64206356", "0.6389602", "0.6320153", "0.625024", "0.6242476", "0.6230974", "0.6218114", "0.62008655", "0.6198922", "0.6155165", "0.613072", "0.6127168", "0.61096436", "0.60994977", "0.60977465", "0.6090163", "0.6082511", "0.60709333", "0.6066874", "0.6033337", "0.60275155", "0.60190433", "0.60076445" ]
0.7993907
0
Returns the predicted class for the given document based on the previously generated decision tree.
def get_prediction(self, document): return self.classify(document, self.tree)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict(self, doc):\n # >>> YOUR ANSWER HERE\n # For each class c, calculate the corresponding score of the doc\n scores = [(self.score(doc, c), c) for c in self.classes]\n # after the sort by score, return the most likely class\n scores.sort(key=lambda x: x[0])\n return scores[-1][1]\n # >>> END YOUR ANSWER", "def classify(self, document, tree):\n if type(tree) is ClassTreeNode:\n return tree.c\n else:\n if tree.word in document.bag_of_words:\n return self.classify(document, tree.children[0])\n else:\n return self.classify(document, tree.children[1])", "def predict(self, doc):\n \n prob_positive = self._predict_doc(doc, 1)\n prob_negative = self._predict_doc(doc, 0)\n\n if prob_positive > prob_negative:\n return 1\n return 0", "def classify(self, document):\n f_vector = self.extract_f_vector(document)\n f_vector = np.append(f_vector, np.array([1])) # adding last \"feature\" for prior log probability\n all_log_prob = self.my_model[\"all_log_prob\"]\n sum_of_probabilities = f_vector.dot(all_log_prob)\n index = np.argmax(sum_of_probabilities)\n return self.my_model[\"col_to_label\"][index]", "def _predict(self, inputs):\n node = self.tree_\n while node.left:\n if inputs[node.feature_index] < node.split:\n node = node.left\n else:\n node = node.right\n return node.predicted_class", "def classify_document(classification_file, classification_dict, document):\n\tdocument_dictionary = make_prob_dictionary(classification_file, classification_dict)\n\tdocument = read_doc(document)\n\tdoc_words = document[0]\n\tdoc_length = float(document[1])\n\tdocument = probabilities(doc_words, doc_length, classification_dict)\n\treturn document", "def classify(self, documents):\n predictions = []\n for doc in documents:\n\n score_sod = math.log(self.priorSOD)\n score_pop = math.log(self.priorPOP)\n for term in doc.tokens:\n if term in self.cond_prob_sod.keys():\n score_sod += math.log(self.cond_prob_sod[term])\n if term in self.cond_prob_pop.keys():\n score_pop += math.log(self.cond_prob_pop[term])\n if(score_pop >= score_sod): #defaults to ham if score = even \n predictions.append('pop')\n else:\n predictions.append('sod')\n \n return predictions \n pass", "def predict(self, X):\n if isinstance(self.model, ClassifierMixin):\n scores = self._decision_function(X)\n if len(scores.shape) == 1:\n indices = (scores > 0).astype(np.int)\n else:\n indices = scores.argmax(axis=1)\n return self.classes_[indices]\n else:\n return self._decision_function(X)", "def predict(self, X):\n scores = self.decision_function(X)\n if self.classes.shape[0] == 2:\n indices = np.array(scores > 0, dtype=np.int)\n else:\n indices = scores.argmax(axis=1)\n return self.classes[np.ravel(indices)]", "def predict(self):\n probabilities = self.probability_array()\n # THIS ASSUMES the classifiers are in order: 0th column of the\n # probabilities corresponds to label = 0, ..., 9th col is for 9.\n classes = np.argmax(probabilities, axis=1)\n return classes", "def predict(self, example):\n return self.decisionTree.traverse_tree(example)", "def predict(self, documents):\n raise NotImplementedError()", "def predict(self, data):\n try:\n getattr(self, \"tree\")\n except AttributeError:\n raise RuntimeError(\"You must train classifer before predicting data!\")\n\n predicts_proba = self.predict_proba(data)\n predicts = _classify_from_probs(predicts_proba)\n return predicts", "def classify(self, doc, default=None):\n probs = {}\n \n # Find the category with the highest probability\n max = Decimal(0)\n for cat in self.categories():\n probs[cat] = 
self.prob(doc, cat)\n if probs[cat] > max: \n max = probs[cat]\n best = cat\n\n if max == 0.0:\n return default\n \n # Make sure the probability exceeds threshold*next best\n for cat in probs:\n if cat == best:\n continue\n if probs[cat]*Decimal(str(self.get_threshold(best)))>probs[best]:\n return default\n \n print probs[best]\n return best", "def classifier(decision_tree,data):\n dt = copy.deepcopy(decision_tree) # copy to maintain original decision tree\n cur_attr = list(dt)[0] # 'cur_attr' is first selected attribute\n \n while True:\n dt = dt[cur_attr] # 'dt' is sub decision tree \n value = data[cur_attr] # 'value' is data's attribute value\n\n # if there is no dictionary type instance, dt[value] is class label\n if not isinstance(dt[value],dict): \n return dt[value]\n\n dt = dt[value] # 'dt' is branches of value\n cur_attr = list(dt)[0] # update cur_attr", "def _get_target(self, prediction: Tensor) -> Tensor:\n if self.model_config.mode == ModelMode.binary_classification:\n # TODO: Allow customization of the thresholds used below.\n if self.model_config.return_type.value == 'raw':\n return (prediction > 0).long().view(-1)\n if self.model_config.return_type.value == 'probs':\n return (prediction > 0.5).long().view(-1)\n assert False\n\n if self.model_config.mode == ModelMode.multiclass_classification:\n return prediction.argmax(dim=-1)\n\n return prediction", "def classify_text(classifier, sentence):\n\n sentence = Sentence(sentence)\n classifier.predict(sentence, multi_class_prob=True)\n return sentence.labels", "def predict(self, X):\n\t\tR = self.predict_soft(X)\t\t\t\t\t\t\t\t\t\t\t# compute soft output values\n\t\tY = R.argmax(1)\t\t\t\t\t\t\t\t\t\t\t\t\t\t# get index of maximum response\n\t\treturn self.classes[Y]\t\t\t\t\t\t\t\t\t\t\t\t# convert to saved class values", "def predict(self, X):\r\n \r\n # To speed up, we apply the scoring function to all the instances\r\n # at the same time.\r\n scores = X.dot(self.w)\r\n \r\n # Create the output array.\r\n # At the positions where the score is positive, this will contain\r\n # self.positive class, otherwise self.negative_class.\r\n out = numpy.select([scores>=0.0, scores<0.0], [self.positive_class, \r\n self.negative_class])\r\n return out", "def predict_one(tree, sample):\n if tree['leaf']:\n return tree['class']\n\n else:\n if sample[tree['feature']] <= tree['split']:\n return predict_one(tree['left'], sample)\n else:\n return predict_one(tree['right'], sample)", "def predict(self,entry):\n assert self.root is not None,\"Decision tree is not initialized\"\n return self.root.predict(entry)", "def classify(self, row, node):\n\n # Base case: we've reached a leaf\n if isinstance(node, Leaf):\n return node.predictions\n\n # Decide whether to follow the true-branch or the false-branch.\n # Compare the feature / value stored in the node,\n # to the example we're considering.\n if node.question.match(row):\n return self.classify(row, node.true_branch)\n else:\n return self.classify(row, node.false_branch)", "def predict(self,x):\n preds = [tree.predict(x) for tree in self.forest]\n if self.classify:\n cls_counts = [0] * self.param['numClasses']\n for p in preds:\n cls_counts[p] += 1\n return argmax(cls_counts)\n else:\n return sum(preds) / (len(preds)*1.0)", "def model_predict(img_path):\n img = open_image(img_path)\n pred_class, pred_idx, outputs = learn.predict(img)\n print(pred_class)\n return pred_class", "def predict(self, testData=[]):\n result = []\n for classValue in self._classAttrs:\n #print(f'Computing Label: {classValue}, 
{self._classLabelMap[classValue]}')\n result.append(self._computeCondProb(testData, classValue))\n return self._classLabelMap[result.index(max(result))]", "def get_prediction(engineered_set, data):\n clf = pickle.load(open(\"Decisiontreemodel_3months.pkl\", \"rb\"))\n try:\n predicted = clf.predict_proba(engineered_set)[:, 1]\n return predicted[0]\n except:\n sendErrorReport(data, \"error2\")\n return 0.9209 # value is not used", "def predict(self, review):\n raise NotImplementedError", "def prediction(self, user, item):\n return self._prob_to_class(self(user, item))", "def getPredictedResult(self):\n output = self.svclassifier.predict([self.inputData])\n return output[0]", "def predict_record(tree: dict, data: pd.Series):\r\n tree = tree.copy()\r\n while type(tree) == dict: # while the tree doesn't only contain a label (leaf)\r\n col = list(tree.keys())[0] # assign the next column to consider\r\n try:\r\n tree = tree[col][data[col]] # move to the next column part of the tree\r\n except:\r\n tree = tree[col][\r\n list(tree[col].keys())[0]] # if unknown class encountered (not included in the tree), pick first class\r\n\r\n predicted_value = tree\r\n return predicted_value" ]
[ "0.7630106", "0.69681805", "0.67407465", "0.6702287", "0.63191974", "0.6225889", "0.61062753", "0.6023611", "0.59961706", "0.5991221", "0.59716344", "0.59597355", "0.5931009", "0.5892068", "0.58535576", "0.580295", "0.57817346", "0.57742006", "0.57652587", "0.5760815", "0.5753141", "0.57489985", "0.5748628", "0.5742809", "0.5741759", "0.57388157", "0.5736624", "0.57360697", "0.5696499", "0.56871927" ]
0.7826694
0
Returns the predicted class for the document; it is computed recursively by descending through child nodes until a leaf is reached.
def classify(self, document, tree): if type(tree) is ClassTreeNode: return tree.c else: if tree.word in document.bag_of_words: return self.classify(document, tree.children[0]) else: return self.classify(document, tree.children[1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_prediction(self, document):\n return self.classify(document, self.tree)", "def _predict(self, inputs):\n node = self.tree_\n while node.left:\n if inputs[node.feature_index] < node.split:\n node = node.left\n else:\n node = node.right\n return node.predicted_class", "def predict(self, doc):\n # >>> YOUR ANSWER HERE\n # For each class c, calculate the corresponding score of the doc\n scores = [(self.score(doc, c), c) for c in self.classes]\n # after the sort by score, return the most likely class\n scores.sort(key=lambda x: x[0])\n return scores[-1][1]\n # >>> END YOUR ANSWER", "def classify(self, row, node):\n\n # Base case: we've reached a leaf\n if isinstance(node, Leaf):\n return node.predictions\n\n # Decide whether to follow the true-branch or the false-branch.\n # Compare the feature / value stored in the node,\n # to the example we're considering.\n if node.question.match(row):\n return self.classify(row, node.true_branch)\n else:\n return self.classify(row, node.false_branch)", "def classifier(decision_tree,data):\n dt = copy.deepcopy(decision_tree) # copy to maintain original decision tree\n cur_attr = list(dt)[0] # 'cur_attr' is first selected attribute\n \n while True:\n dt = dt[cur_attr] # 'dt' is sub decision tree \n value = data[cur_attr] # 'value' is data's attribute value\n\n # if there is no dictionary type instance, dt[value] is class label\n if not isinstance(dt[value],dict): \n return dt[value]\n\n dt = dt[value] # 'dt' is branches of value\n cur_attr = list(dt)[0] # update cur_attr", "def predict(self,x):\n preds = [tree.predict(x) for tree in self.forest]\n if self.classify:\n cls_counts = [0] * self.param['numClasses']\n for p in preds:\n cls_counts[p] += 1\n return argmax(cls_counts)\n else:\n return sum(preds) / (len(preds)*1.0)", "def classify(data_point, tree):\r\n current = tree\r\n while(current.is_leaf == False): #while we're not at a leaf\r\n q = tree.issue\r\n v = data_point.dat_votes[ord(q) - 97]\r\n if(current is None): pass\r\n current = current.get_classification(v)\r\n #we should now be at a Leaf\r\n if(current is None): print(\"FATAL\")\r\n c =current.get_classification(\"\")\r\n # print(\"classified: \" + str(data_point) + \" as \" + str(c))\r\n return c", "def predict(self, data):\n try:\n getattr(self, \"tree\")\n except AttributeError:\n raise RuntimeError(\"You must train classifer before predicting data!\")\n\n predicts_proba = self.predict_proba(data)\n predicts = _classify_from_probs(predicts_proba)\n return predicts", "def predict_one(tree, sample):\n if tree['leaf']:\n return tree['class']\n\n else:\n if sample[tree['feature']] <= tree['split']:\n return predict_one(tree['left'], sample)\n else:\n return predict_one(tree['right'], sample)", "def _predict(self, treenode, X):\n if treenode.is_leaf:\n return treenode.leaf_score\n elif pd.isnull(X[1][treenode.feature]):\n if treenode.nan_direction == 0:\n return self._predict(treenode.left_child, X)\n else:\n return self._predict(treenode.right_child, X)\n elif X[1][treenode.feature] < treenode.threshold:\n return self._predict(treenode.left_child, X)\n else:\n return self._predict(treenode.right_child, X)", "def classify(self, features):\n node = self.tree\n answer = node.right_label + node.left_label\n while len(answer)>1:\n if node.model.classify(features)==+1:\n answer=node.left_label\n node=node.left\n else:\n answer=node.right_label\n node=node.right \n return answer[0]", "def __traverse_tree(self, node, sample_instance):\n if node.is_leaf:\n return node.predicted_class\n split = 
node.integer_splitting_rule\n feature = node.feature_index_split\n\n # left node gets assigned to data that is less than the integer\n # splitting rule within that feature\n if sample_instance[feature] < split:\n prediction = self.__traverse_tree(node.left_child,\n sample_instance)\n else:\n prediction = self.__traverse_tree(node.right_child,\n sample_instance)\n return prediction", "def predict(self):\n probabilities = self.probability_array()\n # THIS ASSUMES the classifiers are in order: 0th column of the\n # probabilities corresponds to label = 0, ..., 9th col is for 9.\n classes = np.argmax(probabilities, axis=1)\n return classes", "def classify(observation,tree):\n if tree.results!=None:\n return tree.results\n else:\n v=observation[tree.col]\n branch=None\n if isinstance(v, int) or isinstance(v, float):\n if v>=tree.value:\n branch=tree.tb\n else: \n branch=tree.fb\n else:\n if v==tree.value: \n branch=tree.tb\n \n else: \n branch=tree.fb\n return classify(observation,branch)", "def classif(self, text):\r\n content = self.prizn.tokenize(text)\r\n filec = self.vectorize_content(content)\r\n selected = {}\r\n for klas in self.prizn.klas_tridy:\r\n distance = 0.0\r\n wrdc = 0.0\r\n for wrd in filec:\r\n if wrd in self.prizn.klas_tridy[klas]:\r\n wrdc += 1.0\r\n distance += abs(float(filec[wrd]) - float(self.prizn.klas_tridy[klas][wrd]))\r\n if wrdc > 0:\r\n selected[klas] = float(distance) / float(wrdc)\r\n\r\n max_class = \"\"\r\n for i in range(0, 3):\r\n klas = max(selected, key=lambda k: selected[k])\r\n max_class = max_class + \" ,\" + klas\r\n del selected[klas]\r\n\r\n return max_class", "def classify(self, document):\n f_vector = self.extract_f_vector(document)\n f_vector = np.append(f_vector, np.array([1])) # adding last \"feature\" for prior log probability\n all_log_prob = self.my_model[\"all_log_prob\"]\n sum_of_probabilities = f_vector.dot(all_log_prob)\n index = np.argmax(sum_of_probabilities)\n return self.my_model[\"col_to_label\"][index]", "def _classify(self, example):\n neighbors = self.find_neighbor(example)\n class_label = self.find_response(neighbors)\n return class_label", "def predict_record(tree: dict, data: pd.Series):\r\n tree = tree.copy()\r\n while type(tree) == dict: # while the tree doesn't only contain a label (leaf)\r\n col = list(tree.keys())[0] # assign the next column to consider\r\n try:\r\n tree = tree[col][data[col]] # move to the next column part of the tree\r\n except:\r\n tree = tree[col][\r\n list(tree[col].keys())[0]] # if unknown class encountered (not included in the tree), pick first class\r\n\r\n predicted_value = tree\r\n return predicted_value", "def getClassifier(self):\n return self.classify", "def predict(tree, samples):\n ret = np.empty(samples.shape[0], dtype=float)\n ret.fill(-1)\n indices = np.arange(samples.shape[0])\n\n def tranverse(node, indices):\n nonlocal samples\n nonlocal ret\n\n if node['leaf']:\n ret[indices] = node['class']\n\n else:\n going_left = samples[indices, node['feature']] <= node['split']\n left_indices = indices[going_left]\n right_indices = indices[np.logical_not(going_left)]\n\n if left_indices.shape[0] > 0:\n tranverse(node['left'], left_indices)\n\n if right_indices.shape[0] > 0:\n tranverse(node['right'], right_indices)\n\n tranverse(tree, indices)\n return ret", "def __classify(self, instance, tree, default=None):\n attribute = str(list(tree.keys())[0])\n keys_of_attribute = list(tree[attribute].keys())\n if instance[attribute].iloc[0] in keys_of_attribute:\n subtree = tree[attribute]\n result = 
subtree[instance[attribute].iloc[0]]\n if isinstance(result, dict):\n return self.__classify(instance, result)\n else:\n return result\n else:\n return default", "def predictWithTree(observation, tree, classes, d_boundary=0.5):\n \n try:\n assert len(classes) == 2\n except Exception('Currently, predict with tree only works with two classes') as inst:\n print inst\n raise Exception\n \n probs= classProbs(observation,tree, classes)\n if probs[1] >= d_boundary:\n return classes[1]\n else:\n return classes[0]\n print \"There is some unexpected error, none of the probabilities is greater than the boundary probability\"\n print \"Perhaps this is a multiclass problem and the boundary probability was misspecified?\"\n return", "def __built_tree(self, data_set, features, target_feature, default_class):\n tree_features = features[:]\n data_set = data_set[:]\n result_class = Counter(x for x in data_set[target_feature])\n result_class_count = len(result_class)\n result_class_maximum_value = result_class.most_common(1)[0][0]\n\n # This branch is a leaf (all the results belong to the same class)\n if result_class_count == 1:\n result = list(result_class.keys())\n return result[0]\n\n # Check if the data set is empty or the attributes are not given.\n elif data_set.empty or (not tree_features):\n return default_class\n\n else:\n # Get default value for next branch.\n default_class = result_class_maximum_value\n\n # Get split feature.\n split_feature = self.__get_split_feature(data_set, target_feature, tree_features)\n tree = {split_feature: {}}\n\n # Remove current feature from feature list.\n remaining_features = tree_features\n remaining_features.remove(split_feature)\n\n # Create a subtree for each child feature\n for feature_value, data_subset in data_set.groupby(split_feature):\n subtree = self.__built_tree(data_subset, remaining_features, target_feature, default_class)\n tree[split_feature][feature_value] = subtree\n\n return tree", "def classify(self, doc, default=None):\n probs = {}\n \n # Find the category with the highest probability\n max = Decimal(0)\n for cat in self.categories():\n probs[cat] = self.prob(doc, cat)\n if probs[cat] > max: \n max = probs[cat]\n best = cat\n\n if max == 0.0:\n return default\n \n # Make sure the probability exceeds threshold*next best\n for cat in probs:\n if cat == best:\n continue\n if probs[cat]*Decimal(str(self.get_threshold(best)))>probs[best]:\n return default\n \n print probs[best]\n return best", "def classify(self, tree, datapoint):\n\n\t\tif type(tree) == type(\"string\"):\n\t\t\treturn tree\n\t\telse:\n\t\t\ta = list(tree.keys())[0]\n\t\t\tfor i in range(len(self.featureNames)):\n\t\t\t\tif self.featureNames[i]==a:\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\ttry:\n\t\t\t\tt = tree[a][datapoint[i]]\n\t\t\t\treturn self.classify(t,datapoint)\n\t\t\texcept:\n\t\t\t\treturn None", "def classify(series, tree):\n feature = tree[0]\n subtree = tree[1]\n\n answer = series[feature]\n response = subtree[answer]\n\n if type(response) != list: #base case\n return subtree[answer]\n else:\n return classify(series, response) #recursive case", "def predict_class(self, inputs):\n if not self.trained:\n if self.verbose:\n print(\"KMeans Model Class - Predict Class Function: No trained model\")\n return -1\n\n\n return self.cluster_classes[self.model.predict(inputs)]", "def predict(tree, dataSet):\n\n\tcount = 0 #used for tracking how many times we've correctly classified our data\n\tfor index in range(len(dataSet)):\n\t\tdataPoint = dataSet[index]\n\t\tprint \"Current dataPoint: 
\", dataPoint.retrieve('id').getValue()\n\t\tnode = 0\n\t\tfor i in tree.fields[tree.nType].keys():\n\t\t\tif NodeType.ROOT == tree.getNodeType(i):\n\t\t\t\tnode = i #basically an index\n\t\t\t\tprint \"root node: \", node\n\t\t\t\tbreak\n\t\t\t#keep going down the tree until no children exist, then get output classification\n\n\t\tprint \"node type\", tree.getNodeType(node)\n\n\t\twhile tree.getNodeType(node) != NodeType.LEAF:\n\t\t\tsplitVal = tree.getSplitValue(node)\n\t\t\tprint \"tree split value: \", splitVal\n\t\t\tsplitAttribute = tree.getSplitAtribute(node)\n\t\t\tprint \"tree split attribute: \", splitAttribute\n\t\t\tval = dataPoint.retrieve(splitAttribute).getValue()\n\t\t\tif val == None:\t\t\n\t\t\t\tval = np.median(retrieveDataFromColumn(dataSet, splitAttribute))\n\n\t\t\tprint \"data point value for split attribute: \", val\n\t\t\tif FeatureType.CONTINUOUS == tree.getSplitType(node): \n\t\t\t\tif val >= splitVal:\n\t\t\t\t\tnode = tree.getChild0(node)\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\t\t\tprint \"greater than\", \"going to next node\", node\n\t\t\t\telse:\n\t\t\t\t\tnode = tree.getChild1(node)\n\t\t\t\t\tprint \"lesser than\", \"going to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\telif FeatureType.DISCRETE == tree.getSplitType(node):\n\t\t\t\tif val != splitVal:\n\t\t\t\t\tnode = tree.getChild0(node)\n\t\t\t\t\tprint \"not equal\", \" going to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\t\telse:\n\t\t\t\t\tnode = tree.getChild1(node)\n\t\t\t\t\tprint \"equal\", \"goint to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\tleafClass = tree.getMajorityClassification(node)\n\t\tprint \"leaf classification: \", leafClass\n\t\tleafAttribute = tree.getSplitAtribute(node)\n\t\tprint \"leaf attribute: \", leafAttribute\n\t\t\n\t\t# Need to fill the last column (which is the same column as leafAttribute) with the \n\t\t# value of the leaf (i.e. classify as winner or not)\n\t\tdataPoint.retrieve(leafAttribute).addValue(leafClass)\n\t\tprint \"prediction is: \", dataPoint.retrieve(leafAttribute).getValue()\n\n\tcreateFileCSV(dataSet)\n\treturn dataSet", "def predict(self, X):\n if isinstance(self.model, ClassifierMixin):\n scores = self._decision_function(X)\n if len(scores.shape) == 1:\n indices = (scores > 0).astype(np.int)\n else:\n indices = scores.argmax(axis=1)\n return self.classes_[indices]\n else:\n return self._decision_function(X)", "def predict(self, example):\n return self.decisionTree.traverse_tree(example)" ]
[ "0.6648654", "0.66332334", "0.65360904", "0.6085024", "0.60096526", "0.588489", "0.58713436", "0.58331215", "0.58079845", "0.57918006", "0.5757257", "0.5716266", "0.5665098", "0.5609045", "0.5589136", "0.5566739", "0.55376923", "0.55265206", "0.55241597", "0.552298", "0.5519425", "0.5516817", "0.5509821", "0.54992706", "0.5470824", "0.5465278", "0.54190326", "0.54105955", "0.54104125", "0.538174" ]
0.6998496
0
Returns the decision tree for the documents given the vocabulary; it is built recursively by passing the updated vocabulary at each call. Checks the base cases first and returns a leaf node if they succeed; otherwise, it finds the most informative word, splits the documents and recurses for each child.
def get_tree(self, documents, vocabulary): if self.contain_one_class(documents): return ClassTreeNode(self.contain_one_class(documents)) elif len(vocabulary) == 0: return ClassTreeNode(self.get_majority_class(documents)) else: most_informative_word = self.get_most_informative_word(documents, vocabulary) tree_node = WordTreeNode(most_informative_word) with_word, without_word = self.get_split_data(most_informative_word, documents) vocabulary.remove(most_informative_word) for subset in [with_word, without_word]: if len(subset) == 0: tree_node.children.append(ClassTreeNode(self.get_majority_class(subset))) else: tree_node.children.append(self.get_tree(subset, vocabulary)) vocabulary.add(most_informative_word) return tree_node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_binary_tree(self):\n heap = filter(lambda x: not self.index2word[x.index][0].isupper(), itervalues(self.vocab))\n treelen = len(heap)\n logger.info(\"constructing a huffman tree from %i words\" % treelen)\n\n # build the huffman tree\n \n heapq.heapify(heap)\n for i in xrange(treelen - 1):\n min1, min2 = heapq.heappop(heap), heapq.heappop(heap)\n heapq.heappush(heap, Vocab(count=min1.count + min2.count, index=i + len(self.vocab), left=min1, right=min2))\n\n # recurse over the tree, assigning a binary code to each vocabulary word\n if heap:\n max_depth, stack = 0, [(heap[0], [], [])]\n while stack:\n node, codes, points = stack.pop()\n if node.index < len(self.vocab):\n # leaf node => store its path from the root\n node.code, node.point = codes, points\n max_depth = max(len(codes), max_depth)\n else:\n # inner node => continue recursion\n points = array(list(points) + [node.index - len(self.vocab)], dtype=uint32)\n stack.append((node.left, array(list(codes) + [0], dtype=uint8), points))\n stack.append((node.right, array(list(codes) + [1], dtype=uint8), points))\n \n logger.info(\"built huffman tree with maximum node depth %i\" % max_depth)", "def classify(self, document, tree):\n if type(tree) is ClassTreeNode:\n return tree.c\n else:\n if tree.word in document.bag_of_words:\n return self.classify(document, tree.children[0])\n else:\n return self.classify(document, tree.children[1])", "def build(\n word_dict: AsrDictionary,\n subword_dict: AsrDictionary,\n subword_tokenizer: Callable[[str], List[str]] = None,\n ):\n\n root = lexical_prefix_tree(\n word_dict=word_dict,\n subword_dict=subword_dict,\n subword_tokenizer=subword_tokenizer,\n ) # build traditional tree data structure by reusing existing routines\n\n # Performs pre-order traversal of this tree to assign an index for each node\n max_num_children = 0\n nodes = [None] # nodes[0] is a dummy node for OOV\n node_to_id_dict = {}\n stack = [root]\n\n while len(stack) > 0:\n curr = stack.pop()\n node_id = len(nodes)\n nodes.append(curr)\n node_to_id_dict[curr] = node_id\n if len(curr.children) > max_num_children:\n max_num_children = len(curr.children)\n\n # Guarantee that the children are traversed ascendingly according to the subword index\n for _, next_node in sorted(\n curr.children.items(), key=lambda t: t[0], reverse=True\n ):\n stack.append(next_node)\n\n # Construct the tree\n num_nodes = len(nodes)\n children = np.full([num_nodes, max_num_children], 0, dtype=np.int64)\n prev_subword_idx = np.full([num_nodes], subword_dict.pad(), dtype=np.int64)\n word_idx = np.full([num_nodes], -1, dtype=np.int64)\n word_set_idx = np.full([num_nodes, 2], word_dict.pad(), dtype=np.int64)\n\n for node_id in range(1, len(nodes)): # skip 0, which is `None`\n node = nodes[node_id]\n # Guarantee that the children are traversed ascendingly according to the subword index\n for i, (subword_id, child) in enumerate(\n sorted(node.children.items(), key=lambda t: t[0])\n ):\n child_node_id = node_to_id_dict[child]\n children[node_id, i] = child_node_id\n prev_subword_idx[child_node_id] = subword_id\n\n word_idx[node_id] = node.word_idx\n if node.word_set is not None:\n word_set_idx[node_id] = node.word_set\n else:\n word_set_idx[node_id] = [0, len(word_dict) - 1]\n\n return TensorizedPrefixTree(\n children=torch.from_numpy(children),\n prev_subword_idx=torch.from_numpy(prev_subword_idx),\n word_idx=torch.from_numpy(word_idx),\n word_set_idx=torch.from_numpy(word_set_idx),\n )", "def decision_tree(original_training_data,call_depth):\n\n ''' 
Checking the stopping criterion. If yes then it returns the majority class (Muffin or CupCake) '''\n if check_if_stopping_criterion_is_met(original_training_data.values) or call_depth > 10:\n majority = classification(original_training_data)\n return majority\n\n else:\n ''' Each time we split the data and go deeper, we increment the depth of the tree '''\n call_depth += 1\n\n ''' Finding the best attribute, best threshold to split data, best minimum entropy '''\n best_split_index, best_attribute, best_threshold, best_minimum_entropy = find_best_attribute_threshold_entropy(original_training_data)\n original_training_data_values = original_training_data.values\n\n best_split_values = original_training_data_values[:,best_split_index]\n\n less_than_threshold = original_training_data[best_split_values <= best_threshold]\n more_than_threshold = original_training_data[best_split_values > best_threshold]\n\n ''' Initializing a variable called as condition which stores the format of the key for the resulting decision tree dictionary '''\n condition = original_training_data.columns[best_split_index] + \" <= \" + str(best_threshold)\n\n ''' Initializing a dictionary where key is condition and value is a list. This is the basic data structure in which the\n resulting decision tree is stored '''\n sub_tree = {condition: []}\n\n ''' Calling the decision tree recursively '''\n left_tree = decision_tree(less_than_threshold, call_depth)\n right_tree = decision_tree(more_than_threshold, call_depth)\n\n ''' For removing edge cases where on either split, the resulting decision tree gives the same result '''\n if left_tree == right_tree:\n sub_tree = left_tree\n else:\n ''' Appending the smaller trees in the final decision tree '''\n sub_tree[condition].append(left_tree)\n sub_tree[condition].append(right_tree)\n\n return sub_tree", "def create_vocabulary(directory, cutoff):\n\n top_level = os.listdir(directory)\n a = cutoff\n vocab = {}\n for d in top_level:\n subdir = d if d[-1] == '/' else d+'/'\n files = os.listdir(directory+subdir)\n for f in files:\n with open(directory+subdir+f,'r', encoding=\"utf-8\") as doc:\n for word in doc:\n word = word.strip()\n if not word in vocab and len(word) > 0:\n vocab[word] = 1\n elif len(word) > 0:\n vocab[word] += 1\n return sorted([word for word in vocab if vocab[word] >= cutoff])", "def get_most_informative_word(self, documents, vocabulary):\n most_informative_word = None\n most_informative_word_gain = 0\n for word in vocabulary:\n gain = self.get_information_gain(word, documents)\n if most_informative_word == None or gain >= most_informative_word_gain:\n most_informative_word = word\n most_informative_word_gain = gain\n return most_informative_word", "def decision_tree(data_frame, filename=0):\n\tprint \"Building decision tree...\"\n\tr = robjects.r\n\trpart = importr(\"rpart\")\n\tfit = rpart.rpart(\"category~bpm+speechiness+time_sig+key+duration+loudness+\\\n\t\t\tend_of_fade_in+start_of_fade_out+bpm_range+\\\n\t\t\tmax_bpm_spike+num_keys\", data=data_frame, method=\"class\", \n\t\t\tna_action='na.rpart', control='rpart.control(cp = .0001)')\n\trpart.printcp(fit)\n\tr.plot(fit, uniform=True, main=\"Classification Tree for Genre\")\n\tr.text(fit, use_n=True, all=True, cex=.8)\n\tif filename != 0:\n\t\trpart.post(fit, file=filename, title=\"Classification Tree for Genre\")\n\traw_input(\"> Press enter to continue.\")\n\treturn fit", "def get_trees(self, word): # -> list:\r\n raise NotImplementedError", "def get_root(word):\n try:\n query = {'word': word}\n cursor = 
database['Words'].find(query)\n \n if cursor is None:\n return None\n for document in cursor:\n if len(document['roots']) > 0:\n return document['roots']\n except Exception as e:\n print(e)\n return None\n\n '''\n nlp = stanfordnlp.Pipeline(lang='hi')\n doc = nlp(word)\n for sentence in doc.sentences:\n for word in sentence.words:\n return word.lemma\n '''", "def _get_bag_of_pos_with_dependency(words, index):\n pos_list = []\n\n def _get_governor(_index, name):\n governor_list = []\n if int(words[_index].governor) == 0:\n # case _index word has no governer\n return -1, governor_list\n governor_index = _index + (int(words[_index].governor) - int(words[_index].index))\n if governor_index < len(words):\n governor = words[governor_index]\n governor_list.append(_get_word_feature(governor) + '_' + name)\n else:\n governor_list.append(NONE_DEPENDENCY + '_' + name)\n return governor_index, governor_list\n\n def _get_children(_index, name):\n children = []\n child_list = []\n roots = [(i, w) for i, w in enumerate(words) if int(w.index) == 1]\n start_index = 0\n end_index = len(words) - 1\n for i, w in roots:\n if i <= _index:\n start_index = i\n else:\n end_index = i - 1\n break\n for i, w in enumerate(words[start_index:end_index + 1]):\n if int(w.governor) == int(words[_index].index):\n children.append(start_index + i)\n child_list.append(_get_word_feature(w) + '_' + name)\n return children, child_list\n\n # add governor\n governor_index, governor_list = _get_governor(index, 'governor')\n if 0 <= governor_index < len(words):\n # case index word has a governer\n pos_list.extend(governor_list)\n if int(words[governor_index].governor) != 0:\n # case _index word has a governer\n # add ancestor\n _, ancestor_list = _get_governor(governor_index, 'ancestor')\n pos_list.extend(ancestor_list)\n\n # add sibling\n siblings, sibling_list = _get_children(governor_index, 'sibling')\n i_index = siblings.index(index)\n del sibling_list[i_index]\n del siblings[i_index]\n pos_list.extend(sibling_list)\n\n # add sibling list\n for i in siblings:\n sibling_children, sibling_child_list = _get_children(i, 'sibling_child')\n pos_list.extend(sibling_child_list)\n\n # add child\n children, child_list = _get_children(index, 'child')\n pos_list.extend(child_list)\n for i in children:\n grandchildren, grandchild_list = _get_children(i, 'grandchild')\n pos_list.extend(grandchild_list)\n return pos_list", "def search(self, word):\n def _subSearch(node, word):\n if not word:\n return node.isWord\n\n contains = False\n if word[0] == '.':\n for c in node.children:\n contains |= _subSearch(node.children[c], word[1:])\n if contains:\n return True\n elif word[0] in node.children:\n contains |= _subSearch(node.children[word[0]], word[1:])\n\n return contains\n\n return _subSearch(self.root, word)\n\n\n # cur = self.root\n # nodes = []\n # nodes.append(cur)\n\n # for c in word:\n # # new_nodes = []\n # # for node in nodes\n # # if c == '.':\n # if c not in cur.children:\n # return False\n \n # cur = cur.children[c]", "def get_vocab(self):\n\n\t\tself.parse_transcript() \n\t\tself.purge_words()\n\t\tself.analyze_words()\n\t\tself.sort_word_analysis()", "def create_new_doc(self, doc: Doc, min_prob: float = 0.25) -> Doc:\n\n # print(\"running on\", doc[:10])\n\n if not self.form_frequencies:\n raise RuntimeError(\n \"Cannot truecase without a dictionary of form frequencies\")\n\n tokens = []\n spaces = []\n doctext = doc.text\n for tok in doc:\n toktext = tok.text\n\n # We only change casing for words in Title or UPPER\n if tok.is_alpha 
and toktext[0].isupper():\n cond1 = tok.is_upper and len(toktext) > 2 # word in uppercase\n cond2 = toktext[0].isupper(\n ) and not tok.is_sent_start # titled word\n if cond1 or cond2:\n token_lc = toktext.lower()\n if token_lc in self.form_frequencies:\n frequencies = self.form_frequencies[token_lc]\n if frequencies.get(toktext, 0) < min_prob:\n alternative = sorted(\n frequencies.keys(), key=lambda x: frequencies[x])[-1]\n\n # We do not change from Title to to UPPER\n if not tok.is_title or not alternative.isupper():\n toktext = alternative\n\n tokens.append(toktext)\n\n # Spacy needs to know whether the token is followed by a space\n if tok.i < len(doc)-1:\n spaces.append(doctext[tok.idx+len(tok)].isspace())\n else:\n spaces.append(False)\n\n # Creates a new document with the tokenised words and space information\n doc2 = Doc(self.model.vocab, words=tokens, spaces=spaces) #type: ignore\n # print(\"finished with doc\", doc2[:10])\n return doc2", "def vocabulary(corpus_tokenized):\n vocab = list()\n for element in corpus_tokenized:\n document = element['document']\n for word in document:\n if word not in vocab:\n vocab.append(word)\n return vocab", "def main(self, words_docs, cleaned_sentences, lang, model_dir, number_of_clusters, embedding_model, model_id):\n\t\ttry:\n\t\t\tif embedding_model == \"tfidf\": text_vector = self.create_tfidf_vectors(cleaned_sentences)\n\t\t\telif embedding_model == \"word2vec\": text_vector = self.create_w2v_vectors(words_docs)\n\t\t\tmodel, pred_dict = self.train_model(cleaned_sentences, text_vector, number_of_clusters, lang, model_id, model_dir)\n\t\t\tdf_dominant_topic = self.evaulate_clusters(pred_dict, model_dir)\n\n\t\texcept Exception as e:\n\t\t\tprint(\"\\n Error in main : \",e)\n\t\t\tprint(\"\\n Error details : \", traceback.format_exc())\n\n\t\treturn df_dominant_topic", "def build_vocab(self):\n if self.test_file is None:\n print('test_file is None')\n file_list = [self.train_file, self.dev_file]\n else:\n file_list = [self.train_file, self.dev_file, self.test_file]\n\n examples = []\n for file_name in file_list:\n examples += ParseExample.load_data(file_name)\n\n sents = []\n for example in examples:\n warrant0, warrant1, reason, claim, debate_meta_data, negclaim = example.get_six(type=WORD_TYPE)\n sents.append(warrant0)\n sents.append(warrant1)\n sents.append(reason)\n sents.append(claim)\n sents.append(debate_meta_data)\n\n vocab = data_utils.build_word_vocab(sents)\n\n return vocab", "def read_wiki_corpus(corpus_dir: str, corpus_split: str, max_seq_len: Optional[int] = 50, vocab: Optional[dict] = None,\n stop_after: Optional[int] = None) -> Corpus:\n def _read_vocabulary(vocab_path: str) -> W2I:\n with open(vocab_path, \"r\") as vocab_file:\n idx, words = zip(*enumerate(line.strip() for line in vocab_file.readlines()))\n w2i = dict(zip(words, idx))\n w2i[\"<pad>\"] = len(w2i)\n w2i = W2I(w2i) # Return <unk> index if word is not in vocab\n\n return w2i\n\n assert corpus_split in (\"train\", \"valid\", \"test\"), \"Invalid split selected!\"\n\n if vocab is None:\n print(f\"Reading vocabulary under {corpus_dir}/vocab.txt...\")\n if os.path.exists(f\"{corpus_dir}/vocab.txt\"):\n vocab = _read_vocabulary(f\"{corpus_dir}/vocab.txt\")\n else:\n print(\"No vocabulary file found, building vocabulary from scratch...\")\n vocab = defaultdict(lambda: len(vocab))\n\n # Read in corpus\n print(f\"Reading corpus under {corpus_dir}/{corpus_split}.txt...\")\n indexed_sentences = []\n\n with open(f\"{corpus_dir}/{corpus_split}.txt\", \"r\") as corpus_file:\n 
for i, line in enumerate(corpus_file.readlines()):\n line = line.strip()\n\n # Skip empty lines\n if line in (\"\", \"<eos>\"):\n continue\n\n tokens = line.split()\n\n if tokens[-1] != \"<eos>\":\n tokens.append(\"<eos>\")\n\n indexed_sentence = torch.LongTensor(list(map(vocab.__getitem__, tokens))) # Index lookup\n indexed_sentences.append(indexed_sentence)\n\n if stop_after is not None:\n if i > stop_after:\n break\n\n # If vocab was build from scratch, convert\n if not isinstance(vocab, W2I):\n vocab = W2I(vocab)\n\n corpus = Corpus(indexed_sentences, vocab, max_seq_len)\n\n return corpus", "def __search_tree(word, index=0, node=None):\n if index + 1 > len(word):\n return node\n\n current_key = word[index]\n\n child_node = _Node.__find_key_in_level(node, current_key)\n\n if not child_node:\n return False\n\n return _Node.__search_tree(word, index + 1, child_node)", "def get_all_words(self):\n words = []\n \n ______________________________________________\n \n words.append(self.root)\n \n for branch in self.branches.values():\n \n __________________________________________\n \n return _______________________________________", "def get_vocab(train_data, valid_data, test_data):\n \n print(\"-----------------------------------------------\")\n print(\"Constructing Vocabulary of Words and Characters\")\n print(\"-----------------------------------------------\")\n\n with open(train_data,'r') as f:\n train_corpus = f.readlines()\n f.close()\n\n with open(valid_data,'r') as f:\n valid_corpus = f.readlines()\n f.close()\n\n with open(test_data,'r') as f:\n test_corpus = f.readlines()\n f.close()\n\n word_vocab = {}\n char_vocab = {}\n max_len = 0\n\n word_vocab, char_vocab, max_len = make_vocab(train_corpus, word_vocab, char_vocab, max_len)\n word_vocab, char_vocab, max_len = make_vocab(valid_corpus, word_vocab, char_vocab, max_len)\n word_vocab, char_vocab, max_len = make_vocab(test_corpus, word_vocab, char_vocab, max_len)\n\n char_vocab['<SOT>'] = len(char_vocab)+1 \n char_vocab['<EOT>'] = len(char_vocab)+1\n\n print(\"Word Vocabulary Size : %d\"%len(word_vocab))\n print(\"Character Vocabulary Size : %d\"%len(char_vocab))\n print(\"Max Length of Word - 2 : %d\"%max_len)\n\n return word_vocab, char_vocab, max_len", "def build_decision_tree():\n\n decision_tree_root = None\n decision_tree_root = DecisionNode(None,None,lambda feature:feature[0]==1)\n decision_tree_root.left = DecisionNode(None,None,None,1)\n decision_tree_root.right = DecisionNode(None,None,lambda feature:feature[3]==1)\n decision_tree_root.right.left = DecisionNode(None,None,lambda feature:feature[1]==0)\n decision_tree_root.right.right = DecisionNode(None,None,lambda feature:feature[2]==1)\n decision_tree_root.right.left.left = DecisionNode(None,None,None,1)\n decision_tree_root.right.left.right = DecisionNode(None,None,None,0)\n decision_tree_root.right.right.left = DecisionNode(None,None,None,0)\n decision_tree_root.right.right.right = DecisionNode(None,None,None,1)\n return decision_tree_root", "def build_vocabulary(self):\n \n for iCount in range(0,len(self.documents)):\n for jCount in range(iCount,len(self.documents[iCount])):\n self.vocabulary.append(self.documents[iCount][jCount])\n\n self.vocabulary = set(self.vocabulary)\n\t\t\n self.vocabulary = sorted(self.vocabulary)\n\t\t#print(\"Value of the vocabulary\")\n self.vocabulary_size = len(self.vocabulary)", "def tree_iterator(tree, rare_words):\n if isinstance(tree, basestring): return\n\n if len(tree) == 3:\n # It is a binary rule.\n tree_iterator(tree[1], 
rare_words)\n tree_iterator(tree[2], rare_words)\n elif len(tree) == 2:\n # It is a unary rule.\n if tree[1] in rare_words: # Replace the rare words with _RARE_\n tree[1] = RARE\n\n return tree", "def main():\n\n ''' Reading the training data file '''\n original_training_data = pd.read_csv(\"DT_Data_CakeVsMuffin_v012_TRAIN.csv\")\n\n ''' Storing the final decision tree '''\n final_tree = decision_tree(original_training_data,0)\n\n ''' Printing the final decision tree '''\n print(\"This is the resulting decision tree: \\n\")\n print(final_tree)\n\n ''' Iterating through the dictionary by using the key values '''\n for key in final_tree.keys():\n ''' Parent = Flour <= 5.1636'''\n parent = key\n ''' left_child = [{'Oils <= 3.1265': [{'Flour <= 2.7291': [{'Proteins <= 2.6527': ['Muffin', 'CupCake']}, 'Muffin']}, 'CupCake']}'''\n left_child = final_tree[parent][0]\n ''' right_child = {'Oils <= 7.7793': ['Muffin', {'Flour <= 8.2225': ['CupCake', 'Muffin']}]}]'''\n right_child = final_tree[parent][1]\n\n ''' Writing a file which generates code for classification '''\n file = open('HW06_Parchand_Nihal_Classifier.py','w+')\n file.write(\"'''Importing libraries''' \"\n \"\\n\\nimport pandas as pd \\n\\ndef main():\"\n \"\\n\\tdata_df = pd.read_csv('DT_Data_CakeVsMuffin_v012_TEST.csv')\"\n \"\\n\\tresult = []\"\n \"\\n\\tfor row in range(0,len(data_df)):\"\n \"\\n\\t\\tFlour = data_df.loc[row][0]\"\n \"\\n\\t\\tSugar = data_df.loc[row][1]\"\n \"\\n\\t\\tOils = data_df.loc[row][2]\"\n \"\\n\\t\\tProteins = data_df.loc[row][3]\"\n \"\\n\\t\\tif {}:\\n\".format(parent))\n\n ''' Iterating through the left_tree '''\n for key in left_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n\n ''' Iterating through the inner left_tree '''\n for inner_key in left_child[key][0].keys():\n file.write(\"\\t\\t\\t\\tif {}:\\n\".format(inner_key))\n\n for inner_inner_key in ((left_child[key][0])[inner_key])[0]:\n file.write(\"\\t\\t\\t\\t\\tif {}:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\t\\t\\telse:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(1)\\n\")\n\n file.write(\"\\t\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\telse:\\n\")\n\n ''' Iterating through the right_tree '''\n for key in right_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\")\n for inner_key in right_child[key][1].keys():\n file.write(\"\\t\\t\\telif {}:\\n\".format(inner_key))\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\\n\")\n\n ''' Writing the results of classifier to a csv file '''\n file.write(\n \"\\twith open('HW06_Parchand_Nihal_MyClassifications.csv', 'w+') as file2:\\n\"\n \"\\t\\tfor value in result:\\n\"\n \"\\t\\t\\tfile2.write(str(value))\\n\"\n \"\\t\\t\\tfile2.write('\\\\n')\\n\\n\"\n \"main()\")", "def _make_suggestions(self):\n\n #build concordance based on current approved\n concordance = dict()\n for term in self.tree.get_children('approved'):\n words = [word.strip(',.:;*').lower() \\\n for word in str(self.tree.item(term)['values'][0]).split(' ')]\n for word in words:\n# if word == 'ad':\n# messagebox.showwarning(\"word == 'ad'\",\"concordance={}\".format(concordance))\n# pass\n if word not in ['and', 'the', 'a', 'to', 'of'] \\\n and not 
word.isdigit():\n if word not in concordance:\n concordance[word] = set([term, ])\n else:\n concordance[word].add(term)\n# if word == 'ad':\n# messagebox.showwarning(\"word 'ad' added?\",\"concordance={}\".format(concordance))\n# pass\n \n \n #so concordance now holds a list of words in approved terms along with\\\n #list of index of terms() they occur in\n \n for term in self.tree.get_children('suggestions'):\n self._look_in_concordance(term, concordance)\n\n for term in self.tree.get_children('unknown'):\n self._look_in_concordance(term, concordance)\n\n self._collapse_all()", "def build_tokenized_files(filenames, input_root, output_root, tokenizer, splitter, override=True) -> Set[str]:\r\n voc = set()\r\n for filename in filenames:\r\n out_file = normalize_wiki_filename(filename[:filename.rfind(\".\")]) + \".txt\"\r\n out_file = join(output_root, out_file)\r\n if not override and exists(out_file):\r\n continue\r\n with open(join(input_root, filename), \"r\") as in_file:\r\n text = in_file.read().strip()\r\n paras = [x for x in text.split(\"\\n\") if len(x) > 0]\r\n paragraphs = [tokenizer.tokenize(x) for x in paras]\r\n merged_paragraphs = splitter.merge(paragraphs)\r\n\r\n for para in merged_paragraphs:\r\n for i, word in enumerate(para):\r\n voc.update(word)\r\n\r\n with open(out_file, \"w\") as in_file:\r\n in_file.write(\"\\n\\n\".join(\" \".join(para) for para in merged_paragraphs))\r\n return voc", "def index(self):\n print(\"Indexing...\")\n # ------------------------------------------------------------------\n # TODO: Create an inverted, positional index.\n # Granted this may not be a linked list as in a proper\n # implementation.\n # This index should allow easy access to both \n # 1) the documents in which a particular word is contained, and \n # 2) for every document, the positions of that word in the document \n # Some helpful instance variables:\n # * self.docs = List of documents\n # * self.titles = List of titles\n inv_index = defaultdict(set)\n self.tf = defaultdict(Counter)\n \n for word in self.vocab:\n inv_index[word] = {} # create dictionary with words in V\n\n # Generate inverted index here\n for doc in range(len(self.docs)):\n for word in self.docs[doc]:\n self.tf[doc][word] += 1 # represents how many times word 'word' is mentioned in document 'i'\n \n for doc, title in zip(self.docs, self.titles):\n for word in self.vocab:\n inv_index[word][title] = [] # list for each word in vocabulary for all titles\n for pos, word in enumerate(doc):\n inv_index[word][title].append(pos)\n\n self.inv_index = inv_index\n # ------------------------------------------------------------------\n\n # turn self.docs into a map from ID to bag of words\n id_to_bag_of_words = {}\n for d, doc in enumerate(self.docs):\n bag_of_words = set(doc)\n id_to_bag_of_words[d] = bag_of_words\n self.docs = id_to_bag_of_words", "def main():\n\n # Remove the files if existing before creating new ones\n OUT_VOCAB = \"results/vocabs.txt\"\n OUT_SYN = \"results/syn.txt\"\n OUT_EXCP = \"results/exception.txt\"\n for f in [OUT_SYN, OUT_VOCAB, OUT_EXCP]:\n if os.path.isfile(f):\n os.remove(f)\n\n \"\"\"\n Now we do the recursion:\n Step 1: Find the children link;\n Step 2: Add to the url_list;\n Step: Move to next entry of the url_list and Go to Step 1;\n \"\"\"\n with open(DATA, \"r\") as fin:\n url_list = fin.readlines()\n\n # Write down the urls as tuples, 0 as the first level\n url_list = [((PREFIX_URL + subURL).strip(), 0) for subURL in url_list]\n\n with open(OUT_VOCAB, \"w\") as vocab_file, \\\n 
open(OUT_SYN, \"w\") as syn_file, \\\n open(OUT_EXCP, \"w\") as exp_file:\n cur_index = 0\n while cur_index < len(url_list):\n # Current url of the term, current level number (hierarchy) of the term\n cur_url, cur_hrc = url_list[cur_index]\n try:\n # Get the current sub-url\n time.sleep(0.1)\n r = requests.get(cur_url + \"?\" + API_KEY)\n source_dict = json.loads(r.text)\n\n # Getting the vocabulary and the synonym from this page.\n vocab, syn = parse_words(source_dict=source_dict)\n if cur_hrc + 1 < int(level):\n # Fetch all the children's name and url\n children_urls = fetch_children_url(source_dict=source_dict, hrc=cur_hrc + 1)\n # print children_urls\n url_list = url_list + children_urls # Concatenate the lists\n\n if vocab is not None:\n syn_file.write(json.dumps({vocab: syn}) + \"\\n\")\n vocab_file.write(vocab + \"\\n\")\n\n if cur_index % 100 == 0:\n print \"\\tNow at index # {}, {} in total.\".format(str(cur_index), str(len(url_list)))\n except UnicodeEncodeError as unicode_err:\n exp_file.write(str(unicode_err))\n except KeyError as key_err:\n exp_file.write(str(key_err))\n except:\n pass\n cur_index += 1", "def readTree(text, ind, verbose=False):\n if verbose:\n print(\"Reading new subtree\", text[ind:][:10])\n\n # consume any spaces before the tree\n while text[ind].isspace():\n ind += 1\n\n if text[ind] == \"(\":\n if verbose:\n print(\"Found open paren\")\n tree = []\n ind += 1\n\n # record the label after the paren\n label = \"\"\n while not text[ind].isspace() and text != \"(\":\n label += text[ind]\n ind += 1\n\n tree.append(label)\n if verbose:\n print(\"Read in label:\", label)\n\n # read in all subtrees until right paren\n subtree = True\n while subtree:\n # if this call finds only the right paren it'll return False\n subtree, ind = readTree(text, ind, verbose=verbose)\n if subtree:\n tree.append(subtree)\n\n # consume the right paren itself\n ind += 1\n assert(text[ind] == \")\")\n ind += 1\n\n if verbose:\n print(\"End of tree\", tree)\n\n return tree, ind\n\n elif text[ind] == \")\":\n # there is no subtree here; this is the end paren of the parent tree\n # which we should not consume\n ind -= 1\n return False, ind\n\n else:\n # the subtree is just a terminal (a word)\n word = \"\"\n while not text[ind].isspace() and text[ind] != \")\":\n word += text[ind]\n ind += 1\n\n if verbose:\n print(\"Read in word:\", word)\n\n return word, ind", "def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,\n tokenizer=None, normalize_digits=True):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"r\") as f:\n counter = 0\n for line in f:\n counter += 1\n line = line.strip().split('\\t')[0]\n if counter % 100000 == 0:\n print(\" processing line %d\" % counter)\n tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)\n for w in tokens:\n word = re.sub(_DIGIT_RE, \"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n sorted_vocab = sorted(vocab, key=vocab.get, reverse=True)\n vocab_list = _START_VOCAB + sorted_vocab\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n print(\"Corpus %s has %d tokens, %d uniq words, %d vocab at cutoff %d.\" % (\n data_path, sum(vocab.values()), len(vocab), max_vocabulary_size, vocab[sorted_vocab[max_vocabulary_size - len(_START_VOCAB)]] ) )\n else:\n print(\"Corpus %s has %d tokens, %d uniq words, %d vocab at cutoff 
%d.\" % (\n data_path, sum(vocab.values()), len(vocab), len(vocab), 0))\n\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + \"\\n\")" ]
[ "0.5787245", "0.56144047", "0.561384", "0.5368511", "0.53523356", "0.53232056", "0.51237017", "0.5088962", "0.5074895", "0.50748545", "0.50600576", "0.50482816", "0.50336653", "0.50259966", "0.5007876", "0.500019", "0.4998888", "0.4984954", "0.49779788", "0.49631277", "0.4940304", "0.4901353", "0.4896384", "0.48919964", "0.48733285", "0.4872166", "0.48502535", "0.48447356", "0.4841832", "0.48336142" ]
0.7309619
0
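The row above describes recursive tree induction over bag-of-words documents: two base cases (a pure split, an exhausted vocabulary) and a split on the most informative word. Below is a self-contained sketch of that recursion with the word-selection heuristic left as a parameter. `Doc`, `Leaf`, `Node`, and `pick_word` are assumed names, `vocabulary` is assumed to be a set, and one detail differs from the row's code: on an empty split this sketch falls back to the parent's majority class.

    from collections import Counter, namedtuple

    Doc = namedtuple("Doc", ["bag_of_words", "c"])                    # assumed stand-in for the row's Document
    Leaf = namedtuple("Leaf", ["c"])                                  # assumed analogue of ClassTreeNode
    Node = namedtuple("Node", ["word", "with_word", "without_word"])  # assumed analogue of WordTreeNode

    def majority(docs):
        return Counter(d.c for d in docs).most_common(1)[0][0]

    def build_tree(docs, vocabulary, pick_word):
        labels = {d.c for d in docs}
        if len(labels) == 1:                              # base case 1: only one class remains
            return Leaf(labels.pop())
        if not vocabulary:                                # base case 2: no words left to split on
            return Leaf(majority(docs))
        word = pick_word(docs, vocabulary)                # e.g. the highest-information-gain word
        with_w = [d for d in docs if word in d.bag_of_words]
        without_w = [d for d in docs if word not in d.bag_of_words]
        rest = vocabulary - {word}                        # the word is spent on this branch only
        children = [Leaf(majority(docs)) if not subset    # empty split: fall back to the parent majority
                    else build_tree(subset, rest, pick_word)
                    for subset in (with_w, without_w)]
        return Node(word, children[0], children[1])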
Returns None if the documents contain more than one class; returns the class otherwise.
def contain_one_class(self, documents): classes = [] for document in documents: if document.c not in classes: if len(classes) == 0: classes.append(document.c) else: return None if len(classes) == 1: return classes[0] else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def classes(self):\n if \"classes\" in self._prop_dict:\n return ClassesCollectionPage(self._prop_dict[\"classes\"])\n else:\n return None", "def get_majority_class(self, documents):\n counts = {}\n for document in documents:\n if document.c not in counts:\n counts[document.c] = 0\n counts[document.c] += 1\n majority_class = None\n majority_class_count = -1\n for c in counts:\n if counts[c] > majority_class_count:\n majority_class = c\n majority_class_count = counts[c]\n return majority_class", "def get_class(self, class_name, output_type=\"PythonClass\"):\n uris = self.cls_converter.get_uri(class_name)\n if type(uris) == list:\n warnings.warn(\"Found more than 1 classes defined within schema using label {}\".format(class_name))\n return [SchemaClass(_item, self, output_type) for _item in uris]\n else:\n return SchemaClass(class_name, self, output_type)", "def get_class(infobox_page):\n pattern = re.compile('OntologyClass:[-\\w: ]+')\n wiki_class = pattern.findall(infobox_page)\n\n if len(wiki_class) == 0:\n return None\n else:\n return wiki_class[0].replace('OntologyClass:', '')", "def _class(self, class_):\r\n\r\n if class_:\r\n if hasattr(class_, '__mro__'):\r\n #this is a class\r\n return class_\r\n else:\r\n #this is an instance\r\n return type(class_)", "def _class(self, *args):\r\n\r\n if hasattr(args[0], '__mro__'):\r\n #this is a class\r\n return args[0]\r\n else:\r\n #this is an instance\r\n return type(args[0])", "def classify(self, document, tree):\n if type(tree) is ClassTreeNode:\n return tree.c\n else:\n if tree.word in document.bag_of_words:\n return self.classify(document, tree.children[0])\n else:\n return self.classify(document, tree.children[1])", "def find_general_class(self, class_id):\n for class_ in my_classes:\n if class_.class_id == class_id:\n return class_\n\n return None", "def _find_class(self, class_name: str) -> Type:\n return self.class_resolver.find_class(class_name)", "def get_class_for(self, elem):\r\n\t\treturn self.__tag_to_cls.get(elem.tag, self.__default_cls)", "def _parse_class(self):\n first_pos = self.start_pos\n token_type, cname = self.next()\n if token_type != tokenize.NAME:\n debug.warning(\"class: syntax err, token is not a name@%s (%s: %s)\"\n % (self.start_pos[0], tokenize.tok_name[token_type], cname))\n return None\n\n cname = pr.Name(self.module, [(cname, self.start_pos)], self.start_pos,\n self.end_pos)\n\n super = []\n token_type, _next = self.next()\n if _next == '(':\n super = self._parse_parentheses()\n token_type, _next = self.next()\n\n if _next != ':':\n debug.warning(\"class syntax: %s@%s\" % (cname, self.start_pos[0]))\n return None\n\n # because of 2 line class initializations\n scope = pr.Class(self.module, cname, super, first_pos)\n if self.user_scope and scope != self.user_scope \\\n and self.user_position > first_pos:\n self.user_scope = scope\n return scope", "def get_class(self,name,object = None,already_done = []):\n if not object : \n object = self.metamodel\n if object in already_done : \n return ClassNotFound\n for classe in self.metamodel.__dict__.items():\n if name == \"%s\" % classe[0]:\n #print \"trouvé %s en tant que %s\" % (classe[0],classe[1]) \n return classe[1]\n #else: \n # print \"pas bon : %s\" % classe[0]\n already_done.append(object)\n #not found here\n for classe in self.metamodel.__dict__.items():\n return self.get_class(name,object = classe,already_done = already_done)\n print \"class %s not found\" % name\n return ClassNotFound", "def get_skill_class(cursor, _class):\n cursor.execute('SELECT id 
FROM classes WHERE temp_id = ?', (_class,))\n data = cursor.fetchone()\n try:\n return data[0]\n except TypeError:\n l.error(\"The Class {} doesn't exists.\".format(_class))", "def find_class(self, class_name: str) -> Type:\n pass", "def predict(self, doc):\n # >>> YOUR ANSWER HERE\n # For each class c, calculate the corresponding score of the doc\n scores = [(self.score(doc, c), c) for c in self.classes]\n # after the sort by score, return the most likely class\n scores.sort(key=lambda x: x[0])\n return scores[-1][1]\n # >>> END YOUR ANSWER", "def _class_default(self):\n if self.auto_create:\n return self.instance_class\n return utils.undefined", "def find(self):\n\n response = self.client.get(Classes.PATH_CLASSES)\n return response", "def _get_class(self, item):\n\t\t# it's already a class, return it\n\t\tif type(item) == type:\n\t\t\treturn item\n\n\t\t# get the class\n\t\treturn item.__class__", "def tagClassTypeDecidingMethod(self, parentTagType):\n # DOC {{{\n # }}}\n\n # CODE {{{\n # is always class no matter what\n return PythonTag.TT_CLASS\n # }}}", "def getclass(instance_or_cls):\n return instance_or_cls if inspect.isclass(instance_or_cls) \\\n else instance_or_cls.__class__", "def _get_class(self, obj):\n\n object_type = obj.object_type\n\n 'Background class'\n object_class = 0\n\n # Don't care classes\n if object_type in ['DontCare', 'Person_sitting'] or obj.truncation > 0.75 or obj.occlusion > 1:\n object_class = 1\n\n # Vehicle classes\n elif object_type in ['Car', 'Van']:\n object_class = 2\n\n # Pedestrian class\n elif object_type in ['Pedestrian']: # TODO: Consider change this with ==\n object_class = 3\n\n # Cyclist class\n elif object_type in ['Cyclist']: # TODO: Consider change this with ==\n object_class = 4\n\n return object_class", "def document_search_cls(self):\n return self.search_by_pid_type(DOCUMENT_PID_TYPE)", "def child_class(class_types: Collection[type], base_class: type) -> type | None:\n subclasses = set()\n for class_type in class_types:\n if class_type is base_class:\n continue\n if issubclass(class_type, base_class):\n subclasses.add(class_type)\n\n if len(subclasses) == 0:\n return None\n elif len(subclasses) == 1:\n return subclasses.pop()\n else:\n # If more than one class is a subclass of `base_class`\n # It is possible that one or more classes are subclasses of another\n # class (see example above).\n # Recursively find the child-most class. 
Break ties by returning any\n # child-most class.\n for c in subclasses:\n child = child_class(subclasses, c)\n if child is not None:\n return child\n return subclasses.pop()", "def get_class(self):\n\t\treturn self.CLASS", "def guess_type(object):\n # retrieve a list of classes\n classes = (\n re.match(\"<class '(.+)'>\", str(object.__class__)).groups()[0].split(\".\")\n )\n # Return the most specific one\n return classes[-1]", "def classify(self, doc, default=None):\n probs = {}\n \n # Find the category with the highest probability\n max = Decimal(0)\n for cat in self.categories():\n probs[cat] = self.prob(doc, cat)\n if probs[cat] > max: \n max = probs[cat]\n best = cat\n\n if max == 0.0:\n return default\n \n # Make sure the probability exceeds threshold*next best\n for cat in probs:\n if cat == best:\n continue\n if probs[cat]*Decimal(str(self.get_threshold(best)))>probs[best]:\n return default\n \n print probs[best]\n return best", "def _load_classes(self):\n classdocs = self._docset.get_classes()\n for classdoc in classdocs:\n files = [self._docmap[filedoc] for filedoc in classdoc.get_files()]\n classobj = Class(classdoc, files)\n self._docmap[classdoc] = classobj\n self._classes.add(classobj)", "def recognize_class(self, a, class_keyword):\n logging.debug(\"in recognize class\")\n self.produce(KEYWORD, class_keyword)\n self.begin('class')", "def document_record_cls(self):\n return self.record_class_by_pid_type(DOCUMENT_PID_TYPE)", "def num_class(self):\r\n return self._num_class" ]
[ "0.7011527", "0.6548541", "0.6406779", "0.63441235", "0.62862873", "0.6231706", "0.6178137", "0.59199756", "0.59037054", "0.5901459", "0.5809205", "0.5763075", "0.57288665", "0.57082504", "0.5704252", "0.56695133", "0.56586176", "0.56074035", "0.55650353", "0.55374116", "0.5531193", "0.55029345", "0.55022705", "0.54965794", "0.548373", "0.548171", "0.5451031", "0.5428114", "0.5424145", "0.54050845" ]
0.8311479
0
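A compact, set-based equivalent of the single-class check in the row above; `Doc` is an assumed stand-in for the dataset's `Document` objects, which expose a class label `c`.

    from collections import namedtuple

    Doc = namedtuple("Doc", ["bag_of_words", "c"])   # assumed stand-in for the row's Document

    def single_class_or_none(documents):
        # Return the shared label when every document carries the same class, else None.
        labels = {d.c for d in documents}
        return labels.pop() if len(labels) == 1 else None

    print(single_class_or_none([Doc({"a"}, "spam"), Doc({"b"}, "spam")]))  # -> spam
    print(single_class_or_none([Doc({"a"}, "spam"), Doc({"b"}, "ham")]))   # -> None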
Returns the majority class for the documents.
def get_majority_class(self, documents): counts = {} for document in documents: if document.c not in counts: counts[document.c] = 0 counts[document.c] += 1 majority_class = None majority_class_count = -1 for c in counts: if counts[c] > majority_class_count: majority_class = c majority_class_count = counts[c] return majority_class
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def majority_cnt(class_list):\n counter = Counter(class_list)\n cls, _ = counter.most_common(1)\n\n return cls", "def majority_class(classes):\n num_pos = len(classes[np.where(classes == 1)])\n num_neg = len(classes) - num_pos\n return 1 if num_pos > num_neg else 0", "def majority_class (self, classData):\n\n\t\t###### your implementation below ######\n\t\ttempSet = {}\n\t\thighest = 0\n#\t\tmajority = \"\"\n\n\t\tfor i in range(len(classData)):\n\t\t\ttempSet[classData[i]] = (classData.count(classData[i]))\n\n\t\tfor i in tempSet:\n\t\t\tif (tempSet[i] >= highest):\n\t\t\t\thighest = tempSet[i]\n\t\t\t\t\n\t\treturn highest", "def _find_majority(values):\n counter = Counter(values)\n return counter.most_common(1)[0][0]", "def get_majority_vote(rating_scores):\n return collections.Counter(rating_scores).most_common()[0][0]", "def find_majority(dict_probs):\n # if there is no majority class, pick the first from the sorted\n max_val = max(dict_probs.values())\n max_keys = [key for key in dict_probs.keys()\n if dict_probs[key] == max_val]\n return sorted(max_keys)[0]", "def _majority(data_set):\r\n pair = _count_parties(data_set)\r\n democrats = pair[0]\r\n republicans = pair[1]\r\n if democrats > republicans: return \"D\"\r\n if democrats < republicans: return \"R\"\r\n else: return None", "def majority(x):\n c = Counter(x)\n value, _ = c.most_common()[0]\n return value", "def _get_majority_def(array: List[int]) -> Optional[int]:\n if len(array) == 0:\n return None\n counter = dict()\n for item in array:\n if item in counter:\n counter[item] += 1\n else:\n counter[item] = 1\n majority = max(counter, key=counter.get)\n if counter[majority] > len(array) // 2:\n return majority\n else:\n return None", "def class_num(self) -> int:\n return int(np.argmax(self.class_scores))", "def classify(self, doc, default=None):\n probs = {}\n \n # Find the category with the highest probability\n max = Decimal(0)\n for cat in self.categories():\n probs[cat] = self.prob(doc, cat)\n if probs[cat] > max: \n max = probs[cat]\n best = cat\n\n if max == 0.0:\n return default\n \n # Make sure the probability exceeds threshold*next best\n for cat in probs:\n if cat == best:\n continue\n if probs[cat]*Decimal(str(self.get_threshold(best)))>probs[best]:\n return default\n \n print probs[best]\n return best", "def findMajority(roundX, branch):\n majority = []\n for attributeType, value in roundX[\"attributes\"][branch[0]][\"attrTypes\"].items():\n # print(\"\\n\"+tabs+str(attributeType).upper())\n if value[\"values\"][\"purity\"] == 0 and value[\"values\"][\"numInstance\"] != 0: # we are at a termianl node, this node is pure\n # check the class values to get the class node value\n for key, val in value[\"values\"].items():\n if key != \"purity\" and key != \"numInstance\": # for class values only\n if val != 0:\n majority.append(key)\n counter = collections.Counter(majority)\n return counter.most_common()[0][0]", "def get_majority_vote_for_sequence(sequence, nb_classes):\n votes_per_class = np.zeros((nb_classes, 1))\n for i in range(len(sequence)):\n class_vote = np.argmax(sequence[i])\n votes_per_class[class_vote] += 1\n # Return random choice of the max if there's a tie.\n return np.random.choice(np.flatnonzero(votes_per_class == votes_per_class.max()))", "def majorityCount(votes):\n classCount = {}\n for vote in votes:\n if vote not in classCount.keys():\n classCount[vote] = 0\n classCount[vote] += 1\n return sorted(classCount.iteritems(),\n key=operator.itemgetter(1), reverse=True)[0][0]", "def 
majority_class(self, test_size):\n\n prediction = self.train.label.mode()[0]\n y_pred = [prediction] * test_size\n return y_pred", "def classif(self, text):\r\n content = self.prizn.tokenize(text)\r\n filec = self.vectorize_content(content)\r\n selected = {}\r\n for klas in self.prizn.klas_tridy:\r\n distance = 0.0\r\n wrdc = 0.0\r\n for wrd in filec:\r\n if wrd in self.prizn.klas_tridy[klas]:\r\n wrdc += 1.0\r\n distance += abs(float(filec[wrd]) - float(self.prizn.klas_tridy[klas][wrd]))\r\n if wrdc > 0:\r\n selected[klas] = float(distance) / float(wrdc)\r\n\r\n max_class = \"\"\r\n for i in range(0, 3):\r\n klas = max(selected, key=lambda k: selected[k])\r\n max_class = max_class + \" ,\" + klas\r\n del selected[klas]\r\n\r\n return max_class", "def contain_one_class(self, documents):\n classes = []\n for document in documents:\n if document.c not in classes:\n if len(classes) == 0:\n classes.append(document.c)\n else:\n return None\n if len(classes) == 1:\n return classes[0]\n else:\n return None", "def __get_best_score(scores):\n best = max(scores.items(), key=operator.itemgetter(1))[0]\n print(\"The best classification for this corpus is: \" + str(best))\n return best", "def classify(self, document, tree):\n if type(tree) is ClassTreeNode:\n return tree.c\n else:\n if tree.word in document.bag_of_words:\n return self.classify(document, tree.children[0])\n else:\n return self.classify(document, tree.children[1])", "def plurality_value(examples):\n common = defaultdict(int)\n for example_dict in examples:\n common[example_dict['class']] += 1\n return max(common.items(), key=itemgetter(1))[0]", "def majorityElementBoyerMoore(self, nums: List[int]) -> int:\n current_majority = None\n majority_count = 0\n majority_target = len(nums)//2\n\n for i in nums:\n if majority_count == 0:\n current_majority = i\n\n if i == current_majority:\n majority_count += 1\n else:\n majority_count -= 1\n \n return current_majority", "def majority_vote(labels):\n vote_counts = Counter(labels)\n winner, winner_count = vote_counts.most_common(1)[0]\n num_winners = len([count for count in vote_counts.values()\n if count == winner_count])\n if num_winners == 1:\n return winner\n else:\n #try again without the farthest\n return majority_vote(labels[:-1])", "def _get_majority_dc(array: List[int]) -> Optional[int]:\n array_length = len(array)\n if array_length == 1:\n return array[0]\n split_index = array_length // 2\n majority_left = _get_majority_dc(array[:split_index])\n majority_right = _get_majority_dc(array[split_index:])\n if majority_left == majority_right:\n return majority_left\n count_majority_left = 0\n count_majority_right = 0\n for item in array:\n if item == majority_left:\n count_majority_left += 1\n elif item == majority_right:\n count_majority_right +=1\n if count_majority_left > split_index:\n return majority_left\n elif count_majority_right > split_index:\n return majority_right\n else:\n return None", "def majority_vote(votes):\n import scipy.stats as ss\n mode, count = ss.mstats.mode(votes)", "def majorityNumber(self, nums):\n cnt = 0\n maj = 0\n for ind, num in enumerate(nums):\n if num == nums[maj]:\n cnt += 1\n else:\n cnt -= 1 # every time --, discard 2 different numbers\n\n if cnt < 0:\n maj = ind\n cnt = 1\n\n # assured that the majority exists, otherwise need to double check\n return nums[maj]", "def _filter_to_most_specific(self, graph, classlist):\n candidates = {}\n for brickclass in classlist:\n sc_query = f\"SELECT ?subclass WHERE {{ ?subclass rdfs:subClassOf+ <{brickclass}> }}\"\n 
subclasses = set([x[0] for x in graph.query(sc_query)])\n # if there are NO subclasses of 'brickclass', then it is specific\n if len(subclasses) == 0:\n candidates[brickclass] = 0\n continue\n # 'subclasses' are the subclasses of 'brickclass'. If any of these appear in\n # 'classlist', then we know that 'brickclass' is not the most specific\n intersection = set(classlist).intersection(subclasses)\n if len(intersection) == 1 and brickclass in intersection:\n candidates[brickclass] = 1\n else:\n candidates[brickclass] = len(intersection)\n most_specific = None\n mincount = float(\"inf\")\n for specific, score in candidates.items():\n if score < mincount:\n most_specific = specific\n mincount = score\n return most_specific", "def majority_element(arr):\n count = 0\n possible_majority = arr[0]\n for elem in arr:\n if count == 0:\n possible_majority = elem\n if elem == possible_majority:\n count += 1\n else:\n count -= 1\n\n return validate_majority(arr, possible_majority)", "def get_majority(lst):\n a = {}\n candidate = lst[0]\n for elem in lst:\n\tif elem not in a:\n\t a[elem] = 0\n\telse:\n\t a[elem] += 1\n for elem in lst:\n \tif (a[elem] >= len(lst) / 3):\n candidate = elem\n return candidate", "def most_common_class_label(subjects):\n result_set = defaultdict(int)\n for subject in subjects:\n result_set[subject.class_label[0]] += 1\n\n return max(result_set, key=result_set.get)", "def majority_vote(labels):\n\n conta = Counter(labels)\n\n winner, winner_count = conta.most_common(1)[0]\n\n num_winner = sum([1 for count in conta.values() if count == winner_count])\n\n if num_winner == 1:\n return winner\n else:\n return majority_vote(labels[:-1])" ]
[ "0.75053835", "0.7409201", "0.7204527", "0.7121844", "0.6862108", "0.6770251", "0.6752808", "0.659271", "0.6484694", "0.6423609", "0.63770854", "0.63620096", "0.63224113", "0.6277365", "0.6275072", "0.62714237", "0.6193381", "0.61306405", "0.60845435", "0.60079527", "0.5929004", "0.5897183", "0.58878595", "0.5880243", "0.5872712", "0.58614206", "0.5848218", "0.582985", "0.57976335", "0.5788735" ]
0.89835906
0
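The majority-class count in the row above can be expressed with `collections.Counter`; a small runnable sketch, again with an assumed `Doc` stand-in. Tie-breaking may differ from the row's explicit loop.

    from collections import Counter, namedtuple

    Doc = namedtuple("Doc", ["bag_of_words", "c"])   # assumed stand-in for the row's Document

    def majority_class(documents):
        # Counter.most_common(1) yields [(label, count)] for the most frequent class.
        return Counter(d.c for d in documents).most_common(1)[0][0]

    docs = [Doc({"x"}, "ham"), Doc({"y"}, "spam"), Doc({"z"}, "ham")]
    print(majority_class(docs))   # -> ham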
Returns the word in the given vocabulary with the highest information gain for the given documents.
def get_most_informative_word(self, documents, vocabulary): most_informative_word = None most_informative_word_gain = 0 for word in vocabulary: gain = self.get_information_gain(word, documents) if most_informative_word == None or gain >= most_informative_word_gain: most_informative_word = word most_informative_word_gain = gain return most_informative_word
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_largest_freq():\n words_list = {word for line in lines for word in line} # all words possible\n word_freqs = [(find_freq(word), word) for word in words_list] # list of tuples of words and their frequencies\n max_freq = max(word_freqs)\n return max_freq[0], max_freq[1]", "def label(self, input_doc=None):\n if input_doc == None:\n input_doc = self.stemmed_corpus\n X = self.vect.transform(input_doc)\n new_corpus = gensim.matutils.Sparse2Corpus(X, documents_columns=False)\n topics = self.ldamodel.get_document_topics(new_corpus)\n max_topic = []\n for tpc in list(topics):\n # get most relevant topic (tuple: 0 = topic, 1 = relevance distribution)\n max_topic.append(max(tpc,key=lambda item:item[1])[0]) \n return max_topic", "def max_word_value(words):\n return max(words, key=calc_word_value)", "def most_influential_words(model, vectorizer, genre_index=0, num_words=10):\n features = vectorizer.get_feature_names()\n max_coef = sorted(enumerate(model.coef_[genre_index]), key=lambda x:x[1], reverse=True)\n return [[features[x[0]], x[1] ] for x in max_coef[:num_words]]", "def get_information_gain(self, word, documents):\n gain = self.get_entropy(documents)\n with_word, without_word = self.get_split_data(word, documents)\n gain -= self.get_entropy(with_word) * len(with_word) / len(documents)\n gain -= self.get_entropy(without_word) * len(without_word) / len(documents)\n return gain", "def most_influential_words_doc(doc, tfidf_words):\n words_found = []\n for d in doc.split():\n for t in tfidf_words:\n if d == t[0]:\n if d not in words_found:\n words_found.append(d)\n return words_found", "def find_frequent_words(words, most_frequent): \n \n # common_words = Counter(sorted(words))\n # print common_words\n common_words = Counter(sorted(words)).most_common(most_frequent)\n print (common_words )\n most_common_words = [w for w, w_count in common_words]\n return most_common_words", "def mostRelevantDocs(textToCompare, numResults):\n\n from gensim import corpora, models, similarities\n import logging\n from getDocSparseVector import getDocumentCorpus, cleanAndTokenize\n import cPickle as pickle\n\n #reload(getDocSparseVector)\n logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n #Use heirarchical dirichlet allocation topic modeling from gensim to compute the relevance between documents\n \n \n documentDictionary = pickle.load(open(\"/Users/Larry/Code/EpistemicAssistant/relevanceComputations/documentDictionary.p\", \"rb\"))#load document dictionary\n corpus = pickle.load(open(\"/Users/Larry/Code/EpistemicAssistant/relevanceComputations/corpus.p\", \"rb\")) #load corpus\n hdp = pickle.load(open(\"/Users/Larry/Code/EpistemicAssistant/relevanceComputations/hdp.p\", \"rb\"))#load hdp model\n documents = pickle.load(open(\"/Users/Larry/Code/EpistemicAssistant/relevanceComputations/documents.p\", \"rb\"))#load documents\n \n #Cleans and tokenizes the input text \"cleanAndTokenize\"\n mainDocument = documentDictionary.doc2bow(cleanAndTokenize(textToCompare))\n \n corpusHdp = hdp[corpus]\n mainDocumentHdp = hdp[mainDocument]\n num_feat = len(documentDictionary.values()) #To get rid of warning, manually retreive dictionary feature size\n similarityIndex = similarities.MatrixSimilarity(corpusHdp, num_features=num_feat)\n sims = similarityIndex[mainDocumentHdp]\n sims = sorted(enumerate(sims), key=lambda item: -item[1])\n \n topNum=numResults; #The number of documents to use as the top matches\n topSims=sims[0:topNum]\n topDocs = []\n for sims in topSims:\n 
topDocs.append(documents[sims[0]])\n return topDocs #returns the most relevant documents to the textToCompare", "def pick_word(probabilities, int_to_vocab):\n # todo 需要编程:\n word = int_to_vocab[np.argmax(probabilities)]\n return word", "def task2(dictionary):\n word_count = Counter(dictionary)\n ans = word_count.most_common(10)\n print(ans)\n return ans", "def most_common_words(visual_fld, num_visualize):\n words = open(os.path.join(visual_fld, 'vocab.tsv'), 'r').readlines()[:num_visualize]\n words = [word for word in words]\n file = open(os.path.join(visual_fld, 'vocab_' + str(num_visualize) + '.tsv'), 'w')\n for word in words:\n file.write(word)\n file.close()", "def most_common_word(words, text):\n word_frequency = {w:text.count(w) for w in words}\n return sorted(words, key=word_frequency.get)[-1]", "def most_reducible(wordlist):\n\n\t# We create a memo for reducible words since is_reducible is \n\t# recursive. The keys are the words and the values are the \n\t# number of characters\n\tglobal reducible_words\n\treducible_words = dict()\n\treducible_words['a'], reducible_words['i'] = 1, 1\n\t\n\tword_dict = to_dictionary(wordlist)\n\tfor line in word_dict:\n\t\tis_reducible(line, word_dict)\n\n\t# Varible that will search the memo for the longest word\n\tcurrent_greatest = ''\n\tfor word in reducible_words:\n\t\tif reducible_words[word] > len(current_greatest):\n\t\t\tcurrent_greatest = word\n\tprint(current_greatest)", "def max_wupa(context_sentence, ambiguous_word):\r\n\r\n result = {}\r\n for i in wn.synsets(ambiguous_word):\r\n result[i] = sum(max([i.wup_similarity(k) for k in wn.synsets(j)]+[0]) \\\r\n for j in word_tokenize(context_sentence))\r\n result = sorted([(v,k) for k,v in result.items()],reverse=True)\r\n return result", "def getMaxKey(self):\n if len(self.word_to_freq) == 0:\n return \"\"\n\n tail = self.tail.prev\n while tail is not None:\n if len(tail.words) > 0:\n return next(iter(tail.words))\n else:\n tail = tail.prev\n\n return \"\"", "def pick_word(self, probabilities, mode):\n if mode == 'most_likely':\n choice = np.where(probabilities==max(probabilities))[0][0]\n else:\n choice = np.random.choice(len(probabilities), 1, p=probabilities)[0]\n return self.int_to_vocab[choice]", "def getLongestWord(words, tree_map):\n\tif not words:\n\t\treturn None\n\n\tword_list = []\n\tlongest_word = None\n\tcounter = 0\n\tfound_ctr = 0\n\tfor word in words:\n\t\tword_list.append(word.lower())\n\t\tfound_result = checkDict(word_list, tree_map)\n\t\tif isinstance(found_result, str):\n\t\t\tlongest_word = found_result\n\t\t\tfound_ctr = counter\n\t\tcounter+=1\n\n\treturn (found_ctr, longest_word)", "def maxcompChooseWord(hand, wordList, n):\n # 电脑给出最优解\n point = 0\n maxword = ''\n for word in wordList:\n newword1 = copy.deepcopy(word)\n newword2 = copy.deepcopy(word)\n if isValidWord(newword1, hand, wordList):\n p = getWordScore(newword2, n)\n if p > point:\n point = p\n maxword = word\n if point == 0:\n return None\n else:\n return maxword, point", "def most_common(self):\n # Example ouput : ['so', 6]\n return list(sorted(self.frequencies().items(), key = lambda x: x[1], reverse=True)[0])\n #sorted = sorted(self.frequencies().items(), key = lambda x: x[1], reverse=True)\n #return sorted[0] #not list", "def most_common(self, number=10):\n\n words_full_list = []\n\n for string in self.__corpora:\n words_full_list += string.split()\n\n print(Counter(words_full_list).most_common(number))", "def _get_max_likelihood_genus(self, words,\n word_posteriors, word_idxs):\n #Argmax prod( 
p(vi|G) )\n row_idxs = filter(not_none, map(word_idxs.get, words))\n likelihoods = word_posteriors[row_idxs].prod(0)\n # avoid .argmax() to solve tie problem.\n return (likelihoods == likelihoods.max()).nonzero()[0]", "def top_question_words(args, examples, word_dict):\n word_count = Counter()\n for ex in examples:\n for w in ex['question']:\n w = Dictionary.normalize(w)\n if args.uncased_question:\n w = w.lower()\n if w in word_dict:\n word_count.update([w])\n return word_count.most_common(args.tune_partial)", "def tag_word(self, w): \n if self.unknown(w):\n return self.default_tag\n else:\n return max(self.word_tags[w], key=self.word_tags[w].get)", "def choose_feature(self, features, examples, tags):\n features_gains_dict = {feature : self.get_gain(examples, tags, feature) for feature in features}\n max_gain = 0\n max_feature = features[0]\n for feature in features:\n if features_gains_dict[feature] > max_gain:\n max_gain = features_gains_dict[feature]\n max_feature = feature\n\n # return the feature with the highest gain\n return max_feature", "def most_similar(prediction):\r\n sTime = time()\r\n max_prediction = np.array([[0]])\r\n for key, value in dict_words_n_vectors.items():\r\n sim = cosine_similarity(prediction.reshape(1, -1), value.reshape(1, -1))\r\n if sim[0] > max_prediction[0]:\r\n max_prediction = sim\r\n word, vector = key, value\r\n m, s = divmod(calculate_time(sTime), 60)\r\n print(f\"--- done checking most similar word in {int(m):02d}:{int(s):02d} minutes --- \")\r\n return word, np.expand_dims(np.asarray(vector), axis=0)", "def keep_top_words(self, M, Mprint=20):\n freq = self.data.sum(axis=0)\n freq = np.squeeze(np.asarray(freq))\n idx = np.argsort(freq)[::-1]\n idx = idx[:M]\n self.keep_words(idx)\n print('most frequent words')\n for i in range(Mprint):\n print(' {:3d}: {:10s} {:6d} counts'.format(i, self.vocab[i], freq[idx][i]))\n return freq[idx]", "def predict_currword(word, top_n=10):\r\n try:\r\n return [\r\n (k, v) for k, v in model.WORDS_MODEL.most_common() if k.startswith(word)\r\n ][:top_n]\r\n except KeyError:\r\n raise Exception(\r\n \"Please load predictive models. 
Run:\\\r\n \\n\\tautocomplete.load()\"\r\n )", "def getMaxKey(self):\n print self.freq\n if self.freq:\n max_freq = max(self.freq.keys())\n return list(self.freq[max_freq])[0]\n\n return ''", "def correction(word):\r\n return max(candidates(word), key=P)", "def select_correction(word, corrections_map):\n if corrections_map is None or len(corrections_map) == 1:\n return corrections_map\n\n max_val = max(corrections_map.values())\n final_list = {term: val for term, val in corrections_map.items() if val == max_val}\n\n if len(final_list) == 1: # One value has the maximum\n if final_list.values()[0] > 0.7: # Highly valued terms are chosen by default\n return final_list\n\n first_word = final_list.keys()[0]\n\n # If the threshold value has not been reached we are looking for a second term\n del corrections_map[final_list.keys()[0]]\n\n max_val = max(corrections_map.values())\n tmp_list = {term: val for term, val in corrections_map.items() if val == max_val}\n\n if len(tmp_list) == 1: # One value has the second higher grade\n final_list.update(tmp_list)\n second_word = tmp_list.keys()[0]\n else: # Several terms with the same score\n # Differenciation on the Levenhstein distance\n tmp_list = select_lower_edit_distance(word, tmp_list.keys())\n\n if len(tmp_list) == 1: # One term has the lowest score\n final_list[tmp_list[0]] = max_val\n second_word = tmp_list[0]\n else: # Several terms with the same\n # Choose the best alphabetical term\n second_word = select_best_alphabetical_word(word, tmp_list)\n final_list[second_word] = max_val\n\n # Determine if we need one or two terms\n if log(final_list[first_word] / final_list[second_word]) >= 1:\n del final_list[second_word]\n\n return final_list\n elif len(final_list) != 2: # More than 2 values share the same maximum\n tmp_list = select_lower_edit_distance(word, final_list.keys())\n\n if len(tmp_list) == 1: # One word get the min edit distance\n first_word = tmp_list[0]\n tmp_final_list = final_list\n del tmp_final_list[first_word]\n\n tmp_list = select_lower_edit_distance(word, tmp_final_list.keys())\n\n if len(tmp_list) == 1: # One word get the second minimal edit distance\n final_list = {\n first_word: max_val,\n tmp_list[0]: max_val\n }\n\n return final_list\n else: # The second minimal edit distance is shared by several terms\n best_term = select_best_alphabetical_word(word, tmp_list)\n\n final_list = {\n first_word: max_val,\n best_term: max_val\n }\n\n return final_list\n elif len(tmp_list) == 2: # Exactly two word get the same min edit distance\n final_list = {\n tmp_list[0]: max_val,\n tmp_list[1]: max_val\n }\n\n return final_list\n else: #\n best_term_1 = select_best_alphabetical_word(word, tmp_list)\n\n tmp_list = [term for term in tmp_list if term != best_term_1]\n best_term_2 = select_best_alphabetical_word(word, tmp_list)\n\n final_list = {\n best_term_1: max_val,\n best_term_2: max_val\n }\n\n return final_list\n else: # Two words with the same score\n return final_list" ]
[ "0.64055604", "0.6338901", "0.62634706", "0.62199146", "0.6138516", "0.5942008", "0.5939371", "0.5936942", "0.59077406", "0.58693826", "0.58556145", "0.5848381", "0.58483124", "0.5796463", "0.57938373", "0.5783461", "0.5780535", "0.5689964", "0.56706756", "0.5655299", "0.56530297", "0.565177", "0.56429464", "0.5635017", "0.56045276", "0.5580552", "0.5555955", "0.5554036", "0.5538955", "0.55262655" ]
0.82679933
0
Returns the information gain of the given word for the documents; the subsets to subtract their entropies from the entropy of the original set are the sets with and without the word.
def get_information_gain(self, word, documents): gain = self.get_entropy(documents) with_word, without_word = self.get_split_data(word, documents) gain -= self.get_entropy(with_word) * len(with_word) / len(documents) gain -= self.get_entropy(without_word) * len(without_word) / len(documents) return gain
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def word_entropy(self, doc, lemmatized=False):\n # filter out words\n words = [token for token in doc if not token.is_punct and \"'\" not in token.text and not token.is_space]\n # create bag of words\n if lemmatized:\n list_words = [w.lemma_ for w in words]\n else:\n list_words = [w.text for w in words]\n num_words = len(list_words)\n word_freq = Counter(list_words)\n return -sum(\n [\n (word_freq[word] / num_words) * log2(word_freq[word] / num_words)\n for word in word_freq\n ]\n )", "def _information_gain(self, feature, node):\n return node.entropy() - self._entropy(feature, node)", "def _information_gain(self, y, subsets):\n n = y.shape[0]\n child_entropy = 0\n\n for y_i in subsets:\n child_entropy += self._entropy(y_i) * y_i.shape[0] / float(n)\n\n return self._entropy(y) - child_entropy", "def calc_information_gain(data, split_name, target_name):\r\n # Calculate the original entropy\r\n original_entropy = calc_entropy(data[target_name])\r\n \r\n # Find the median of the column we're splitting\r\n column = data[split_name]\r\n median = column.median()\r\n \r\n # Make two subsets of the data, based on the median\r\n left_split = data[column <= median]\r\n right_split = data[column > median]\r\n \r\n # Loop through the splits and calculate the subset entropies\r\n to_subtract = 0\r\n for subset in [left_split, right_split]:\r\n prob = (subset.shape[0] / data.shape[0]) \r\n to_subtract += prob * calc_entropy(subset[target_name])\r\n \r\n # Return information gain\r\n return original_entropy - to_subtract", "def calc_information_gain(data, split_name, target_name):\n # Calculate the original entropy\n original_entropy = calc_entropy(data[target_name])\n \n # Find the median of the column we're splitting\n column = data[split_name]\n median = column.median()\n \n # Make two subsets of the data, based on the median\n left_split = data[column <= median]\n right_split = data[column > median]\n \n # Loop through the splits and calculate the subset entropies\n to_subtract = 0\n for subset in [left_split, right_split]:\n prob = (subset.shape[0] / data.shape[0]) \n to_subtract += prob * calc_entropy(subset[target_name])\n \n # Return information gain\n return original_entropy - to_subtract", "def get_word_containement_measure(self,l2,l1):\n count = 0\n found_idfs = []\n unfound_idfs = []\n for w in l1:\n val = self.idf.get_tfidf_val(w)\n if (val > 10):\n val = 10\n if w in l2:\n count += 1\n found_idfs.append(val)\n else:\n unfound_idfs.append(val)\n if (len(found_idfs) == 0):\n avg_found = 0\n else:\n avg_found = np.mean(found_idfs)\n if (len(unfound_idfs) ==0):\n avg_unfound = 0\n else:\n avg_unfound = np.mean(unfound_idfs)\n\n\n\n return count / self.normalize_factor, avg_found, avg_unfound", "def informationGain2(data, attribute):\n \n split_data = splitBy(data, attribute) \n weighted_entropies = 0\n \n for set in split_data:\n weighted_entropies += len(set) / len(data) * entropy2(set) \n \n columnIG = entropy2(data) - weighted_entropies\n \n return columnIG", "def mutual_information(x, y, w):\r\n \r\n\r\n total_entropy = entropy(y,w)\r\n\r\n partitioned_x = partition(x)\r\n weighted_entropy = 0\r\n # calculate the weighted entropy over the partition of x\r\n vals,counts= np.unique(x,return_counts=True)\r\n for key in partitioned_x:\r\n weighted_entropy += np.sum([(np.sum(w[partitioned_x[key]])/np.sum(w)) * entropy(y[partitioned_x[key]],w[partitioned_x[key]])])\r\n\r\n information_gain = total_entropy - weighted_entropy\r\n return information_gain", "def info_content(self,lookup_word):\n\t 
if self.N == 0:\n\t # poor man's lazy evaluation\n\t for sent in brown.sents():\n\t for word in sent:\n\t word = word.lower()\n\t if not word in self.brown_freqs.keys():\n\t self.brown_freqs[word] = 0\n\t self.brown_freqs[word] = self.brown_freqs[word] + 1\n\t self.N = self.N + 1\n\t lookup_word = lookup_word.lower()\n\t n = 0 if not lookup_word in self.brown_freqs.keys() else self.brown_freqs[lookup_word]\n\t return 1.0 - (math.log(n + 1) / math.log(self.N + 1))", "def gain(self, target_attr, attr, debug=False):\n current_entropy = self.entropy(target_attr)[0]\n # print\n # print attr\n\n gain = current_entropy - self.remainder(target_attr=target_attr, attr=attr)\n if debug is True:\n print attr, \": \", gain\n return gain", "def get_word_containement_measure(self,l2,l1):\n count = 0\n found_idfs = []\n unfound_idfs = []\n word_count_dict = self.list_to_word_count_dict(l1)\n for w in l2:\n was_found = False\n val = self.idf.get_tfidf_val(w)\n if (val > 10):\n val = 10\n for i,w2 in enumerate(l1):\n if (w2 == w and word_count_dict[(w2,i)] == 0):\n word_count_dict[(w2,i)] = 1\n count += 1\n found_idfs.append(val)\n was_found = True\n break\n if (was_found):\n unfound_idfs.append(val)\n if (len(found_idfs) == 0):\n avg_found = 0\n else:\n avg_found = np.mean(found_idfs)\n if (len(unfound_idfs) ==0):\n avg_unfound = 0\n else:\n avg_unfound = np.mean(unfound_idfs)\n\n # full idf features\n unfound_vec = list(sorted(unfound_idfs, reverse=True))\n found_vec = list(sorted(found_idfs, reverse=True))\n unfound_vec = self.pad_or_cut_vec(unfound_vec, self.LENGTH_MAX)\n found_vec = self.pad_or_cut_vec(found_vec, self.LENGTH_MAX)\n\n return count , avg_found, avg_unfound, found_vec, unfound_vec", "def informationGain(data, class_label, attribute, indices=None):\n\tsubset = data[:] if indices == None else data.loc[indices]\n\t\n\tsublist = subset[attribute].tolist()\n\tvalues = list(set(sublist))\n\tinfoGain = entropyOnSubset(subset, class_label)\n\t\n\t#print (sublist)\n\t\n\tfor i in values:\n\t\tindex = list(subset.index[subset[attribute] == i])\n\t\tinfoGain -= sublist.count(i)/len(sublist) * entropyOnSubset(subset, class_label, index)\n\n\t\n\treturn infoGain", "def return_infogain(instances, labels):\n # some initial calculations\n infogain = dict.fromkeys(range(instances.shape[1]), 0)\n cnt = Counts(instances, labels)\n len_instances = instances.shape[0]\n feature_frequency = cnt.count_document_frequency()\n label_frequency = cnt.count_label_frequency()\n label_feature_frequency = cnt.count_label_feature_frequency()\n label_probability = [(label_frequency[label] / len_instances) for label in label_frequency.keys()]\n initial_entropy = -sum([prob * math.log(prob, 2) for prob in label_probability if prob != 0])\n # assign infogain values to each feature\n for feature in feature_frequency.keys():\n # calculate positive entropy\n frequency = feature_frequency[feature]\n if frequency > 0:\n feature_probability = frequency / len_instances\n positive_label_probabilities = []\n for label in labels:\n if label_feature_frequency[label][feature] > 0:\n positive_label_probabilities.append(label_feature_frequency[label][feature] / frequency)\n else:\n positive_label_probabilities.append(0)\n positive_entropy = -sum([prob * math.log(prob, 2) for prob in positive_label_probabilities if prob != 0])\n else:\n positive_entropy = 0\n # calculate negative entropy\n inverse_frequency = len_instances - feature_frequency[feature]\n negative_probability = inverse_frequency / len_instances\n negative_label_probabilities 
= [((label_frequency[label] - label_feature_frequency[label][feature]) / inverse_frequency) for label in labels]\n negative_entropy = -sum([prob * math.log(prob, 2) for prob in negative_label_probabilities if prob != 0])\n # based on positive and negative entropy, calculate final entropy\n final_entropy = positive_entropy - negative_entropy\n infogain[feature] = initial_entropy - final_entropy\n return infogain", "def information_gain(features, attribute_index, targets):\r\n\r\n possible_feature_values = [0,1]\r\n \r\n possible_classifications = [0,1]\r\n \r\n feature = features[:,attribute_index]\r\n \r\n \r\n number_of_samples = len(feature)\r\n \r\n import math\r\n \r\n \r\n #current_entropy = np.sum([-(len(targets[targets==possible_classification])/number_of_samples)*math.log(len(targets[targets==possible_classification])/number_of_samples, 2) for possible_classification in possible_classifications])\r\n \r\n terms_to_be_summed_for_current_entropy = []\r\n \r\n for classification in possible_classifications:\r\n \r\n number_of_elements_with_this_classification = len(targets[targets==classification])\r\n \r\n p_for_this_classification = number_of_elements_with_this_classification/len(targets)\r\n \r\n if p_for_this_classification != 0:\r\n terms_to_be_summed_for_current_entropy.append(-p_for_this_classification*math.log(p_for_this_classification,2))\r\n else:\r\n terms_to_be_summed_for_current_entropy.append(0)\r\n \r\n current_entropy = np.sum(terms_to_be_summed_for_current_entropy)\r\n \r\n \r\n \r\n terms_to_be_summed_for_weighted_entropy = []\r\n \r\n for possible_value in possible_feature_values:\r\n \r\n targets_split_by_feature_value = targets[feature.flatten() == possible_value]\r\n \r\n if len(targets_split_by_feature_value) != 0:\r\n \r\n \r\n weight_of_feature_value = len(targets_split_by_feature_value)/len(targets)\r\n \r\n terms_for_entropy_within_subset = []\r\n \r\n for classification in possible_classifications:\r\n \r\n number_of_subset_elements_with_this_classification = len(targets_split_by_feature_value[targets_split_by_feature_value==classification])\r\n \r\n p_in_subset_for_this_classification = number_of_subset_elements_with_this_classification/len(targets_split_by_feature_value)\r\n \r\n if p_in_subset_for_this_classification != 0:\r\n terms_for_entropy_within_subset.append(-p_in_subset_for_this_classification*math.log(p_in_subset_for_this_classification,2))\r\n else:\r\n terms_for_entropy_within_subset.append(0)\r\n \r\n entropy_within_subset = np.sum(terms_for_entropy_within_subset)\r\n \r\n terms_to_be_summed_for_weighted_entropy.append(weight_of_feature_value*entropy_within_subset)\r\n \r\n weighted_entropy = np.sum(terms_to_be_summed_for_weighted_entropy)\r\n \r\n \r\n #current_entropy = np.sum(terms_to_be_summed_for_current_entropy)\r\n \r\n #weighted_entropy = np.sum([(len(feature[feature==possible_value])/number_of_samples)*(len(targets[feature==possible_value][targets[feature==possible_value]==possible_classification])/len(targets[feature==possible_value]))*math.log((len(targets[feature==possible_value][targets[feature==possible_value]==possible_classification])/len(targets[feature==possible_value])), 2) for possible_classification in possible_classifications for possible_value in possible_feature_values])\r\n\r\n information_gain = current_entropy - weighted_entropy \r\n \r\n return information_gain", "def get_most_informative_word(self, documents, vocabulary):\n most_informative_word = None\n most_informative_word_gain = 0\n for word in vocabulary:\n 
gain = self.get_information_gain(word, documents)\n if most_informative_word == None or gain >= most_informative_word_gain:\n most_informative_word = word\n most_informative_word_gain = gain\n return most_informative_word", "def __gain(self, data_set, split_feature, target_feature):\n frequencies = self.__calculate_frequency(data_set, split_feature)\n data_entropy = 0.0\n\n # Calculate the entropy of the data.\n for value, frequency in frequencies.items():\n probability = frequency / sum(frequencies.values())\n data_subset = data_set[data_set[split_feature] == value]\n data_entropy += probability * self.__entropy(data_subset, target_feature)\n\n return self.__entropy(data_set, target_feature) - data_entropy", "def get_gain(self, examples, tags, feature):\n initial_entropy = self.calculate_entropy(tags)\n relative_entropy_per_feature = []\n feature_index = self.get_feature_index(feature)\n for possible_value in self.feature_domain_dict[feature]:\n examples_and_tags_vi = [(example, tag) for example, tag in zip(examples, tags)\n if example[feature_index] == possible_value]\n tags_vi = [tag for example, tag in examples_and_tags_vi]\n entropy_vi = self.calculate_entropy(tags_vi)\n if not examples:\n pass\n relative_entropy = (float(len(examples_and_tags_vi)) / len(examples)) * entropy_vi\n relative_entropy_per_feature.append(relative_entropy)\n\n return initial_entropy - sum(relative_entropy_per_feature)", "def info_gain(self, left, right, current_uncertainty):\n p = float(len(left)) / (len(left) + len(right))\n return current_uncertainty - p * self.gini(left) - (1 - p) * self.gini(right)", "def gain(X, y, column):\n prior_entropy = entropy(y)\n total = y.size\n\n values = X[column].unique()\n proportions = X[column].value_counts() / total\n return prior_entropy - sum(proportions[i] * \\\n entropy(y[np.array(X[column]) == i]) for i in values)", "def compute_gain(loudness, renormalize_loudness):\n gain = []\n for i in range(len(loudness)):\n delta_loudness = renormalize_loudness[i] - loudness[i]\n gain.append(np.power(10.0, delta_loudness / 20.0))\n return gain", "def coleman_liau(self, doc):\n num_words = _get_num_words(doc)\n if num_words <= 0:\n return 0\n\n num_sentences = _get_num_sentences(doc)\n letter_count = sum(\n [len(token) for token in doc if not token.is_punct and not token.is_digit]\n )\n if letter_count <= 0:\n return 0\n letters_to_words = letter_count / num_words * 100\n sent_to_words = num_sentences / num_words * 100\n return 0.0588 * letters_to_words - 0.296 * sent_to_words - 15.8", "def information_gain(Y, attr):\n initial_gain = entropy(Y)\n\n temp_Y = Y.tolist()\n temp_attr = attr.tolist()\n\n temp_attr = list(np.unique(attr))\n\n for a in temp_attr:\n l = []\n count = 0\n for j in attr:\n if (j == a):\n l.append(temp_Y[count])\n count+=1\n initial_gain -= ((len(l) / len(temp_Y)) * entropy(pd.Series(l)))\n return initial_gain", "def entropy_gain(node,attribute):\n data_subset1 = filter_data(node.data,node.ancestors)\n data_counts = list(Counter(data_subset1['Class']).values())\n base_entropy = entropy(data_counts,base=2)\n num_values = len(data_subset1)\n entropy_sum = 0\n \n for value in [0,1]:\n data_subset2 = filter_data(node.data, node.ancestors + [(attribute,value)])\n subset_counts = list(Counter(data_subset2['Class']).values())\n entropy_sum += (len(data_subset2)/num_values) * entropy(subset_counts,base=2)\n \n return base_entropy - entropy_sum", "def getWord(self, input_sentence, prev_word = None):\n\n all_words = torch.zeros(0, dtype=torch.long)\n all_probs = 
torch.zeros(0, dtype=torch.float32)\n\n for i in range(15):\n observe_word = input_sentence[i]\n words, probs = self.factors[i].observe(observe_word)\n probs *= self.weights[i]\n\n # join factors\n all_words, idx = torch.unique(torch.cat((all_words, words)), return_inverse = True)\n concat_probs = torch.cat((all_probs, probs))\n new_probs = torch.zeros_like(all_words, dtype=torch.float32)\n\n for j in range(concat_probs.size(0)):\n new_probs[idx[j]] += concat_probs[j]\n all_probs = new_probs\n\n\n if torch.is_tensor(prev_word):\n words, probs = self.transition.observe(prev_word)\n # join factors\n all_words, idx = torch.unique(torch.cat((all_words, words)), return_inverse = True)\n concat_probs = torch.cat((all_probs, probs))\n new_probs = torch.zeros_like(all_words, dtype=torch.float32)\n for j in range(concat_probs.size(0)):\n new_probs[idx[j]] += concat_probs[j]\n all_probs = new_probs\n\n # now all_words and all_probs contains all posible words with its probability\n try:\n chosen_idx = torch.argmax(all_probs)\n result = all_words[chosen_idx]\n if int(result) == UNK_ID:\n all_probs[chosen_idx] = 0\n chosen_idx = torch.argmax(all_probs)\n result = all_words[chosen_idx]\n _, idxx = torch.sort(all_probs, descending=False)\n print(all_probs[idxx[:10]])\n print(all_words[idxx[:10]])\n print(result)\n except:\n result = input_sentence[self.output_idx]\n\n \n \n return result", "def word_analogy(self):\n data = open(\"data/word_analogy_subset.en.ar.txt\").read().split('\\n')\n data = [x for x in data if len(x.split()) == 4]\n cnt = 0\n keys = list(self.embeddings_index.keys())\n vectors = np.array(list(self.embeddings_index.values()))\n norms = np.linalg.norm(vectors, axis=1)\n for i in data:\n i = self.preprocessor(i).split()\n try:\n v = self.embeddings_index[i[0]] - self.embeddings_index[i[1]] + self.embeddings_index[i[2]]\n except:\n continue\n unit = v / np.linalg.norm(v)\n dists = np.dot(vectors, unit) / norms\n best = np.argpartition(-dists, 10)[:10 + 1]\n best = best.take(np.argsort((-dists).take(best)))\n result = [(keys[sim], float(dists[sim]))\n for sim in best]\n sbv = result[:10]\n for j in sbv:\n if j[0] == i[3]:\n cnt += 1\n return cnt/ len(data)", "def fkg(word, sent, syll):\n flesch_kincaid_grade = (0.39* (word / sent)) + (11.8 * (syll / word)) - 15.59\n return flesch_kincaid_grade", "def med_in_english(word):\r\n\treturn int(med(TextBlob(word).correct(), word))", "def gain(Y, X):\n return entropy(Y) - cEntropy(Y,X)", "def info_gain(left, right, current_uncertainty):\n p = float(len(left)) / (len(left) + len(right))\n return current_uncertainty - p * gini(left) - (1 - p) * gini(right)", "def neutralize(word, g, word_to_vec_map):\n\n ### START CODE HERE ###\n # Select word vector representation of \"word\". Use word_to_vec_map. (≈ 1 line)\n e = word_to_vec_map[word]\n\n # Compute e_biascomponent using the formula given above. (≈ 1 line)\n e_biascomponent = np.dot(e, g) / np.sum(np.dot(g, g)) * g\n # e_biascomponent = np.sqrt(np.sum(np.dot(e,e))) * cosine_similarity(e, g) * g/np.sqrt(np.sum(np.dot(g,g)))\n # Neutralize e by subtracting e_biascomponent from it\n # e_debiased should be equal to its orthogonal projection. (≈ 1 line)\n e_debiased = e - e_biascomponent\n ### END CODE HERE ###\n\n return e_debiased" ]
[ "0.6584013", "0.6461961", "0.62527275", "0.6212763", "0.6181934", "0.603743", "0.5926501", "0.58806574", "0.58732754", "0.5852905", "0.58036405", "0.579155", "0.57895815", "0.57730293", "0.5754583", "0.57529724", "0.57256997", "0.57021683", "0.56705195", "0.5655812", "0.56313586", "0.55879575", "0.55768293", "0.55658334", "0.5557378", "0.5538471", "0.5530821", "0.5526918", "0.5514112", "0.55085665" ]
0.83862364
0
Returns the entropy of the given documents;
def get_entropy(self, documents): entropy = 0 for c in self.classes: count = 0 for document in documents: if document.c == c: count += 1 if count != 0 and len(documents) != 0: ratio = count / len(documents) entropy -= ratio * log(ratio, 2) return entropy
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def entropy(doc_or_tokens: types.DocOrTokens) -> float:\n words = utils.get_words(doc_or_tokens)\n word_counts = itertoolz.frequencies(word.text for word in words)\n n_words = sum(word_counts.values())\n probs = (count / n_words for count in word_counts.values())\n return -sum(prob * math.log2(prob) for prob in probs)", "def entropy(self, text_ngrams):\n return -1 * _mean(\n [self.logscore(ngram[-1], ngram[:-1]) for ngram in text_ngrams]\n )", "def entropy(d, total, word_count):\n\t# Entropie je - Sum_morf p(morf) * log_2 p(morf)\n\t# p(morf) = c(morf) / c(all)\n\te = 0\n\tfor count in d.values():\n\t\tp = count/total\n\t\ttype_e = - p * log2(p)\n\t\te += type_e * count\n\treturn e / word_count", "def entropy(data):\n strings, lens = Counter(data), np.float(len(data))\n return -sum(count / lens * np.log2(count / lens) for count in strings.values())", "def entropy(self, text):\n\n# text = self.myReplacer.replace(text)\n# text = self.tokenizer.tokenize(text)\n new_text = []\n for word in text:\n if word.count('\\'') > 0:\n words = word.split('\\'')\n for w in words:\n new_text.append(w)\n else:\n new_text.append(word)\n text = new_text\n \n e = 0.0\n lenth = len(text)\n if lenth == 0:\n return 0\n elif lenth < self._n:\n current_n = lenth\n else:\n current_n = self._n\n \n for i in range(current_n - 1, len(text)):\n context = tuple(text[(i - current_n + 1) : i])\n token = text[i]\n e += self.logprob(token, context)\n return e", "def entropy(data):\n\n freqs = {}\n suma = len(data)\n\n for i in range(0, len(data)):\n freqs[data[i]] = 1.0 + freqs.get(data[i], 0)\n\n res = 0.0\n for i in freqs:\n res += (freqs[i] / suma) * log((freqs[i] / suma), 2)\n return -res", "def entropy(self):\n ent = 0.0\n for f in self.byte_freq:\n if f > 0:\n freq = float(f) / self.byte_total\n ent = ent + freq * math.log(freq, 2)\n return -ent", "def compute_idfs(documents):\n\n frequencias = dict()\n for file_name, word_list in documents.items():\n for word in word_list:\n if word not in frequencias:\n frequencias[word] = {file_name}\n else:\n frequencias[word].add(file_name)\n\n for key, value in frequencias.items():\n frequencias[key] = math.log(len(documents) / len(value))\n\n return frequencias", "def entropy(self):\n raise NotImplementedError", "def entropy( freq ):\n N = 0.0\n entropy = 0.0\n for x, v in freq.items( ):\n N += v\n entropy -= v * math.log( v, 2 )\n return (N * math.log( N, 2 ) + entropy) / N", "def entropy(data):\n e = 0\n\n counter = collections.Counter(data)\n l = len(data)\n for count in counter.values():\n p_x = count / l\n e += - p_x * math.log2(p_x)\n\n return e", "def entropy(self):\n return self._normal.entropy()", "def entropy(s):\n p, lns = Counter(s), float(len(s))\n return -sum( count/lns * math.log(count/lns, 2) for count in p.values())", "def compute_idfs(documents):\n from math import log\n\n # number of documents\n TotalDocuments = len(documents)\n\n # create set of all words in all docs\n words = set()\n for words_list in documents.values():\n for word in words_list:\n words.add(word)\n\n # calculate how many doc containing each words, then calculate idfs\n nDocsContain = dict()\n idfs = dict()\n for word in words:\n nDocsContain[word] = 0\n for words_list in documents.values():\n if word in words_list:\n nDocsContain[word] += 1 \n idfs[word] = log(TotalDocuments/nDocsContain[word])\n\n return idfs", "def word_entropy(self, doc, lemmatized=False):\n # filter out words\n words = [token for token in doc if not token.is_punct and \"'\" not in token.text and not token.is_space]\n 
# create bag of words\n if lemmatized:\n list_words = [w.lemma_ for w in words]\n else:\n list_words = [w.text for w in words]\n num_words = len(list_words)\n word_freq = Counter(list_words)\n return -sum(\n [\n (word_freq[word] / num_words) * log2(word_freq[word] / num_words)\n for word in word_freq\n ]\n )", "def get_entropy(dictionary):\n my_sum = 0\n weighted_sum_of_logs = 0\n for freq in dictionary.values():\n if freq:\n my_sum += freq\n weighted_sum_of_logs += freq * math.log(freq)\n return math.log(my_sum) - weighted_sum_of_logs / my_sum", "def div(self):\n freqList = [i / sum(self.has.values()) for i in self.has.values()]\n entropies = [i * math.log(i, 2) for i in freqList]\n entropy = -sum(entropies)\n return entropy", "def _entropy(data):\n hist = np.array(PIL.Image.fromarray(data).histogram())\n hist = hist / hist.sum()\n hist = hist[hist != 0]\n return -np.sum(hist * np.log2(hist))", "def entropy(message):\n n = len(message)\n message = letter_freq(message)\n h = 0\n for n_i in message.values():\n p_i = n_i/n\n h += -p_i*(log2(p_i))\n return h", "def compute_idfs(documents):\n dictLen = len(documents)\n words_idf = {}\n for name in documents:\n words = documents[name]\n for w in words:\n if w in words_idf:\n continue\n wFreqncy = 0\n for n in documents:\n if w in documents[n]:\n wFreqncy += 1\n words_idf[w] = np.log(dictLen/wFreqncy)\n return words_idf", "def entropy(message):\n message = letter_freq(message)\n n = sum(message.values())\n h = 0\n for n_i in message.values():\n p_i = n_i / n\n h += -p_i * log2(p_i)\n return h", "def nolog_inverse_document_frequency(term, tokenized_documents_list):\n\tterm = processes_and_tokenize(term)[0]\t#make sure term is in correct form\n\n\tnum_documents = len(tokenized_documents_list)\n\tnum_documents_with_term = len([document for document in tokenized_documents_list if term in document])\n\t\n\tassert num_documents_with_term > 0\n\treturn num_documents / num_documents_with_term", "def compute_idfs(documents):\n # compute inverse document frequency values for each of the words\n idf_words = set()\n for filename in documents:\n # map the words in the filename for documents dictionary\n idf_words.update(documents[filename])\n idfs = dict()\n for word in idf_words:\n # n = number of documents in which word appears\n n = sum(word in documents[filename] for filename in documents)\n # import math -- log is natural base e\n # idf of a word = natural logarithm of the number of documents divided by the number of documents in which the word appears.\n idf = math.log(len(documents) / n)\n idfs[word] = idf\n return idfs", "def entropy(group_counts):\n total = sum(group_counts)\n entro = 0\n for item_count in group_counts:\n entro += item_entropy(item_count, total)\n return entro", "def get_entropy(distribution, samples):\n entropy = -tf.reduce_sum(distribution.log_prob(samples), axis=1)\n return entropy", "def entropy(message):\n # Should the import be here or should it be at the top of the page?\n freq_dict = letter_freq(message)\n length_message = len(message)\n bit_entropy = 0\n for occurrences in freq_dict.values():\n frequency = occurrences / length_message\n bit_entropy = bit_entropy - frequency * log2(frequency)\n return bit_entropy", "def compute_idfs(documents):\n words = set()\n for file in documents:\n words.update(documents[file])\n\n idfs = dict()\n for word in words:\n tw = sum(word in documents[file] for file in documents)\n idf = math.log(len(documents)/ tw )\n idfs[word] = idf\n\n return idfs", "def entropy(self):\n return 
-np.sum(self.log_likelihoods * np.exp(self.log_likelihoods))", "def entropy(distribution, unit=2):\n frequencies = distribution.frequencies(normalised=True)\n # check to see if it is a deterministic case (all but one are zero)\n zeros_size = frequencies[frequencies == 0].size\n if zeros_size + 1 == frequencies.size:\n return 0\n else:\n return np.sum(-frequencies * np.log2(frequencies) / np.log2(unit))", "def entropy(a):\n a = a.upper()\n\n freq = collections.defaultdict(int) # int() is the default constructor for non existent item, and returns 0\n for c in a:\n freq[c] = freq[c] + 1\n\n e = 0.0\n for f in freq.values():\n if f:\n p = f / len(a)\n e += p * math.log(p)\n\n return -e" ]
[ "0.767517", "0.71636075", "0.7113853", "0.70761496", "0.69885343", "0.6926311", "0.68369186", "0.6734515", "0.6729236", "0.671758", "0.6712377", "0.6683987", "0.6650099", "0.6646317", "0.66449845", "0.664072", "0.6622492", "0.6620809", "0.6606008", "0.66057396", "0.6556798", "0.6542844", "0.65275234", "0.65268147", "0.6473251", "0.64715075", "0.6466879", "0.64518654", "0.642519", "0.64204335" ]
0.8374573
0
Scan for csvs in a source_dir recursively. Place each file in a path of <dest_dir>/<species>/<date_time>.csv
def collect_csv(source_dir, dest_dir): source_dir = Path(source_dir) dest_dir = Path(dest_dir) for csvfile in source_dir.rglob("*.csv"): species = normalized_species(csvfile) species_dir = dest_dir / species species_dir.mkdir(exist_ok=True, parents=True) date_time = normalized_datetime(csvfile) print(f"Renaming {csvfile} to {species_dir / (date_time + '.csv')}") csvfile.rename(species_dir / (date_time + ".csv"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_directory_csv(d_in, d_out, target_column, merge_columns):\n\n INPUT_FILES = grab_files(\"*.csv\", d_in)\n\n if not INPUT_FILES:\n logger.warning(\"No matching CSV files found, exiting\")\n exit(2)\n\n for f_csv in INPUT_FILES:\n f_csv_out = os.path.join(d_out, os.path.basename(f_csv))\n vals = (f_csv, f_csv_out, target_column, merge_columns)\n import_csv(vals)", "def scan_folder(folder):\n LOGGER.debug(\"Scanning folder: %s\", folder)\n for file in os.listdir(folder):\n if file.endswith(\".csv\"):\n yield os.path.join(folder, file)", "def __setupPaths(self):\n self.csv_file_names = filter(\n (lambda x: not re.match(\".*\\\\.csv$\", x) is None),\n os.listdir(self.path))", "def make_up(self, base_path='./data/'):\n for csv_file_path in [f\"{base_path}{_}\" for _ in os.listdir(base_path)]:\n self.append_file(csv_file_path)", "def csvp(startingPath, csv_ext='.csv'):\n print 'walking up path=', startingPath\n csvfn = [os.path.join(root, filename)\n for root, dirnames, filenames in os.walk(startingPath)\n for filename in filenames if filename.endswith(csv_ext)]\n print 'list is ', len(csvfn), ' images long'\n print 'starting with', csvfn[0]\n print 'ending with', csvfn[-1]\n return csvfn", "def stack_walks(direc):\n files = os.listdir(direc)\n csvs = []\n for x in files:\n if '.csv' in x:\n csvs.append(x)\n complete = np.vstack([get_nx10(direc+'/'+x) for x in csvs])\n return complete", "def walkSource(sourcedir):\n for parent, dnames, fnames in os.walk(sourcedir):\n for fname in fnames:\n if fname not in SKIP_FILES:\n filename = os.path.join(parent, fname)\n if filename.endswith('.java') and os.path.isfile(filename):\n with open(filename, 'r') as f:\n lines = f.readlines()\n yield (lines, fname)", "def get_descendant_file_path(parent_path):\n csv_relative_path = []\n for root, dirs, files in os.walk(parent_path):\n for file in files:\n words = file.split(r'.')\n if words[-1] == 'csv':\n file_path = os.path.join(parent_path, file)\n csv_relative_path.append(file_path)\n return csv_relative_path", "def sync_csv(arg):\n files = os.listdir(arg.input_dir)\n file_map = dict()\n for f in files:\n label = os.path.splitext(f)[0].split('_')\n if len(label) < 2:\n continue\n if file_map.get(label[0], None) is None:\n file_map[label[0]] = dict()\n if label[1] == 'grid':\n file_map[label[0]]['grid'] = f\n else:\n if file_map[label[0]].get('data', None) is None:\n file_map[label[0]]['data'] = dict()\n if file_map[label[0]]['data'].get(label[1], None) is None:\n file_map[label[0]]['data'][label[1]] = []\n file_map[label[0]]['data'][label[1]].append(f)\n tar_name = 't{}'.format(datetime.now().strftime('%Y%m%d'))\n tar_path = os.path.join(arg.output_dir, 'tar', tar_name)\n if not os.path.exists(tar_path):\n os.mkdir(tar_path)\n i = 0\n n = len(file_map)\n for city, v in file_map.items():\n i = i + 1\n print('------ handle city [{}/{}]: {} -------'.format(i, n, city))\n city_csv_path = os.path.join(arg.output_dir, 'csv', city)\n city_sql_path = os.path.join(arg.output_dir, 'sql', city)\n if not os.path.exists(city_csv_path):\n os.mkdir(city_csv_path)\n if not os.path.exists(city_sql_path):\n os.mkdir(city_sql_path)\n grid = v.get('grid', None)\n if grid:\n grid = os.path.splitext(grid)[0]\n print('** handling grid ...')\n tar_sql = os.path.join(tar_path, '{}.sql.gz'.format(grid))\n if os.path.exists(tar_sql):\n print('****** {} exist!'.format(tar_sql))\n else:\n shutil.copyfile(os.path.join(arg.input_dir, '{}.csv'.format(grid)),\n os.path.join(city_csv_path, '{}.csv'.format(grid)))\n 
trans_grid(city, city_csv_path, city_sql_path)\n print('****** GZIP grid sql')\n with open(os.path.join(city_sql_path, '{}.sql'.format(grid))) as fi, gzip.open(\n os.path.join(tar_path, '{}.sql.gz'.format(grid)), 'wb') as fo:\n fo.write(fi.read().encode())\n data = v.get('data', None)\n if data:\n print('** handling data ...')\n for week, data_files in data.items():\n print('**** week: {}'.format(week))\n tar_detail = os.path.join(tar_path, '{}_{}_detail.sql.tar.gz'.format(city, week))\n if os.path.exists(tar_detail):\n print('****** {} exist!'.format(tar_detail))\n else:\n for data_file in data_files:\n shutil.copyfile(os.path.join(arg.input_dir, data_file), os.path.join(city_csv_path, data_file))\n create_detail(city, week, 30000, city_csv_path, city_sql_path)\n print('****** TAR detail sql')\n with tarfile.open(tar_detail, 'w:gz') as f:\n for city_week_detail in os.listdir(city_sql_path):\n if city_week_detail.startswith('{}_{}_detail'.format(city, week)):\n f.add(os.path.join(city_sql_path, city_week_detail), arcname=city_week_detail)\n print('****** remove csv and sql file...')\n for data_file in data_files:\n os.remove(os.path.join(city_csv_path, data_file))\n sql_files = os.path.join(city_sql_path, '{}_{}_detail*sql'.format(city, week))\n for sql_file in glob.glob(sql_files):\n os.remove(sql_file)", "def list_file(csv_directory):\n list_of_files = [os.path.join(dirpath, file_name)\n for dirpath, dirnames, files in os.walk(csv_directory)\n for file_name in fnmatch.filter(files, '*.csv')]\n return list_of_files", "def enhance(parent_folder):\n parent_folder = Path(parent_folder).resolve()\n address_csv_files = sorted(parent_folder.glob('*.csv'))\n\n print(f'enhancing {len(address_csv_files)} csv files in {parent_folder}')\n\n data = Path(__file__).parent.parent.parent / 'data'\n workspace = (data / 'enhanced' / GDB_NAME).resolve()\n\n arcpy.env.workspace = str(workspace)\n\n for address_csv in address_csv_files:\n job = enhance_data(address_csv)\n\n prepare_output(job)\n convert_to_csv(job)\n remove_temp_tables(job)", "def walk_csv_data(**kwargs):\n for path, name in walk(**kwargs):\n if path.endswith('.csv'):\n with open(path, newline='') as f:\n text = f.read()\n reader = csv.DictReader(StringIO(text))\n try:\n fieldnames = reader.fieldnames\n rows = list(reader)\n yield (path, name, text, fieldnames, rows)\n except csv.Error:\n continue", "def get_file_list(rootdir): #{{{\n file_list = []\n for f in os.listdir(rootdir):\n if f == None or not f.endswith(\".csv\"):\n continue\n file_list.append(os.path.join(rootdir, f))\n \n return file_list", "def csv_files_in_folder_merger(file):\n stack = []\n for file_in_list in file:\n stack.append(file_to_generator(file_in_list))\n stacklijst = pd.concat(stack)\n\n return stacklijst", "def import_files(col, src):\n for root, _, files in os.walk(src, topdown=False):\n for fil in sorted(files):\n fname = os.path.join(root, fil)\n ext = get_file_extension(fil)\n fkey = get_file_key(fname)\n\n if ext in IGNORED_EXT:\n print('- Ignored: {}'.format(fname))\n continue\n\n if not col.is_blacklisted(fkey) and not col.is_whitelisted(fkey):\n target = fkey + ext\n target = os.path.join(col.get_airlock(), target)\n if not os.path.exists(target):\n shutil.copy(fname, target, follow_symlinks=True)\n print('+ Copied: {} -> {}'.format(fname, target))\n else:\n print('- Ignored: {}'.format(fname))", "def __init__(self, root_dir):\n self.paths = glob.glob(root_dir + \"/*.csv\")\n self.target = 'Default'\n # Grouping variable names", "def 
get_csv_paths(top_path):\n # exclude is a set holding all dirnames to be excluded\n exclude = {\"fails\", \"archive\", \"exclude\", \"fail\", \"backup\"}\n # files is a dict that defaults to lists, so values can be appended to keys\n files = defaultdict(list)\n for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):\n dirnames[:] = [d for d in dirnames if d.lower() not in exclude]\n\n for filename in filenames:\n\n # gather .csv and .tsv files\n if \".csv\" in str(filename).lower() or \".tsv\" in str(filename).lower():\n # Add filename to the key of dirpath\n files[dirpath].append(filename)\n return files", "def update_csv():\n return os.listdir('./data')", "def preprocess_files(file_path):\n # checking your current working directory\n cur_dir = os.getcwd()\n\n # Get your current folder and sub folder event data\n data_dir = os.path.join(cur_dir, 'event_data')\n\n # Create a for loop to create a list of files and collect each\n # file_path\n file_path_list = []\n for root, dirs, files in os.walk(data_dir):\n # join the file path and roots with the subdirectories using\n # glob\n file_path_list = glob.glob(os.path.join(root, '*'))\n\n full_data_rows_list = []\n\n # for every file_path in the file path list collect records\n for f in file_path_list:\n\n # reading csv file\n with open(f, 'r', encoding='utf8', newline='') as csvfile:\n\n # creating a csv reader object\n csvreader = csv.reader(csvfile)\n next(csvreader)\n\n # extracting each data row one by one and append it\n for line in csvreader:\n full_data_rows_list.append(line)\n\n csv.register_dialect('myDialect', quoting=csv.QUOTE_ALL,\n skipinitialspace=True)\n\n # create one file with all the records\n with open(file_path, 'w', encoding='utf8',\n newline='') as f:\n writer = csv.writer(f, dialect='myDialect')\n writer.writerow(\n ['artist', 'firstName', 'gender', 'itemInSession',\n 'lastName', 'length', 'level', 'location', 'sessionId',\n 'song', 'userId'])\n for row in full_data_rows_list:\n if row[0] == '':\n continue\n writer.writerow((row[0], row[2], row[3], row[4], row[5],\n row[6], row[7], row[8], row[12], row[13],\n row[16]))", "def find_files(main_path, column_names = []):\n main_path = main_path\n files = []\n onlyfiles = [f for f in listdir(main_path) if isfile(join(main_path, f))]\n for file in onlyfiles:\n files.append(File(main_path, file[:-4], \".csv\", column_names))\n return files", "def route_data(route):\n os.chdir(\"../Data/test\") #change to whatever directory your data files are stored in\n with open(\"../Sorted Data/\"+str(route)+\"_data.csv\",\"w\",newline=\"\") as result_file: #storing resulting data in csv file in different directory\n wr=csv.writer(result_file, dialect='excel') #setting up csv writer\n for file in glob.glob(\"*.csv\"): #looping through raw data files\n reader=csv.reader(open(file))\n for line in reader:\n if extract_bus_route(line[3])==route: #extract_bus_route returns the bus route from journey pattern id (col D)\n wr.writerow(line)", "def get_currency_file_paths(self, dir_path=\"*/*\", extension=\".csv\"):\n csv_files = [\n csv_file for csv_file in glob(dir_path + extension, recursive=True)\n ]\n return [\n currency_file for currency_file in csv_files if \"currency\" in currency_file\n ]", "def main():\n\n # Ensure the output directory exists\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n\n process_csv()", "def split_data_into_exchanges(source_path, destination_path):\n for subdir, dirs, files in os.walk(source_path):\n for file in files:\n source_full_file = 
os.path.join(subdir, file)\n print(source_full_file)\n df = pd.read_csv(source_full_file)\n for group_name, df in df.groupby(['Ticker', 'Exchange']):\n file_name = destination_path / str(df['Date'].iloc[0]) / convertTuple(group_name)\n utils.make_dir(file_name)\n with open(file_name, \"w+\") as f:\n df.to_csv(f, index=False)", "def csv_path(name):\n return \"./data/%s\" % name", "def read_csv(self, path):\n for file in os.listdir(path):\n if file[-4:] == \".csv\":\n name = file[:-4]\n table_index_header = cfg.get_list(\"table_index_header\", name)\n filename = os.path.join(path, file)\n self.input_data[name] = pd.read_csv(\n filename,\n index_col=list(range(int(table_index_header[0]))),\n header=list(range(int(table_index_header[1]))),\n squeeze=(\"series\" not in name),\n )\n self.check_input_data(warning=False)\n self.add_meta_data()\n return self", "def csv_dir(self):\n return op.join(self.root_dir, 'csv')", "def collate_data(in_dir, extension='.csv', out_dir=None):\n if out_dir is None:\n out_dir = './' + re.search('^\\.(.*)', extension).groups(0)[0]\n\n if not os.path.isdir(out_dir):\n os.mkdir(out_dir)\n\n for p, d, fs in os.walk(in_dir):\n for f in fs:\n if extension in f:\n shutil.copy(p + '/' + f, out_dir + '/' + f)\n return", "def test_scan_dir_files(self):\n self.run_scan(self.subdir, self.nest_fcount + 1)", "def csv_file(input_file):\n\n current_dir = os.getcwd()\n directory_name = current_dir + '\\\\' + 'data' + '\\\\'\n csv_out = directory_name + input_file\n return csv_out" ]
[ "0.66698605", "0.66279465", "0.65627587", "0.65396017", "0.63316804", "0.6319263", "0.6246003", "0.6213397", "0.6184193", "0.6170909", "0.6162643", "0.6050794", "0.60200405", "0.5939705", "0.5917072", "0.5879509", "0.5850727", "0.5830297", "0.582743", "0.5807929", "0.5734628", "0.57019335", "0.5699497", "0.5689955", "0.56800246", "0.5668468", "0.56526405", "0.5651157", "0.56380093", "0.56301516" ]
0.71214396
0
Return the species of the column "species" of the csv_filename. Normalize it via lowercasing and transforming spaces into "_"
def normalized_species(csv_filename): with open(csv_filename) as csvfilename: reader = DictReader(csvfilename) first_row = next(reader) return first_row.get("species").lower().replace(" ", "_")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_species_name(fasta):\n name = fasta.description\n if ',' in name:\n name = ','.join(name.split(',')[:-1])\n name = ' '.join(name.split()[1:])\n if name.endswith(' '):\n name = name[:-1]\n if name.endswith(','):\n name = name[:-1]\n return name", "def species_name(self):\n return self.get(self._names[\"species_name\"])", "def rawSpecies(df, specie = \"Caenorhabditis elegans OX=6239\"):\n species = df[df[\"PG.Organisms\"] == specie]\n return species", "def tidy_cols(my_csv):\n return [re.sub(\" \", \"_\", col.lower()) for col in my_csv.columns]", "def get_scientific_name( species ):\n\n species = species.strip( ).lower( )\n result = species # fail-safe if there is no match in the loop\n for species_key in Species_Dict:\n if species in Species_Dict[ species_key ]:\n result = Species_Dict[ species_key ][ 3 ] # change assignment if you want to return another list element\n break\n return result", "def guess_species(dbo, s):\n s = str(s).lower()\n guess = db.query_int(dbo, \"SELECT ID FROM species WHERE LOWER(SpeciesName) LIKE '%\" + db.escape(s) + \"%'\")\n if guess != 0: return guess\n return configuration.default_species(dbo)", "def _process_species_dict(dict, species):\n for k, v in dict.items():\n if v == species:\n return str(k)", "def extract_valid_species_name(self, taxon):\n\n if ' bacterium' in taxon.lower() or 'sp.' in taxon.lower():\n return None\n\n taxon = taxon.replace('s__', '')\n taxon = taxon.replace('Candidatus', '')\n taxon = taxon.replace('candidatus', '')\n\n if not taxon or taxon[0].islower():\n return None\n\n taxon_split = taxon.split(' ')\n if len(taxon_split) < 2:\n return None\n\n # sanity check\n taxon = 's__' + ' '.join(taxon_split[0:2])\n self.validate_species_name(taxon)\n\n return taxon", "def get_filtered_record(csv_fname):\n with open(csv_fname, \"r\") as student_records:\n for student_record in csv.reader(student_records):\n converted = [re.sub('[^a-z0-9]+', '', x.lower()) for x in student_record]\n yield converted", "def species(self):\n return self.name", "def test_space_stripping(self):\n descriptor = clone(SPECIES_OBSERVATION_SCHEMA)\n species_name = 'Chubby Bat'\n record = {\n 'Observation Date': \"18/08/2016\",\n 'Latitude': -32,\n 'Longitude': 115,\n 'Species Name': ' Chubby Bat '\n }\n schema = SpeciesObservationSchema(descriptor)\n self.assertEqual(species_name, schema.cast_species_name(record))", "def execute(species_file_name = '../data_external/b_neat_species.txt', \r\n facts_file_name = '../data_external/b_neat_species_data.txt', \r\n out_file_name = '../data_prepared/facts_b_neat.txt', \r\n infile_encoding = 'utf16', \r\n outfile_encoding = 'utf16', \r\n field_separator = '\\t', \r\n row_delimiter = '\\r\\n'): # For windows usage.\r\n try:\r\n # Read species file and store taxonid:name in dictionary.\r\n # Header: id, hierarchy, species_name, author_year, last_modified\r\n speciesdict = {}\r\n speciesfile = codecs.open(species_file_name, mode = 'r', encoding = infile_encoding) \r\n # Iterate over rows in file.\r\n for rowindex, row in enumerate(speciesfile):\r\n if rowindex == 0: # First row is assumed to be the header row.\r\n headers = list(map(str.strip, row.split(field_separator)))\r\n # headers = list(map(unicode, headers))\r\n else:\r\n # Replace characters interpreted as latin-1.\r\n row = row.replace(u'µ', u'µ') # µ µ\r\n row = row.replace(u'ä', u'ä') # ä ä\r\n row = row.replace(u'Ã¥', u'å') # Ã¥ å\r\n row = row.replace(u'æ', u'æ') # æ æ\r\n row = row.replace(u'ë', u'ë') # ë ë \r\n row = row.replace(u'ö', u'ö') # ö ö \r\n row = 
row.replace(u'ü', u'ü') # ü ü\r\n row = row.replace(u'Ø', u'Ø') # Ø. Ø\r\n row = row.replace(u'ø', u'ø') # ø ø\r\n row = row.replace(u'é', u'é') # é é\r\n row = row.replace(u'ä', u'ä') # ä ä\r\n row = row.replace(u'ë', u'ë') # ë ë\r\n row = row.replace(u'ö', u'ö') # ö ö\r\n \r\n row = list(map(str.strip, row.split(field_separator))) \r\n # row = list(map(unicode, row))\r\n #\r\n speciesdict[row[0]] = row[2]\r\n #\r\n speciesfile.close()\r\n # Create outdatafile.\r\n out = codecs.open(out_file_name, mode = 'w', encoding = outfile_encoding)\r\n # Header, define and print.\r\n outheader = ['Scientific name', 'Note on taxonomy', 'Morphology', 'Ecology', 'Other remarks', \r\n 'Tropic type', 'Harmful', 'Note on harmfulness', 'Substrate', 'Life form',\r\n 'Width', 'Length', 'Size', 'Resting spore', 'Literature', 'Last modified']\r\n out.write(field_separator.join(outheader) + row_delimiter)\r\n # Open image file for reading.\r\n imagesfile = codecs.open(facts_file_name, mode = 'r', encoding = infile_encoding) \r\n # Iterate over rows in file.\r\n for rowindex, row in enumerate(imagesfile):\r\n if rowindex == 0: # First row is assumed to be the header row.\r\n # Header: id species_id note_on_taxonomy morphology ecology other_remarks \r\n # tropic_type harmful note_on_harmfulness substrate life_form \r\n # width length size resting_spore literature last_modified\r\n pass\r\n else:\r\n # Replace html tags. \r\n row = row.replace(u'<i>', u'<em>') \r\n row = row.replace(u'</i>', u'</em>') \r\n row = row.replace(u'<b>', u'<strong>') \r\n row = row.replace(u'</b>', u'</strong>') \r\n # Replace characters interpreted as latin-1.\r\n row = row.replace(u'µ', u'µ') # µ µ\r\n row = row.replace(u'ä', u'ä') # ä ä\r\n row = row.replace(u'Ã¥', u'å') # Ã¥ å\r\n row = row.replace(u'æ', u'æ') # æ æ\r\n row = row.replace(u'ë', u'ë') # ë ë \r\n row = row.replace(u'ö', u'ö') # ö ö \r\n row = row.replace(u'ü', u'ü') # ü ü\r\n row = row.replace(u'Ø', u'Ø') # Ø. 
Ø\r\n row = row.replace(u'ø', u'ø') # ø ø\r\n row = row.replace(u'é', u'é') # é é\r\n row = row.replace(u'ä', u'ä') # ä ä\r\n row = row.replace(u'ë', u'ë') # ë ë\r\n row = row.replace(u'ö', u'ö') # ö ö\r\n row = row.replace(u'Ö', u'Ö') # Ö Ö\r\n row = row.replace(u'Ã…', u'Å') # Ã… Å\r\n row = row.replace(u'ß', u'ß') # ß ß\r\n row = row.replace(u'Ü', u'Ü') # Ü Ü\r\n row = row.replace(u'á', u'á') # á á \r\n row = row.replace(u'í', u'í') # í í\r\n row = row.replace(u'ó', u'ó') # ó ó\r\n row = row.replace(u'’', u'’') # ’ ’\r\n row = row.replace(u'“', u'“') # “ “ \r\n row = row.replace(u'â€', u'”') # †” \r\n #\r\n row = list(map(str.strip, row.split(field_separator)))\r\n # row = list(map(unicode, row))\r\n #\r\n # 0 : id \r\n # 18 : last_modified\r\n scientificname = speciesdict[row[1]] # species_id\r\n\r\n # Create row.\r\n outrow = [scientificname, row[2].strip('\"'), row[3].strip('\"'), row[4].strip('\"'), row[5].strip('\"'), \r\n row[6].strip('\"'), row[7].strip('\"'), row[8].strip('\"'), row[9].strip('\"'), row[10].strip('\"'), \r\n row[11].strip('\"'), row[12].strip('\"'), row[13].strip('\"'), row[14].strip('\"'), row[15].strip('\"'), row[16].strip('\"')] \r\n # Print row.\r\n out.write(field_separator.join(outrow) + row_delimiter) \r\n # \r\n imagesfile.close()\r\n out.close \r\n #\r\n# except Exception as e:\r\n# print(\"ERROR: Exception %s\" % (e.args[0]))\r\n# print(\"ERROR: Script will be terminated.\")\r\n# sys.exit(1)\r\n finally:\r\n pass", "def species_lookup_by_data_provider(self, provider):\n return self.species_name_lookup(provider)", "def getSpeciesNames(self, params):\n #meanfile = params['job_folder']+'/result/stats/means.txt'\n #logging.info(str(meanfile))\n try:\n # Try to grab them from the mean.txt file\n print params\n if params['exec_type'] == 'deterministic':\n meanfile = params['job_folder'] + '/result/output.txt'\n else:\n meanfile = params['job_folder'] + '/result/stats/means.txt'\n \n #meanfile = params['job_folder']+'/result/stats/means.txt'\n file = open(meanfile,'rb')\n row = file.readline()\n logging.info(str(row))\n species_names = row.strip().split('\\t')\n file.close()\n except Exception, e:\n logging.info(str(e))\n return None\n \n # The first value is always 'time' \n return species_names[1:]", "def get_cites_species():\n mongo_db = mongo_client_db()\n cursor = mongo_db[CITES_COLLECTION].find({'full_name': {'$ne': None}}, {'full_name':1})\n return [r['full_name'].encode('utf8') for r in cursor]", "def format_field(self, value, format_spec):\n value = super(FilenameFormatter, self).format_field(value, format_spec)\n if self.lowercase:\n value = value.lower()\n if not self.nonwordchars:\n value = re.sub('[^\\w\\s]+', '', value)\n value = re.sub('\\s+', self.word_delimiter, value)\n return value", "def _reformat_csv(self, csv):\n\n # avoid using extra backslashes because sed uses them as delimiter\n date = csv[-19:-9].replace('/', '-')\n cmds = [f'cut -d , -f 1 --complement <{csv} >{csv}.new',\n f'mv {csv}.new {csv}',\n f'sed -i \"1d\" {csv}',\n f'sed -i \"s/AS//g\" {csv}',\n f'sed -i \"s/,/\\t/g\" {csv}',\n f'sed -i \"s/$/\\t{date}/\" {csv}']\n\n utils.run_cmds(cmds)", "def clean_paths(row):\n return os.path.basename(row['oldfractal'])", "def divide_to_species(self):\n titles = []\n for i in self.rest:\n titles.append(i.title.split(\" \"))\n for i in range(len(titles)):\n for j in range(i, len(titles)):\n if titles[i][0] == titles[j][0] and titles[i][1] == titles[j][1]:\n if \" \".join(titles[i]) not in [z.title for z in self.species[\" 
\".join(titles[i][:2])]]:\n self.rest[i].species = \" \".join(titles[i])\n self.species[\" \".join(titles[i][:2])].append(self.rest[i])\n if \" \".join(titles[j]) not in [z.title for z in self.species[\" \".join(titles[j][:2])]]:\n self.rest[j].species = \" \".join(titles[j])\n self.species[\" \".join(titles[j][:2])].append(self.rest[j])\n\n self.name_of_species = list(self.species.keys())\n\n for i in self.species.keys():\n self.count_species[i] = len(self.species[i])", "def pre_process_line(self, line):\n\n line = line.lower()\n line = line.translate(co.NORM_TABLE)\n line = line.translate(co.PUNC_TABLE)\n line = line.split()\n line = line[self.configs[\"resources\"][self.resource_type][\"lstrip\"]:]\n if self.configs[\"resources\"][self.resource_type][\"rstrip\"]:\n line = line[:-self.configs[\"resources\"][self.resource_type][\"rstrip\"]]\n return \" \".join(line)", "def preprocess_var(bd, var):\n filepath_sv = f\"team67-ptp/data/{var}.csv\"\n filepath = bd\n data = feather.read_dataframe(filepath)\n df = data.copy()\n df2 = df[var]\n df2 = df2.to_frame()\n if df2[var].dtype is \"category\":\n df2[var] = df2[var].astype(\"category\").cat.codes\n filename = filepath_sv\n df2.to_csv(filename, index=False)\n print(\"Succesfully exported to csv\")\n else:\n filename = filepath_sv\n df2.to_csv(filename, index=False)\n print(\"Succesfully exported to csv\")", "def BOM_3DtoList(filename, SN_col=2):\n SN_col-=1\n with open(filename,'rb') as csvfile:\n reader = csv.reader(csvfile)\n liste = []\n for row in reader:\n if row[SN_col] != \"\":\n SN = row[SN_col].partition('_')[0].strip()\n if SN.count('-') != 2 : \n row.append(\"UKN\")\n else:\n if re.search('[a-zA-Z]', SN.partition('-')[-1].rpartition('-')[0]) or re.search('[a-zA-Z]', SN.partition('-')[0].strip()) :\n row.append(\"UKN\")\n else:\n row.append(SN)\n liste.append(row)\n return liste", "def namingConvention(columnName):\n words = columnName.lower().split(\"_\")\n\n def cap(word):\n if word.lower() == \"id\":\n return word.upper()\n else:\n return word.capitalize()\n\n return words[0] + \"\".join(map(cap, words[1:]))", "def __obtain_csv_fieldnames__(self, csvfile):\n self.__fieldnames__ = csvfile.readline()\n self.__obtain_csv_delimiter__(self.__fieldnames__)\n self.__fieldnames__ = self.__remove_break_line__(self.__fieldnames__)\n self.__fieldnames__ = self.__split_for_delimiter__(self.__fieldnames__)", "def selectSpecies(self, f, species):\n\n if isinstance(species, types.StringTypes):\n k = self.speciesIndex(species)\n return f[k]\n elif species:\n fs = []\n k = 0\n for s in species:\n k = self.speciesIndex(s)\n fs.append(f[k])\n return asarray(fs)\n else:\n return asarray(f)", "def change_to_video_name(csv_name, suffix):\n return csv_name[:-10]+\".\"+suffix", "def split_name(filename):\n # *********** My filename are in the format ./CaAl2Si2O8_T3_nvt_a12.5.outcar.msd.dat\n # ******* so I can split their name with _ and take the compound and T from their name\n filename = filename.strip('./')\n temperature = filename.split('_')[1]\n acell = filename.split('.outcar')[0].split('_')[3].strip('a')\n return temperature, acell", "def known_species(self):\n # Import #\n from forest_puller.conversion.tree_species_info import df as species_info\n # Filter #\n df = species_info[['genus', 'species']]\n # Return #\n return df", "def speciesName(self,k):\n return _cantera.phase_getstring(self._phase_id,2,k)", "def find_meta(filename, source_directory):\n metafile = os.path.join(source_directory, filename + '_Metadata.csv')\n metadf = 
pd.read_csv(metafile)\n metadf = metadf.rename(str.lower, axis='columns')\n\n schfile = metadf['schedule_file_name'][0].split('\\\\')[-1].split('.sdu')[0].split('-')[1]\n param = schfile.replace('_', '.')\n\n return param" ]
[ "0.5889498", "0.58427936", "0.5762657", "0.55987436", "0.55368763", "0.5307403", "0.53045946", "0.5302626", "0.5264975", "0.51979125", "0.514122", "0.5115525", "0.5044933", "0.50338084", "0.50125307", "0.49970248", "0.4994492", "0.49843335", "0.4976661", "0.49747512", "0.49402267", "0.49375182", "0.4923133", "0.4908322", "0.48935997", "0.48874816", "0.48653832", "0.48645467", "0.4856212", "0.4854508" ]
0.83327395
0
Return the datetime of the column "observation_date" of the csv_filename. Normalize it with the format YYYYMMDDHHMM
def normalized_datetime(csv_filename): with open(csv_filename) as csvfilename: reader = DictReader(csvfilename) first_row = next(reader) src_date_fmt = "%d/%m/%Y %H:%M" dst_date_fmt = "%Y%m%d%H%M" obs_date = datetime.strptime(first_row.get("observation_date"), src_date_fmt) return obs_date.strftime(dst_date_fmt)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _resolve_date_from_csv_row(self, csv_row: dict):\n date_str = csv_row[CSV_SENSOR_MAP[CarSensorID.DATE]]\n return datetime.datetime.strptime(date_str, '%d-%b-%Y %H:%M:%S.%f')", "def extract_datetime(filename) -> datetime:\n date_part = filename[-26:-7]\n return datetime.strptime(date_part, '%Y-%m-%d_%H-%M-%S')", "def read_csv_file(input_csv_file_path):\n with open(input_csv_file_path, 'r', encoding='utf-8') as file_path:\n csv_reader = csv.reader(file_path)\n for row in itertools.islice(csv_reader, 1, None):\n yield (\n int(row[0]), row[2],\n datetime.datetime.strftime(\n datetime.datetime.strptime(row[-1], '%m/%d/%y'),\n '%Y-%m-%d'))", "def get_datetimes(file_name):\n csv_file = open(file_name, 'rb')\n file_content = csv.reader(csv_file)\n\n # ignore header\n file_content.next()\n\n datetimes = []\n\n for row in file_content:\n datetimes.append(row[0])\n\n csv_file.close()\n\n return datetimes", "def csv_handle_changedate(self,col_name,col_type):\n table = self.csv_dataframe\n if col_type == 'date':\n table[col_name] = pd.to_datetime(table[col_name]).dt.date\n elif col_type == 'datetime':\n table[col_name] = pd.to_datetime(table[col_name]).dt.to_pydatetime()\n elif col_type == 'year':\n table[col_name] = pd.to_datetime(table[col_name].apply(lambda x: str(x)+'/1/1')).dt.date", "def load_obs_csv(self, csv_file, date_fmt=\"%Y/%m/%d %H:%M\", mission_lst=None, only_geom=False):\n\n try:\n obs_data = np.loadtxt(csv_file, delimiter=',', dtype='str')\n msg = \"observation data loaded from file ***{}***\".format(csv_file)\n FileLogger.info(msg)\n except IOError as exc:\n msg = \"could not load observations from csv file ***{}***\".format(csv_file)\n msg += \" ({})\".format(exc)\n FileLogger.fatal(msg)\n raise RuntimeError(msg)\n\n nt,ncol = obs_data.shape\n date_lst = [ dt.datetime.strptime(obs_data[i,0], date_fmt) for i in xrange(nt) ]\n date_a = np.array(date_lst)\n time_start_data = date_lst[0]\n time_end_data = date_lst[-1]\n #-- logging\n msg = \"detected ntimepts={} #columns={} in csv file\".format(nt, ncol)\n FileLogger.info(msg)\n\n #-- potential adjustment to specified temporal domain\n if self.time_start!=None:\n time_start = self.time_start\n else:\n time_start = time_start_data\n if self.time_end!=None:\n time_end = self.time_end\n else:\n time_end = time_end_data\n\n #-- first 8 columns are always:date, vza, vaa, sza, saa, sat_flag, lat, lon\n\n if ncol==10:\n msg = \"start reading S1 observations...\"\n FileLogger.info(msg)\n # date, vza, vaa, sza, saa, sat_flag, lat, lon, vh, vv\n vh_lst = []\n vv_lst = []\n self.obs_dct['S1'] = ObsTable()\n self.obs_dct['S1'].geom = satgeo.SensorGeometry()\n self.obs_dct['S1'].sat_id_lst = []\n #-- abreviate\n sat_geom = self.obs_dct['S1'].geom\n sat_geom.date_utc = []\n sat_geom.vza = []\n sat_geom.vaa = []\n sat_geom.sza = []\n sat_geom.saa = []\n for i,act_date in enumerate(date_lst):\n if act_date<time_start:\n continue\n elif act_date>time_end:\n break\n #-- actual satellite/mission\n act_mission = obs_data[i,7].upper()\n if mission_lst!=None and not act_mission in mission_lst:\n msg = \"observation at date {} is from mission={} and ignored here.\".format(\n act_date.strftime('%Y-%m-%dT%H:%M'), act_mission)\n FileLogger.info(msg)\n continue\n #-- read actual geometry\n sat_geom.date_utc.append(act_date)\n sat_geom.vza.append( float(obs_data[i,1]) )\n sat_geom.vaa.append( float(obs_data[i,2]) )\n sat_geom.sza.append( float(obs_data[i,3]) )\n sat_geom.saa.append( float(obs_data[i,4]) )\n #-- lon,lat (columns 5,6) not needed\n #-- satellite 
flag (column 7)\n self.obs_dct['S1'].sat_id_lst.append(act_mission)\n #-- VH,VV in 0-indexed columns 8,9\n vh_lst.append( float(obs_data[i,8]) )\n vv_lst.append( float(obs_data[i,9]) )\n\n #-- geometries/satellite flags are done here\n if only_geom:\n return\n\n #-- turn into arrays\n vh = np.array(vh_lst)\n vv = np.array(vv_lst)\n #-- logging\n msg = \"observational backscatter values are assumed to be in linear units!\"\n FileLogger.info(msg)\n msg = \"VH backscatter values read: VH[linear] min/max={}/{}\".format(\n vh.min(), vh.max())\n FileLogger.info(msg)\n msg = \"VV backscatter values read: VV[linear] min/max={}/{}\".format(\n vv.min(), vv.max())\n FileLogger.info(msg)\n #-- uncertainty computation\n #-- XX_db = XX_db(XX) = 10*log10(XX)\n #-- XX = XX(XX_db) = 10**(XX_db/10)\n #\n # for the uncertainty in linear/raw unit we apply conservative estimation:\n # 2*sXX = [ XX(XX_db+sXX_db) - XX(XX_db-sXX_db) ] (XX=VH,VV)\n # = [ XX(XX_db)*10**(sXX_db/10.) - XX(XX_db)*10**(-sXX_db/10.)]\n # = XX(XX_db)*[10**(sXX_db/10.) - 10**(-sXX_db/10.)]\n # = XX * [10**(sXX_db/10.) - 10**(-sXX_db/10.)]\n ds = 0.5* (10**(self.s1_unc_db/10.) - 10**(-1*self.s1_unc_db/10.))\n #-- S1 uncertainty floor *may* be user-supplied\n if self.s1_vv_uncfloor!=None:\n dsvv_floor = self.s1_vv_uncfloor\n else:\n dsvv_floor = 10**(self.s1_floor_db/10.)*ds\n if self.s1_vh_uncfloor!=None:\n dsvh_floor = self.s1_vh_uncfloor\n else:\n dsvh_floor = 10**(self.s1_floor_db/10.)*ds\n msg = \"assuming S1 observational uncertainty of {} [dB] \".format(self.s1_unc_db)\n msg += \"yields relative uncertainty of {} [linear unit].\".format(ds)\n FileLogger.info(msg)\n msg = \"assuming vv={} vh={} S1 observational uncertainty floor [linear unit].\".format(\n dsvv_floor, dsvh_floor)\n FileLogger.info(msg)\n svh = np.maximum(vh*ds, dsvh_floor)\n svv = np.maximum(vv*ds, dsvv_floor)\n #-- apply floor value\n nlo_svh = np.count_nonzero(vh*ds<dsvh_floor)\n nlo_svv = np.count_nonzero(vv*ds<dsvv_floor)\n svh = np.maximum(svh, dsvh_floor)\n svv = np.maximum(svv, dsvv_floor)\n msg = \"number of applied uncertainty floor values on VH={} VV={}\".format(\n nlo_svh, nlo_svv)\n FileLogger.info(msg)\n msg = \"determined VH uncertainty in linear units, min/max={}/{}\".format(\n svh.min(), svh.max())\n FileLogger.info(msg)\n msg = \"determined VV uncertainty in linear units, min/max={}/{}\".format(\n svv.min(), svv.max())\n FileLogger.info(msg)\n #-- potential filtering of polarisations\n if not self.s1_pol is None:\n if not 'VH' in self.s1_pol:\n vh = self.obs_fill_value\n svh = self.obs_fill_value\n if not 'VV' in self.s1_pol:\n vv = self.obs_fill_value\n svv = self.obs_fill_value\n #-- \n nt_use = len(sat_geom.date_utc)\n self.obs_dct['S1'].data = np.empty((nt_use,2), dtype=np.float64) #-- 'VH','VV'\n self.obs_dct['S1'].data[:,0] = vh\n self.obs_dct['S1'].data[:,1] = vv\n self.obs_dct['S1'].dataunc = np.empty((nt_use,2), dtype=np.float64)\n self.obs_dct['S1'].dataunc[:,0] = svh\n self.obs_dct['S1'].dataunc[:,1] = svv\n #-- logging\n msg = \"...reading S1 observations DONE\"\n FileLogger.info(msg)\n else:\n #-- logging\n msg = \"start reading S2 observations...\"\n FileLogger.info(msg)\n # date, vza, vaa, sza, saa, sat_flag, lat, lon, BRF1,...,BRF13\n self.obs_dct['S2'] = ObsTable()\n self.obs_dct['S2'].geom = satgeo.SensorGeometry()\n self.obs_dct['S2'].sat_id_lst = []\n #-- abreviate\n sat_geom = self.obs_dct['S2'].geom\n sat_geom.date_utc = []\n sat_geom.vza = []\n sat_geom.vaa = []\n sat_geom.sza = []\n sat_geom.saa = []\n brf_lst = [ [] for i in 
xrange(NB_S2) ] #-- prepare lists for 13 BRF bands\n for i,act_date in enumerate(date_lst):\n if act_date<time_start:\n continue\n elif act_date>time_end:\n break\n #-- actual satellite/mission\n act_mission = obs_data[i,7].upper()\n if mission_lst!=None and not act_mission in mission_lst:\n msg = \"observation at date {} is from mission={} and ignored here.\".format(\n act_date.strftime('%Y-%m-%dT%H:%M'), act_mission)\n FileLogger.info(msg)\n continue\n #-- read actual geometry\n sat_geom.date_utc.append(act_date)\n sat_geom.vza.append( float(obs_data[i,1]) )\n sat_geom.vaa.append( float(obs_data[i,2]) )\n sat_geom.sza.append( float(obs_data[i,3]) )\n sat_geom.saa.append( float(obs_data[i,4]) )\n #-- lon/lat in columns 5, 6 not used here\n #-- satellite flag\n self.obs_dct['S2'].sat_id_lst.append(obs_data[i,7])\n #-- BRFs start at 0-indexed column 8 in data csv file\n for ib in xrange(NB_S2):\n icol = ib+8\n brf_lst[ib].append( float(obs_data[i, icol]) )\n\n #-- geometries/satellite flags are done here\n if only_geom:\n return\n #--\n nt_use = len(sat_geom.date_utc)\n brf_data = np.empty((nt_use,NB_S2), dtype=np.float64) #-- BRF1-13\n for ib in xrange(NB_S2):\n brf_data[:,ib] = np.array(brf_lst[ib])\n #-- check observational consistency\n nneg = np.count_nonzero( brf_data<0 )\n if nneg>0:\n msg = \"detected negative BRF values: nneg={}.\".format(nneg)\n msg += \" These will be set to fill-value!\"\n FileLogger.warn(msg)\n brf_data[ brf_data<0 ] = self.obs_fill_value\n nhi = np.count_nonzero( brf_data>1 )\n if nhi>0:\n msg = \"detected high BRF outlier values>1: nout={}.\".format(nhi)\n msg += \" These will be set to fill-value!\"\n FileLogger.warn(msg)\n brf_data[ brf_data>1 ] = self.obs_fill_value\n\n #-- data uncertainty\n msg = \"BRF uncertainty is derived by applying {} relative uncertainty, \".format(\n self.s2_relunc)\n msg += \"and an uncertainty floor value of {}\".format(self.s2_uncfloor)\n FileLogger.info(msg)\n brf_dataunc = np.maximum(brf_data*self.s2_relunc, self.s2_uncfloor)\n brf_dataunc[ brf_dataunc<0 ] = self.obs_fill_value\n brf_dataunc[ brf_data==self.obs_fill_value ] = self.obs_fill_value\n #-- restriction to seleted bands\n if not self.s2_bnds is None:\n bnd_msk = np.ones((NB_S2,), dtype=np.bool)*True\n bnd_msk[self.s2_bnds] = False\n brf_data[:,bnd_msk] = self.obs_fill_value\n brf_dataunc[:,bnd_msk] = self.obs_fill_value\n #-- set into structure\n self.obs_dct['S2'].data = brf_data\n self.obs_dct['S2'].dataunc = brf_dataunc\n #-- logging\n msg = \"...reading S2 observations DONE\"\n FileLogger.info(msg)", "def datetime(self):\r\n if 'observation_time_rfc822' in self.data \\\r\n and self.data['observation_time_rfc822']:\r\n tstr = self.data['observation_time_rfc822']\r\n tstr = ' '.join(tstr.split(' ')[:-2])\r\n return datetime.strptime(tstr, '%a, %d %b %Y %H:%M:%S')\r\n elif 'observation_time' in self.data:\r\n return datetime.strptime(self.data['observation_time'] \\\r\n +' %s'%datetime.now().year,\r\n 'Last Updated on %b %d, %H:%M %p %Z %Y')\r\n return ''", "def sas_date_converter(row, base_date='1960-01-01'):\n if row is None:\n return row\n return datetime.strptime(base_date, '%Y-%m-%d') + timedelta(int(row))", "def csv_str_to_date(datestr: str) -> datetime:\n return datetime.strptime(datestr, \"%Y-%b-%d %I:%M %p\")", "def readdate(line):\n splitted = line.split('::') \n \n # Convert the date\n date = dt.datetime.strptime(splitted[1].strip(), '%a %b %d %H:%M:%S')\n correctdate = date.replace(year=YEAR)\n return correctdate", "def filename_to_timestamp(file_path: 
str, target_path: str) -> None:\n # Extract the date and time from filenames\n # We assume the name convention local_path/proj_root/path_to_data/lander_planet_date_time.csv\n dt = ''.join(file_path.split('.')[-2].split('_')[-2:])\n new_target_path = get_project_root() + '/' + target_path + file_path.split('/')[-1]\n with open(file_path, 'r') as infile, open(new_target_path, 'w') as outfile:\n reader = csv.reader(infile)\n writer = csv.writer(outfile)\n writer.writerow(next(reader) + ['timestamp'])\n for row in reader:\n writer.writerow(row + [str(datetime.strptime(dt, '%Y%m%d%H%M%S'))])", "def read_weather_data_from_csv(csv_path):\n\n # Read the original DataFrame and select the relevant columns\n original_df = pd.read_csv(csv_path)[[\"DateUTC\",\"TemperatureF\"]]\n\n # Round up the hour of each Date to the nearest whole hour\n original_df[\"Date\"] = original_df[\"DateUTC\"].apply(round_utc_hour_up)\n\n # Rename Temperature field to include city name\n city = csv_path.split(\"_\")[1].split(\"/\")[1]\n original_df[city + \"_TemperatureF\"] = original_df[\"TemperatureF\"]\n original_df = original_df.drop([\"TemperatureF\", \"DateUTC\"], axis=1)\n\n return original_df", "def parse_data(filename):\n x, y = [], []\n with open(filename) as f:\n reader = csv.reader(f)\n for row in reader:\n x.append(datetime.strptime(row[1], DATE_FORMAT))\n y.append(row[0])\n\n return x, y", "def get_loc_year_csv(csv_name):\n fname = (csv_name.split('.'))[0].split('-')\n return fname[0], fname[1]", "def df_multicolumn_date_to_datetime(row):\n year = row['arrival_date_year']\n month = row['arrival_date_month']\n day = row['arrival_date_day_of_month']\n # create datetime object from string of form \"YearMonthDay\" using full month name\n return datetime.datetime.strptime(f\"{year}{month}{day}\", '%Y%B%d').date()", "def _reformat_csv(self, csv):\n\n # avoid using extra backslashes because sed uses them as delimiter\n date = csv[-19:-9].replace('/', '-')\n cmds = [f'cut -d , -f 1 --complement <{csv} >{csv}.new',\n f'mv {csv}.new {csv}',\n f'sed -i \"1d\" {csv}',\n f'sed -i \"s/AS//g\" {csv}',\n f'sed -i \"s/,/\\t/g\" {csv}',\n f'sed -i \"s/$/\\t{date}/\" {csv}']\n\n utils.run_cmds(cmds)", "def get_date(row):\n year, month = row[['CompetitionOpenSinceYear', 'CompetitionOpenSinceMonth']]\n if not pd.isnull(year):\n return pd.Timestamp(int(year), int(month), 1)", "def _read_antti_datetime(dt_file):\n # NOTE: genfromtxt() doesn't work with gzipped files as it should, so we\n # unzip the file ourself, and use io.BytesIO to fake out genfromtext()\n if dt_file.split('.')[-1] == 'gz':\n ff = gzip.open(dt_file, 'r')\n else:\n ff = open(dt_file, 'r')\n\n sIO = io.BytesIO(ff.read().encode())\n ff.close()\n\n ymdHMS = np.genfromtxt(sIO, comments=\"%\")\n DT = np.array([dt.datetime(*elem) for elem in ymdHMS.astype('int')])\n sIO.close()\n\n return DT", "def get_parsed_date(row):\n input_years = row.get('date', '').strip()\n return parse_year(input_years)", "def correctDateTime(file,offsetTime):\n\n header = []\n\n f = open(file)\n\n for line in f:\n line = line.strip()\n\n if line[:1].isalpha():\n \t# Save the header\n \theader.append(line)\n elif len(line[:1]) == 0:\n # Skip empty lines\n \tcontinue\n else:\n ddmmyyyyHHMM, obs, pred, res = line.split(\"\\t\")\n\n # Convert the date and time to seconds since the epoch\n c = time.strptime(ddmmyyyyHHMM,\"%d.%m.%Y %H:%M\")\n t = time.mktime(c)\n t = t - (offsetTime*3600)\n tc = time.strftime(\"%d.%m.%Y %H:%M\",time.localtime(t))\n\n # Format the times properly so we can avoid 
running the \n # bash script at all.\n dd, mm, yyyyHHMM, = tc.split(\".\")\n yyyy, HHMM = yyyyHHMM.split(\" \")\n HH, MM = HHMM.split(\":\")\n\n print yyyy, mm, dd, HH, MM, \"00\", float(obs)/100, float(res)/100", "def file_name_to_date(prediction_file_name):\n\n error_checking.assert_is_string(prediction_file_name)\n pathless_file_name = os.path.split(prediction_file_name)[-1]\n\n valid_date_string = pathless_file_name.split('.')[0].split('_')[1]\n _ = time_conversion.string_to_unix_sec(valid_date_string, DATE_FORMAT)\n\n return valid_date_string", "def _cleanup_date(self, date):\n if (not date or\n not isinstance(date, str) or\n 'N/A' in date):\n return None\n\n date, time = date.split()\n day, month, year = date.split('.')\n hour, minute = time.split(':')\n year, month, day, hour, minute = [int(x) for x in (year, month, day, hour, minute)]\n\n # For 2-digit years, 1969/2068 is the wrap-around (POSIX standard)\n if (69 <= year < 100):\n year += 1900\n elif (0 <= year < 69):\n year += 2000\n\n return datetime.datetime(year, month, day, hour, minute)", "def datetime_column(filepath, skiprows, skipcolumns):\n df = pd.read_csv(filepath, skiprows=skiprows)\n df = df.drop(columns = skipcolumns)\n# df = df.head(10)\n \n# return df\n\n def try_parse(df):\n# print(df.iloc[1, :])\n # try parsing some rows from each column as date\n head = df.head()\n tail = df.tail()\n for column in df.columns:\n try:\n# print(dateutil.parser.parse(df[column].iloc[-1]))\n dt_head = dateutil.parser.parse(head[column].iloc[-1])\n dt_tail = dateutil.parser.parse(tail[column].iloc[-1])\n# print('possible datetime')\n# if not date.time() == datetime.time():\n if not dt_head.time() == dt_tail.time():\n if not dt_head.date() == dt_tail.date():\n # time seems to be present (not default parser value)\n return column\n except:\n continue\n return None\n \n # try without modifying values\n rv = try_parse(df=df)\n if rv:\n return rv\n \n # try modifying values\n chars = ['-', '_', '/', '#']\n for char in chars:\n dfc = df.copy()\n for col in dfc.columns:\n try:\n dfc[col] = dfc[col].str.split(char).str.join(' ')\n except:\n pass # will only work for str type\n# print(char, dfc.iloc[1, :])\n rv = try_parse(df=dfc)\n if rv:\n return rv", "def update_metadata_csv(self, source):\n timestamp = os.path.getmtime(source)\n filedate = datetime.datetime.fromtimestamp(timestamp)\n return self.update_metadata_date(filedate)", "def change_date(csv_list_of_rows, time_column_name, date_format, seconds_forward):\n time_column_indices = find_item_positions(csv_list_of_rows, time_column_name, 0)\n if time_column_indices == []:\n sys.stderr.write('The following column of data was not found: ' + time_column_name + '\\n')\n row_count = 0\n for time_column_index in time_column_indices:\n while row_count < len(csv_list_of_rows):\n if row_count > 0:\n try:\n if '�' not in csv_list_of_rows[row_count][time_column_index]:\n csv_date = datetime.datetime.strptime(csv_list_of_rows[row_count][time_column_index], date_format)\n eastern_time = csv_date + datetime.timedelta(seconds = seconds_forward)\n csv_list_of_rows[row_count][time_column_index] = eastern_time.strftime(date_format)\n else:\n sys.stderr.write('The URC turned a date field for row ' + str(row_count) + ' into something unparseable: ' + str(csv_list_of_rows[row_count][time_column_index]) + ' so we are dropping the row\\n')\n del csv_list_of_rows[row_count]\n row_count -= 1\n except Exception as ex:\n sys.stderr.write('Row ' + str(row_count) + ' causes an exception so we will leave this date 
alone: ' + str(ex) + '\\n')\n row_count += 1\n return csv_list_of_rows", "def dataset_from_csv(self, filename, time_column='point_in_time'):\n return pd.from_csv(filename, parse_dates=[time_column])", "def _guess_time_format(self, csv_file):\n import csv\n\n fmt_lst = ['%Y/%m/%d %H:%M', '%Y-%m-%d %H:%M:%S']\n\n fmt_found = None\n\n with open(csv_file,'r') as fp:\n reader = csv.DictReader(fp)\n for i,line in enumerate(reader):\n for k,v in line.iteritems():\n if k.find('date')>=0: #-- this should be the date column\n date_str = v\n break\n if i>0:\n break\n\n msg = \"found first date in file ---{}---\".format(v)\n FileLogger.info(msg)\n\n for fmt in fmt_lst:\n try:\n dt.datetime.strptime(date_str,fmt)\n fmt_found = fmt\n break\n except ValueError:\n pass\n\n msg = \"detected time-format '{}'\".format(fmt_found)\n FileLogger.info(msg)\n\n return fmt_found", "def numeric_date_recover(self):\n \n sp_time_zone, current_datetime = self.setup_datetime() \n converter2sptimezone = current_datetime.astimezone(sp_time_zone)\n \n return converter2sptimezone.strftime('%d-%m-%Y')", "def parse_date(self) -> str:\r\n for line in self.lines:\r\n line = ''.join(line)\r\n if 'updated' in line:\r\n index = line.find('Last updated')\r\n if index != -1:\r\n substring = line[index + 10: index + 50].split('.')[0][-13:]\r\n print(substring)\r\n return pd.to_datetime(substring)\r\n if 'Scottish test n' in line:\r\n index_date = line.find('h test n')\r\n print(index_date)\r\n if index_date != -1:\r\n return pd.to_datetime(line[index_date+15:index_date+29])", "def get_CIMS_data4flight_TEMP_LOWER():\n folder = '{}/{}/'.format(get_local_folder('ARNA_data'), 'CIMS')\n filename = 'ARNA2_HNO3_UPPER_LOWER_PPT.csv'\n df = pd.read_csv(folder+filename)\n # Add a datetime object index\n dt_var = 'date_time'\n format = '%d/%m/%Y %H:%M'\n df.index = pd.to_datetime(df[dt_var].values, format=format)\n del df[dt_var]\n return df" ]
[ "0.6497183", "0.56915534", "0.56462073", "0.562195", "0.5613925", "0.55695003", "0.5566172", "0.54354125", "0.5421051", "0.5420912", "0.5407202", "0.53834975", "0.53240067", "0.53164774", "0.5309389", "0.53038645", "0.529336", "0.52784854", "0.5274101", "0.52193314", "0.5218596", "0.5213133", "0.52129114", "0.5104958", "0.50941426", "0.50902635", "0.5089285", "0.50888276", "0.5072271", "0.50675654" ]
0.7952646
0
Backup a list of tables using pg_dump to a backup_filename. Notify via Slack in case of failure
def backup_tables(tables, backup_filename): tables_switches = " ".join(f"-t {table}" for table in tables) jobs = cpu_count() cmd = f"pg_dump {tables_switches} -j {jobs} -Fc > {backup_filename}" pg_dump = run(cmd, shell=True, capture_output=True) if pg_dump.returncode != 0: webhook_url = environ.get("SLACK_WEBHOOK_URL") if webhook_url: msg = f"Failed to {cmd}:\n{pg_dump.stderr.decode()}" notify_via_slack(webhook_url, msg) exit(pg_dump.returncode)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def backup_database():\n logger.info(\"start database_backup\")\n management.call_command('dbbackup', compress=True)\n logger.info(\"end database_backup\")", "def backup_all_db():\n filename = BACKUP_DIR + \"/\" + str(datetime.datetime.now().isoformat()) + \".yaml\"\n with open(filename, 'w+') as base_fp:\n for model in [Framework, Project, Document, Component, Arch, # Meta models\n WorkItem, AutoCase, Linkage, Bug, AutoCaseFailure]:\n base_fp.write(serializers.serialize('yaml', model.objects.all(), fields=model._min_dump))", "def execute_pgdump(dbname, as_username='postgres'):\n\n filedescriptor, filename = tempfile.mkstemp()\n statement = get_database_backup_statement(filename, dbname, as_username)\n BackupterStart.send(statement)\n os.system(statement)\n BackupterEnd.send(filename)\n\n return filename", "def run_backup():\n host = re.search(\"([\\w.-]+)[:]?\", env.host).group()\n date = time.strftime('%Y%m%d%H%M%S')\n fname = '%(host)s-backup-%(date)s.gz' % {'date': date, 'host': host}\n green(\"Ingrese la contraseña de la clave privada local.\")\n sudo(\"pg_dump kine | gzip > /tmp/%s\" % fname, user=\"postgres\")\n get(\"/tmp/%s\" % fname, os.path.join(backup_dir, fname))\n sudo(\"rm /tmp/%s\" % fname, user=\"postgres\")", "def backup_database(self):\n backup_file = \"{}-{}.sql\".format(\n config.DATABASE_NAME, datetime.today().strftime(\"%Y-%m-%d--%H%M\")\n )\n backup_uri = \"{}/{}\".format(config.DATABASE_BACKUP_BUCKET, backup_file)\n step = \"Backing Up Database:\\nbackup={}\".format(backup_uri)\n try:\n self.slacker.send_thread_reply(step)\n backup_command = [\n \"gcloud\",\n \"sql\",\n \"export\",\n \"sql\",\n config.DATABASE_INSTANCE_NAME,\n backup_uri,\n \"--database={}\".format(config.DATABASE_NAME),\n \"--verbosity=debug\",\n ]\n subprocess.run(backup_command, check=True)\n except Exception as e:\n self.raise_step_error(step=step, error=e)", "def generate_database_dumps(self):\n # make sure we are not missing any table\n for table in self.get_table_list():\n is_table_dumped = False\n for dumped_tables in self.dump_files.values():\n # check if the table is in the dumped tables (note that if dumped table is an empty list it means we\n # are dumping everything)\n is_table_dumped = is_table_dumped or (table in dumped_tables) or (dumped_tables == [])\n if not is_table_dumped:\n raise ValueError(\n \"Table {table} of database {database_name} is not dumped in any file,\\\n please add it to `self.dump_files` in database_utils.\".format(\n table=table, database_name=self.database_name\n )\n )\n\n self.logger.info(\" > Update {} database dump files\".format(self.service))\n my_sql_command = \"docker exec {} mysqldump {} --set-gtid-purged=OFF --skip-dump-date --skip-extended-insert {} \"\n my_sql_command = my_sql_command.format(self.mysql_container, self.mysql_credentials, self.database_name)\n for dump_filename in sorted(self.dump_files.keys()):\n tables = self.dump_files[dump_filename]\n file_path = self.dump_path.format(dump_filename)\n with open(file_path, \"w\") as dump_file:\n self.shell(my_sql_command + \" \".join(tables),\n message=\" generate {}\".format(dump_filename), stdout=dump_file)\n\n # clean up the dump file\n self.clean_dump(file_path)", "def command(database, filename):\n\n click.secho(\n \"Backing up the database '{database}' on host '{host}' to file '{filename}'...\".format(\n database=settings.DATABASES[database]['NAME'],\n host=settings.DATABASES[database]['HOST'],\n filename=filename,\n )\n )\n # Make sure the backup path exists\n backup_path = 
get_backup_path()\n if not os.path.exists(backup_path):\n os.makedirs(backup_path)\n\n os.environ[\"PGPASSWORD\"] = settings.DATABASES[database]['PASSWORD']\n os.system(\n 'pg_dump -Fc -c -x -h {host} -U {username} --file={filename} {database}'.format(\n host=settings.DATABASES[database]['HOST'],\n username=settings.DATABASES[database]['USER'],\n database=settings.DATABASES[database]['NAME'],\n filename=filename,\n )\n )\n os.environ[\"PGPASSWORD\"] = ''", "def get_database_backup_statement(filename, dbname, as_username='postgres'):\n now = datetime.now()\n statement = 'sudo -u {username} pg_dump {dbname} > {filename}'.format(\n username=as_username, dbname=dbname, filename=filename\n )\n return statement", "def backup_dump(self):\n errors = Queue.Queue()\n threads = []\n for host in self.shards:\n t = threading.Thread(target=host.mongodump, args=(errors,))\n threads.append(t)\n if self.config_server is not None:\n t = threading.Thread(target=self.config_server.mongodump, args=(errors,))\n threads.append(t)\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n if not errors.empty():\n # We don't really care for all errors, so just through the first one\n raise Exception(errors.get())", "def DBdump(self, filename, tables=None):\n\n if self.connector == 'mysql':\n\n # If tables is None, all tables are included in dump\n if tables is None:\n table_list = ''\n\n else:\n\n # It tables is not a list, make the appropriate list\n if type(tables) is str:\n tables = [tables]\n\n table_list = ' ' + ' '.join(tables)\n\n try:\n dumpcmd = 'mysqldump -h ' + self.server + ' -u ' + self.user + \\\n ' -p' + self.password + ' ' + self.dbname + table_list + \\\n ' > ' + filename\n os.system(dumpcmd)\n except: \n print('Error when creating dump. 
Check route to filename')\n\n else:\n\n print('Database dump only supported for MySQL databases')\n\n return", "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n try:\n cur.execute(query)\n conn.commit()\n \n except psycopg2.Error as e:\n print(e)", "def backup_all_tables(self,\n db_type: t.Union[DBType, str],\n ts: t.Union[dt.datetime, str],\n job_dir: t.Optional[t.Union[Path, str]] = None) -> None:\n db_type = DBType(db_type)\n ts = parse_timestamp(ts, raise_parse_error=True)\n backup_prefix = self.get_backup_prefix(db_type=db_type, ts=ts)\n\n if self.s3u.prefix_exists(backup_prefix):\n raise ValueError(f\"Cannot backup to given timestamped prefix because it already exists: {backup_prefix}\")\n\n try:\n td = None\n if job_dir:\n job_dir = Path(job_dir).resolve()\n job_dir.mkdir(exist_ok=True)\n else:\n td = TemporaryDirectory()\n job_dir = Path(td.name)\n\n print(f\"Backing up tables to S3 {backup_prefix}.\", file=sys.stderr)\n self.export_all_tables(db_type=db_type, export_base_dir=job_dir)\n self.s3u.upload_dir(local_dir=job_dir, prefix_path=backup_prefix)\n finally:\n if td:\n td.cleanup()", "def backup_all_tables_for_all_dbs(self,\n ts: t.Union[dt.datetime, str],\n job_dir: t.Optional[t.Union[Path, str]] = None) -> None:\n ts = parse_timestamp(ts, raise_parse_error=True)\n\n for dbt in DBType:\n td = None\n try:\n if job_dir:\n job_dir = Path(job_dir).resolve()\n backup_job_dir = Path(job_dir, dbt.value)\n backup_job_dir.mkdir(exist_ok=False)\n else:\n td = TemporaryDirectory()\n backup_job_dir = Path(td.name)\n\n self.backup_all_tables(\n db_type=dbt,\n ts=ts,\n job_dir=backup_job_dir\n )\n finally:\n if td:\n td.cleanup()", "def dump_pg(hostname, port, dbname, dbuser, dbpass, output_filename):\n pg_dump_executable = \"pg_dump\"\n\n try:\n env = os.environ\n env[\"PGPASSWORD\"]= dbpass\n subprocess.check_call(\n [\n pg_dump_executable,\n \"-h\", hostname,\n \"-p\", str(port),\n \"-U\", dbuser,\n \"-f\", output_filename,\n dbname\n ],\n env=env\n )\n except Exception as e:\n logger.error(\"Failed to dump: %s\" % (e,))\n raise", "def copy_db():\n with cd(\"/tmp\"), lcd(\"/tmp\"):\n sudo(\"pg_dump gsi > /tmp/latest.sql\", user=\"postgres\")\n run(\"tar zcvf latest.sql.tgz latest.sql\")\n get(\"/tmp/latest.sql.tgz\", \"latest.sql.tgz\")\n sudo(\"rm /tmp/latest.sql.tgz /tmp/latest.sql\")", "def main(dest_dir, db_host, db_port, db_name, db_schema, db_username, db_password, ssl_mode,\n force, cores, memory_per_core, default_partition_col, partition_col,\n nr_partitions):\n partition_col_dict = {k: v for k, v in partition_col}\n nr_partitions_dict = {k: v for k, v in nr_partitions}\n\n dest_dir_path = Path(dest_dir)\n dest_dir_path.mkdir(exist_ok=True, parents=True)\n\n db_params = PostgresDBParams(user=db_username, host=db_host, password=db_password,\n port=db_port, db=db_name, schema=db_schema, ssl_mode=ssl_mode)\n\n with PostgresDBConnectionWrapper(db_params) as db_wrapper:\n tables = db_wrapper.list_tables()\n\n spark_cfg = spark_wrapper.default_spark_config(cores, memory_per_core, use_utc=True)\n with spark_wrapper.create_spark_session_from_config(spark_cfg) as spark:\n dumper = PostgresTableDumper(db_params, spark)\n for t in tables:\n logging.info('Dumping table %s', t)\n\n tbl_path = Path(dest_dir_path, t)\n\n if not tbl_path.exists() and not force:\n default_col = None\n\n if default_partition_col:\n cols = db_wrapper.list_columns(t)\n if default_partition_col in cols:\n default_col = default_partition_col\n else:\n logging.warning(\n \"Default partition 
column %s not found among columns [%s]\",\n default_partition_col, ','.join(cols))\n\n p_col = partition_col_dict.get(t, default_col)\n nr_part = nr_partitions_dict.get(t, None)\n\n dumper.dump_table(t, tbl_path, p_col, nr_part)\n else:\n logging.info('Path %s already exists, not dumping table %s',\n tbl_path, t)\n\n counts_match = row_counts_match(tbl_path, t, db_wrapper, spark)\n\n if counts_match:\n logging.info(\"Counts for %s match\", t)\n else:\n logging.error(\"Counts for %s don't match\", t)", "def test_backup_list(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster_validate()\n self.backup_list_validate()", "def dump_testdb(c, dbname=\"test_template\", fpath=\"tests/test_db.sql\"):\n default_env = {\n \"PATH\": os.environ[\"PATH\"],\n \"LANG\": \"en_US.UTF-8\",\n }\n\n env = os.environ\n env.update(default_env)\n\n c.run(f\"pg_dump -h localhost -p 5432 -U postgres {dbname} > {fpath}\", env=env)", "def dump(self, dbname, filename):\n path = os.path.join(self.base_dir, filename)\n params = [\n self.get_bin_path(\"pg_dump\"),\n \"-p {}\".format(self.port),\n \"-f\", path,\n dbname\n ]\n\n with open(self.error_filename, \"a\") as file_err:\n ret = subprocess.call(params, stderr=file_err)\n if ret:\n raise ClusterException(\"Dump creation failed\")", "def database_dump(self):\r\n print('=====Dumping database=====')\r\n self.database_table_dump(query.TABLE_STATS)\r\n print()\r\n self.database_table_dump(query.TABLE_TWEETS)\r\n print()\r\n self.database_table_dump(query.TABLE_POSTS)\r\n print()\r\n self.database_table_dump(query.TABLE_FOLLOWS)", "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n try:\n cur.execute(query)\n conn.commit()\n except Exception as e:\n print(e)", "def prepare_push():\n print(\"Preparing to push\")\n cur = conn.cursor()\n try:\n for tname in TABLES:\n with open(f'{tname}.db', 'w') as f:\n print(f\"Copying {tname}\")\n cur.copy_to(f, f'\"{tname}\"')\n return True\n except IOError:\n print(\"IO ERROR\")\n return False\n finally:\n cur.close()", "def run_backup():\n\n from common.models import InvenTreeSetting\n\n if not InvenTreeSetting.get_setting('INVENTREE_BACKUP_ENABLE', False, cache=False):\n # Backups are not enabled - exit early\n return\n\n interval = int(InvenTreeSetting.get_setting('INVENTREE_BACKUP_DAYS', 1, cache=False))\n\n # Check if should run this task *today*\n if not check_daily_holdoff('run_backup', interval):\n return\n\n logger.info(\"Performing automated database backup task\")\n\n call_command(\"dbbackup\", noinput=True, clean=True, compress=True, interactive=False)\n call_command(\"mediabackup\", noinput=True, clean=True, compress=True, interactive=False)\n\n # Record that this task was successful\n record_task_success('run_backup')", "def truncate_backup_tables(self, db_type: t.Union[DBType, str]) -> None:\n db_type = DBType(db_type)\n db_engine = self.get_db_engine(db_type=db_type)\n\n for table_name in self.BACKUP_TABLES_MAP[db_type]:\n print(f\"Truncating Table:{table_name} ... 
\")\n truncate_table(\n db_engine=db_engine,\n table=table_name,\n cascade=True\n )", "def weekly(dbname, as_username='postgres'):\n\n filename = '{dbname}-{indate}.dump.sql'.format(\n dbname=dbname, indate=datetime.now().strftime('%Y-%m-%d'))\n backup_weekly_dir = os.path.join(BACKUPS_STORE_DIR, 'weekly')\n if not os.path.isdir(backup_weekly_dir):\n os.makedirs(backup_weekly_dir)\n\n dumpfile = execute_pgdump(dbname, as_username)\n filename = os.path.join(backup_weekly_dir, filename)\n logger.info('moving {src} into {dst}'.format(src=dumpfile, dst=filename))\n shutil.move(dumpfile, filename)\n logger.info('{dst} has a size of {size} bytes.'.format(\n dst=filename, size=get_file_size(filename)))", "def dump_tables(self):\n # check read only\n if self.__read_only:\n raise IOError(\"DB is for reading only.\")\n # remove files for deleted tables\n for deleted_name in self.__dropped_tables:\n filepath = self._filepath(deleted_name)\n if os.path.exists(filepath):\n os.remove(filepath)\n # reset the state of dbdriver\n self.__dropped_tables.clear()\n # overwrite existing tables\n for name, table in self.__tables.items():\n if table is None:\n # skip unchanged tables\n continue\n filepath = self._filepath(name)\n table.to_df().to_csv(filepath, sep=\";\")", "def backup(context, user=get_local_user(), remote=False, instance=None, stack=None):\n command = \"run --rm postgres backup\"\n run_command(context, user, remote, instance, stack, command)", "def create_backup_file(*args):\n\n for file in list(*args):\n try:\n date_pattern = BDate.date_pattern()\n BDate.copy_file(file, f'{file}.{date_pattern}')\n except FileNotFoundError as err:\n print(f'[ERROR] {err.filename} : No such file or directory')", "def dumpRancherDatabase(self, backupPath, listDatabaseSettings):\n\n if backupPath is None or backupPath == \"\":\n raise KeyError(\"backupPath must be provided\")\n if isinstance(listDatabaseSettings, dict) is False:\n raise KeyError(\"listDatabaseSettings must be provided\")\n\n if \"type\" not in listDatabaseSettings:\n raise KeyError(\"You must provide the database type\")\n if \"host\" not in listDatabaseSettings:\n raise KeyError(\"You must provide the database host\")\n if \"port\" not in listDatabaseSettings:\n raise KeyError(\"You must provide the database port\")\n if \"user\" not in listDatabaseSettings:\n raise KeyError(\"You must provide the database user\")\n if \"password\" not in listDatabaseSettings:\n raise KeyError(\"You must provide the database password\")\n if \"name\" not in listDatabaseSettings:\n raise KeyError(\"You must provide the database name\")\n\n commandService = Command()\n target_dir = \"%s/database\" % (backupPath)\n image = \"mysql:latest\"\n logger.info(\"Dumping the Rancher database '%s' in '%s'\", listDatabaseSettings['name'], target_dir)\n\n if os.path.isdir(target_dir) is False:\n os.makedirs(target_dir)\n logger.debug(\"Create directory '%s'\", target_dir)\n else:\n logger.debug(\"Directory '%s' already exist\", target_dir)\n\n commandService.runCmd(\"docker pull %s\" % image)\n command = \"sh -c 'mysqldump -h %s -P %s -u %s %s > %s/%s.dump'\" % (listDatabaseSettings['host'], listDatabaseSettings['port'], listDatabaseSettings['user'], listDatabaseSettings['name'], target_dir, listDatabaseSettings['name'])\n dockerCmd = \"docker run --rm -v %s:%s -e 'MYSQL_PWD=%s' %s %s\" % (target_dir, target_dir, listDatabaseSettings['password'], image, command)\n commandService.runCmd(dockerCmd)\n logger.info(\"Dump Rancher database is finished\")", "def mysqldump():\n 
run(\"mysqldump -u database_user database_name -p > ~/tmp/exported_db.sql\")" ]
[ "0.669284", "0.6538053", "0.64016324", "0.63371104", "0.62812054", "0.6249745", "0.6203552", "0.6097536", "0.60546255", "0.6033636", "0.6009517", "0.5995691", "0.592359", "0.5910764", "0.5885135", "0.5865547", "0.58210963", "0.5818948", "0.58124495", "0.5811393", "0.58043474", "0.57759255", "0.5753146", "0.573948", "0.57377887", "0.5716826", "0.56644917", "0.56624013", "0.5638804", "0.56381047" ]
0.8861323
0
Notify via Slack webhook url
def notify_via_slack(webhook_url, msg): slack_data = {"text": msg} post(webhook_url, json=slack_data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def webhook_sender(url=WEBHOOK_URL):\n data = runner()\n print(json.dumps(data))\n try:\n r = requests.post(url,json=data)\n print(r)\n except requests.exceptions.RequestException as e:\n raise SystemExit(e)", "def webhook(event, context):\n bot = configure_telegram()\n logger.info('Event: {}'.format(event))\n\n if event.get('httpMethod') == 'POST' and event.get('body'):\n logger.info('Message received')\n update = telegram.Update.de_json(json.loads(event.get('body')), bot)\n chat_id = update.message.chat.id\n text = update.message.text\n\n if text == '/start' or text == 'help':\n reply = \"Hey 👋🏻, Aashutosh here!\" \\\n \"\\nTo start stalking, just enter username and we will fetch their profile for you.\\n\" \\\n \"Give us a star at https://github.com/aashutoshrathi/git-profiler-bot\\n\" \\\n \"You can reach out to me at: https://aashutosh.dev\"\n else:\n reply = stalk(text)\n bot.sendMessage(chat_id=chat_id, parse_mode='HTML', text=reply)\n logger.info('Message sent')\n return OK_RESPONSE\n\n return ERROR_RESPONSE", "def webhook():\n if request.headers.get('content-type') == 'application/json':\n\n json_string = request.get_data().decode('utf-8')\n update = Update.de_json(json_string)\n bot.process_new_updates([update])\n return ''\n\n else:\n abort(403)", "def slack_it(request):\n # Validate the Boon AI JWT\n jwt_valid = True\n encoded_jwt = request.headers.get('X-BoonAI-Signature-256').encode('utf-8')\n try:\n jwt.decode(encoded_jwt, os.environ['SECRET'], algorithms=[\"HS256\"])\n except jwt.InvalidSignatureError:\n jwt_valid = False\n\n # Send a slack message with the payload information.\n body = {\n \"text\": \"Webhook received from Boon AI\",\n \"blocks\": [\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"plain_text\",\n \"text\": \"Webhook received from Boon AI\",\n \"emoji\": True\n }\n },\n {\n \"type\": \"divider\"\n },\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": f\"*JWT Validated*: {jwt_valid}\\n \"\n f\"*JWT*: {request.headers.get('X-BoonAI-Signature-256')}\\n \"\n f\"*Content-Type*: {request.content_type}\\n \"\n f\"*Webhook Payload*\\n```{pprint.pformat(request.get_json(force=True))}```\"\n }\n }\n ]\n }\n requests.post(os.environ['SLACK_URL'], json=body)\n\n return {}", "def __notify_slack(self):\n\t\ttry:\n\t\t\tprint(\"[+] Sending Slack notifications...\")\n\t\t\tslack_http_headers = {\n\t\t\t\t'User-Agent': 'GitHubScrap',\n\t\t\t\t'Content-type': 'application/json',\n\t\t\t}\n\t\t\tslack_http_data = {}\n\t\t\tfor ix in range(0,len(self.final_results[\"results\"]),SLACK_CHUNK_SIZE):\n\t\t\t\tdata_to_send = \"\"\n\t\t\t\tchunk_results = self.final_results[\"results\"][ix:ix+SLACK_CHUNK_SIZE]\n\t\t\t\tfor url in chunk_results:\n\t\t\t\t\tdata_to_send += \"{} ({})\\n\".format(url[\"query\"], url[\"link\"])\n\n\t\t\t\tslack_http_data.update({\n\t\t\t\t\t'text': data_to_send,\n\t\t\t\t})\n\t\t\t\trequests.post(\n\t\t\t\t\tself.slack_webhook,\n\t\t\t\t\theaders = slack_http_headers,\n\t\t\t\t\tdata = json.dumps(slack_http_data),\n\t\t\t\t)\n\t\t\t\tsleep(SLACK_HTTP_DELAY)\n\n\t\texcept Exception as exception:\n\t\t\traise MsgException('Slack notifications could not be sent', exception)", "def send_message_to_slack(text):\n\n try:\n post = {\n \"text\": \":fire: :sad_parrot: *SSL Certificate BACKUP SCRIPT Status for HTTPD Proxy:* :sad_parrot: :fire:\",\n \"attachments\": [\n {\n \"text\": \"{0}\".format(text),\n \"color\": \"#B22222\",\n \"attachment_type\": \"default\",\n \"fields\": [\n {\n \"title\": \"Priority\",\n \"value\": 
\"High\",\n \"short\": \"false\"\n }\n ],\n \"footer\": \"AWS HTTPD\",\n \"footer_icon\": \"https://platform.slack-edge.com/img/default_application_icon.png\"\n }\n ]\n }\n\n ssm_param_name = 'slack_notification_webhook'\n ssm = boto3.client('ssm', config=CONFIG, region_name='eu-west-2')\n try:\n response = ssm.get_parameter(\n Name=ssm_param_name, WithDecryption=True)\n except ClientError as e:\n if e.response['Error']['Code'] == 'ParameterNotFound':\n LOGGER.info(\n 'Slack SSM parameter %s not found. No notification sent', ssm_param_name)\n return\n else:\n logging.error(\n \"Unexpected error when attempting to get Slack webhook URL: %s\", e)\n return\n if 'Value' in response['Parameter']:\n url = response['Parameter']['Value']\n\n json_data = json.dumps(post)\n req = urllib.request.Request(\n url,\n data=json_data.encode('ascii'),\n headers={'Content-Type': 'application/json'})\n LOGGER.info('Sending notification to Slack')\n response = urllib.request.urlopen(req)\n\n else:\n LOGGER.info(\n 'Value for Slack SSM parameter %s not found. No notification sent', ssm_param_name)\n return\n\n except Exception as err:\n logging.error(\n 'The following error has occurred on line: %s',\n sys.exc_info()[2].tb_lineno)\n logging.error(str(err))", "async def send_webhook(self):\n if webhook_url:\n webhook = Webhook.from_url(webhook_url, adapter=AsyncWebhookAdapter(self.session))\n await webhook.send(embed=self.embed)", "def notify(nt_id, application, action, remedy, subj, heading):\n\n email = get_email(nt_id)\n lambda_client = boto3.client('lambda')\n messages = create_messages(application, action, remedy)\n print(email)\n email_data = {\n 'sender_mail': SENDER_EMAIL,\n 'email': email,\n 'subj': subj,\n 'heading': heading,\n 'messages': messages,\n 'region': os.environ.get(\"AWS_DEFAULT_REGION\")\n }\n invoke_email_response = lambda_client.invoke(\n FunctionName= os.environ.get(\"formatted_email\"),\n InvocationType= \"RequestResponse\",\n Payload= json.dumps(email_data)\n )\n err = checkError(invoke_email_response, \"Error sending email!\")\n if err:\n print(str(err))\n\n slack_data = {\n 'application_url': APP_URL,\n 'channel': CHANNEL,\n 'message': messages[1].rsplit(\"\\n\",5)[0],\n 'channel_id': CHANNEL_ID,\n 'nt_ids': [nt_id]\n }\n invoke_slack_response = lambda_client.invoke(\n FunctionName= os.environ.get(\"slack_message\"),\n InvocationType= \"RequestResponse\",\n Payload= json.dumps(slack_data)\n )\n err = checkError(invoke_slack_response, \"Error sending slack message!\")\n if err:\n print(str(err))", "def command_webhook(request):\n print(json.dumps(request.POST.copy(), indent=2))\n\n return JsonResponse({\"text\": \"ChangeTip services have been discontinued. 
See https://www.reddit.com/r/changetip/comments/5dn3rc/changetip_shutting_down/ Please close your account and disconnect ChangeTip from Slack.\"})\n\n if request.POST.get(\"noop\"):\n return JsonResponse({\"text\": \"Hi!\"})\n\n # Separated so we can still support the legacy webhook integration\n if 'command' in request.POST.keys():\n return slash_command(request)\n else:\n return outgoing_webhook(request)", "def send(self):\n payload = self.format_payload()\n\n # Makes sure that the required fields are provided before\n # sending the payload.\n if not self.webhook_url:\n print ('Error: Webhook URL is required.')\n\n elif not payload:\n print ('Error: Message payload cannot be empty.')\n\n else:\n try:\n request = requests.post(self.webhook_url,\n data=json.dumps(payload),\n headers={'Content-Type': 'application/json'})\n\n request.raise_for_status()\n\n except requests.exceptions.RequestException as error:\n print('Error: %s' % error)", "def webhook():\n if request.method == 'POST':\n data = request.get_json(force=True)\n # For logging and debugging, print incoming requests\n print(request, '\\n', data)\n # The two LNbits extensions used return data in\n # different formats. This try-except handles both.\n try:\n sats = int(data['amount'] / 1000)\n comment = data['comment']\n except KeyError:\n sats = int(data['amount'])\n comment = data['description']\n if not comment:\n comment = \"No message!\"\n amount = convert_to_fiat(sats, 'usd')\n url = \"https://streamlabs.com/api/v1.0/donations\"\n data = {\n \"name\": \"bitcoin\",\n \"message\": f\"{str(sats)} sats: {comment}\",\n \"identifier\": \"bitcoin_donos\",\n \"amount\": amount,\n \"currency\": fiat.upper(),\n \"access_token\": access_token,\n }\n response = requests.post(url, data=data)\n # For logging/debugging purposes\n print(response.json())\n return \"Success!\", 200\n else:\n abort(400)", "def send_slack_notification(url: str, title: str, message: str):\n\n content = {\n \"text\": f\"{title}\",\n \"blocks\": [\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": f\"{message}\",\n },\n }\n ],\n }\n\n response = requests.post(url, json=content)\n\n # Raise exception if response is not 200\n response.raise_for_status()", "def get_slackwebhook():\n return SlackWebhook(\n subject='[Messages] Integration Test',\n body='Conducting Integration Testing',\n profile='integration_tester',\n attachments='https://imgs.xkcd.com/comics/python.png',\n save=False)", "def example_webhook(self, incoming_request):\n return \"Example\"", "def example_webhook(self, incoming_request):\n return \"Example\"", "def flask_slack_test():\n _log('@channel: slack is working?')\n return 'slack test'", "def execute(self):\n return LOGGER.info(f\"{datetime.datetime.now()} - Sending notification in Slack\")", "def notification_slack(Config, **notfication_data): \n slackConf = Config[\"NOTIFICAION_SLACK\"]\n slack_token = slackConf.get('slack_token')\n slack_channel = slackConf.get('slack_channel')\n slack_icon_url = slackConf.get('slack_icon_url')\n notification_description = slackConf.get('notification_description')\n \n result = NotifySlack(\n slack_token, \n slack_channel, \n slack_icon_url, \n notification_description,\n **notfication_data).send_message()\n \n if result['ok']:\n logging.debug('Sent a notification to {} channel'.format(slack_channel) )\n else:\n logging.debug(\"Failed sending notification, Error message - '{}'\".format(result['error']))", "def _send_notification() -> None:\n send_notification(\n self,\n 
\"slack:@aaron\",\n \"New {0} Version: {1}\".format(\n self.properties[CONF_APP_NAME], new_version\n ),\n title=\"New Software 💿\",\n )", "def send(data, webhook_url):\n dis_data = data\n url = webhook_url\n headers = {\"Content-Type\": \"application/json\"}\n discord_request = requests.post(url, data=json.dumps(dis_data), headers=headers)\n\n try:\n discord_request.raise_for_status()\n except requests.exceptions.HTTPError as err:\n print(err)\n else:\n print(\"Payload delivered successfully, code {}.\".format(discord_request.status_code))", "def slackMessage(binState):\n log = logging.getLogger('iob')\n\n if binState:\n location = \"Out\"\n else:\n location = \"In\"\n \n url = \"https://hooks.slack.com/services/{}\"\n \n payload = {\"text\": \"Bin is: {}\".format(location)}\n\n headers = {\"Content-Type\": \"application/json\"}\n\n response = requests.request(\n \"POST\",\n url,\n data=json.dumps(payload),\n headers=headers\n )\n\n log.debug(response.text)\n return", "def test_slackWH_send_good(get_slackwebhook, capsys):\n s = get_slackwebhook\n s.send()\n out, err = capsys.readouterr()\n assert \"Message sent\" in out", "def slack(message):\n slack_hook = 'https://hooks.slack.com/services/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'\n headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}\n requests.post(slack_hook, json.dumps({'text': message}), headers=headers)", "def slack_me(msg):\n # sanitise.\n msg = unicodedata.normalize('NFKD',msg).encode('ascii','ignore').decode('ascii')\n msg = re.sub('[^\\w\\s\\-.,;?!@#()\\[\\]]','', msg)\n r = requests.post(url=os.environ['SLACK_WEBHOOK'],\n headers={'Content-type': 'application/json'},\n data=f\"{{'text': '{msg}'}}\")\n if r.status_code == 200 and r.content == b'ok':\n return True\n else:\n return False", "def webhook():\n\n json_request = request.get_json(silent=True, force=True)\n\n\n # Extract the data from the json-request (first get the result section of the json)\n result = json_request.get(\"result\")\n\n # Then get the parameters and action_name from the result\n parameters = result.get(\"parameters\")\n\n # Get the action name\n action_name = result.get(\"action\")\n\n facebook_id = json_request.get(\"originalRequest\").get(\"data\").get(\"sender\").get(\"id\")\n\n # Retreives the username by looking up with the unique facebook id\n username = None\n try:\n username = DatabaseConnector.get_values(\"Select username from user where facebook_id = \\\"\" + facebook_id + \"\\\"\")[0][0]\n except:\n username = None\n\n\n # Retrieve the course code\n course_code = parameters.get(\"course_code\")\n parameter = [username, course_code, facebook_id]\n\n # Creates the string that should be sent back to the user\n print(action_name, facebook_id, course_code, parameter[0], parameter[1])\n speech = process_actions(parameter, action_name)\n\n # Create a response to API.AI and return it\n response = json.dumps(speech, indent=4)\n created_response = make_response(response)\n created_response.headers['Content-Type'] = 'application/json'\n\n return created_response", "def send_slack_notification(sender, instance, **kwargs):\n if instance.task or instance.release_service:\n return\n\n if instance.release.state in [\"waiting\", \"initializing\"]:\n return\n\n try:\n send_status_notification(instance.release.pk)\n except Exception as err:\n logger.warning(f\"Problem sending status notification to Slack: {err}\")", "def send_slack(self, message):\n self.slack_client.api_call('chat.postMessage', channel=self.slack_channel, text=message, 
username=self.username, icon_emoji=self.slack_icon_emoji)\n print(\"Slack Notification sent\")", "def hears(request):\n\n #Wit makes our responses timeout, so we ignore Slack retries\n if \"HTTP_X_SLACK_RETRY_NUM\" in request.META:\n return HttpResponse(\"OK\", 200)\n\n slack_event = json.loads(request.body)\n\n # ============= Slack URL Verification ============ #\n # In order to verify the url of our endpoint, Slack will send a challenge\n # token in a request and check for this token in the response our endpoint\n # sends back.\n # For more info: https://api.slack.com/events/url_verification\n if \"challenge\" in slack_event:\n return HttpResponse(slack_event[\"challenge\"], 200)\n #removed {\"content_type\":\"application/json\"} from flask response\n\n # ============ Slack Token Verification =========== #\n # We can verify the request is coming from Slack by checking that the\n # verification token in the request matches our app's settings\n if pyBot.verification != slack_event.get(\"token\"):\n print \"Invalid Slack verification token: %s \\npyBot has: \\\n %s\\n\\n\" % (slack_event[\"token\"], pyBot.verification)\n # By adding \"X-Slack-No-Retry\" : 1 to our response headers, we turn off\n # Slack's automatic retries during development.\n return HttpResponse(message, 403)\n\n # ====== Process Incoming Events from Slack ======= #\n # If the incoming request is an Event we've subcribed to\n if \"event\" in slack_event:\n event_type = slack_event[\"event\"][\"type\"]\n # Then handle the event by event_type and have your bot respond\n return _event_handler(event_type, slack_event)\n\n # If our bot hears things that are not events we've subscribed to,\n # send a quirky but helpful error response\n return HttpResponse(\"[NO EVENT IN SLACK REQUEST] These are not the droids\\\n you're looking for.\", 404)", "def listen():\n if request.method == 'GET':\n print request\n return verify_webhook(request)\n\n if request.method == 'POST':\n payload = request.json\n event = payload['entry'][0]['messaging']\n for x in event:\n if is_user_message(x):\n text = x['message']['text']\n sender_id = x['sender']['id']\n respond(sender_id, text)\n\n return \"ok\"", "def slack_post(title=\"Test\", message=\"Hello world!\", color=\"#999999\"):\n attach = dict(fallback=message, title=title, text=message, color=color)\n r = client.chat_postMessage(\n channel=CHANNEL, attachments=[attach], username=f\"{HOSTNAME} DBA alert\"\n )\n return r" ]
[ "0.67908823", "0.6733161", "0.6709683", "0.6708932", "0.66876864", "0.668719", "0.6677454", "0.6653485", "0.6611078", "0.65982354", "0.6563635", "0.65241367", "0.65184104", "0.64943904", "0.64943904", "0.6471424", "0.64697933", "0.6464106", "0.6430812", "0.64093184", "0.6364326", "0.6333589", "0.63286775", "0.63231754", "0.62681097", "0.62581825", "0.6256578", "0.62347156", "0.62320215", "0.6173908" ]
0.77923703
0
Gets an image of arbitrary size and returns an array of the same size containing 4 different versions of the image by filtering the rows and columns using a low pass or a high pass filter in the different combinations, quantized by the quantization array
def dwt(image_array, quantization_Array): # Create the high pass and low pass filters # both filters are non-causal # symmetric # [-2, -1, 0, 1, 2] LPF = [-0.125, 0.25, 0.75, 0.25, -0.125] LPF_center = 2 # [ -2,-1, 0] HPF = [-0.5, 1, -0.5] HPF_center = 2 nrow, ncol = image_array.shape # create an array that will contain the 4 different subbands of the image LL = np.zeros((nrow, ncol)) LH = np.zeros((nrow, ncol)) HL = np.zeros((nrow, ncol)) HH = np.zeros((nrow, ncol)) filtered_image = [LL, LH, HL, HH] # filtering the rows using a low pass and high pass filters LowPass_rows = np.zeros((nrow, ncol)) HighPass_rows = np.zeros((nrow, ncol)) for i in range(0, nrow): LowPass_rows[i, :] = lfilter(LPF, image_array[i, :], LPF_center) HighPass_rows[i, :] = lfilter(HPF, image_array[i, :], HPF_center) # down sample rows. # which means we will have half the number of columns for i in range(0, len(filtered_image)): filtered_image[i] = filtered_image[i][:, ::2] # apply filters accross columns for i in range(0, ncol): LL[:, i] = lfilter(LPF, LowPass_rows[:, i], LPF_center) LH[:, i] = lfilter(HPF, LowPass_rows[:, i], HPF_center) HL[:, i] = lfilter(LPF, HighPass_rows[:, i], LPF_center) HH[:, i] = lfilter(HPF, HighPass_rows[:, i], HPF_center) # down sample columns and quantize for i in range(0, len(filtered_image)): filtered_image[i] = filtered_image[i][::2, :] filtered_image[i] = np.round( filtered_image[i]/quantization_Array[i]).astype(int) return filtered_image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_array(image: Image, filter_id: str) -> Image:\n \n if filter_id == \"3\":\n image = three_tone(image,\"aqua\",\"blood\",\"lemon\")\n elif filter_id == \"X\":\n image = extreme_contrast(image)\n elif filter_id == \"T\":\n image = sepia_filter(image)\n elif filter_id == \"P\":\n image = posterize(image)\n elif filter_id == \"E\":\n image = detect_edges(image,15)\n elif filter_id == \"V\":\n image = flip_vertical(image)\n elif filter_id == \"H\":\n image = flip_horizontal(image)\n \n return image", "def _filter_images(data, hmin):\n #Laziest way to get a circle mask\n fp = CircularAperture((0,0), r=hmin).to_mask().data>.1\n fp = fp.astype(bool)\n\n # Apply maximum filter, flux filter\n filt_image = maximum_filter(data, footprint=fp,\n mode='constant', cval=0)\n origins = product([0,-1], [0,-1])\n max_4sum = np.amax([_conv_origin(data, o) for o in origins], axis=0)\n return(filt_image, max_4sum)", "def filtering(image):\n output = np.array(image)\n for x in xrange(0,1):\n bilateralFilter_img = cv2.bilateralFilter(output,5, 75, 75)\n\n return bilateralFilter_img", "def filtering(self):\r\n # 1 ###########################################################################################################\r\n fft_image = np.fft.fft2(self.image)\r\n # 2 ###########################################################################################################\r\n fft_shift_image = np.fft.fftshift(fft_image)\r\n\r\n ###\r\n mag_dft = np.log(np.abs(fft_shift_image))\r\n mag_dft = (255 * (mag_dft / np.max(mag_dft))).astype(dtype='uint8')\r\n ###\r\n\r\n # 3 ###########################################################################################################\r\n if self.filter_name == 'butterworth_l' or self.filter_name == 'butterworth_h':\r\n mask = self.filter(fft_shift_image.shape, self.cutoff, self.order)\r\n else:\r\n mask = self.filter(fft_shift_image.shape, self.cutoff)\r\n # 4 ###########################################################################################################\r\n # multiply the dft (fft shift image) by the mask\r\n filtered_image = fft_shift_image * mask\r\n\r\n ###\r\n mag_filtered_image = mag_dft * mask\r\n ###\r\n\r\n # 5 ###########################################################################################################\r\n inverse_fft_shift_image = np.fft.ifftshift(filtered_image)\r\n # 6 ###########################################################################################################\r\n inverse_fft_image = np.fft.ifft2(inverse_fft_shift_image)\r\n # 7 ###########################################################################################################\r\n mag_image = np.zeros(inverse_fft_image.shape, dtype=complex)\r\n for i in range(inverse_fft_image.shape[0]):\r\n for j in range(inverse_fft_image.shape[1]):\r\n if inverse_fft_image[i][j] < 0:\r\n mag_image[i][j] = -1 * inverse_fft_image[i][j]\r\n else:\r\n mag_image[i][j] = inverse_fft_image[i][j]\r\n # magnitude of inverse fft is complete\r\n # 8 ###########################################################################################################\r\n full_contrast_image = self.post_process_image(mag_image)\r\n\r\n return [mag_dft, mag_filtered_image, full_contrast_image]", "def filter_image(img):\n return cv2.bilateralFilter(img, 9, 50, 50)", "def filtering(self):\n from numpy import fft\n import numpy as np\n\n _image_dft = fft.fft2(self.image)\n _image_dft = fft.fftshift(_image_dft)\n # dft = DFT.DFT()\n # plt.figure(1) \n # plt.imshow(self.image)\n # 
plt.figure(2)\n # plt.imshow(20*np.log10(abs(_image_dft))) \n # print(_image_dft)\n # print(abs(_image_dft))\n # plt.show()\n filter = self.filter(self.image.shape, self.cutoff, self.order) \\\n if self.filter_name.startswith('butterworth') \\\n else self.filter(self.image.shape, self.cutoff)\n \n _image_dft_filtered = _image_dft * filter\n _image_filtered = abs(fft.ifft2(_image_dft_filtered))\n \n return [ self.post_process_image(_image_filtered), \\\n self.post_process_image(20*np.log10(abs(_image_dft)+.00001)), \\\n self.post_process_image(20*np.log10(abs(_image_dft_filtered)+.00001)) ]", "def _reduce(im, filter_vec):\n im_small = _reduce_rows(im, filter_vec)\n im_small = _reduce_rows(im_small.transpose(), filter_vec).transpose()\n return im_small", "def filteringEngine(original, debug=False):\n\n processedImage1 = filterNotInRange(original, LABmin_healthy, LABmax_healthy, cv2.COLOR_BGR2LAB)\n processedImage2 = filterNotInRange(original, LABmin_terrain, LABmax_terrain, cv2.COLOR_BGR2LAB)\n # Image containing many FPs\n processedImage3 = filterNotInRange(original, HSVmin_yellow, HSVmax_yellow, cv2.COLOR_BGR2HSV)\n\n sum1 = cv2.add(processedImage1, processedImage2)\n sub1 = differentialNode(original, sum1)\n\n processedImage = filterNotInRange(sub1, LABmin, LABmax, cv2.COLOR_BGR2LAB)\n # sum2 = cv2.add(processedImage, processedImage3)\n\n kernel = np.ones((6, 6), np.uint8)\n temp = closing(processedImage, kernel)\n\n kernel = np.ones((3, 3), np.uint8)\n out = opening(temp, kernel)\n\n if debug:\n cv2.imshow('processedImage1', processedImage1)\n cv2.imshow('processedImage2', processedImage2)\n cv2.imshow('processedImage3', processedImage3)\n cv2.imshow('sum1', sum1)\n cv2.imshow('sub1', sub1)\n cv2.imshow('processedImage', processedImage)\n cv2.imshow('sum2', sum2)\n cv2.imshow('out', out)\n\n return out", "def custom_filter(image: Image) -> Image:\n image = image.filter(ImageFilter.Kernel(\n size=(3, 3), kernel=(1, 0, 1, 0, 0, 0, 1, 0, 1)))\n return image", "def find_img2d_candidates(image, **kwargs):\n\n # filter_kernel = np.array([[-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n 
# [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225]])\n\n filter_kernel = np.array([[-2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2/324, -2/324, -2/324],\n [-2 / 324, -2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -2/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -2/324, -2/324],\n [-2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, 
-2 / 324, -2 / 324, -2 / 324, -2/324, -2/324, -2/324]])\n\n res = sg.convolve2d(image, filter_kernel, mode='same', boundary='fill', fillvalue=0)\n coord_x, coord_y = find_max_coords(np.absolute(res))\n\n return coord_x, coord_y", "def quantizeImage(imgOrig:np.ndarray, nQuant:int, nIter:int)->(List[np.ndarray],List[float]):\r\n if isRGB(imgOrig):\r\n RGB=True\r\n imgYIQ = transformRGB2YIQ(imgOrig)\r\n Y=imgYIQ[:,:,0]\r\n unnormImg = unnormalize(Y).astype('int')\r\n else:\r\n RGB = False\r\n unnormImg = unnormalize(imgOrig).astype('int')\r\n\r\n img_lst=[imgOrig]\r\n err_lst=[]\r\n histOrig = calHist(unnormImg)\r\n h,w = unnormImg.shape[:2]\r\n\r\n partSize = (h* w) / nQuant\r\n z = [1]\r\n sum = 0\r\n for i in range(len(histOrig)):\r\n sum+=histOrig[i]\r\n if (sum>=partSize):\r\n z.append(i)\r\n sum=0\r\n\r\n z.append(255)\r\n\r\n for i in range(nIter):\r\n q = []\r\n for i in range(1,nQuant+1):\r\n cutHist=histOrig[z[i-1]:z[i]]\r\n avg=int(np.average(range(z[i-1], z[i]),axis=None, weights=cutHist, returned=False))\r\n q.append(avg)\r\n for i in range(1,nQuant):\r\n z[i]=int((q[i-1]+q[i])/2)\r\n\r\n img=np.zeros(unnormImg.shape)\r\n for i in range(0, nQuant):\r\n img[unnormImg>=z[i]]=q[i]\r\n errMat=pow((unnormImg-img),2)/(h*w)\r\n err=np.average(errMat)\r\n err_lst.append(err)\r\n\r\n\r\n if RGB:\r\n img = normalize(img)\r\n imgYIQ[:, :, 0] = img\r\n img = transformYIQ2RGB(imgYIQ)\r\n\r\n img_lst.append(img)\r\n\r\n\r\n return img_lst, err_lst", "def filter_img(img, new_img, f):\n\n datas = img.getdata()\n new_data = []\n for item in datas:\n if f(item[0]) and f(item[1]) and f(item[2]):\n new_data.append((0, 0, 0, 0))\n else:\n new_data.append(item)\n new_img.putdata(new_data)", "def sampling_from_image(image, filter_size):\n sampling_kernel = np.zeros(image.shape)\n \n for i in range(sampling_kernel.shape[0]):\n for j in range(sampling_kernel.shape[1]):\n if (i%filter_size) == (filter_size//2) and (j%filter_size) == (filter_size//2):\n sampling_kernel[i,j] = 1.0\n else:\n pass\n \n return np.multiply(image, sampling_kernel)", "def image_pre_filtering(left_img: np.ndarray, right_img: np.ndarray) -> tuple:\n\n def clahe(image: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply Contrast Limited Adaptive Histogram Equalization\n :param image: the image to be filtered\n :return: the image filtered with CLAHE\n \"\"\"\n clahe_filter = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n return clahe_filter.apply(image)\n\n def logarithmic(image: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply Logarithmic Transform\n :param image: the image to be filtered\n :return: the image filtered with logarithmic transform\n \"\"\"\n c = max_disparity / math.log(1 + np.max(image))\n sigma = 1\n for i in range(0, image.shape[1]): # image width\n for j in range(0, image.shape[0]): # image height\n # compute logarithmic transform\n image[j, i] = int(c * math.log(1 + ((math.exp(sigma) - 1) * image[j, i])))\n return image\n\n def exponential(image: np.ndarray) -> np.ndarray:\n \"\"\"\n Perform pre-processing - raise to the power, as this subjectively appears\n to improve subsequent disparity calculation\n :param image:\n :return:\n \"\"\"\n return np.power(image, 0.75).astype('uint8')\n\n def apply_filter(image: np.ndarray) -> np.ndarray:\n \"\"\"\n Choose which filter to apply to both images, this could be a combination too\n :param image: the image to be filtered\n :return:\n \"\"\"\n # choose filters to apply\n return clahe(image)\n\n return apply_filter(left_img), apply_filter(right_img)", "def 
cs4243_filter_faster(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n ###Your code here####\n \n # pad image to handle border pixels\n pad_height = (int)((Hk - 1)/2)\n pad_width = (int)((Wk - 1)/2)\n image_pad = pad_zeros(image, pad_height, pad_width)\n \n # compute effective output size, assume stride=1\n out_height = 1 + Hi - Hk + 2*pad_height\n out_width = 1 + Wi - Wk + 2*pad_width\n \n # get initial nodes of receptive fields\n recep_fields_h = [i for i in range(out_height)]\n recep_fields_w = [i for i in range(out_width)]\n \n # extract receptive area into matrix of shape (Hi*Wi, Hk*Wk)\n recep_areas = []\n for i in recep_fields_h:\n for j in recep_fields_w:\n recep_areas.append(image_pad[i: i+Hk, j: j+Wk].reshape(-1))\n out = np.stack(recep_areas)\n \n # Flip the kernel horizontal and vertical\n kernel = cs4243_rotate180(kernel).reshape(Hk*Wk, 1)\n \n # dot product kernel and receptive areas\n filtered_image = np.dot(out, kernel).reshape(Hi, Wi)\n \n ###\n\n return filtered_image", "def img_from_weights(self, rows=None, cols=None, row_gap=1, col_gap=1, eps=1e-4):\r\n\r\n n_in, n_out = self.f_list[0].value.shape\r\n\r\n if rows is None and cols is None:\r\n rows = int(numpy.sqrt(n_out))\r\n if cols is None:\r\n cols = n_out // rows\r\n if n_out % rows: cols+=1\r\n if rows is None:\r\n rows = n_out // cols\r\n if n_out % cols: rows+=1\r\n\r\n filter_shape = self.filter_shape\r\n height = rows * (row_gap + filter_shape[0]) - row_gap\r\n width = cols * (col_gap + filter_shape[1]) - col_gap\r\n\r\n out_array = numpy.zeros((height, width, 3), dtype='uint8')\r\n\r\n w = self.w.value\r\n w_col = 0\r\n def pixel_range(x):\r\n return 255 * (x - x.min()) / (x.max() - x.min() + eps)\r\n\r\n for r in xrange(rows):\r\n out_r_low = r*(row_gap + filter_shape[0])\r\n out_r_high = out_r_low + filter_shape[0]\r\n for c in xrange(cols):\r\n out_c_low = c*(col_gap + filter_shape[1])\r\n out_c_high = out_c_low + filter_shape[1]\r\n out_tile = out_array[out_r_low:out_r_high, out_c_low:out_c_high,:]\r\n\r\n if c % 3 == 0: # linear filter\r\n if w_col < w.shape[1]:\r\n out_tile[...] 
= pixel_range(w[:,w_col]).reshape(filter_shape+(1,))\r\n w_col += 1\r\n if c % 3 == 1: # E filters\r\n if w_col < w.shape[1]:\r\n #filters after the 3rd do not get rendered, but are skipped over.\r\n # there are only 3 colour channels.\r\n for i in xrange(min(self.n_E_quadratic,3)):\r\n out_tile[:,:,i] = pixel_range(w[:,w_col+i]).reshape(filter_shape)\r\n w_col += self.n_E_quadratic\r\n if c % 3 == 2: # S filters\r\n if w_col < w.shape[1]:\r\n #filters after the 3rd do not get rendered, but are skipped over.\r\n # there are only 3 colour channels.\r\n for i in xrange(min(self.n_S_quadratic,3)):\r\n out_tile[:,:,2-i] = pixel_range(w[:,w_col+i]).reshape(filter_shape)\r\n w_col += self.n_S_quadratic\r\n return Image.fromarray(out_array, 'RGB')", "def filterfish(nofish, rawfish):\n\n # we're going to need negative numbers for this next bit\n fish = np.int16(rawfish)\n\n # find the difference between the images\n thresh = 30\n fish = fish - thresh\n fish = fish - nofish\n\n# # RGB thresholds\n# r_thresh = 3\n# g_thresh = 3\n# b_thresh = 3\n#\n# # any color below its threshold gets zeroed\n# # any color above its threshold gets set to 255\n# fish[:,:,0] = 255*(fish[:,:,0]>r_thresh)\n# fish[:,:,1] = 255*(fish[:,:,1]>g_thresh)\n# fish[:,:,2] = 255*(fish[:,:,2]>b_thresh)\n\n # add up all the colors and average them\n fish = (fish[:,:,0] + fish[:,:,1] + fish[:,:,2]) // 3\n\n # anything that's less than 255 gets tossed\n # we only want pixels that passed on all three thresholds\n fish[fish<255]=0\n fish[fish==255]=1\n\n labeled, how_many_labels = ndimage.label(fish)\n sizes = ndimage.sum(fish, labeled, range(how_many_labels + 1))\n max_label = list(sizes).index(max(sizes))\n labeled[labeled!=max_label]=0\n labeled[labeled==max_label]=255\n\n return np.uint8(labeled)", "def image_enhancement(img):\r\n new_img = []\r\n height = len(img)\r\n width = len(img[0])\r\n for j in range(height-1):\r\n new_img += [[]]\r\n for i in range(width-1):\r\n new_img[2*j] += [img[j][i]]\r\n new_img[2*j] += [np.uint8((int(img[j][i]) + int(img[j][i+1])) / 2)]\r\n new_img[2*j] += [img[j][width-1], img[j][width-1]]\r\n\r\n new_img += [[]]\r\n for i in range(width-1):\r\n new_img[2*j+1] += [np.uint8((int(img[j][i]) + int(img[j+1][i])) / 2)]\r\n new_img[2*j+1] += [np.uint8((int(img[j][i]) + int(img[j][i+1]) + int(img[j+1][i]) + int(img[j+1][i+1])) / 4)]\r\n new_img[2*j+1] += [np.uint8((int(img[j][width-1]) + int(img[j+1][width-1])) / 2),\r\n np.uint8((int(img[j][width-1]) + int(img[j+1][width-1])) / 2)]\r\n\r\n return np.array(new_img)", "def filter(self, img: np.ndarray) -> np.ndarray:\n raise NotImplemented", "def slice_array():\n img = Image.open(\"flamingo.jpg\")\n image_as_array = np.array(img)\n width, height, depth = image_as_array.shape\n\n red_channel = image_as_array[:, :, 0]\n green_channel = image_as_array[:, :, 1]\n blue_channel = image_as_array[:, :, 2]\n\n top_left_corner = image_as_array[:height // 2, :width // 2, :]\n top_right_corner = image_as_array[:height // 2, width // 2:, :]\n random_middle_pixels = image_as_array[11:29, 101:400, :]", "def downsampling(inp_img):\n\n\n img = np.array(inp_img)\n f = max(1, np.rint(np.amin(img)/256))\n\n if f > 1:\n lpf = np.ones((f, f))\n f = (1/(f*f))*lpf\n img = cv2.filter2D(img, -1, kernel=f)\n out = np.hstack((img[:, :, 0], img[:, :, 1], img[:, :, 2]))\n\n return out", "def cut4(image):\r\n i, j = image.shape\r\n a1 = image[:i // 2, :j // 2]\r\n a2 = image[i // 2:, :j // 2]\r\n a3 = image[:i // 2, j // 2:]\r\n a4 = image[i // 2:, j // 2:]\r\n return a1, a2, a3, a4", 
"def guided_filter(I, p, r, eps, s=None):\n if p.ndim == 2:\n p3 = p[:,:,np.newaxis]\n else:\n p3 = p\n\n out = np.zeros_like(p3)\n for ch in range(p3.shape[2]):\n out[:,:,ch] = _gf_colorgray(I, p3[:,:,ch], r, eps, s)\n return np.squeeze(out) if p.ndim == 2 else out", "def imgFiltering(inputPath, outputPath):\n\t# open the target image\n\tpollenImg = IJ.openImage(inputPath)\n\t\n\t# Create duplicator\n\tduplicator = Duplicator()\n\t\n\t# Duplicate the image with channel 1\n\tpollenImgCopy = duplicator.run(pollenImg, 1, 1, 1, 1, 1, 1);\n\t\n\t# set auto threshold\n\t# IJ.setAutoThreshold(pollenImgCopy, \"Default dark\");\n\t\n\t# set threshold\n\tIJ.setThreshold(pollenImgCopy, 17000, 65520)\n\t\n\t# Call the Thresholder to convert the image to a mask\n\tIJ.run(pollenImgCopy, \"Convert to Mask\", \"\")\n\t\n\t# create result table\n\trt = ResultsTable()\n\t\n\t# create particle analyzer\n\tpAnalyzer = ParticleAnalyzer(ParticleAnalyzer.SHOW_NONE, Measurements.ALL_STATS, rt, 20.0, 1000.0, 0.5 ,1.0)\n\t\n\t# Analyze the particle\n\tpAnalyzer.analyze(pollenImgCopy)\n\t\n\t# Save results as csv\n\trt.saveAs(outputPath)", "def glGetConvolutionFilter( baseFunction, target, format, type ):\r\n from OpenGL.error import glCheckError\r\n glCheckError(None)\r\n dims = (\r\n glGetConvolutionParameteriv( target, GL_CONVOLUTION_WIDTH )[0],\r\n )\r\n if target != GL_CONVOLUTION_1D:\r\n dims += (\r\n glGetConvolutionParameteriv( target, GL_CONVOLUTION_HEIGHT )[0],\r\n )\r\n # is it always 4? Seems to be, but the spec/man-page isn't really clear about it...\r\n dims += (4,)\r\n array = images.images.SetupPixelRead( format, dims, type )\r\n arrayType = arrays.GL_CONSTANT_TO_ARRAY_TYPE[\r\n images.images.TYPE_TO_ARRAYTYPE.get(type,type)\r\n ]\r\n baseFunction(\r\n target, format, type,\r\n ctypes.c_void_p( arrayType.dataPointer(array))\r\n )\r\n return array", "def apply_filter(image: np.ndarray) -> np.ndarray:\n # choose filters to apply\n return clahe(image)", "def read_images(image_info, image_dims):\r\n num_examples = len(image_info)\r\n num_pixels = int(image_dims[0]*image_dims[1]*image_dims[2])\r\n locations, classes = zip(*image_info)\r\n output_array = np.zeros((num_examples, num_pixels+1), dtype=np.float32)\r\n for entry in range(num_examples):\r\n if entry % 100 == 0:\r\n print('reading image: '+str(entry)+'/'+str(num_examples))\r\n output_array[entry, 0] = classes[entry] # image classes\r\n input_image = skio.imread(locations[entry], as_grey=False) # read in a grayscale image\r\n output_image = sktf.resize(input_image, image_dims) # interpolate down to image_dims (including channels)\r\n \"\"\"normalize images by color channel, with fuzz factor to avoid div0\"\"\"\r\n maxpx = np.zeros((1, image_dims[2])) # store max/min for each channel\r\n minpx = np.zeros((1, image_dims[2]))\r\n for i in range(image_dims[2]): # find max/min for each channel\r\n maxpx[0, i] = np.max(output_image[:, :, i])\r\n if maxpx[0, i] == float(0):\r\n maxpx[0, i] = 1e-12 # fuzz factor\r\n minpx[0, i] = np.min(output_image[:, :, i])\r\n \"\"\"flatten and store\"\"\"\r\n for i in range(image_dims[2]):\r\n output_array[entry, 1+i*(image_dims[0]*image_dims[1]):1+(i+1)*(image_dims[0]*image_dims[1])] = \\\r\n np.ravel((output_image[:, :, i] - minpx[0, i]) / (maxpx[0, i] - minpx[0, i]))\r\n return output_array", "def filters(im, detail=False, sharpen=False, **kwargs):\n filters = []\n if detail:\n filters.append(('detail', True))\n if sharpen:\n filters.append(('sharpen', True))\n return im", "def chip_image1(img, chip_size=(300, 
300)):\n width, height, _ = img.shape\n wn, hn = chip_size\n images = np.zeros((int(width / wn) * int(height / hn), wn, hn, 3))\n k = 0\n for i in tqdm(range(int(width / wn))):\n for j in range(int(height / hn)):\n chip = img[wn * i:wn * (i + 1), hn * j:hn * (j + 1), :3]\n images[k] = chip\n\n k = k + 1\n\n return images.astype(np.uint8)", "def quantize(im_orig, n_quant, n_iter):\n color_flag = False\n image = im_orig\n error = list()\n\n\n\n if len(im_orig.shape) == 3: #RGB image\n color_flag = True\n y_im = rgb2yiq(im_orig)\n image = y_im[:, :, 0]\n\n if np.all(image <= 1):\n image *= NORMALIZE\n my_hist, bins = np.histogram(image, 256, (0,255))\n hist_cum = np.cumsum(my_hist)\n\n\n\n z_array = np.array([0]*(n_quant+1)) #init the z_array\n z_array[0] = 0 #minimal value\n z_array[-1] = 255 #maximal value\n\n q_array = np.zeros(n_quant) #init the q_array\n pixel_per_z = (hist_cum[-1] / n_quant)\n\n\n\n\n for i in range(1, n_quant): #Getting the z_array (not optimal)\n z_array[i] =np.argwhere(hist_cum>=(pixel_per_z*i)).astype(np.uint8)[0][0] #first element to be true\n\n g = np.arange(256)\n\n for index in range(n_iter):\n z_copy = z_array.copy()\n\n errors_per_iter = np.zeros(n_quant)\n\n for i in range(n_quant): #q calculation\n start = (z_array[i])+1\n end = (z_array[i+1] + 1)\n hist_work = my_hist[start:end]\n g_work = g[start:end]\n sum_up = np.sum(g_work * hist_work) # g*hist\n sum_down = np.sum(hist_work)\n if sum_down!=0:\n q_array[i] = sum_up/sum_down\n else:\n q_array[i] = 0\n\n for i in range(n_quant): # error calculating after optimisation of z\n start = int(z_array[i])+1\n end = int(z_array[i + 1]) + 1\n err = np.sum(((np.around(q_array[i]) - g[start:end]) ** 2) * my_hist[start:end])\n errors_per_iter[i] = err\n error.append(np.sum(errors_per_iter))\n\n for i in range(1, n_quant): #First and last element already defined\n z_array[i] = ((q_array[i-1]) + (q_array[i])) / 2 #optimization of the z parts\n\n if np.array_equal(z_array, z_copy):\n break\n\n\n\n\n\n\n look_up_table = np.array([]) #create look up table\n look_up_table = np.append(look_up_table, [q_array[0]])\n\n for i in range(1, 1 + n_quant):\n num = q_array[i-1]\n array_use = np.array([num] * int(z_array[i] - z_array[i-1]))\n temp_array = np.append(look_up_table, array_use) #fill the look up table\n look_up_table = temp_array\n\n look_up_table = np.append(look_up_table, [q_array[-1]])\n\n im_quant = look_up_table[image.astype(np.uint8)]\n im_quant /= NORMALIZE\n\n if color_flag:\n y_im[:, :, 0] = im_quant\n im_quant = yiq2rgb(y_im)\n\n return [im_quant, error]" ]
[ "0.6416802", "0.6152852", "0.6090951", "0.60450697", "0.58665884", "0.57959175", "0.57541287", "0.5740798", "0.57349724", "0.57258534", "0.57112247", "0.56901896", "0.561851", "0.56069106", "0.5605767", "0.55927825", "0.5592644", "0.55604106", "0.5551992", "0.55455166", "0.55439425", "0.5542755", "0.55086595", "0.55020183", "0.54996544", "0.54946005", "0.54860365", "0.5478259", "0.5465874", "0.54439896" ]
0.6429137
0
Takes an array of 4 elements (the output of the dwt function) and returns an array in which the elements of the list that are addressed through the levels array are replaced by dwt versions of them (replacing 1 element with a list of 4 elements)
def dwt_levels(filtered_image, levels, quantization_Array): assert len(levels) <= 4 for level in levels: filtered_image[level[0]] = dwt( filtered_image[level[0]], quantization_Array) try: # continue recursively dwt_levels(filtered_image[level[0]], level[1], quantization_Array) except IndexError: # happens when level has one element # we are done, no recursive lists left continue
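A minimal usage sketch of dwt_levels follows (editorial addition, not part of the dataset record). The dwt subband transform it relies on is represented here by a Haar-style stand-in; the codebase's real dwt, which appears among the negatives of the next record, uses 5-tap/3-tap filters instead, and the quantization divisors below are assumed values.

import numpy as np

def dwt(image_array, quantization_Array):
    # Haar-style stand-in for the codebase's dwt (which uses 5/3-tap filters):
    # returns the four quantized subbands [LL, LH, HL, HH] at half resolution.
    a = image_array[0::2, 0::2]
    b = image_array[0::2, 1::2]
    c = image_array[1::2, 0::2]
    d = image_array[1::2, 1::2]
    bands = [(a + b + c + d) / 4.0,   # LL
             (a + b - c - d) / 4.0,   # LH
             (a - b + c - d) / 4.0,   # HL
             (a - b - c + d) / 4.0]   # HH
    return [np.round(band / q).astype(int)
            for band, q in zip(bands, quantization_Array)]

quantization_Array = [1, 2, 2, 4]                         # assumed divisors, one per subband
image = np.random.randint(0, 256, (64, 64)).astype(float)

filtered_image = dwt(image, quantization_Array)           # first level: [LL, LH, HL, HH]

# Decompose the LL band (index 0), then the LL of that result once more.
# Each entry of `levels` is [subband_index, nested_levels] or just [subband_index].
levels = [[0, [[0]]]]
dwt_levels(filtered_image, levels, quantization_Array)

# filtered_image[0] is now itself a list of four subbands whose first element
# is again a list of four subbands, i.e. three decomposition levels in total.

The try/except IndexError inside dwt_levels is what lets a plain [subband_index] entry (one with no nested list) terminate the recursion.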
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getLevels():", "def render_pyramid(pyr, levels):\n positionLst = []\n finalLst = []\n if levels > len(pyr):\n print(\"error. number of levels to display is more than max_levels\")\n width = 0\n\n for i in range(levels):\n # streching each layer\n pyr[i] = strech_helper(pyr[i])\n width += pyr[i].shape[1]\n positionLst.append((pyr[i].shape[0], pyr[i].shape[1]))\n\n for i in range(levels):\n zeros = np.zeros(shape=(pyr[0].shape[0], pyr[i].shape[1]))\n zeros[:positionLst[i][0], :positionLst[i][1]] = pyr[i]\n finalLst.append(zeros)\n res = np.concatenate(finalLst, axis=1)\n return res", "def fullfact(levels):\n n = len(levels) # number of factors\n nb_lines = np.prod(levels) # number of trial conditions\n H = np.zeros((nb_lines, n))\n \n level_repeat = 1\n range_repeat = np.prod(levels)\n for i in range(n):\n range_repeat //= levels[i]\n lvl = []\n for j in range(levels[i]):\n lvl += [j]*level_repeat\n rng = lvl*range_repeat\n level_repeat *= levels[i]\n H[:, i] = rng\n \n return H", "def levels(self):\n return np.array(self._levels()).T", "def reconstructWPT(self,new_wp,wavelet,listleaves):\n # Sort the list of leaves into order bottom-to-top, left-to-right\n working = listleaves.copy()\n working = working[-1::-1]\n\n level = int(np.floor(np.log2(working[0] + 1)))\n while level > 0:\n first = 2 ** level - 1\n while working[0] >= first:\n # Note that it assumes that the whole list is backwards\n parent = (working[0] - 1) // 2\n p = self.ConvertWaveletNodeName(parent)\n\n new_wp[p].data = pywt.idwt(new_wp[self.ConvertWaveletNodeName(working[1])].data,new_wp[self.ConvertWaveletNodeName(working[0])].data, wavelet)[:len(new_wp[p].data)]\n\n # Delete these two nodes from working\n working = np.delete(working, 1)\n working = np.delete(working, 0)\n # Insert parent into list of nodes at the next level\n ins = np.where(working > parent)\n if len(ins[0]) > 0:\n ins = ins[0][-1] + 1\n else:\n ins = 0\n working = np.insert(working, ins, parent)\n level = int(np.floor(np.log2(working[0] + 1)))\n return new_wp", "def demodulate(wave):\n\n levels = np.frombuffer(wave, np.uint8)\n levels = np.array(levels, dtype = np.float16)\n max_data = np.max(levels) # Assuming it contains real '\\xff'\n\n # Leveling data\n\n bins = np.linspace(0, max_data, 5)\n levels= np.digitize(levels, bins) - 1\n levels[levels == 4] = 3 # Resolving edge issue\n \n return levels", "def _distribute(p,t):\r\n\t\t\r\n\t\t# begin list with power\r\n\t\tl = [[p]]\r\n\t\tfor i in range(t - 1):\r\n\t\t\t\r\n\t\t\t# expand the first members\r\n\t\t\tfor n,j in enumerate(l):\r\n\t\t\t\tf = Li._fracture(j[0])\r\n\t\t\t\t\r\n\t\t\t\t# recombine with tails\r\n\t\t\t\tl[n] = [k + j[1:] for k in f]\r\n\t\t\t\r\n\t\t\t# unpack lists\r\n\t\t\tl = [k for j in l for k in j]\r\n\t\t\t\r\n\t\t# make tuples\r\n\t\tl = [tuple(i) for i in l]\r\n\t\t\t\t\r\n\t\treturn l", "def utility(result, depth):\n if result == 'X':\n return [10 - depth, [0, 0]]\n elif result == 'O':\n return [depth - 10, [0, 0]]\n else:\n return [0, [0, 0]]", "def calc_levels(ply_order, n_plies_in_groups, n_groups):\r\n levels_in_groups = [None]*n_groups\r\n for ind_group in range(n_groups):\r\n levels_in_groups[ind_group] = []\r\n\r\n ind_all_plies = 0\r\n for ind_group in range(n_groups):\r\n for ind_plies in range(n_plies_in_groups[ind_group]):\r\n levels_in_groups[ind_group].append(ply_order[ind_all_plies])\r\n ind_all_plies += 1\r\n\r\n return levels_in_groups", "def make_steer_frs(dims, numlevels, numorientations, bandwidth):\n \n result = []\n bands=[]\n p = 
numorientations-1\n const = math.sqrt(float(math.pow(2,(2*p))*math.pow(math.factorial(p),2)) / float(math.factorial(2*p)*(p+1)))\n f1 = freqspace(dims[0])\n f2 = freqspace(dims[1])\n wx, wy = np.meshgrid(f1, f2)\n size = wx.shape\n r = np.sqrt(wx**2 + wy**2)\n theta = np.arctan2(wy, wx) \n \n bands = np.full((numlevels, numorientations, dims[0], dims[1]), const*1j)\n for level in range(numlevels):\n for orientation in range(numorientations):\n theta_offset = orientation * np.pi / numorientations\n ctrfreq = pi / math.pow(2, (level+1)*bandwidth)\n band = np.cos(theta - theta_offset)**p * log_raised_cos(r, ctrfreq, bandwidth)\n bands[level,orientation,:,:] *= band\n \n hi = log_raised_coshi(r, pi / math.pow(2, bandwidth), bandwidth)\n\n lo = log_raised_coslo(r, pi / math.pow(2, bandwidth * numlevels), bandwidth)\n \n result.append(hi)\n result.append(bands)\n result.append(lo)\n return result", "def max_diaphragmatic_level(levels):\n return [max(x) for x in levels]", "def variateOneWeight(weights):\n sts = []\n for i in range(len(weights)):\n st = [x[0] for x in weights[:i]]\n for w in weights[i][1:]:\n subs = []\n subs += st\n subs.append(w)\n for w2 in weights[i+1:]:\n subs.append(w2[0])\n sts.append((w.name, subs))\n return sts", "def reconstruct_w_modified_dcoeffs(full_coeffs, mod_dcoeffs, wavelet_type: str = 'db2', level: int = 1):\n full_coeffs[-level] = mod_dcoeffs\n return waverec(coeffs=full_coeffs, wavelet=wavelet_type)", "def init(cls, levels: List[str]) -> List[Level]:\n return [cls(lvl, val) for val, lvl in enumerate(levels)]", "def levels_to_data(levels):\n\n b4_conv_fact = [1, 4, 16, 64]\n levels = levels.reshape(levels.size / 4, 4)\n data = np.array(np.dot(levels, b4_conv_fact), dtype = np.uint8)\n\n return data", "def zernike_Double_Index(nlevels):\n \n\t \n if not (nlevels>=0):\n print('Input parameter nlevels must be >= 0')\n raise AssertionError()\n \n if (nlevels == 0):\n \n m = 0\n n = 0\n \n return n, m\n \n else:\n \n # ++++ Defining layout for row number n and colunmn number m ++++++++\n\n row_n = nlevels+1\n col_m = 2*nlevels +1\n x = np.arange(row_n)\n y = np.arange(-(col_m-1)//2, (col_m+1)//2,1)\n Q = [(i,j) for i in x for j in y]\n #\n\n\n nm_index = []\n \n top = (col_m + 1)/2\n leftside = row_n*col_m - col_m + 1\n rightside = row_n*col_m \n\n k1 = 0; k2 = 0\n\n for i in xrange(top,row_n*col_m+1, 2*col_m):\n\n nm_index.append(Q[i-1])\n s1 = i + col_m + 1\n s2 = i + col_m - 1 \n jj1 = k1\n jj2 = k2\n\n\n while (s2 <= leftside): \n\n nm_index.append(Q[s2-1])\n s2 +=col_m - 1\n jj1 += 1\n jj2 -= 1\n\n leftside +=2\n\n jj1 = k1\n jj2 = k2\n\n while (s1 <= rightside): \n\n # \n nm_index.append(Q[s1-1])\n s1 +=col_m + 1\n jj1 += 1\n jj2 += 1\n\n rightside -=2\n k1 = 0; k2 += 2\n\n n = np.array(nm_index)[:,0]\n m = np.array(nm_index)[:,1]\n\n return n, m", "def new_w(w, d):\n\n if w.sum() > 0:\n next_w = w.copy()\n next_w[next_w > 0] -= 1\n return next_w\n else:\n if d[0] == 1:\n return np.array([51,0,0])\n elif d[1] == 1:\n return np.array([0,51,0])\n else:\n return np.array([0,0,51])", "def level_sets(self):\n in_degrees = self.in_degree(labels=True)\n level = [x for x in in_degrees if in_degrees[x]==0]\n Levels = []\n while len(level) != 0:\n Levels.append(level)\n new_level = []\n for x in level:\n for y in self.neighbors_out(x):\n in_degrees[y] -= 1\n if in_degrees[y] == 0:\n new_level.append(y)\n level = new_level\n return Levels", "def permute(l):\n\n if len(l) <= 1:\n return [l]\n\n elif len(l) == 2:\n return [l, l[::-1]]\n\n else:\n output = []\n current_level 
= l[0]\n prev_level = permute(l[1:])\n\n for element in prev_level: # For each answer from previous level\n for pos in range(len(element) + 1): # For each possible position to put this level value\n temp_list = []\n temp_element = element.copy()\n for i in range(len(element) + 1):\n if pos == i:\n temp_list.append(current_level)\n else:\n temp_list.append(temp_element.pop())\n output.append(temp_list)\n return output", "def get_levels(tree, level=0):\n if type(tree) == list:\n return [level]+get_levels(tree[0], level+1)+get_levels(tree[1], level+1)\n elif type(tree) == tuple:\n return [level, level]+get_levels(tree[1], level+1)\n else:\n return [level]", "def generate_cuts(depths, side=SIDE_LENGTH):\n for num, den in depths:\n ad = num * side / den\n poly = Polygon([(0, 0), (side, 0), (side, ad), (0, ad)])\n yield poly", "def depth_setsW(self):\n return [map(self._vertex_to_element, depth) for depth in\n self._hasse_diagram.depth_sets()]", "def drizzle_array_groups(sci_list, wht_list, wcs_list, scale=0.1, kernel='point', pixfrac=1., verbose=True):\n from drizzlepac.astrodrizzle import adrizzle\n from stsci.tools import logutil\n log = logutil.create_logger(__name__)\n \n # Output header / WCS \n header, outputwcs = compute_output_wcs(wcs_list, pixel_scale=scale)\n shape = (header['NAXIS2'], header['NAXIS1'])\n \n # Output arrays\n outsci = np.zeros(shape, dtype=np.float32)\n outwht = np.zeros(shape, dtype=np.float32)\n outctx = np.zeros(shape, dtype=np.int32)\n \n # Do drizzle\n N = len(sci_list)\n for i in range(N):\n if verbose:\n log.info('Drizzle array {0}/{1}'.format(i+1, N))\n \n adrizzle.do_driz(sci_list[i].astype(np.float32, copy=False), \n wcs_list[i], \n wht_list[i].astype(np.float32, copy=False),\n outputwcs, outsci, outwht, outctx, 1., 'cps', 1,\n wcslin_pscale=wcs_list[i].pscale, uniqid=1, \n pixfrac=pixfrac, kernel=kernel, fillval=0, \n stepsize=10, wcsmap=None)\n \n return outsci, outwht, outctx, header, outputwcs", "def init_four_d_array(dimens, val):\n w, x, y, z = dimens\n return [[[[val for l in range(z)]\n for k in range(y)]\n for j in range(x)]\n for i in range(w)]", "def power_4(s):\n result = [[]]\n for i in s:\n result.extend(list([subset + [i] for subset in result]))\n return result", "def get_depths(self, variables):\n\n return [0.]", "def parse_level_data(build_dir=\"\"):\n build_dir = os.path.join(build_dir, \"ENSDF\")\n\n level_list = []\n files = sorted([f for f in glob.glob(os.path.join(build_dir, \"ensdf.*\"))])\n for f in files:\n print(\" building level data from {0}\".format(f))\n level_list = ensdf.levels(f, level_list)\n\n level_list_array = np.array(level_list, dtype=level_dtype)\n\n return level_list_array", "def z(self):\n data = np.ma.array(np.zeros(self.n_levels()), mask=True)\n for i in range(self.n_levels()):\n if self.profile_data[i]['Missing']: continue\n data[i] = self.profile_data[i]['Depth']\n return data", "def build_tree(self, w):\n w_abs = np.abs(w)\n if sum(w_abs) != 1.:\n w_abs = w_abs / sum(w_abs)\n self.w = w_abs\n self.tree = np.zeros(w.shape)\n self._build_node(w_abs, 1)\n self.w_apx = extract_distribution(self.tree)\n\n n_levels = np.ceil(np.log2(len(w)))\n self.lfsr = []\n for n in range(int(n_levels)):\n seed = np.random.randint(1, int(2**(self.lfsr_nbits-n)-1))\n self.lfsr.append(LFSR(self.lfsr_nbits-n, seed))", "def get_levels(std0, slope, nsigma):\n nslope = nsigma * slope\n levels = [0]\n while levels[-1] <= 1:\n levels.append((levels[-1] * (1 + nslope) + 2 * nsigma * std0) / (1 - nslope))\n levels.pop()\n return levels" ]
[ "0.5550804", "0.53843755", "0.5264342", "0.5262979", "0.5254136", "0.5248846", "0.52218556", "0.52117604", "0.5206292", "0.51703376", "0.51466304", "0.5125481", "0.51155955", "0.5099238", "0.50898075", "0.5077054", "0.50622743", "0.5059354", "0.501936", "0.4992671", "0.49830204", "0.49751493", "0.49742717", "0.49435902", "0.49386346", "0.49373344", "0.49124524", "0.49062523", "0.48692027", "0.48628694" ]
0.71002924
0
This function takes the output of dwt_levels and serializes the list. The serialization is done in order of appearance in the filtered_image.
def dwt_serialize(filtered_image, output, length): for i in filtered_image: if isinstance(i, list): # append the output of the recursion to the main arguments (output, # length) output_temp, length_temp = dwt_serialize(i, [], []) output = output + output_temp length.append(length_temp) else: # append the data of the serialized elements to the main arguments # (output,length) new_output = (serialize(i, True).tolist()) output = output+new_output length = length+[len(new_output)] return output, length
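A companion sketch for dwt_serialize, continuing from the dwt_levels sketch above (editorial addition, not part of the dataset record). The serialize helper it calls is not included in this record, so a plain row-major ravel stands in for it; the real scan order may differ.

def serialize(band, flatten=True):
    # Stand-in for the codebase's serialize: flatten the subband row by row
    # into a NumPy array. The second argument is accepted only to match the
    # call serialize(i, True); its real meaning is not shown in this record.
    return np.asarray(band).ravel()

# `filtered_image` is the nested subband list produced in the dwt_levels sketch above.
output, length = dwt_serialize(filtered_image, [], [])

# `output` holds every coefficient in order of appearance (4096 for the 64x64
# input above); `length` mirrors the nesting, e.g.
# [[[64, 64, 64, 64], 256, 256, 256], 1024, 1024, 1024].
print(len(output), length)

Note that dwt_serialize grows length by mutation (append) for nested sub-lists but by rebinding (+) for leaf subbands; because the return values are what the caller uses, both paths end up reflected in the returned length.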
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dwt_levels(filtered_image, levels, quantization_Array):\n assert len(levels) <= 4\n for level in levels:\n filtered_image[level[0]] = dwt(\n filtered_image[level[0]], quantization_Array)\n try:\n # continue recursively\n dwt_levels(filtered_image[level[0]],\n level[1], quantization_Array)\n except IndexError:\n # happens when level has one element\n # we are done, no recursive lists left\n continue", "def _sort_ds(self):\n d = []\n for layer in self.structure:\n if (layer.type == 'Layer' or layer.type == 'Substrate'):\n d.append(layer.thickness)\n d.insert(0, self.structure[0].thickness)\n d.append(self.structure[-1].thickness)\n d = np.asarray(d)\n return d", "def getLevels():", "def test_image_at_levels(image_name, percentages, blur=False, blur_amount=10):\n # img = skimage.io.imread(os.path.join(IMAGE_DIR, image_name[:-3]+'JPEG'))\n img = cv2.imread(os.path.join(IMAGE_DIR, image_name[:-3]+'JPEG'))\n # mask_img = skimage.io.imread(os.path.join(MASK_DIR, image_name))\n mask_img = cv2.imread(os.path.join(MASK_DIR, image_name))\n results = []\n level_list = get_ntiles_for_img(mask_img, percentages)\n print(level_list)\n for level in level_list:\n masked_image = make_masked_image(img, mask_img, level, blur, blur_amount)\n cv2.imshow('img',masked_image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n # Transform image for VGG\n masked_image = cv2.resize(masked_image, (224,224)).astype(np.float32)\n masked_image[:,:,0] -= 103.939\n masked_image[:,:,1] -= 116.779\n masked_image[:,:,2] -= 123.68\n masked_image = masked_image.transpose((1,0,2))\n masked_image = np.expand_dims(masked_image, axis=0)\n out = model.predict(masked_image)\n ordered_idx = np.argsort(-out)\n print(out.max(), ordered_idx[0][0])\n result = (CallResult.lines[int(ordered_idx[0][0])], out[0][ordered_idx[0]][0])\n results.append(result)\n\n return results", "def getDepths(imgs, img_names, intersectionCoords, stakeValidity, templateIntersections,\n upperBorder, tensors, actualTensors, intersectionDist, blobDistTemplate, debug, debug_directory,\n image_dates, imageSummary):\n\n # list containing median depths for each image\n median_depths = list()\n median_depths_est = list()\n\n # contains output data for JSON file\n depth_output = {}\n\n # num of images\n num_images = len(imgs)\n\n # create output dictionary for images\n depths = dict()\n\n # create excel workbook and add worksheet\n dest = str(debug_directory) + 'snow-depths.xlsx'\n workbook = xlsxwriter.Workbook(dest)\n worksheet = workbook.add_worksheet()\n worksheet.set_column(0, len(tensors) + 3, 25)\n\n # create format\n cell_format = workbook.add_format()\n cell_format.set_align('center')\n\n # add titles\n worksheet.write(0, 0, \"Image\", cell_format)\n worksheet.write(0, 1, \"Date\", cell_format)\n worksheet.write(0, len(tensors) + 2, \"Median Depth (mm)\", cell_format)\n worksheet.write(0, len(tensors) + 3, \"Median Estimate (mm)\", cell_format)\n for i, j in enumerate(tensors):\n worksheet.write(0, i+2, (\"Stake %s\" % str(i)), cell_format)\n\n # start from the first cell\n row = 1\n col = 0\n\n # image iterator\n iterator = 0\n\n # iterate through images\n for img_ in tqdm.tqdm(imgs):\n # create an image to overlay points on if debugging\n if(debug):\n img_overlay = img_.copy()\n\n # list to hold calculated depths\n depths_stake = list()\n estimate_stake = list()\n\n # get image name\n img_name = img_names[iterator]\n\n # reset column\n col = 0\n\n # write to excel file\n worksheet.write(row, col, img_name, cell_format)\n if isinstance(image_dates[iterator], 
datetime.datetime):\n worksheet.write(row, col + 1, image_dates[iterator].strftime('%x %X'), cell_format)\n col = 2\n\n # get intersection coordiantes\n coords_stake = intersectionCoords[img_name]\n\n # get blob intersection distances\n intersection_dist_stake = intersectionDist[img_name]\n\n # iterate through stakes in image\n for i, stake in enumerate(coords_stake):\n # if stake is valid and intersection point was found\n if stakeValidity[img_name][i] and stake[\"average\"][1] != False:\n # add reference circles to output image if debugging\n # shows intersection point of image with reference to template\n if(debug):\n cv2.circle(img_overlay, (int(templateIntersections[i][0]), int(templateIntersections[i][1]) - upperBorder), 5, (255,0,0), 3)\n cv2.circle(img_overlay, (int(stake[\"average\"][0]), int(stake[\"average\"][1])), 5, (0,255,0), 2)\n\n # calculate change in snow depth in mm\n tensor = actualTensors[img_name][i] if actualTensors[img_name][i] != True else tensors[i]\n depth_change = ((templateIntersections[i][1] - upperBorder) - stake[\"average\"][1]) * tensor\n\n # calculate change in snow depth using blob distances\n distances_stake = list()\n for w, x in enumerate(intersection_dist_stake[i]):\n if x != False:\n distances_stake.append((abs(blobDistTemplate[i][w]) - abs(x)) * tensor)\n distance_estimate = statistics.median(distances_stake) if len(distances_stake) > 0 else 0\n\n # write to excel file\n worksheet.write(row, col + i, \"%.2f (%.2f)\" % (depth_change, distance_estimate), cell_format)\n\n # add to list\n depths_stake.append(depth_change)\n estimate_stake.append(distance_estimate)\n\n # if stake wasn't valid or intersection point not found\n else:\n # if stake was valid\n if stakeValidity[img_name][i]:\n worksheet.write(row, col + i, \"Not Found\", cell_format)\n # invalid stake\n else:\n worksheet.write(row, col + i, \"Invalid Stake\", cell_format)\n\n # append false to array\n depths_stake.append(False)\n estimate_stake.append(False)\n\n # output debug image\n if(debug):\n cv2.imwrite(debug_directory + img_name, img_overlay)\n\n # add list to dictionary\n depths[img_name] = depths_stake\n\n # determine median depth\n valid_depths = [x for x in depths_stake if x != False]\n valid_estimates = [x for x in estimate_stake if x != False]\n\n if(len(valid_depths) > 0):\n median = statistics.median(valid_depths)\n median_est = statistics.median(valid_estimates)\n else:\n median = False\n median_est = False\n\n # add to median depth list\n median_depths.append(median)\n median_depths_est.append(median_est)\n\n # write median to excel file\n if median != False and median > 0:\n worksheet.write(row, len(tensors) + 2, \"%.2f\" % median, cell_format)\n worksheet.write(row, len(tensors) + 3, \"%.2f\" % median_est, cell_format)\n elif median != False:\n worksheet.write(row, len(tensors) + 2, \"0.0\", cell_format)\n worksheet.write(row, len(tensors) + 3, \"0.0\", cell_format)\n else:\n worksheet.write(row, len(tensors) + 2, \"n/a\", cell_format)\n worksheet.write(row, len(tensors) + 3, \"n/a\", cell_format)\n\n # increment row\n row += 1\n\n # increment iterator\n iterator += 1\n\n # update image summary\n imageSummary[img_name][\" \"] = \"\"\n imageSummary[img_name][\"Stake (Depth Calculation)\"] = \"Depth (mm) Estimate (mm)\"\n for e, depth in enumerate(depths_stake):\n if isinstance(depth, float):\n imageSummary[img_name][\" %d \" % (e+1)] = \"%0.2f %0.2f \" % \\\n (depth, estimate_stake[e])\n else:\n imageSummary[img_name][\" %d \" % (e+1)] = \"%s %s \" % \\\n (\"n/a\", 
\"n/a\")\n\n # close workbook\n workbook.close()\n\n # remove negative values\n filterSet = zip(median_depths, median_depths_est, image_dates)\n filterSet = [(x, y, z) for x, y, z in filterSet if x != False]\n median_depths, median_depths_est, image_dates = zip(*filterSet)\n median_depths = np.asarray(median_depths).clip(0)\n median_depths_est = np.asarray(median_depths_est).clip(0)\n\n # generate plot\n fig,ax = plt.subplots(1)\n plt.plot(image_dates, median_depths)\n plt.plot(image_dates, median_depths_est)\n plt.gcf().autofmt_xdate()\n plt.legend(['Median Depth', 'Median Estimate'], loc='upper left')\n ax.set_xlabel(\"Date\")\n ax.set_ylabel(\"Snow Depth (mm)\")\n plt.xticks(rotation=75)\n plt.tight_layout()\n\n # save figure\n plt.savefig(debug_directory + \"depth-graph.jpg\")\n plt.close()\n\n # return dictionary containing snow depth changes\n return depths, imageSummary", "def write_readouts(path, dataset_dict, image_list, datasettype, mask_part,\n do_wt1_signal, do_dach1_signal, do_stereology_pred, do_stereology_gt):\n\n titles = []\n for i in range(len(image_list)):\n image_name = os.path.split(image_list[i])[1]\n titles.append(image_name[:-4])\n\n # Segmentation of only 1 class was applied (e.g. glomerulus or podocytes)\n if len(mask_part) == 1:\n mask_el = mask_part.pop()\n\n if mask_el == \"glomerulus\":\n network_area = \"glomerulus_area\"\n # Add a column if GET_WT1_SIGNAL_FOR_GLOMERULUS = True\n if do_wt1_signal:\n df = pd.DataFrame(\n {'image_name': pd.Series(titles),\n network_area: pd.Series(dataset_dict['area_preds_%s' % mask_el]),\n 'mean_WT1_signal_in_glom': pd.Series(dataset_dict['mean_WT1_glom_preds']),\n 'var_WT1_signal_in_glom': pd.Series(dataset_dict['var_WT1_glom_preds']),\n 'median_WT1_signal_in_glom': pd.Series(dataset_dict['median_WT1_glom_preds']),\n 'min_WT1_signal_in_glom': pd.Series(dataset_dict['min_WT1_glom_preds']),\n 'max_WT1_signal_in_glom': pd.Series(dataset_dict['max_WT1_glom_preds']),\n 'perc25_WT1_signal_in_glom': pd.Series(dataset_dict['perc25_WT1_glom_preds']),\n 'perc75_WT1_signal_in_glom': pd.Series(dataset_dict['perc75_WT1_glom_preds'])})\n else:\n df = pd.DataFrame({'image_name': pd.Series(titles),\n network_area: pd.Series(dataset_dict['area_preds_%s' % mask_el])})\n\n elif mask_el == \"podocytes\":\n network_count = \"podocyte_count\"\n network_area = \"podocyte_nuclear_area\"\n # Add a column if GET_DACH1_SIGNAL_FOR_PODOCYTES = True\n if do_dach1_signal:\n df = pd.DataFrame({'image_name': pd.Series(titles),\n network_count: pd.Series(dataset_dict['count_preds_%s' % mask_el]),\n network_area: pd.Series(dataset_dict['area_preds_%s' % mask_el]),\n 'mean_DACH1_signal_in_podo': pd.Series(dataset_dict['mean_DACH1_podo_preds']),\n 'var_DACH1_signal_in_podo': pd.Series(dataset_dict['var_DACH1_podo_preds']),\n 'median_DACH1_signal_in_podo': pd.Series(dataset_dict['median_DACH1_podo_preds']),\n 'min_DACH1_signal_in_podo': pd.Series(dataset_dict['min_DACH1_podo_preds']),\n 'max_DACH1_signal_in_podo': pd.Series(dataset_dict['max_DACH1_podo_preds']),\n 'perc25_DACH1_signal_in_podo': pd.Series(dataset_dict['perc25_DACH1_podo_preds']),\n 'perc75_DACH1_signal_in_podo': pd.Series(dataset_dict['perc75_DACH1_podo_preds'])\n })\n else:\n df = pd.DataFrame({'image_name': pd.Series(titles),\n network_count: pd.Series(dataset_dict['count_preds_%s' % mask_el]),\n network_area: pd.Series(dataset_dict['area_preds_%s' % mask_el])})\n\n else:\n raise ValueError('The name of the segmentation is not known:', mask_el)\n\n savepath = str(os.path.join(path, datasettype + 
'_Dataframe_' + mask_el))\n df.to_csv(savepath + '.csv')\n df.to_excel(savepath + '.xlsx')\n\n # Segmentation of 2 classes were applied (e.g. glomerulus and podocytes)\n elif len(mask_part) == 2:\n df = pd.DataFrame(\n {'image_name': pd.Series(titles),\n \"glomerulus_area\": pd.Series(dataset_dict['area_preds_%s' % mask_part[0]]),\n \"podocyte_count\": pd.Series(dataset_dict['count_preds_%s' % mask_part[1]]),\n \"podocyte_nuclear_area\": pd.Series(dataset_dict['area_preds_%s' % mask_part[1]])})\n\n # Add a column if GET_WT1_SIGNAL_FOR_GLOMERULUS = True\n if do_wt1_signal:\n df['mean_WT1_signal_in_glom'] = dataset_dict['mean_WT1_glom_preds']\n df['var_WT1_signal_in_glom'] = dataset_dict['var_WT1_glom_preds']\n df['median_WT1_signal_in_glom'] = dataset_dict['median_WT1_glom_preds']\n df['min_WT1_signal_in_glom'] = dataset_dict['min_WT1_glom_preds']\n df['max_WT1_signal_in_glom'] = dataset_dict['max_WT1_glom_preds']\n df['perc25_WT1_signal_in_glom'] = dataset_dict['perc25_WT1_glom_preds']\n df['perc75_WT1_signal_in_glom'] = dataset_dict['perc75_WT1_glom_preds']\n\n # Add a column if GET_DACH1_SIGNAL_FOR_PODOCYTES = True\n if do_dach1_signal:\n df['mean_DACH1_signal_in_podo'] = dataset_dict['mean_DACH1_podo_preds']\n df['var_DACH1_signal_in_podo'] = dataset_dict['var_DACH1_podo_preds']\n df['median_DACH1_signal_in_podo'] = dataset_dict['median_DACH1_podo_preds']\n df['min_DACH1_signal_in_podo'] = dataset_dict['min_DACH1_podo_preds']\n df['max_DACH1_signal_in_podo'] = dataset_dict['max_DACH1_podo_preds']\n df['perc25_DACH1_signal_in_podo'] = dataset_dict['perc25_DACH1_podo_preds']\n df['perc75_DACH1_signal_in_podo'] = dataset_dict['perc75_DACH1_podo_preds']\n\n if do_stereology_pred:\n stereo_dict = get_stereology_readouts(dataset_dict, mask_part, titles, mode='pred')\n # Add it to df\n df['stereology_on_prediction-glomerular_volume_per_million'] = stereo_dict[\"glomerular_volume_per_million\"]\n df['stereology_on_prediction-podocyte_count'] = stereo_dict[\"podocyte_count\"]\n df['stereology_on_prediction-podocyte_density'] = stereo_dict[\"podocyte_density\"]\n\n if do_stereology_gt:\n stereo_dict = get_stereology_readouts(dataset_dict, mask_part, titles, mode='gt')\n # Add it to df\n df['stereology_on_groundtruth-glomerular_volume_per_million'] = stereo_dict[\"glomerular_volume_per_million\"]\n df['stereology_on_groundtruth-podocyte_count'] = stereo_dict[\"podocyte_count\"]\n df['stereology_on_groundtruth-podocyte_density'] = stereo_dict[\"podocyte_density\"]\n\n savepath = str(os.path.join(path, datasettype + '_Dataframe_' + mask_part[0] + mask_part[1]))\n df.to_csv(savepath + '.csv')\n df.to_excel(savepath + '.xlsx')\n return", "def get_vrt_band_list():\n logger.debug('get_vrt_band_list() called')\n vrt_band_list = []\n#===============================================================================\n# sensor_dict = self.bands[tile_type_id][(dataset_info['satellite_tag'], dataset_info['sensor_name'])]\n# # log_multiline(logger.debug, sensor, 'Sensor', '\\t')\n# for file_number in sorted(sensor_dict.keys()):\n# band_info = sensor_dict[file_number]\n# if band_info['level_name'] == 'NBAR':\n# dataset_dir = dataset_info['nbar_dataset_path']\n# dataset_id = dataset_info['nbar_dataset_id']\n# processing_level = dataset_info['nbar_level_name']\n# nodata_value = dataset_info['nbar_nodata_value']\n# resampling_method = dataset_info['nbar_resampling_method']\n# elif band_info['level_name'] == 'ORTHO':\n# dataset_dir = dataset_info['l1t_dataset_path']\n# dataset_id = dataset_info['l1t_dataset_id']\n# 
processing_level = dataset_info['l1t_level_name']\n# nodata_value = dataset_info['l1t_nodata_value']\n# resampling_method = dataset_info['l1t_resampling_method']\n# else:\n# continue # Ignore any pan-chromatic and derived bands\n# \n# dataset_dir = os.path.join(dataset_dir, 'scene01')\n# filename = find_file(dataset_dir, band_info['file_pattern'])\n# vrt_band_list.append({'file_number': band_info['file_number'], \n# 'filename': filename, \n# 'name': band_info['band_name'],\n# 'dataset_id': dataset_id,\n# 'band_id': band_info['band_id'],\n# 'processing_level': processing_level,\n# 'nodata_value': nodata_value,\n# 'resampling_method': resampling_method,\n# 'tile_layer': band_info['tile_layer']})\n#===============================================================================\n \n #TODO: Make this able to handle multiple derived layers\n for band_level in ['FC']:\n derived_bands = self.bands[tile_type_id][('DERIVED', band_level)]\n for file_number in sorted(derived_bands.keys()):\n band_info = derived_bands[file_number]\n file_pattern = band_info['file_pattern']\n dataset_dir = os.path.join(dataset_info['fc_dataset_path'], 'scene01')\n dataset_id = dataset_info['fc_dataset_id']\n filename = find_file(dataset_dir, file_pattern) \n processing_level = dataset_info['fc_level_name']\n nodata_value = dataset_info['fc_nodata_value'] # Should be None for FC\n resampling_method = dataset_info['fc_resampling_method']\n vrt_band_list.append({'file_number': None, \n 'filename': filename, \n 'name': band_info['band_name'],\n 'dataset_id': dataset_id,\n 'band_id': band_info['band_id'],\n 'processing_level': processing_level,\n 'nodata_value': nodata_value,\n 'resampling_method': resampling_method,\n 'tile_layer': 1})\n \n log_multiline(logger.debug, vrt_band_list, 'vrt_band_list = %s', '\\t')\n return vrt_band_list", "def write(self, data, filename):\n id_ = 1\n weightlist_el = Element('weight-list')\n for dataset in data:\n weight_el = SubElement(weightlist_el, 'weight')\n id_el = SubElement(weight_el, 'id')\n id_el.text = str(id_)\n date_el = SubElement(weight_el, 'date')\n date_el.text = str(dataset.date) + 'T12:00:00'\n value_el = SubElement(weight_el, 'value')\n value_el.text = str(dataset.weight)\n comment_el = SubElement(weight_el, 'comment')\n comment_el.text = dataset.note\n id_ += 1\n st_tree = ElementTree(weightlist_el)\n st_tree.write(filename, encoding='UTF-8')", "def encode(self):\r\n result = []\r\n\r\n # Generate header\r\n result += Ensemble.generate_header(self.ds_type,\r\n self.num_elements,\r\n self.element_multiplier,\r\n self.image,\r\n self.name_len,\r\n self.Name)\r\n\r\n # Add the data\r\n for beam in range(self.element_multiplier):\r\n for bin_num in range(self.num_elements):\r\n val = self.Amplitude[bin_num][beam]\r\n result += Ensemble.float_to_bytes(val)\r\n\r\n return result", "def __level_entries_list__(self):\n # | - __level_entries_list__\n level_entries_dict = self.level_entries\n level_labels = self.tree_level_labels\n\n level_entries_list = []\n for param_i in level_labels:\n # for name, params_list in level_entries_dict.iteritems():\n for name, params_list in level_entries_dict.items():\n if param_i == name:\n level_entries_list.append(params_list)\n\n return(level_entries_list)\n # __|", "def depr_depth_images(self):\n if not hasattr(self, '_depr_depth_images'):\n depth_images = self._archive['gaps_depth']\n self._depr_depth_images = depth_images.reshape([1, 20, 224, 224, 1])\n return self._depr_depth_images", "def get_output_bands(self):\n 
dlist=self.dest_list.children()\n out_list=[]\n for item in dlist:\n out_list.append((self.output_bands[item][0],\n self.output_bands[item][1]))\n return out_list", "def create_json(self):\n data = {\"image_id\": self.ids, \"img_path\": self.img_paths, \"bg\": self.bgs}\n if hasattr(self, \"bbox\"):\n data[\"bbox\"] = self.bbox\n if hasattr(self, \"masks\"):\n data[\"masks\"] = self.masks\n with open(f\"{self.save_path}{self.name}/json/images_info.json\", \"w\") as f:\n json.dump(data, f)", "def build_sp_levels(self, im, height, use_band_fb=True):\n\n if height <= 0:\n return [[im]]\n\n bands = []\n\n if use_band_fb:\n bands_tmp = correlate_and_downsample(im, self.filter_set.band_fb)\n #imshow(bands_tmp, 'bands_tmp')\n for i in range(0, bands_tmp.shape[2]):\n bands.append(bands_tmp[:,:,i])\n else:\n for filt in self.filter_set.band_filts:\n band = correlate_and_downsample(im, filt)\n bands.append(band)\n\n lo = correlate_and_downsample(im, self.filter_set.lo_filt, 2)\n\n print lo.shape\n pyramid_below = self.build_sp_levels(lo, height-1)\n\n return [bands] + pyramid_below", "def write_info_file(self, n_levels, voxel_size=(1800, 1800, 2000)):\n if not os.path.exists(self.dest):\n os.mkdir(self.dest)\n d = dict(data_type = self.dtype.name,\n mesh=\"mesh\",\n num_channels=1,\n type=\"image\")\n scales = []\n z_extent = self.z_extent\n y_extent = self.y_extent\n x_extent = self.x_extent\n for level in range(1, n_levels + 1):\n resolution = self.resolution(level)\n scales.append(\n dict(chunk_sizes=[[64, 64, 64]],\n encoding=\"raw\",\n key=\"%d_%d_%d\" % (resolution, resolution, resolution),\n resolution=[resolution * _ for _ in voxel_size],\n size=[x_extent, y_extent, z_extent],\n voxel_offset=[0, 0, 0]))\n z_extent = (z_extent + 1) // 2\n y_extent = (y_extent + 1) // 2\n x_extent = (x_extent + 1) // 2\n d[\"scales\"] = scales\n with open(os.path.join(self.dest, \"info\"), \"w\") as fd:\n json.dump(d, fd, indent=2, sort_keys=True)", "def transform(self, imgList):\n res = []\n for img in tqdm(imgList):\n y_mean = np.mean(img, axis=1)\n self.get_filtration(y_mean)\n seg = self.get_segments()\n seg = sorted(seg, key=lambda x:x[0])\n res.append(seg)\n return res", "def get_sorted_img_list():\n dirPath=settings.BASE_DIR\n imgdir=\"/pttWeb/static/topicmodel\"\n fileID=glob.glob(dirPath+imgdir+\"/*.png\")\n fileID=[i.replace('/home/stream/Documents/minimum_django/pttWeb/static/','') for i in fileID]\n fileID=[Week_Image(i) for i in fileID]\n fileID.sort(key=lambda x: x.date, reverse=True)\n #translate . 
to / since javascript parsing date has some issue!\n fileID=[(i.filename,date_trans_z(i.date.strftime(\"%Y.%m.%d\"))) for i in fileID]\n return fileID", "def dwt(image_array, quantization_Array):\n # Create the high pass and low pass filters\n # both filters are non-causal\n # symmetric\n # [-2, -1, 0, 1, 2]\n LPF = [-0.125, 0.25, 0.75, 0.25, -0.125]\n LPF_center = 2\n\n # [ -2,-1, 0]\n HPF = [-0.5, 1, -0.5]\n HPF_center = 2\n\n nrow, ncol = image_array.shape\n\n # create an array that will contain the 4 different subbands of the image\n LL = np.zeros((nrow, ncol))\n LH = np.zeros((nrow, ncol))\n HL = np.zeros((nrow, ncol))\n HH = np.zeros((nrow, ncol))\n filtered_image = [LL, LH, HL, HH]\n\n # filtering the rows using a low pass and high pass filters\n LowPass_rows = np.zeros((nrow, ncol))\n HighPass_rows = np.zeros((nrow, ncol))\n for i in range(0, nrow):\n LowPass_rows[i, :] = lfilter(LPF, image_array[i, :], LPF_center)\n HighPass_rows[i, :] = lfilter(HPF, image_array[i, :], HPF_center)\n\n # down sample rows.\n # which means we will have half the number of columns\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][:, ::2]\n\n # apply filters accross columns\n for i in range(0, ncol):\n LL[:, i] = lfilter(LPF, LowPass_rows[:, i], LPF_center)\n LH[:, i] = lfilter(HPF, LowPass_rows[:, i], HPF_center)\n HL[:, i] = lfilter(LPF, HighPass_rows[:, i], LPF_center)\n HH[:, i] = lfilter(HPF, HighPass_rows[:, i], HPF_center)\n\n # down sample columns and quantize\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][::2, :]\n filtered_image[i] = np.round(\n filtered_image[i]/quantization_Array[i]).astype(int)\n\n return filtered_image", "def sort(self):\n\n img_files = os.listdir(self.path)\n\n img_list = {}\n\n for img_file in img_files:\n filename = os.path.join(self.path, img_file)\n\n try:\n img = Image.open(filename)\n except:\n continue\n\n print \"Analyzing %s\" % img_file\n\n points = self.points(img.size[0], img.size[1])\n key = \"\"\n for point in points:\n\n # Get the average color for each point\n ave_points = self.diamond_points(point[0], point[1])\n red = 0\n green = 0\n blue = 0\n for ave_point in ave_points:\n try:\n rgb = img.getpixel(ave_point)\n red += rgb[0]\n green += rgb[1]\n blue += rgb[2]\n except IndexError:\n pass\n red /= len(ave_points)\n green /= len(ave_points)\n blue /= len(ave_points)\n\n # Bitdepths:\n # 12 bit - 4096 colors, range 0-F, divide by 16\n # 9 bit - 512 colors, range 0-7, divide by 32\n # 6 bit - 64 colors, range 0-3, divide by 64\n # 3 bit - 8 colors, range 0-1, divide by 128\n\n if self.num_colors == 8:\n div = 128\n elif self.num_colors == 64:\n div = 64\n elif self.num_colors == 512:\n div = 32\n elif self.num_colors == 4096:\n div = 16\n else:\n self.usage()\n\n # Lower the bitdepth\n red = int(red / div)\n green = int(green / div)\n blue = int(blue / div)\n\n # Add to the key\n key += \"%x%x%x\" % (red, green, blue)\n\n # Add the key if needed\n if key not in img_list:\n img_list[key] = []\n\n # Add the file to the list\n img_list[key].append(img_file)\n\n # Go through and rename the files, based on the img_list dictionary\n # and the prefix\n num = 1\n for img in sorted(img_list.iterkeys()):\n for filename in sorted(img_list[img]):\n name, ext = os.path.splitext(filename)\n new_filename = \"%s%04d%s\" % (self.prefix, num, ext)\n full_filename = os.path.join(self.path, filename)\n full_new_filename = os.path.join(self.path, new_filename)\n if os.path.isfile(full_new_filename):\n print 
\"File %s exists - aborting!\" % full_new_filename\n return\n\n os.rename(full_filename, full_new_filename)\n print \"Renamed %s to %s.\" % (filename, new_filename)\n num += 1", "def buildRegFilterList(self, filename, listname='regFilterList'):", "def create_line_list(self,depth_arr):\n\n '''\n depth_arr- depth image as numpy array\n '''\n\n try:\n body=[['shoulder_line',[self.rpts[11],self.rpts[12]]],['waist_line',[self.rpts[23],self.rpts[24]]],['left_shoulder_waist',[self.rpts[11],self.rpts[23]]],['right_shoulder_waist',[self.rpts[12],self.rpts[24]]],['right_thigh',[self.rpts[24],self.rpts[26]]],['left_thigh',[self.rpts[23],self.rpts[25]]],['right_leg',[self.rpts[26],self.rpts[28]]],['left_leg',[self.rpts[25],self.rpts[27]]],['right_forearm',[self.rpts[14],self.rpts[16]]],['left_forearm',[self.rpts[13],self.rpts[15]]],['right_bicep',[self.rpts[12],self.rpts[14]]],['left_bicep',[self.rpts[11],self.rpts[13]]]]\n self.linelist.points=[]\n self.linelist.header.frame_id = \"kinect_frame\"\n self.linelist.header.stamp = rospy.Time.now()\n self.linelist.type = Marker.LINE_LIST\n \n self.linelist.id=1\n self.linelist.action = Marker.ADD \n self. linelist.scale.x = 0.05\n\n self.linelist.color.g = 1.0\n self.linelist.color.a = 1.0\n\n \n\n for _,pointl in body:\n for pt in pointl:\n depth_val=float(depth_arr[pt[1], pt[0]])\n ptl_x,ptl_y,ptl_z=self.depth_to_xyz(pt[0],pt[1],depth_val)\n \n self.linelist_point=Point()\n self.linelist_point.x = ptl_x\n self.linelist_point.y = ptl_y\n self.linelist_point.z = ptl_z\n self.linelist.points.append(self.linelist_point)\n \n except:\n pass", "def OutputList(self):\n\n if hasattr(self,'fp'):\n fp = self.fp\n else:\n fp = 999\n\n if hasattr(self,'Vdot_ratio'):\n Vdot_ratio = self.Vdot_ratio\n else:\n Vdot_ratio = 1 \n \n return [\n ('M1','-',self.M[0]),\n ('M2','-',self.M[1]),\n ('M3','-',self.M[2]),\n ('M4','-',self.M[3]),\n ('M5','-',self.M[4]),\n ('M6','-',self.M[5]),\n ('M7','-',self.M[6]),\n ('M8','-',self.M[7]),\n ('M9','-',self.M[8]),\n ('M10','-',self.M[9]),\n ('P1','-',self.P[0]),\n ('P2','-',self.P[1]),\n ('P3','-',self.P[2]),\n ('P4','-',self.P[3]),\n ('P5','-',self.P[4]),\n ('P6','-',self.P[5]),\n ('P7','-',self.P[6]),\n ('P8','-',self.P[7]),\n ('P9','-',self.P[8]),\n ('P10','-',self.P[9]),\n ('Heat Loss Fraction','-',fp),\n ('Displacement scale factor','-',Vdot_ratio),\n ('Power','W',self.W),\n ('Mass flow rate','kg/s',self.mdot_r),\n ('Inlet Temperature','K',self.Tin_r),\n ('Inlet Pressure','kPa',self.pin_r),\n ('Outlet Temperature','K',self.Tout_r),\n ('Outlet Pressure','kPa',self.pout_r),\n ('Inlet Enthalpy','J/kg',self.hin_r),\n ('Outlet Enthalpy','J/kg',self.hout_r),\n ('Overall isentropic efficiency','-',self.eta_oi),\n ('Pumped flow rate','m^3/s',self.Vdot_pumped)\n ]", "def export_filters(filters):\n result = []\n fses = sorted(filters[0].get_fses())\n\n for fs in fses:\n filter_array = []\n group_spec = {'fs': fs, 'channels': filter_array}\n for filt in filters:\n filter_array.append(filt.export(fs))\n result.append(group_spec)\n\n result = {'filters':result}\n return result", "def output_files(self):\n o = []\n if 'unweighted' in self.event_types:\n o.append(self.name + \"_unweighted_events.lhe.gz\")\n if 'weighted' in self.event_types:\n o.append(self.name + \"_events.lhe.gz\")\n return o", "def to_json(self):\n return json.dumps({\n 'boundary_nodes': self.boundary_nodes,\n 'nodes': self.nodes,\n 'levels': self.levels,\n }, indent=4)", "def make_steer_frs(dims, numlevels, numorientations, bandwidth):\n \n result = []\n bands=[]\n p = 
numorientations-1\n const = math.sqrt(float(math.pow(2,(2*p))*math.pow(math.factorial(p),2)) / float(math.factorial(2*p)*(p+1)))\n f1 = freqspace(dims[0])\n f2 = freqspace(dims[1])\n wx, wy = np.meshgrid(f1, f2)\n size = wx.shape\n r = np.sqrt(wx**2 + wy**2)\n theta = np.arctan2(wy, wx) \n \n bands = np.full((numlevels, numorientations, dims[0], dims[1]), const*1j)\n for level in range(numlevels):\n for orientation in range(numorientations):\n theta_offset = orientation * np.pi / numorientations\n ctrfreq = pi / math.pow(2, (level+1)*bandwidth)\n band = np.cos(theta - theta_offset)**p * log_raised_cos(r, ctrfreq, bandwidth)\n bands[level,orientation,:,:] *= band\n \n hi = log_raised_coshi(r, pi / math.pow(2, bandwidth), bandwidth)\n\n lo = log_raised_coslo(r, pi / math.pow(2, bandwidth * numlevels), bandwidth)\n \n result.append(hi)\n result.append(bands)\n result.append(lo)\n return result", "def dump(self):\n cfg = {\n \"Detector\" : \n { \n \"8680\": \n {\n \"CONVENTIONAL\": \n {\n \"1.0\": \n {\n \"1x\": (3.87, 8.51),\n \"2.4x\": (1.6, 6.74),\n \"4.9x\": (0.72, 6.23)\n },\n \n \"3.0\": \n {\n \"1x\": (10.12, 14.07),\n \"2.4x\": (4.2, 11.17),\n \"4.9x\": (1.89, 10)\n }\n \n },\n \n \n \"EMGAIN\": \n {\n \"1.0\" : \n {\n \"1x\": (18.97, 32.25),\n \"2.4x\": (7.61, 19.41),\n \"4.9x\": (3.47, 16.31)\n },\n \n \"3.0\" : \n {\n \"1x\": (46.56, 54.01),\n \"2.4x\": (19.82, 33.3),\n \"4.9x\": (8.84, 26.25)\n },\n \n \"5.0\" : \n {\n \"1x\": (46.49, 70.66),\n \"2.4x\": (19.53, 45.11),\n \"4.9x\": (8.9, 35.87)\n },\n \n \"10.0\" : \n {\n \"2.4x\": (22.45, 52.98),\n \"4.9x\": (10.43, 45.37),\n } \n } \n }, \n \n \"8325\": \n {\n \"CONVENTIONAL\": \n {\n \"1.0\": \n {\n \"1x\": (3.98, 8.64),\n \"2.5x\": (1.6, 6.75),\n \"5.1x\": (0.72, 6.23)\n },\n \n \"3.0\": \n {\n \"1x\": (10.45, 14.42),\n \"2.5x\": (4.14, 10.97),\n \"5.1x\": (1.89, 10.24)\n }\n },\n \n \"EMGAIN\": \n {\n \"1.0\" : \n {\n \"1\": (19.73, 34.13),\n \"2.5x\": (7.88, 20.49),\n \"5.1x\": (3.54, 16.99)\n }, \n \n \"3.0\" : \n {\n \"1\": (48.23, 54.5),\n \"2.5x\": (19.77, 33.41),\n \"5.1x\": (9.04, 27.84)\n },\n \n \"5.0\" : \n {\n \"1\": (50.66, 77.0),\n \"2.5x\": (20.46, 48.08),\n \"5.1x\": (8.7, 35.5)\n },\n \n \"10.0\" : \n {\n \"2.5x\": (22.44, 53.63),\n \"5.1x\": (11.3, 52.55),\n } \n }\n },\n \n \"10522\": \n {\n \"CONVENTIONAL\": \n {\n \"0.1\": \n {\n \"Gain 1\": (3.37, 8.54),\n \"Gain 2\": (0.8, 3.15),\n },\n \n \"1.0\": \n {\n \"Gain 1\": (3.36, 6.51),\n \"Gain 2\": (0.79, 4.59),\n }\n },\n \n \"EMGAIN\": \n {\n \"1.0\" : \n {\n \"Gain 1\": (16.4, 25.1),\n \"Gain 2\": (3.96, 12.3),\n }, \n \n \"10.0\" : \n {\n \"Gain 1\": (16.5, 76.0),\n \"Gain 2\": (4.05, 45.1),\n },\n \n \"20.0\" : \n {\n \"Gain 1\": (17.2, 193.0),\n \"Gain 2\": (4.64, 76.6),\n },\n \n \"30.0\" : \n {\n \"Gain 1\": (18.2, 272.0),\n \"Gain 2\": (5.46, 145.0),\n } \n }\n },\n \n \"10570\": \n {\n \"CONVENTIONAL\": \n {\n \"0.1\": \n {\n \"Gain 1\": (3.31, 8.82),\n \"Gain 2\": (0.8, 3.39),\n },\n \n \"1.0\": \n {\n \"Gain 1\": (3.30, 6.52),\n \"Gain 2\": (0.79, 4.83),\n }\n },\n \n \"EMGAIN\": \n {\n \"1.0\" : \n {\n \"Gain 1\": (16.4, 25.1),\n \"Gain 2\": (4.01, 12.6),\n }, \n \n \"10.0\" : \n {\n \"Gain 1\": (16.5, 85.6),\n \"Gain 2\": (4.06, 42.7),\n },\n \n \"20.0\" : \n {\n \"Gain 1\": (17.5, 142.0),\n \"Gain 2\": (4.81, 76.0),\n },\n \n \"30.0\" : \n {\n \"Gain 1\": (19.3, 256.0),\n \"Gain 2\": (5.88, 166.0),\n } \n }\n }\n },\n \n \"Phot\" :\n {\n \"fwhmpsf\": 6.0,\n \"sigma\": 10.0,\n \"exposure\": \"EXPTIME\",\n \"calgorithm\": \"centroid\",\n \"cbox\" 
: 8,\n \"maxshift\": 5, \n \"salgorithm\": \"median\",\n \"annulus\": 14,\n \"dannulus\": 16,\n \"apertures\": 12,\n \"zmag\": 27.11\n }\n }\n \n \n # Dump the configuration to json output file\n with open(self.conf_fname, \"w\") as fd:\n json.dump(cfg, fd) \n \n return", "def __str__(self):\n output = ['Tile ID {}'.format(self._tileid)]\n for ex, files in self._exposure_files.items():\n filenames = '- exposure {:08d}\\n'.format(ex)\n for f in files:\n filenames = '{} + {}\\n'.format(filenames, f)\n output.append(filenames)\n\n return '\\n'.join(output)", "def getLevels(self):\n levels = self.levels.keys()\n levels.sort()\n a = str(levels)\n \n logger.info('[biospatial.gbif.taxonomy.NestedTaxonomy]\\n Available Levels %s' %a)\n return a", "def build_gaussian_pyramid(im, max_levels, filter_size):\n\n filter_vec = gaus_1d(filter_size).reshape(1,filter_size)\n pyr = []\n pyr.append(im)\n for i in range(max_levels - 1):\n if(im.shape[0] <= 16 or im.shape[1] <= 16):\n break\n\n im = ndimage.filters.convolve(im, filter_vec.T, mode='mirror')\n im = ndimage.filters.convolve(im, filter_vec, mode='mirror')\n\n im = im[::2, ::2]\n pyr.append(im)\n\n return [pyr,filter_vec]" ]
[ "0.670749", "0.51908505", "0.5164347", "0.51631737", "0.5148783", "0.5069291", "0.505505", "0.5051109", "0.50357753", "0.5027362", "0.49688357", "0.49663234", "0.49348238", "0.4930135", "0.48974124", "0.48842168", "0.4855037", "0.4851072", "0.48385602", "0.48064154", "0.4803556", "0.47496969", "0.47371507", "0.473163", "0.47232834", "0.47117445", "0.46980488", "0.46939486", "0.46724656", "0.46721876" ]
0.6651616
1
Add a beam stop of the given radius. If outer, make an annulus.
def set_beam_stop(data, radius, outer=None):
    if hasattr(data, 'qx_data'):
        q = np.sqrt(data.qx_data**2 + data.qy_data**2)
        data.mask = (q < radius)
        if outer is not None:
            data.mask |= (q >= outer)
    else:
        data.mask = (data.x < radius)
        if outer is not None:
            data.mask |= (data.x >= outer)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_arc_piece(self, center, radius, start_angle, stop_angle):\r\n self.pieces.append(Arc(center, radius, start_angle, stop_angle))\r\n self.total_length += self.pieces[-1].get_length()", "def mzi_with_bend(radius: float = 10):\n c = gf.Component()\n mzi = c.add_ref(gf.components.mzi())\n bend = c.add_ref(gf.components.bend_euler(radius=radius))\n bend.connect(\"o1\", mzi.ports[\"o2\"])\n c.add_port(\"o1\", port=mzi.ports[\"o1\"])\n c.add_port(\"o2\", port=bend.ports[\"o2\"])\n return c", "def set_radius(self, radius):\n self.__begin.set_radius(radius)\n self.__end.set_radius(radius)", "def radius(self, radius):\n if radius < 0:\n raise ValueError(\"Radius cannot be negative!\")\n self.radius_log.append(radius)\n self._radius = radius", "def beam_radius(w0, lambda0, z, z0=0, M2=1):\n zz = (z-z0)/z_rayleigh(w0, lambda0, M2)\n return w0*np.sqrt(1+zz**2)", "def elemStop(self, elem):\n stopColor = elem.get('stop-color')\n if not stopColor:\n style = css2dict(elem.get('style'))\n if 'stop-color' in style:\n stopColor = style['stop-color']\n else:\n stopColor = '#000000'\n color = cssColor2Eps(stopColor, 'CMYKRGB')\n offsetString = elem.get('offset').strip()\n if offsetString[-1] == '%':\n offset = float(offsetString[:-1])\n else:\n offset = float(offsetString) * 100\n self.gradients[self.curGradientId]['stops'].append( (offset, color) )", "def decay_radius(initial_radius, i, time_constant):\n return initial_radius * np.exp(-i / time_constant)", "def fillet(self,\n radius,\n points_per_2pi=128,\n max_points=_max_points,\n precision=1e-3):\n two_pi = 2 * numpy.pi\n fracture = False\n\n for jj in range(len(self.polygons)):\n vec = self.polygons[jj].astype(float) - numpy.roll(\n self.polygons[jj], 1, 0)\n length = numpy.sqrt(numpy.sum(vec**2, 1))\n ii = numpy.flatnonzero(length)\n if len(ii) < len(length):\n self.polygons[jj] = self.polygons[jj][ii]\n vec = self.polygons[jj] - numpy.roll(self.polygons[jj], 1, 0)\n length = numpy.sqrt(numpy.sum(vec**2, 1))\n vec[:, 0] = vec[:, 0] / length\n vec[:, 1] = vec[:, 1] / length\n dvec = numpy.roll(vec, -1, 0) - vec\n norm = numpy.sqrt(numpy.sum(dvec**2, 1))\n ii = numpy.flatnonzero(norm)\n dvec[ii, 0] = dvec[ii, 0] / norm[ii]\n dvec[ii, 1] = dvec[ii, 1] / norm[ii]\n theta = numpy.arccos(numpy.sum(numpy.roll(vec, -1, 0) * vec, 1))\n ct = numpy.cos(theta * 0.5)\n tt = numpy.tan(theta * 0.5)\n\n new_points = []\n for ii in range(-1, len(self.polygons[jj]) - 1):\n if theta[ii] > 0:\n a0 = -vec[ii] * tt[ii] - dvec[ii] / ct[ii]\n a0 = numpy.arctan2(a0[1], a0[0])\n a1 = vec[ii + 1] * tt[ii] - dvec[ii] / ct[ii]\n a1 = numpy.arctan2(a1[1], a1[0])\n if a1 - a0 > numpy.pi:\n a1 -= two_pi\n elif a1 - a0 < -numpy.pi:\n a1 += two_pi\n n = max(\n int(\n numpy.ceil(abs(a1 - a0) / two_pi *\n points_per_2pi) + 0.5), 2)\n a = numpy.linspace(a0, a1, n)\n l = radius * tt[ii]\n if l > 0.49 * length[ii]:\n r = 0.49 * length[ii] / tt[ii]\n l = 0.49 * length[ii]\n else:\n r = radius\n if l > 0.49 * length[ii + 1]:\n r = 0.49 * length[ii + 1] / tt[ii]\n new_points.extend(r * dvec[ii] / ct[ii] +\n self.polygons[jj][ii] + numpy.vstack(\n (r * numpy.cos(a),\n r * numpy.sin(a))).transpose())\n else:\n new_points.append(self.polygons[jj][ii])\n self.polygons[jj] = numpy.array(new_points)\n if len(new_points) > max_points:\n fracture = True\n\n if fracture:\n self.fracture(max_points, precision)\n return self", "def ring(radius = 10, width = 0.5, angle_resolution = 2.5, layer = 0):\n D = Device(name = 'ring')\n inner_radius = radius - width/2\n outer_radius = radius + width/2\n n = 
int(np.round(360/angle_resolution))\n t = np.linspace(0, 360, n+1) * pi/180\n inner_points_x = (inner_radius*cos(t)).tolist()\n inner_points_y = (inner_radius*sin(t)).tolist()\n outer_points_x = (outer_radius*cos(t)).tolist()\n outer_points_y = (outer_radius*sin(t)).tolist()\n xpts = inner_points_x + outer_points_x[::-1]\n ypts = inner_points_y + outer_points_y[::-1]\n D.add_polygon(points = (xpts, ypts), layer = layer)\n return D", "def add_entry(self, start_day, start_hour, stop_day, stop_hour, mode, radar=[\"ALL\"]):\n self.entries.append(Entry(self.year, self.month, start_day, start_hour, stop_day, stop_hour, mode, radar))", "def trail(self, trail_radius=0):\n try:\n if self.sum_ang > 2 * pi:\n self.obj.make_trail = False\n else:\n if trail_radius == 0:\n self.obj.trail_radius = trail_radius\n self.obj.make_trail = True\n except AttributeError:\n print(\"ERROR: wrong arguments type while initializing!!\")", "def turn(self,\n radius,\n angle,\n number_of_points=0.01,\n max_points=_max_points,\n final_width=None,\n final_distance=None,\n layer=0,\n datatype=0):\n exact = True\n if angle == 'r':\n delta_i = _halfpi\n delta_f = 0\n elif angle == 'rr':\n delta_i = _halfpi\n delta_f = -delta_i\n elif angle == 'l':\n delta_i = -_halfpi\n delta_f = 0\n elif angle == 'll':\n delta_i = -_halfpi\n delta_f = -delta_i\n elif angle < 0:\n exact = False\n delta_i = _halfpi\n delta_f = delta_i + angle\n else:\n exact = False\n delta_i = -_halfpi\n delta_f = delta_i + angle\n if self.direction == '+x':\n self.direction = 0\n elif self.direction == '-x':\n self.direction = numpy.pi\n elif self.direction == '+y':\n self.direction = _halfpi\n elif self.direction == '-y':\n self.direction = -_halfpi\n elif exact:\n exact = False\n self.arc(radius, self.direction + delta_i, self.direction + delta_f,\n number_of_points, max_points, final_width, final_distance,\n layer, datatype)\n if exact:\n self.direction = _directions_list[int(\n round(self.direction / _halfpi)) % 4]\n return self", "def makeCircleOutline(self):\n #circle defined\n global circ_main\n circ_main = Circle(stroke_color=BLUE).scale(2).shift(LEFT*5)\n\n #dot at circle and dot at center\n global dot_circ\n dot_circ = always_redraw(\n lambda : Dot(circ_main.get_end())\n )\n global dot_center\n dot_center = Dot(LEFT*5)\n \n #line from origin to circle\n global line_circ\n line_circ = always_redraw(\n lambda : Line(start=dot_center.get_center(), end=dot_circ.get_center())\n )\n \n #write stuff\n self.play(Write(dot_circ), Write(line_circ), Write(dot_center))\n self.play(Write(circ_main), run_time=3, rate_func=double_smooth)", "def beamradius(params,z):\n \n w0=params[0] # beam width at waist [e.g. meters]\n zw=params[1] # waist position [e.g. meters]\n lam = params[2] # wavelength [meters]\n \n zR=np.pi*w0**2/lam # Raleigh length [e.g. meters]\n w=w0*np.sqrt(1+((z-zw)/zR)**2) # beam width at z [e.g. meters]\n R=z*(1+(zR/z)**2) # beam phasefront curvature at z\n\n return w,R,zR # values at pos z [e.g. meters]", "def append_circle(p, v, n, center, radius, start_angle, end_angle):\n\n # Fraction of the circle we're covering, in radians.\n angle_span = end_angle - start_angle\n\n # The number of segments we want to use for this span. 
Use 20 for a full circle.\n segment_count = int(math.ceil(20*math.fabs(angle_span)/tau))\n\n for i in range(segment_count + 1):\n th = start_angle + angle_span*i/segment_count\n point = center + v*math.cos(th)*radius + n*math.sin(th)*radius\n p.append(point)", "def add_circle(self, center: Point, point2: Point,\n counts_as_step: bool = True, interesting: bool = False) -> Circle:\n circle = Circle(center=center, point2=point2)\n self.add_step_premade(circle, counts_as_step=counts_as_step, interesting=interesting)\n return circle", "def decay(self, decay_rate, decay_radius):\n self.radius *= (1.0 - decay_radius)\n self.learning_rate *= (1.0 - decay_rate)\n self.neighborhood.radius = self.radius", "def despeckle_by_opening(img_plane, radius=2):\n kernel = morphology.disk(radius)\n morphology.opening(img_plane, out=img_plane, footprint=kernel)\n return img_plane", "def add_ring(self, fill, surf, box=False, rot=None):\n self.radii.append(surf)\n self.box.append(box)\n self.fills.append(fill)\n self.rot.append(rot)", "def set_radius(self, radius):\n self._radius = radius\n self._reset_slot_bounds()", "def update_radius(intersection, new_rad):\n return intersection.update_radius(new_rad)", "def radius(self, radius):\n if radius < 0:\n raise ValueError(\"The radius cannot be negative!\")\n self._radius = radius", "def arc(self,\n radius,\n initial_angle,\n final_angle,\n number_of_points=0.01,\n max_points=_max_points,\n final_width=None,\n final_distance=None,\n layer=0,\n datatype=0):\n warn = True\n cx = self.x - radius * numpy.cos(initial_angle)\n cy = self.y - radius * numpy.sin(initial_angle)\n self.x = cx + radius * numpy.cos(final_angle)\n self.y = cy + radius * numpy.sin(final_angle)\n if final_angle > initial_angle:\n self.direction = final_angle + numpy.pi * 0.5\n else:\n self.direction = final_angle - numpy.pi * 0.5\n old_w = self.w\n old_distance = self.distance\n if final_width is not None:\n self.w = final_width * 0.5\n if final_distance is not None:\n self.distance = final_distance\n if isinstance(number_of_points, float):\n number_of_points = 2 * int(\n abs((final_angle - initial_angle) *\n (radius + max(old_distance, self.distance) *\n (self.n - 1) * 0.5 + max(old_w, self.w)) /\n number_of_points) + 0.5) + 2\n number_of_points = max(number_of_points, 3)\n pieces = int(numpy.ceil(number_of_points / float(max_points)))\n number_of_points = number_of_points // pieces\n widths = numpy.linspace(old_w, self.w, pieces + 1)\n distances = numpy.linspace(old_distance, self.distance, pieces + 1)\n angles = numpy.linspace(initial_angle, final_angle, pieces + 1)\n if (self.w != 0) or (old_w != 0):\n for jj in range(pieces):\n for ii in range(self.n):\n self.polygons.append(numpy.zeros((number_of_points, 2)))\n r0 = radius + ii * distances[jj + 1] - (\n self.n - 1) * distances[jj + 1] * 0.5\n old_r0 = radius + ii * distances[jj] - (\n self.n - 1) * distances[jj] * 0.5\n pts2 = number_of_points // 2\n pts1 = number_of_points - pts2\n ang = numpy.linspace(angles[jj], angles[jj + 1], pts1)\n rad = numpy.linspace(old_r0 + widths[jj],\n r0 + widths[jj + 1], pts1)\n self.polygons[-1][:pts1, 0] = numpy.cos(ang) * rad + cx\n self.polygons[-1][:pts1, 1] = numpy.sin(ang) * rad + cy\n if widths[jj + 1] == 0:\n pts1 -= 1\n pts2 += 1\n if widths[jj] == 0:\n self.polygons[-1][:pts1 - 1, :] = numpy.array(\n self.polygons[-1][1:pts1, :])\n pts1 -= 1\n pts2 += 1\n ang = numpy.linspace(angles[jj + 1], angles[jj], pts2)\n rad = numpy.linspace(r0 - widths[jj + 1],\n old_r0 - widths[jj], pts2)\n if (rad[0] <= 0 or 
rad[-1] <= 0) and warn:\n warnings.warn(\n \"[GDSPY] Path arc with width larger than radius \"\n \"created: possible self-intersecting polygon.\",\n stacklevel=2)\n warn = False\n self.polygons[-1][pts1:, 0] = numpy.cos(ang) * rad + cx\n self.polygons[-1][pts1:, 1] = numpy.sin(ang) * rad + cy\n self.length += abs((angles[jj + 1] - angles[jj]) * radius)\n if isinstance(layer, list):\n self.layers.extend(\n (layer * (self.n // len(layer) + 1))[:self.n])\n else:\n self.layers.extend(layer for _ in range(self.n))\n if isinstance(datatype, list):\n self.datatypes.extend(\n (datatype * (self.n // len(datatype) + 1))[:self.n])\n else:\n self.datatypes.extend(datatype for _ in range(self.n))\n return self", "def increase_radius(self, character):\n character.bombradius += 1", "def create_radius_stops(breaks, min_radius, max_radius):\n num_breaks = len(breaks)\n radius_breaks = scale_between(min_radius, max_radius, num_breaks)\n stops = []\n\n for i, b in enumerate(breaks):\n stops.append([b, radius_breaks[i]])\n return stops", "def addToRunoff(runoff, sro, top, slope, pressure, mannings, delta):\n # We would like to do this:\n # sro += \\\n # runoff * np.sqrt(np.abs(slope)) / mannings * \\\n # np.power(ptop, 5.0/3.0) * delta\n # ... but we can't since it might result in NaN values where\n # runoff is False. Instead, only compute the runoff exactly\n # at locations where runoff is true:\n ww = np.where(runoff)\n sro[ww] += \\\n np.sqrt(np.abs(slope[ww])) / mannings[ww] * \\\n np.power(ptop[ww], 5.0/3.0) * delta[ww]", "def span_step(center: float, span: float, step: float, endpoint: bool=True):\n # True*step/100 in the arange ensures the right boundary is included\n return np.arange(center-span/2, center+span/2+endpoint*step/100, step)", "def add_pin_square_inside(\n component, port, port_length=0.1, layer=LAYER.PORT, label_layer=LAYER.TEXT\n):\n p = port\n a = p.orientation\n ca = np.cos(a * np.pi / 180)\n sa = np.sin(a * np.pi / 180)\n rot_mat = np.array([[ca, -sa], [sa, ca]])\n\n d = p.width / 2\n dx = port_length\n\n dbot = np.array([0, -d])\n dtop = np.array([0, d])\n dbotin = np.array([-dx, -d])\n dtopin = np.array([-dx, +d])\n\n p0 = p.position + _rotate(dbot, rot_mat)\n p1 = p.position + _rotate(dtop, rot_mat)\n ptopin = p.position + _rotate(dtopin, rot_mat)\n pbotin = p.position + _rotate(dbotin, rot_mat)\n polygon = [p0, p1, ptopin, pbotin]\n component.add_polygon(polygon, layer=layer)", "def __init__(self, radius):\n self.radius = radius", "def __init__(self, radius=1, thickness=1, inner_radius=0):\n\n super().__init__()\n self.radius = radius\n self.inner_radius = inner_radius\n self.thickness = thickness" ]
[ "0.5315736", "0.5069563", "0.50269866", "0.4964889", "0.49412605", "0.48725465", "0.48532206", "0.48426268", "0.48292616", "0.47962764", "0.4789855", "0.47101232", "0.47040084", "0.4683362", "0.46718004", "0.46376756", "0.46133748", "0.46002012", "0.45546836", "0.45401353", "0.45390978", "0.45316038", "0.45250463", "0.45225108", "0.45032072", "0.44927177", "0.4488349", "0.44742978", "0.4466552", "0.44662014" ]
0.6520495
0
Choose a parameter range based on parameter name and initial value.
def parameter_range(p, v):
    if p.endswith('_pd_n'):
        return [0, 100]
    elif p.endswith('_pd_nsigma'):
        return [0, 5]
    elif p.endswith('_pd_type'):
        return v
    elif any(s in p for s in ('theta', 'phi', 'psi')):
        # orientation in [-180,180], orientation pd in [0,45]
        if p.endswith('_pd'):
            return [0, 45]
        else:
            return [-180, 180]
    elif 'sld' in p:
        return [-0.5, 10]
    elif p.endswith('_pd'):
        return [0, 1]
    elif p == 'background':
        return [0, 10]
    elif p == 'scale':
        return [0, 1e3]
    elif p == 'case_num':
        # RPA hack
        return [0, 10]
    elif v < 0:
        # Kxy parameters in rpa model can be negative
        return [2*v, -2*v]
    else:
        return [0, (2*v if v > 0 else 1)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def range_params(self, ran, kw):\n specs = {\"range\": (SchemaNode(\"value\"),\n SchemaNode(\"param\").set_attr(\"name\",\"minInclusive\"),\n SchemaNode(\"param\").set_attr(\"name\",\"maxInclusive\")),\n \"length\": (SchemaNode(\"param\").set_attr(\"name\",\"length\"),\n SchemaNode(\"param\").set_attr(\"name\",\"minLength\"),\n SchemaNode(\"param\").set_attr(\"name\",\"maxLength\"))}\n (exact, min_, max_) = specs[kw]\n if (len(ran) == 1 or ran[0] == ran[1]) and ran[0][0] != \"m\":\n elem = exact\n elem.text = ran[0]\n return [elem]\n res = []\n if ran[0][0] != \"m\":\n elem = min_\n elem.text = ran[0]\n res.append(elem)\n if ran[1][0] != \"m\":\n elem = max_\n elem.text = ran[1]\n res.append(elem)\n return res", "def conf_load_par_range(par_def):\n try:\n s,e,n = [float(i) for i in par_def.split(':')]\n except ValueError, e:\n raise ValueError(\n \"Excpected float1:float2:float3 for the range defiction. {}\".format(e)\n )\n par_list = list(np.arange(s,e,n))\n if len(par_list) == 0:\n raise ValueError(\"No parameter values generated.\")\n return par_list", "def _check_paramrange(value, parameter):\n\n if parameter not in PARAMETER_RANGES.keys():\n raise ValueError('parameter {} not found in dictonary {}'\n .format(parameter, PARAMETER_RANGES))\n ranges = PARAMETER_RANGES[parameter]\n lo = ranges[0]\n hi = ranges[1]\n INRANGE = True\n if not (lo <= value < hi):\n INRANGE = False\n\n return INRANGE, lo, hi", "def define_range():\n\n def_range = {'lt': [0.0, 24.0],\n 'lon': [0.0, 360.0],\n 'angle': [0.0, 2.0 * np.pi]}\n\n return def_range", "def check_ranges(self, param_name, param_value):\n\n if np.isscalar(param_value):\n if param_value < 0. or param_value > 1.:\n raise ValueError((param_name, ' should be within range [0,1]'))\n else:\n if param_name == 'zoom_range' or param_name == 'illumination_range':\n if param_value == 1:\n self.dict[param_name] = [1, 1]\n else:\n self.dict[param_name] = [1 - param_value, 1 + param_value]\n else:\n self.dict[param_name] = [0., param_value]\n\n elif len(param_value) == 2:\n\n if param_name != 'zoom_range' and param_name != 'illumination_range' \\\n and (param_value[0] < 0. or param_value[0] > 1. or param_value[1] < 0. or param_value[1] > 1.):\n raise ValueError((param_name, ' should be within range [0,1]'))\n else:\n self.dict[param_name] = [param_value[0], param_value[1]]\n else:\n raise ValueError((param_name, ' should be a float or '\n 'a tuple or list of two floats. 
'\n 'Received arg: '), param_value)", "def parameter_bounds(self):\n for name, bound in self.named_parameter_bounds():\n yield bound", "def add_default_bounds_to_params(params):\n defaults = pd.DataFrame(\n {\"lower_bound\": -np.inf, \"upper_bound\": np.inf},\n index=params.index,\n )\n params = params.combine_first(defaults)\n\n return params", "def __init__(self, low, high, step_name, variable_name):\n super().__init__(step_name, variable_name, list(range(low, high + 1)))\n self.low = min(low, high)\n self.high = max(low, high)", "def estimateParameterValues(self, name, rawData):\n\n if name == self.parameterNames[0]:\n # The parameter of the Bernoulli model is naturally constrained to the [0, 1] interval\n return cint(0, 1, 1000)\n else:\n raise ConfigurationError('Bernoulli model does not contain a parameter \"{}\".'.format(name))", "def set_par_range(self, mins, maxs, frozen):\n self.parmins = mins\n self.parmaxs = maxs\n self.pars_frozen = frozen\n return", "def parse_range(spec, sep='..', default_low=1, default_high=40):\n parsed = spec.split(sep)\n\n x = int(parsed[0]) if parsed[0] != '' else default_low\n if len(parsed) == 2:\n y = int(parsed[1]) if parsed[1] != '' else default_high\n else:\n y = x\n\n return x, y", "def __init__(self, name, minval, maxval, step, startval, maxjumpsize, nmaxsample,\n frozen=False):\n self.name = name\n self.frozen = frozen # if frozen, the value never changes\n\n # parameter space and random walk parameters\n self.values = np.arange(minval, maxval + step, step)\n self._nvalue = len(self.values)\n self._minval = minval\n self._maxval = maxval\n self._step = step\n self._maxjumpsize = maxjumpsize\n self._startval = startval\n\n self._neighborhoods = []\n for value in self.values:\n # neighborhood = all (different) values separated by up to *maxjumpsizes*\n neighboorhood = [i for i in range(self._nvalue)\n if 0 < abs(value - self.values[i]) <= maxjumpsize]\n self._neighborhoods.append(neighboorhood)\n\n # parameter's current index\n i = np.argmin(np.abs(self.values - self._startval))\n if np.abs(self.values[i] - self._startval) > step:\n raise Exception('Starting value out of range')\n self._currentindex = i\n\n # initializing proposed next index\n self._proposednextindex = None\n\n # parameter's samples\n self.samples = np.zeros(nmaxsample)\n self.nsample = 0", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['range'] = self.range\n return paramDict\n # no other additional parameters required", "def named_parameter_bounds(self):\n for name, _ in self.named_parameters():\n yield name, self.bound_for(name)", "def __init__(self, low, high, step_name, variable_name):\n super().__init__(step_name, variable_name, sp_uniform(low, high - low))\n self.low = min(low, high)\n self.high = max(low, high)", "def param_generater(**p):\r\n if p[\"type\"] == int:\r\n return np.arange(p[\"min\"], p[\"max\"] + p[\"delta\"], p[\"delta\"], np.int)\r\n elif p[\"type\"] == float:\r\n return np.arange(p[\"min\"], p[\"max\"] + p[\"delta\"], p[\"delta\"], np.float)\r\n elif p[\"type\"] == bool:\r\n return np.array([0, 1])\r\n else:\r\n raise TypeError", "def get_bounds(self, parameter_name=None):\n if parameter_name is None:\n return [self.get_bounds(p) for p in self.shape_parameters.keys()]\n if parameter_name in self.shape_parameters:\n anchor_settings = list(self.shape_parameters[parameter_name][0].keys())\n return min(anchor_settings), max(anchor_settings)\n elif parameter_name.endswith('_rate_multiplier'):\n for source_name, 
allow_negative in zip(self.source_name_list,self.source_allowed_negative):\n if parameter_name.startswith(source_name) and allow_negative==True:\n return float('-inf'), float('inf')\n return 0, float('inf')\n else:\n raise InvalidParameter(\"Non-existing parameter %s\" % parameter_name)", "def param_bounds(self) -> Optional[Sequence[Tuple[float, float]]]:\n return [(-1.0, 1.0)] * len(list(self.params()))", "def estimateParameterValues(self, name, rawData):\n observations = np.array([d[0] for d in rawData])\n min = np.nanmin(observations)\n max = np.nanmax(observations)\n delta = max - min\n\n if name == self.parameterNames[0]:\n return oint(min-delta, max+delta, 1000)\n else:\n raise ConfigurationError('Gaussian mean model does not contain a parameter \"{}\".'.format(name))", "def __init__(__self__, *,\n max: pulumi.Input[int],\n min: pulumi.Input[int]):\n pulumi.set(__self__, \"max\", max)\n pulumi.set(__self__, \"min\", min)", "def __init__(__self__, *,\n max: pulumi.Input[int],\n min: pulumi.Input[int]):\n pulumi.set(__self__, \"max\", max)\n pulumi.set(__self__, \"min\", min)", "def get_hyperparameter_bounds():\n minf = float(\"-inf\")\n inf = float(\"inf\")\n params = dict(mu=(minf,inf), nu=(0.0 ,inf), r=(0.0, inf), s=(0.0, inf))\n return params", "def __init__(self, ranges=None, *args, **kwargs):\n self.ranges = ranges\n super(DiscreteGeneticAlgorithm, self).__init__(*args, **kwargs)", "def estimateParameterValues(self, name, rawData):\n if name == self.parameterNames[0]:\n # lower is boundary is zero by definition, upper boundary is chosen as 1.25*(largest observation)\n return oint(0, 1.25*np.nanmax(np.ravel(rawData)), 1000)\n else:\n raise ConfigurationError('Poisson model does not contain a parameter \"{}\".'.format(name))", "def variable_range(examples, var):\n if var[1] == 'd':\n range = set()\n for datum in examples:\n range.add(datum[var[0]])\n return range\n else:\n range_min, range_max = 0, 0\n for datum in examples:\n data_val = float(datum[var[0]])\n range_min, range_max = min(range_min, data_val), max(range_max, data_val)\n return (range_min, range_max)", "def get_param_ranges(line_model):\n\n line_models = ['voigt', 'rosato', 'stehle', 'stehle_param', ]\n n_upper_range = [(np.nan, np.nan), (3, 7), (3, 30), (3, 9)]\n e_dens_range = [(np.nan, np.nan), (1e19, 1e22), (1e16, 1e25), (0., 1e22)]\n temp_range = [(np.nan, np.nan), (0.32, 32), (0.22, 110), (0., 1000)]\n b_field_range = [(np.nan, np.nan), (0, 5), (0, 5), (0, 5)]\n\n param_ranges = list(zip(line_models, n_upper_range, e_dens_range, temp_range, b_field_range))\n columns = ['line_model_name', 'n_upper_range', 'e_dens_range', 'temp_range', 'b_field_range']\n param_ranges = pd.DataFrame(data=param_ranges, columns=columns)\n\n n_upper_range = param_ranges['n_upper_range'][param_ranges['line_model_name'] == line_model].values[0]\n e_dens_range = param_ranges['e_dens_range'][param_ranges['line_model_name'] == line_model].values[0]\n temp_range = param_ranges['temp_range'][param_ranges['line_model_name'] == line_model].values[0]\n b_field_range = param_ranges['b_field_range'][param_ranges['line_model_name'] == line_model].values[0]\n\n return n_upper_range, e_dens_range, temp_range, b_field_range", "def conf_load_parameter(fin):\n err_msg = \"Unknown parameter definition. 
Excpected $par_name=(list|range|linspace).\"\n spec = fin.readline().strip().split('=')\n if len(spec) != 2:\n raise EnvironmentError(err_msg)\n par_name, par_def = [s.strip() for s in spec]\n if len(par_def) > 1 and par_def[0] == '[' and par_def[-1] == ']':\n return par_name, conf_load_par_list(par_def)\n elif len(par_def) > 3 and par_def.count(':') == 2 and par_def[-1] == 'l':\n return par_name, conf_load_par_linspace(par_def)\n elif par_def.count(':') == 2:\n return par_name, conf_load_par_range(par_def)\n else:\n raise EnvironmentError(err_msg + \" Found {0} for {1}\".format(par_def,par_name))", "def test_get_range(self):\n pass", "def __init__(self, min: float, max: float):\n super().__init__()\n\n # store input parameters\n self.min = min\n self.max = max", "def from_range(low, high, step=None, value_name='value'):\n def convert(value):\n if not in_range(low, high, step)(value):\n msg = '{} not in range: {}; must be {} <= {} < {}'.format(\n value_name, value, low, value_name, high\n )\n\n if step is not None:\n msg += ' in steps of {}'.format(step)\n raise ValueError(msg)\n\n if step is not None:\n return (value - low) / step\n return value - low\n return convert" ]
[ "0.63689256", "0.6123985", "0.6007537", "0.5822891", "0.5709171", "0.5673385", "0.56482494", "0.56261474", "0.5619638", "0.56101793", "0.5587491", "0.55766535", "0.5572528", "0.5567625", "0.5552738", "0.5534712", "0.55107725", "0.5504369", "0.54925025", "0.5484107", "0.5484107", "0.54195255", "0.5413695", "0.5395087", "0.53910404", "0.5389298", "0.5387072", "0.5371342", "0.5369403", "0.5346455" ]
0.6637906
0
Suppress theta_pd for now until the normalization is resolved. May also suppress complete polydispersity of the model to test models more quickly.
def suppress_pd(pars):
    pars = pars.copy()
    for p in pars:
        if p.endswith("_pd_n"):
            pars[p] = 0
    return pars
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def du(self):\n du = setup_nonlinear_model_du() # Ux, Uy, theta_p\n return du", "def remove_norms(module_: \"WN\") -> \"WN\":\n module_.start = torch.nn.utils.remove_weight_norm(module_.start_conv)\n module_.cond_layer = torch.nn.utils.remove_weight_norm(module_.cond_layer)\n for i, layer_ in enumerate(module_.in_layers):\n layer_ = DepthwiseSeparableConv1d.remove_batch_norm(layer_)\n module_.in_layers[i] = layer_\n for i, layer_ in enumerate(module_.res_skip_layers):\n layer_ = torch.nn.utils.remove_weight_norm(layer_)\n module_.res_skip_layers[i] = layer_\n return module_", "def lnprior_norm_notkin(theta):\n alpha,plateau,ratio = theta\n if alpha < 0 or alpha > 6:\n return -np.inf\n if plateau < 40 or plateau > 150:\n return -np.inf\n if ratio > 1 or ratio < 0:\n return -np.inf\n return 0.0", "def normalisedProductionPDF(p, theta, mDarkPhoton, epsilon, norm):\n return (1. / norm) * dNdPdTheta(p, theta, mDarkPhoton, epsilon)", "def remove_norms(self):\n dev = next(self.parameters()).device\n for name, module in self.named_modules():\n try:\n nn.utils.remove_spectral_norm(module, name='weight_hh_l0')\n print(\"Removed spectral norm from {}\".format(name))\n except:\n pass\n try:\n nn.utils.remove_spectral_norm(module, name='weight_hh_l0_reverse')\n print(\"Removed spectral norm from {}\".format(name))\n except:\n pass\n try:\n nn.utils.remove_weight_norm(module)\n print(\"Removed wnorm from {}\".format(name))\n except:\n pass\n self.to(device=dev)", "def lnprob_norm_notkin(theta,*obs):\n lp = lnprior_norm_notkin(theta)\n if not np.isfinite(lp):\n return -np.inf\n return lp + lnlike_norm_notkin(theta,obs)", "def remove_weight_norm_(self):\n\n def _remove_weight_norm(m):\n try:\n torch.nn.utils.remove_weight_norm(m)\n except ValueError:\n return\n\n self.apply(_remove_weight_norm)", "def normalize(self) -> NoReturn:\n self._ionic_fractions = self._ionic_fractions / np.sum(self._ionic_fractions)", "def turn_off_learning(self):\n self.epsilon = 0\n self.alpha = 0", "def uninformative(self, theta=np.array([])):\n\n return 0.", "def test_hydro_precision_loss():\n frequency_range = np.arange(10000, 800000, 200000)\n pars = [frequency_range, 187, 0.000464748 * 100, 4.09335e-08, 4.88e-6, 997, 997, None]\n reference_psd = np.array([9.82828137e-12, 8.11392808e-16, 8.63496544e-17, 2.24961617e-17])\n np.testing.assert_allclose(passive_power_spectrum_model_hydro(*pars), reference_psd)", "def param_unconstrain(\n self, theta: FloatArray, *, out: Optional[FloatArray] = None\n ) -> FloatArray:\n dims = self.param_unc_num()\n if out is None:\n out = np.zeros(shape=dims)\n elif out.size != dims:\n raise ValueError(\n f\"out size = {out.size} != unconstrained params size = {dims}\"\n )\n err = ctypes.pointer(ctypes.c_char_p())\n rc = self._param_unconstrain(self.model, theta, out, err)\n if rc:\n raise self._handle_error(err.contents, \"param_unconstrain\")\n return out", "def remove_weight_norm(self):\n\n def _remove_weight_norm(m):\n try:\n logging.debug(f\"Weight norm is removed from {m}.\")\n torch.nn.utils.remove_weight_norm(m)\n except ValueError: # this module didn't have weight norm\n return\n\n self.apply(_remove_weight_norm)", "def remove_weight_norm(self):\n\n def _remove_weight_norm(m):\n try:\n logging.debug(f\"Weight norm is removed from {m}.\")\n torch.nn.utils.remove_weight_norm(m)\n except ValueError: # this module didn't have weight norm\n return\n\n self.apply(_remove_weight_norm)", "def reset_uncertainties(self):\n\n # Make a new temporary ExoParameter using the original self.template\n 
# dictionary and copy the uncertainty values.\n blank = ExoParameter(\"fake\", attr_dict=self.template)\n self.uncertainty = blank.uncertainty\n self.uncertainty_lower = blank.uncertainty_lower\n self.uncertainty_upper = blank.uncertainty_upper", "def _remove_noise_in_o2m():\n if line.reconcile_partial_id:\n if currency_id == line.currency_id.id:\n if line.amount_residual_currency <= 0:\n return True\n else:\n if line.amount_residual <= 0:\n return True\n return False", "def _remove_noise_in_o2m():\n if line.reconcile_partial_id:\n if currency_id == line.currency_id.id:\n if line.amount_residual_currency <= 0:\n return True\n else:\n if line.amount_residual <= 0:\n return True\n return False", "def drop_dose(self):\n for fld in (\"doses\", \"ns\", \"means\", \"stdevs\"):\n arr = getattr(self, fld)[:-1]\n setattr(self, fld, arr)\n self._validate()", "def _remove_noise_in_o2m():\n if line.reconcile_partial_id:\n sign = 1\n if currency_id == line.currency_id.id:\n if line.amount_residual_currency * sign <= 0:\n return True\n else:\n if line.amount_residual * sign <= 0:\n return True\n return False", "def neg_lnlike(theta, dtarray, dmagarray, sigmaarray):\n return(-1.0*lnlike(theta, dtarray, dmagarray, sigmaarray))", "def reset_parameters(self):\n for p in self.parameters():\n if len(p.shape) > 1 and p.size(-1) != 2:\n nn.init.xavier_normal_(p)", "def reset_parameters(self):\n for p in self.parameters():\n if len(p.shape) > 1 and p.size(-1) != 2:\n nn.init.xavier_normal_(p)", "def disableAxisClipping():\n dislin.noclip()", "def reset_parameters(self):\n if self.W is not None:\n tanh_gain = weight_init.calculate_gain(\"tanh\")\n weight_init.xavier_normal_(self.W, tanh_gain)\n # self.W.data.uniform_(-0.001, 0.001)", "def no_gradient_fusion():\n pass", "def remove_spurious_landmarks(self):\r\n \r\n remove = np.argwhere(self.lm_counter < 0)\r\n self.lm = np.delete(self.lm, remove, axis=0)\r\n self.lm_cvar = np.delete(self.lm_cvar, remove, axis=0)\r\n self.lm_counter = np.delete(self.lm_counter, remove)\r\n \r\n return # Replace this.\r", "def __init__(self):\n super().__init__()\n self.nan_penalty = nan_penalty\n self.nan_tol = nan_tol", "def _get_noisy_dLdP(self, dLdP_enc):\n raise NotImplementedError(\"MLE de-aggregation is not implemented.\")", "def test_detrend_no_gradient(self):\r\n results = detrend_pcoa(input_fp=self.tmp_pc_fp,\r\n map_fp=None, gradient_variable=None,\r\n suppress_prerotate=False, output_dir=self.output_dir,\r\n HALT_EXEC=False)\r\n self.assertEqual(results['summary'], None)\r\n coords = results['coords']\r\n lines = coords.readlines()\r\n\r\n # ensure one line per sample in detrended pcoa\r\n self.assertEqual(len(lines), len(test_pc.split('\\n')) - 4)\r\n # ensure three columns tab delimited\r\n self.assertEqual(len(lines[0].split('\\t')), 3)", "def setVerboseOff(self):\n self.edLogging.setVerboseOff()" ]
[ "0.5372836", "0.5269935", "0.52588075", "0.5241501", "0.5218389", "0.5217096", "0.5178467", "0.5143914", "0.5118224", "0.5112162", "0.5072177", "0.5007369", "0.5006693", "0.5006693", "0.48975027", "0.4889109", "0.4889109", "0.48763412", "0.48687136", "0.48680446", "0.48546803", "0.48546803", "0.48477894", "0.4846249", "0.484556", "0.48440143", "0.4824529", "0.48224223", "0.480769", "0.48065332" ]
0.5798115
0
Return a model calculator using the OpenCL calculation engine.
def eval_opencl(model_info, data, dtype='single', cutoff=0.):
    try:
        model = core.build_model(model_info, dtype=dtype, platform="ocl")
    except Exception as exc:
        print(exc)
        print("... trying again with single precision")
        model = core.build_model(model_info, dtype='single', platform="ocl")
    calculator = DirectModel(data, model, cutoff=cutoff)
    calculator.engine = "OCL%s"%DTYPE_MAP[dtype]
    return calculator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def makeCalc(self, dataSet):\n\n #cyl = sasmodels.core.load_model_info('cylinder')\n #hs = sasmodels.core.load_model_info('hardsphere')\n #cylhs = sasmodels.core.load_model_info('cylinder@hardsphere')\n cylhmsa = sasmodels.core.load_model_info('cylinder@hayter_msa')\n\n # Build using c version instead of python. Avoids pyopencl\n model = sasmodels.core.build_model(cylhmsa, platform='dll')\n self.calculator = sasmodels.direct_model.DirectModel(dataSet, model)\n\n return", "def main():\n model = Calculator()", "def calculator(**pars):\n # paying for parameter conversion each time to keep life simple, if not fast\n pars = revert_pars(model_info, pars)\n for k, v in pars.items():\n parts = k.split('.') # polydispersity components\n if len(parts) == 2:\n model.dispersion[parts[0]][parts[1]] = v\n else:\n model.setParam(k, v)\n return theory()", "def eval_ctypes(model_info, data, dtype='double', cutoff=0.):\n if dtype == 'quad':\n dtype = 'longdouble'\n model = core.build_model(model_info, dtype=dtype, platform=\"dll\")\n calculator = DirectModel(data, model, cutoff=cutoff)\n calculator.engine = \"OMP%s\"%DTYPE_MAP[dtype]\n return calculator", "def make_engine(model_info, data, dtype, cutoff):\n if dtype == 'sasview':\n return eval_sasview(model_info, data)\n elif dtype.endswith('!'):\n return eval_ctypes(model_info, data, dtype=dtype[:-1], cutoff=cutoff)\n else:\n return eval_opencl(model_info, data, dtype=dtype, cutoff=cutoff)", "def run_calc(self):\n\n from openquake.calculators import base, getters\n from openquake.baselib import config, performance, zeromq\n if self.vtag >= 11:\n from openquake.baselib import version\n else:\n from openquake.baselib import __version__ as version\n\n with self.calculator._monitor:\n self.calculator._monitor.username = ''\n try:\n # Pre-execute setups\n self.calculator.pre_execute()\n\n #self.calculator.datastore.swmr_on()\n oq = self.calculator.oqparam\n dstore = self.calculator.datastore\n self.calculator.set_param()\n self.calculator.offset = 0\n\n # Source model\n #print('self.__dict__ = ')\n #print(self.calculator.__dict__)\n if oq.hazard_calculation_id: # from ruptures\n dstore.parent = self.calculator.datastore.read(\n oq.hazard_calculation_id)\n elif hasattr(self.calculator, 'csm'): # from sources\n self.calculator_build_events_from_sources()\n #self.calculator.build_events_from_sources()\n if (oq.ground_motion_fields is False and oq.hazard_curves_from_gmfs is False):\n return {}\n elif 'rupture_model' not in oq.inputs:\n logging.warning(\n 'There is no rupture_model, the calculator will just '\n 'import data without performing any calculation')\n fake = logictree.FullLogicTree.fake()\n dstore['full_lt'] = fake # needed to expose the outputs\n dstore['weights'] = [1.]\n return {}\n else: # scenario\n self.calculator._read_scenario_ruptures()\n if (oq.ground_motion_fields is False and oq.hazard_curves_from_gmfs is False):\n return {}\n\n # Intensity measure models\n if oq.ground_motion_fields:\n if self.vtag >= 12:\n imts = oq.get_primary_imtls()\n nrups = len(dstore['ruptures'])\n base.create_gmf_data(dstore, imts, oq.get_sec_imts())\n dstore.create_dset('gmf_data/sigma_epsilon',\n getters.sig_eps_dt(oq.imtls))\n dstore.create_dset('gmf_data/time_by_rup',\n getters.time_dt, (nrups,), fillvalue=None)\n elif self.vtag == 11:\n imts = oq.get_primary_imtls()\n nrups = len(dstore['ruptures'])\n base.create_gmf_data(dstore, len(imts), oq.get_sec_imts())\n dstore.create_dset('gmf_data/sigma_epsilon',\n getters.sig_eps_dt(oq.imtls))\n 
dstore.create_dset('gmf_data/time_by_rup',\n getters.time_dt, (nrups,), fillvalue=None)\n else:\n pass\n\n # Prepare inputs for GmfGetter\n nr = len(dstore['ruptures'])\n logging.info('Reading {:_d} ruptures'.format(nr))\n if self.vtag >= 12:\n rgetters = getters.get_rupture_getters(dstore, oq.concurrent_tasks * 1.25,\n srcfilter=self.calculator.srcfilter)\n elif self.vtag == 11:\n rgetters = getters.gen_rupture_getters(dstore, oq.concurrent_tasks)\n else:\n rgetters = getters.gen_rupture_getters(dstore, self.calculator.srcfilter, oq.concurrent_tasks)\n\n \n args = [(rgetter, self.calculator.param) for rgetter in rgetters]\n mon = performance.Monitor()\n mon.version = version\n mon.config = config\n rcvr = 'tcp://%s:%s' % (config.dbserver.listen,\n config.dbserver.receiver_ports)\n skt = zeromq.Socket(rcvr, zeromq.zmq.PULL, 'bind').__enter__()\n mon.backurl = 'tcp://%s:%s' % (config.dbserver.host, skt.port)\n mon = mon.new(\n operation='total ' + self.calculator.core_task.__func__.__name__, measuremem=True)\n mon.weight = getattr(args[0], 'weight', 1.) # used in task_info\n mon.task_no = 1 # initialize the task number\n args += (mon,)\n\n self.args = args\n self.mon = mon\n self.dstore = dstore\n\n finally:\n print('FetchOpenQuake: OpenQuake Hazard Calculator defined.')\n # parallel.Starmap.shutdown()", "def main():\n pycalcApp = QApplication(sys.argv)\n pycalcView = PyCalcUi()\n pycalcView.show()\n model = evaluateExpression\n PyCalcController(model=model, view=pycalcView)\n sys.exit(pycalcApp.exec())", "def with_cpu(ops, model):\n ...", "def cmd_calculation():", "def get_model_code():\n\n return \"\"\"\n functions {\n matrix cov_matrix_ard(int N, int D, vector[] x, vector ls, real alpha_sq, int cov_id) {\n matrix[N,N] S;\n real dist_sum;\n real sqrt3;\n real sqrt5;\n sqrt3=sqrt(3.0);\n sqrt5=sqrt(5.0);\n\n // For RBF ARD kernel\n if (cov_id == 1) {\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n dist_sum = 0;\n for(d in 1:D) {\n dist_sum = dist_sum + square(x[i][d] - x[j][d]) / square(ls[d]);\n }\n S[i,j] = alpha_sq * exp( -0.5 * dist_sum);\n }\n }\n }\n\n // Fill upper triangle\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n S[j,i] = S[i,j];\n }\n }\n\n // Create diagonal\n for(i in 1:N) {\n S[i,i] = alpha_sq;\n }\n\n return S;\n }\n\n matrix distance_matrix_on_vectors(int N, vector[] x) {\n matrix[N, N] distmat;\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n distmat[i, j] = square(distance(x[i], x[j]));\n }\n }\n return distmat;\n }\n\n matrix cov_matrix_matern(int N, matrix dist, real ls, real alpha_sq, int cov_id) {\n matrix[N,N] S;\n real dist_ls;\n real sqrt3;\n real sqrt5;\n sqrt3=sqrt(3.0);\n sqrt5=sqrt(5.0);\n\n // For Matern kernel with parameter nu=1/2 (i.e. absolute exponential kernel)\n if (cov_id == 2) {\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n dist_ls = fabs(dist[i,j])/square(ls);\n S[i,j] = alpha_sq * exp(-1 * dist_ls);\n }\n }\n }\n\n // For Matern kernel with parameter nu=3/2\n else if (cov_id == 3) {\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n dist_ls = fabs(dist[i,j])/ls;\n S[i,j] = alpha_sq * (1 + sqrt3 * dist_ls) * exp(-sqrt3 * dist_ls);\n }\n }\n }\n\n // For Matern kernel with parameter nu=5/2\n else if (cov_id == 4) {\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n dist_ls = fabs(dist[i,j])/ls;\n S[i,j] = alpha_sq * (1 + sqrt5 * dist_ls + 5 * pow(dist_ls,2)/3) * exp(-sqrt5 * dist_ls);\n }\n }\n }\n\n // For Matern kernel with parameter nu tending to infinity (i.e. 
RBF kernel)\n else if (cov_id == 1) {\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n dist_ls = fabs(dist[i,j])/ls;\n S[i,j] = alpha_sq * exp( -0.5 * pow(dist_ls, 2) );\n }\n }\n }\n\n // Fill upper triangle\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n S[j,i] = S[i,j];\n }\n }\n\n // Create diagonal\n for(i in 1:N) {\n S[i,i] = alpha_sq;\n }\n\n return S;\n }\n\n }\n\n data {\n int<lower=1> D;\n int<lower=1> N;\n vector[D] x[N];\n vector[N] y;\n real<lower=0> ig1;\n real<lower=0> ig2;\n real<lower=0> n1;\n real<lower=0> n2;\n real<lower=0> sigma;\n int kernel_id;\n }\n\n parameters {\n real<lower=0> rho;\n vector<lower=0>[D] rhovec;\n real<lower=0> alpha;\n }\n\n model {\n int cov_id;\n matrix[N, N] cov;\n matrix[N, N] L_cov;\n matrix[N, N] distmat;\n\n // RBF kernel single lengthscale\n if (kernel_id == 1) {\n cov = cov_exp_quad(x, alpha, rho) + diag_matrix(rep_vector(square(sigma), N));\n L_cov = cholesky_decompose(cov);\n rho ~ inv_gamma(ig1, ig2);\n alpha ~ normal(n1, n2);\n y ~ multi_normal_cholesky(rep_vector(0, N), L_cov);\n }\n\n // Matern kernel single lengthscale\n else if (kernel_id >= 2 && kernel_id <= 4) {\n if (kernel_id == 2) { cov_id = 2; }\n if (kernel_id == 3) { cov_id = 3; }\n if (kernel_id == 4) { cov_id = 4; }\n\n distmat = distance_matrix_on_vectors(N, x);\n cov = cov_matrix_matern(N, distmat, rho, square(alpha), cov_id) + diag_matrix(rep_vector(square(sigma), N));\n L_cov = cholesky_decompose(cov);\n rho ~ inv_gamma(ig1, ig2);\n alpha ~ normal(n1, n2);\n y ~ multi_normal_cholesky(rep_vector(0, N), L_cov);\n }\n\n // RBF kernel with ARD (D-dimensional) lengthscale\n else if (kernel_id == 5) {\n cov_id = 1;\n cov = cov_matrix_ard(N, D, x, rhovec, square(alpha), cov_id) + diag_matrix(rep_vector(square(sigma), N));\n L_cov = cholesky_decompose(cov);\n for(d in 1:D) {\n rhovec[d] ~ inv_gamma(ig1, ig2);\n }\n alpha ~ normal(n1, n2);\n y ~ multi_normal_cholesky(rep_vector(0, N), L_cov);\n }\n\n }\n \"\"\"", "def run(self):\n return self.opencl_kernel(*self.arg_list)", "def init_opencl(self):\n\n # Startup script shamelessly taken from CS205 homework\n\n if self.context is None:\n platforms = cl.get_platforms()\n print 'The platforms detected are:'\n print '---------------------------'\n for platform in platforms:\n print platform.name, platform.vendor, 'version:', platform.version\n\n # List devices in each platform\n for platform in platforms:\n print 'The devices detected on platform', platform.name, 'are:'\n print '---------------------------'\n for device in platform.get_devices():\n print device.name, '[Type:', cl.device_type.to_string(device.type), ']'\n print 'Maximum clock Frequency:', device.max_clock_frequency, 'MHz'\n print 'Maximum allocable memory size:', int(device.max_mem_alloc_size / 1e6), 'MB'\n print 'Maximum work group size', device.max_work_group_size\n print 'Maximum work item dimensions', device.max_work_item_dimensions\n print 'Maximum work item size', device.max_work_item_sizes\n print '---------------------------'\n\n # Create a context with all the devices\n devices = platforms[0].get_devices()\n if not self.use_interop:\n self.context = cl.Context(devices)\n else:\n self.context = cl.Context(properties=[(cl.context_properties.PLATFORM, platforms[0])]\n + cl.tools.get_gl_sharing_context_properties(),\n devices= devices)\n print 'This context is associated with ', len(self.context.devices), 'devices'\n\n # Create a simple queue\n self.queue = cl.CommandQueue(self.context, self.context.devices[0],\n 
properties=cl.command_queue_properties.PROFILING_ENABLE)\n\n # Compile our OpenCL code...render MAKO first.\n lookup = mlo.TemplateLookup(directories=[file_dir])\n template = lookup.get_template('colony_growth.mako')\n template.strict_undefined = True\n\n buf = sio.StringIO()\n mako_context = mrt.Context(buf, **self.ctx_info)\n try:\n template.render_context(mako_context)\n except:\n with open('mako_exception.html', 'w') as fi:\n fi.write(mako.exceptions.html_error_template().render())\n assert False, 'Mako rendering failed...quitting...'\n\n with open('temp_kernels_DLA_colony.cl', 'w') as fi:\n fi.write(buf.getvalue())\n\n self.kernels = cl.Program(self.context, buf.getvalue()).build(options='')", "def create_model(self, input_shape, num_actions, mode, args, model_name='q_network'):\n assert (mode in (\"linear\", \"duel\", \"dqn\"))\n with tf.variable_scope(model_name):\n input_data = Input(shape=input_shape, name=\"input\")\n if mode == \"linear\":\n # #version 4 elu:\n # flatten_hidden = Flatten(name=\"flatten\")(input_data)\n # FC_1 = Dense(512, activation='elu', name='FC1-elu')(flatten_hidden)\n # FC_2 = Dense(512, activation='elu', name='FC2-elu')(FC_1)\n # FC_3 = Dense(512, activation='elu', name='FC3-elu')(FC_2)\n # FC_4 = Dense(512, activation='elu', name='FC4-elu')(FC_3)\n # output = Dense(num_actions, activation='elu', name=\"output\")(FC_4)\n\n #version 4 elu:\n flatten_hidden = Flatten(name=\"flatten\")(input_data)\n FC_1 = Dense(1024, activation='elu', name='FC1-elu')(flatten_hidden)\n FC_2 = Dense(1024, activation='elu', name='FC2-elu')(FC_1)\n FC_3 = Dense(1024, activation='elu', name='FC3-elu')(FC_2)\n FC_4 = Dense(512, activation='elu', name='FC4-elu')(FC_3)\n output = Dense(num_actions, activation='elu', name=\"output\")(FC_4)\n\n else:\n if not (args.recurrent):\n # # # version 1:\n # h1 = Convolution2D(32, (8, 8), strides=4, activation=\"relu\", name=\"conv1\")(input_data)\n # h2 = Convolution2D(64, (4, 4), strides=2, activation=\"relu\", name=\"conv2\")(h1)\n # h3 = Convolution2D(64, (3, 3), strides=1, activation=\"relu\", name=\"conv3\")(h2)\n # context = Flatten(name=\"flatten\")(h3)\n\n # # version 2:\n # conv1 = Convolution2D(1, (5, 5), strides=1, activation=\"elu\", name=\"conv1\")(input_data)\n # flatten = Flatten(name=\"flatten\")(conv1)\n # FC_2 = Dense(512, activation='elu', name='FC2-elu')(flatten)\n # context = Dense(512, activation='elu', name='FC4-elu')(FC_2)\n\n # version 3:\n conv1 = Convolution2D(32, (2, 2), strides=1, activation=\"relu\", name=\"conv1\")(input_data)\n flatten = Flatten(name=\"flatten\")(conv1)\n FC_2 = Dense(128, activation='relu', name='FC2-relu')(flatten)\n FC_3 = Dense(128, activation='relu', name='FC3-relu')(FC_2)\n context = Dense(128, activation='elu', name='FC4-elu')(FC_3)\n\n\n\n # else:\n # print('>>>> Defining Recurrent Modules...')\n # input_data_expanded = Reshape((input_shape[0], input_shape[1], input_shape[2], 1),\n # input_shape=input_shape)(input_data)\n # input_data_TimeDistributed = Permute((3, 1, 2, 4), input_shape=input_shape)(input_data_expanded)\n # h1 = TimeDistributed(Convolution2D(32, (8, 8), strides=4, activation=\"relu\", name=\"conv1\"), \\\n # input_shape=(args.num_frames, input_shape[0], input_shape[1], 1))(\n # input_data_TimeDistributed)\n # h2 = TimeDistributed(Convolution2D(64, (4, 4), strides=2, activation=\"relu\", name=\"conv2\"))(h1)\n # h3 = TimeDistributed(Convolution2D(64, (2, 2), strides=1, activation=\"relu\", name=\"conv3\"))(h2)\n # flatten_hidden = TimeDistributed(Flatten())(h3)\n # 
hidden_input = TimeDistributed(Dense(512, activation='relu', name='flat_to_512'))(flatten_hidden)\n # if not (args.a_t):\n # context = LSTM(512, return_sequences=False, stateful=False, input_shape=(args.num_frames, 512))(\n # hidden_input)\n # else:\n # if args.bidir:\n # hidden_input = Bidirectional(\n # LSTM(512, return_sequences=True, stateful=False, input_shape=(args.num_frames, 512)),\n # merge_mode='sum')(hidden_input)\n # all_outs = Bidirectional(\n # LSTM(512, return_sequences=True, stateful=False, input_shape=(args.num_frames, 512)),\n # merge_mode='sum')(hidden_input)\n # else:\n # all_outs = LSTM(512, return_sequences=True, stateful=False,\n # input_shape=(args.num_frames, 512))(hidden_input)\n # # attention\n # attention = TimeDistributed(Dense(1, activation='tanh'))(all_outs)\n # # print(attention.shape)\n # attention = Flatten()(attention)\n # attention = Activation('softmax')(attention)\n # attention = RepeatVector(512)(attention)\n # attention = Permute([2, 1])(attention)\n # sent_representation = merge([all_outs, attention], mode='mul')\n # context = Lambda(lambda xin: K.sum(xin, axis=-2), output_shape=(512,))(sent_representation)\n # # print(context.shape)\n\n if mode == \"dqn\":\n h4 = Dense(512, activation='elu', name=\"fc\")(context)\n output = Dense(num_actions, name=\"output\")(h4)\n # elif mode == \"duel\":\n # value_hidden = Dense(512, activation='relu', name='value_fc')(context)\n # value = Dense(1, name=\"value\")(value_hidden)\n # action_hidden = Dense(512, activation='relu', name='action_fc')(context)\n # action = Dense(num_actions, name=\"action\")(action_hidden)\n # action_mean = Lambda(lambda x: tf.reduce_mean(x, axis=1, keep_dims=True), name='action_mean')(\n # action)\n # output = Lambda(lambda x: x[0] + x[1] - x[2], name='output')([action, value, action_mean])\n model = Model(inputs=input_data, outputs=output)\n print(model.summary())\n return model", "def _get_model_equation(self):\n\n res = self._model.fit()\n\n explanatory_variables = []\n for variable in self._model.exog_names:\n if variable is 'Intercept':\n explanatory_variables.append(self._FLOAT_STRING_FORMAT.format(res.params[variable]))\n else:\n explanatory_variables.append(self._FLOAT_STRING_FORMAT.format(res.params[variable]) + variable)\n\n response_variable = self._model.endog_names\n\n # TODO: Correct formula format for negative coefficients (minus)\n\n model_equation = response_variable + ' = ' + ' + '.join(explanatory_variables)\n\n return SimpleTable(data=[[model_equation]], headers=['Linear regression model:'])", "def eval_model(args):\n cfg, lbl = util.get_label_cfg_by_args(args)\n uid = cfg['uniqueid']\n print('We are playing with %s' % uid)\n outdir='models/%s/gate_expert' % uid\n outname='gate_expert_model.pt'\n if KLLOSS:\n outname = 'gate_expert_kldiv_model.pt'\n if args.warm:\n outname = outname.replace('.pt', '_warm.pt')\n mdl_path = os.path.join(outdir, outname)\n gate_expert = GateExpertNet(mdl_path, args.argmax)\n eval_fun = gate_expert.get_y\n\n data = npload(cfg['file_path'], uid)\n datax = data[cfg['x_name']]\n datay = data[cfg['y_name']]\n evaly = eval_fun(datax)\n print(np.histogram(evaly[:, 48]))\n fig, ax = pld.get3dAxis()\n ax.scatter(datax[:, 0], datax[:, 1], evaly[:, 48])\n loss = l1loss(evaly, datay)\n err_norm = np.mean(loss, axis=1)\n fig, ax = plt.subplots()\n ax.hist(err_norm)\n plt.show()", "def evaluate(self, definition):\n self.checkModelOpen()\n calcEngine = CalcEngine.factory(self.client_session)\n return calcEngine.evaluate(definition)", "def 
create_eval(model, metrics, device):\n metrics = metrics or {}\n\n if device:\n model.to(device)\n\n def _inference(engine, batch):\n model.eval()\n with torch.no_grad():\n data, label = batch\n num_channels = 1 if len(data.shape) == 2 else data.shape[1]\n data = data.view(-1, 1, data.shape[-1])\n data = data.to(device)\n label = label.to(device)\n label = label.float()\n\n output = model(data)\n output = output.view(-1, num_channels, output.shape[-1])\n\n return output, label\n\n engine = Engine(_inference)\n\n for name, metric in metrics.items():\n metric.attach(engine, name)\n\n return engine", "def _model(self):\n common_scale = self.edp_par['common_scale'].value\n model = self.F_trans() * self.F_cont()\n # get F(h=1,k=0), which is used for normalization \n # common_scale is a common scaling factor => F(h=1,k=0) = 100*common_scale\n F_10 = model[(self.h==1)&(self.k==0)]\n model = model / np.absolute(F_10) * 100 * common_scale\n return model", "def dynamic_model(self, input_val: float) -> float:\n pass", "async def _calc(self, ctx, *, m):\r\n m = \"\".join(m)\r\n math_filter = re.findall(\r\n r\"[\\[\\]\\-()*+/0-9=.,% ]|>|<|==|>=|<=|\\||&|~|!=|^|sum\"\r\n + \"|range|random|randint|choice|randrange|True|False|if|and|or|else\"\r\n + \"|is|not|for|in|acos|acosh|asin|asinh|atan|atan2|atanh|ceil\"\r\n + \"|copysign|cos|cosh|degrees|e|erf|erfc|exp|expm1|fabs|factorial\"\r\n + \"|floor|fmod|frexp|fsum|gamma|gcd|hypot|inf|isclose|isfinite\"\r\n + \"|isinf|isnan|ldexp|lgamma|log|log10|log1p|log2|modf|nan|pi\"\r\n + \"|pow|radians|sin|sinh|sqrt|tan|tanh|round\",\r\n m,\r\n )\r\n calculate_stuff = eval(\"\".join(math_filter))\r\n if len(str(calculate_stuff)) > 0:\r\n em = await Embed.create(\r\n ctx,\r\n title=\"CollectorDevTeam Calculator\",\r\n thumbnail=self.thumbnail,\r\n description=\"**Input**\\n`{}`\\n\\n**Result**\\n`{}`\".format(m, calculate_stuff),\r\n )\r\n em.add_field(name=\"Type Math\", value=\"Get Fun\")\r\n await ctx.send(embed=em)", "def cmd_calc(self, event, command, usercommand):\n try:\n result = str(self.parser.eval(usercommand.arguments))\n response = '*** Calc: {}'.format(escape(result))\n except:\n fail = '*** Could not evaluate expression.'\n\n if self.wolfram:\n try:\n res = self.wolfram.query(usercommand.arguments)\n if len(res.pods) > 1:\n answer = res.pods[1].text\n\n # fix unicode\n answer = answer.encode('unicode-escape')\n answer = answer.replace(b'\\\\\\\\:', b'\\u')\n answer = answer.decode('unicode-escape')\n\n response = '*** W|A: {}'.format(escape(answer))\n else:\n response = fail\n except Exception as ex:\n if 'Computation error' in str(ex):\n response = fail\n else:\n print('exception:', ex)\n response = '*** Sorry, we ran into a problem. 
Please try again later'\n else:\n response = fail\n\n event['from_to'].msg(response)", "def evaluate_model():\n\n print '\\n\\tevaluate result'\n os.system('./conlleval.pl -d \\'\\t\\' < ' + encoded_test + ' >> ' + result_file)\n print '\\t--done\\n'", "def _compute_(self):\n dic = \"data/sim/{dn}/{rad}/\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), rad=self.rad)\n fbgc = \"data/sim/{dn}/{rad}/exp.bgc.bm({bm}).elv(<elv>).csv\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), \n rad=self.rad, bm=self.bmnum)\n fflare = \"data/sim/{dn}/{rad}/exp.flare.bm({bm}).elv(<elv>).csv\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"),\n rad=self.rad, bm=self.bmnum)\n cmd = \"export DIR_MODELS_REF_DAT=/home/shibaji/Collaboration_NCAR/code_rt_sd/pharlap/pharlap_4.1.3/dat;\\\n cd pharlap;\\\n matlab -nodisplay -nodesktop -nosplash -nojvm -r \\\"UT=[{ut}];rad='{rad}';dic='{dic}';fbgc='{fbgc}';bm={bm};\\\n fflare='{fflare}';rt_1D_sim;exit;\\\"\".format(ut=self.event.strftime(\"%Y %m %d %H %S\"), rad=self.rad,\n dic=dic, bm=self.bmnum, fbgc=fbgc, fflare=fflare)\n os.system(cmd)\n return", "def operands(app):\n return cdr(app)", "def create_engine(model, optimizer, loss, device):\n model.to(device)\n\n def _update(engine, batch):\n data, label = batch\n num_channels = 1 if len(data.shape) == 2 else data.shape[1]\n data = data.view(-1, 1, data.shape[-1])\n data = data.to(device)\n label = label.to(device)\n label = label.float()\n\n model.train()\n model.zero_grad()\n optimizer.zero_grad()\n\n output = model(data)\n output = output.view(-1, num_channels, output.shape[-1])\n _loss = loss(output, label)\n _loss.backward()\n optimizer.step()\n\n return _loss.item()\n\n return Engine(_update)", "def get_operator(self):\n distribution = self.get_distribution_operator()\n temp = self.get_unit_conversion_operator()\n aperture = self.get_aperture_integration_operator()\n filter = self.get_filter_operator()\n projection = self.get_projection_operator()\n hwp = self.get_hwp_operator()\n polarizer = self.get_polarizer_operator()\n integ = self.get_detector_integration_operator()\n trans_inst = self.instrument.get_transmission_operator()\n trans_atm = self.scene.atmosphere.transmission\n response = self.get_detector_response_operator()\n\n with rule_manager(inplace=True):\n H = CompositionOperator([\n response, trans_inst, integ, polarizer, hwp * projection,\n filter, aperture, trans_atm, temp, distribution])\n if self.scene == 'QU':\n H = self.get_subtract_grid_operator()(H)\n return H", "def operator(app):\n return car(app)", "def calculator(operation): \n \n operation = MATH[operation]\n a = int(request.args.get(\"a\"))\n b = int(request.args.get(\"b\"))\n total = operation(a, b)\n\n return f\"<h1>TOTAL: {total}</h1>\"", "def _get_model(self, fl_ctx: FLContext):\n if isinstance(self.model, str):\n # treat it as model component ID\n model_component_id = self.model\n engine = fl_ctx.get_engine()\n self.model = engine.get_component(model_component_id)\n if not self.model:\n self.log_error(fl_ctx, f\"cannot find model component '{model_component_id}'\")\n return\n if self.model and isinstance(self.model, dict):\n # try building the model\n try:\n engine = fl_ctx.get_engine()\n # use provided or default optimizer arguments and add the model parameters\n if \"args\" not in self.model:\n self.model[\"args\"] = {}\n self.model = engine.build_component(self.model)\n except Exception as e:\n self.system_panic(\n f\"Exception while parsing `model`: \" f\"{self.model} with Exception {e}\",\n fl_ctx,\n )\n return\n if 
self.model and not isinstance(self.model, torch.nn.Module):\n self.system_panic(fl_ctx, f\"expect model to be torch.nn.Module but got {type(self.model)}: {self.model}\")\n return\n if self.model is None:\n self.system_panic(fl_ctx, f\"Model wasn't built correctly! It is {self.model}\")\n return\n self.log_info(fl_ctx, f\"Running model {self.model}\")", "def cls(\n workspace,\n output_file,\n measurement,\n patch,\n testpoi,\n teststat,\n backend,\n optimizer,\n optconf,\n):\n with click.open_file(workspace, 'r') as specstream:\n spec = json.load(specstream)\n\n ws = Workspace(spec)\n\n is_qtilde = teststat == 'qtilde'\n\n patches = [json.loads(click.open_file(pfile, 'r').read()) for pfile in patch]\n model = ws.model(\n measurement_name=measurement,\n patches=patches,\n modifier_settings={\n 'normsys': {'interpcode': 'code4'},\n 'histosys': {'interpcode': 'code4p'},\n },\n )\n\n # set the backend if not NumPy\n if backend in ['pytorch', 'torch']:\n set_backend(tensor.pytorch_backend(precision='64b'))\n elif backend in ['tensorflow', 'tf']:\n set_backend(tensor.tensorflow_backend(precision='64b'))\n elif backend in ['jax']:\n set_backend(tensor.jax_backend())\n tensorlib, _ = get_backend()\n\n optconf = {k: v for item in optconf for k, v in item.items()}\n\n # set the new optimizer\n if optimizer:\n new_optimizer = getattr(optimize, optimizer) or getattr(\n optimize, f'{optimizer}_optimizer'\n )\n set_backend(tensorlib, new_optimizer(**optconf))\n\n result = hypotest(\n testpoi, ws.data(model), model, qtilde=is_qtilde, return_expected_set=True\n )\n result = {\n 'CLs_obs': tensorlib.tolist(result[0])[0],\n 'CLs_exp': tensorlib.tolist(tensorlib.reshape(result[-1], [-1])),\n }\n\n if output_file is None:\n click.echo(json.dumps(result, indent=4, sort_keys=True))\n else:\n with open(output_file, 'w+') as out_file:\n json.dump(result, out_file, indent=4, sort_keys=True)\n log.debug(\"Written to {0:s}\".format(output_file))" ]
[ "0.70291424", "0.70197284", "0.6265564", "0.5914659", "0.5827897", "0.57237893", "0.57056856", "0.55929047", "0.55883527", "0.5585512", "0.5575414", "0.55035704", "0.54941154", "0.5486263", "0.5481206", "0.5464576", "0.5423178", "0.53772604", "0.53308785", "0.5314042", "0.5310777", "0.52998984", "0.5299071", "0.5293714", "0.5271887", "0.5265874", "0.52230453", "0.5204014", "0.5201566", "0.51977146" ]
0.7391274
0
Generate an empty dataset, used with the model to set Q points and resolution. opts contains the options, with 'qmax', 'nq', 'res', 'accuracy', 'is2d' and 'view' parsed from the command line.
def make_data(opts): qmax, nq, res = opts['qmax'], opts['nq'], opts['res'] if opts['is2d']: data = empty_data2D(np.linspace(-qmax, qmax, nq), resolution=res) data.accuracy = opts['accuracy'] set_beam_stop(data, 0.0004) index = ~data.mask else: if opts['view'] == 'log' and not opts['zero']: qmax = math.log10(qmax) q = np.logspace(qmax-3, qmax, nq) else: q = np.linspace(0.001*qmax, qmax, nq) if opts['zero']: q = np.hstack((0, q)) data = empty_data1D(q, resolution=res) index = slice(None, None) return data, index
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dataset(options):\n pass", "def get_dataset(opts):\n dataset_type = opts.dataset_params.dataset_type\n if dataset_type in 'synth':\n return synthgraph.SynthGraphDataset(opts, opts.dataset_params)\n elif dataset_type in 'synthnoise':\n return synthgraph.SynthNoiseGraphDataset(opts, opts.dataset_params)\n elif dataset_type in 'synthoutlier':\n return synthgraph.SynthOutlierGraphDataset(opts, opts.dataset_params)\n elif dataset_type in 'rome16kgeom':\n return spreal.GeomKNNRome16KDataset(opts, opts.dataset_params)\n elif dataset_type in 'graffiti':\n return graffiti.GraffitiDataset(opts, opts.dataset_params)\n else:\n print(\"ERROR: Dataset type {} not implemented yet\".format(dataset_type))\n sys.exit(1)", "def get_default_kwargs(data_dir, opts, model_opts):\n dataset_agnostic_data_kwargs = dict(\n seed=opts['data_random_seed'], \n use_attr=opts['use_attr']\n )\n dataset_kwargs = dict(\n name=model_opts['name'],\n attr0_name=model_opts['attr0_name'],\n attr1_name=model_opts['attr1_name'],\n npzfile=os.path.join(data_dir, model_opts['npzfiles'][opts['dm_type']]),\n use_attr=model_opts['use_attr']\n )\n \n model_kwargs = dict(\n seed=opts['model_random_seed'], \n pass_coeff=opts['pass_coeff'],\n fair_coeff=opts['fair_coeff'],\n adim=1,\n ydim=1,\n xdim=model_opts['xdim'] + (1 if model_opts['use_attr'] else 0),\n hidden_layer_specs=model_opts['hidden_layer_specs']\n )\n\n return {**dataset_agnostic_data_kwargs, **dataset_kwargs}, model_kwargs", "def _generate_datasets(self):\n\n degrade_test = False\n if self._opts['degrade_step'] == 'test':\n degrade_test = True\n\n use_trainset_for_tests = UseTrainForTest.IDENTICAL # can be different in few shot workflow\n\n train_dataset, test_dataset = self._gen_datasets_with_options(self._opts['train_classes'],\n self._opts['test_classes'],\n is_superclass=self._opts['superclass'],\n class_proportion=self._opts['class_proportion'],\n degrade_test=degrade_test,\n degrade_type=self._opts['degrade_type'], # only relevant if degrade_test = True\n degrade_val=self._opts['min_val'], # only relevant if degrade_test = True\n recurse_train=self._is_train_recursive(),\n recurse_test=self._is_inference_recursive(),\n num_batch_repeats=self._opts['num_repeats'],\n recurse_iterations=self._opts['recurse_iterations'],\n evaluate_step=self._opts['evaluate'],\n use_trainset_for_tests=use_trainset_for_tests,\n invert_images=self._opts['invert_images'],\n min_val=self._opts['min_val'])\n return train_dataset, test_dataset", "def get_dataset(args):\n\n if args['experiment']['dataset'] == Dataset.mindsets:\n xs, ys, cs = make_mindsets(mindset_sizes=args['dataset']['mindset_sizes'],\n nb_questions=args['dataset']['nb_questions'],\n nb_useless=args['dataset']['nb_useless'],\n noise=args['dataset']['noise'],\n seed=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys, cs=cs)\n\n if args['experiment']['dataset'] == Dataset.questionnaire_likert:\n xs, ys, cs = make_likert_questionnaire(nb_samples=args['dataset']['nb_samples'],\n nb_features=args['dataset']['nb_features'],\n nb_mindsets=args['dataset']['nb_mindsets'],\n centers=args['dataset']['centers'],\n range_answers=args['dataset']['range_answers'],\n seed=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys, cs=cs)\n\n if args['experiment']['dataset'] == Dataset.retinal:\n xs, ys = load_RETINAL(root_path=args['root_dir'],\n nb_bins=args['dataset']['nb_bins'],\n max_idx=args['dataset']['max_idx'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.moons:\n xs, ys = 
make_moons(n_samples=args['dataset']['n_samples'],\n noise=args['dataset']['noise'],\n random_state=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.breast_cancer_wisconsin:\n xs, ys = load_CANCER(args['dataset']['nb_bins'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.SBM:\n A, ys, G = load_SBM(block_sizes=args['dataset']['block_sizes'],\n p_in=args['dataset']['p'],\n p_out=args['dataset']['q'],\n seed=args['experiment']['seed'])\n\n return Data(ys=ys, A=A, G=G)\n\n if args['experiment']['dataset'] == Dataset.gaussian_mixture:\n xs, ys = make_blobs(n_samples=args['dataset']['blob_sizes'],\n centers=args['dataset']['blob_centers'],\n n_features=args['dataset']['blob_centers'],\n cluster_std=args['dataset']['blob_variances'],\n random_state=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.LFR:\n A, ys, G = load_LFR(nb_nodes=args['dataset']['nb_nodes'],\n tau1=args['dataset']['tau1'],\n tau2=args['dataset']['tau2'],\n mu=args['dataset']['mu'],\n average_degree=args['dataset']['average_degree'],\n min_community=args['dataset']['min_community'],\n seed=args['experiment']['seed'])\n\n return Data(ys=ys, A=A, G=G)\n\n if args['experiment']['dataset'] == Dataset.wave:\n df = pd.read_csv('datasets/waveform.csv')\n xs = df[df.columns[:-1]].to_numpy()\n ys = df[df.columns[-1]].to_numpy()\n\n return Data(xs=xs, ys=ys)\n\n raise ValueError('Wrong name for a dataset')", "def create_dataset(opt):\n\tdata_loader = CustomDatasetDataLoader(opt)\n\tdataset = data_loader.load_data()\n\treturn dataset", "def __init__(self, opts = None):\n if opts is not None:\n self.opts = opts\n else:\n self.opts = Options()\n \n # Initialize dictionary of features\n self.init_features()", "def create_dataset(opt):\n data_loader = CustomDatasetDataLoader(opt)\n dataset = data_loader.load_data()\n return dataset", "def create_dataset(dataset_type, soruce, opts): \n\n p = PreProcessor(dataset_type, opts)\n\n # If we are NOT running \"implementation.py\", we read the data from file\n if dataset_type == \"train\" or dataset_type == \"dev\" or dataset_type == \"test\":\n path_to_data = soruce\n p.read_labelled_data(path_to_data) \n # Otherwise, we read the sentence that \"implementation.py\" gave us\n elif dataset_type == \"submit\":\n submission_sentence = soruce\n p.read_test_data(submission_sentence)\n\n # Encode all the data to a list of torchTensors\n encoded_tokens, encoded_pred, encoded_tokens_pos, encoded_labels = p.encode_all_data()\n # Create SRL dataset\n dataset = SRLDataset(x=encoded_tokens, pr=encoded_pred, p=encoded_tokens_pos, y=encoded_labels)\n print(\"{} dataset size is {}\".format(dataset_type, len(dataset)))\n\n if dataset_type == \"train\" or dataset_type == \"dev\" or dataset_type == \"test\":\n return dataset\n elif dataset_type == \"submit\":\n return dataset, p.list_l_original_predicates", "def make_dataset():\n\n\tnumberOfTrials = dataset_params.num_of_samples\n\tnumberOfTrials_train = int(numberOfTrials*0.8)\n\tnumberOfTrials_test = int(numberOfTrials*0.2)\n\n\tprint(\"==================================================\")\n\tprint(\"1. Generating Train images ......\")\n\tprint(\"\\nTrain image per variation\", numberOfTrials_train)\n\tmakeDataset(numberOfTrials_train, \"train\")\n\n\tprint(\"==================================================\")\n\tprint(\"2. 
Generating Test images ......\")\n\tprint(\"\\nTest image per variation\", numberOfTrials_test)\n\tmakeDataset(numberOfTrials_test, \"test\")\n\n\tprint(\"==================================================\")\n\tprint(\"Done!!!\")", "def generate_synthetic_dataset(args):\n logger = logging.getLogger(\"GACM\")\n logger.info('Checking the data files...')\n for data_path in args.train_dirs + args.dev_dirs + args.test_dirs:\n assert os.path.exists(data_path), '{} file does not exist.'.format(data_path)\n assert len(args.test_dirs) > 0, 'No test files are provided.'\n dataset = Dataset(args, train_dirs=args.train_dirs, dev_dirs=args.dev_dirs, test_dirs=args.test_dirs)\n logger.info('Initialize the model...')\n model = Agent(args, len(dataset.qid_query), len(dataset.uid_url), len(dataset.vid_vtype))\n logger.info('model.global_step: {}'.format(model.global_step))\n assert args.load_model > -1\n logger.info('Restoring the model...')\n model.load_model(model_dir=args.load_dir, model_prefix=args.algo, global_step=args.load_model, load_optimizer=False)\n\n synthetic_types = ['deterministic', 'stochastic']\n shuffle_splits = [None, [1, 11], [1, 6, 11]]\n amplifications = [1, 7]\n for synthetic_type in synthetic_types:\n for shuffle_split in shuffle_splits:\n for amplification in amplifications:\n #synthetic_type = 'deterministic'\n #shuffle_split = None\n #amplification = 1\n file_path = os.path.join(args.load_dir, '..', 'synthetic')\n model.generate_synthetic_dataset('test', dataset, file_path, \n 'synthetic_{}_{}_{}.txt'.format(synthetic_type[0].upper(), str(shuffle_split), amplification), \n synthetic_type=synthetic_type, shuffle_split=shuffle_split, amplification=amplification)\n # exit()\n logger.info('Done with click sequence generation.')", "def mkdataset(destdir, totalsize, filecount=10, plan=None):\n if not plan:\n plan = {\n 'files': [{\n 'totalsize': totalsize,\n 'totalfiles': filecount,\n 'type': 'uniform'\n }]\n }\n plan.update({\n 'totalsize': totalsize,\n 'totalfiles': filecount\n })\n DatasetMaker(destdir, plan).fill()", "def __init__(self):\n self.args = self._prepare_args(locals())\n self.requires_full_dataset_in_memory = False", "def main(**opts: tp.Any) -> None:", "def __init__(self, opt):\n BaseDataset.__init__(self, opt)\n self.dir_images = os.path.join(opt.dataroot, opt.phase) # get the image directory\n self.image_paths = sorted(make_dataset(self.dir_images, opt.max_dataset_size)) # get image paths\n assert(self.opt.load_size >= self.opt.crop_size) # crop_size should be smaller than the size of loaded image\n self.input_nc = self.opt.output_nc\n self.output_nc = self.opt.input_nc", "def make_travel_dataset():\n util.make_dataset_dmoz(\n n_items=600,\n seed=0,\n category_parent='Top/Recreation/Travel',\n rawdir=os.environ['DMOZ_TRAVEL_RAW'],\n webdir=os.environ['DMOZ_TRAVEL_WEB'],\n )", "def generateDataset(self):\n if self.outdir[-1] != \"/\": \n self.outdir += \"/\"\n self.outdir += \"dataset_trackml\"\n i = 1\n while os.path.exists(self.outdir):\n self.outdir.replace(\"_\"+str(i-1), \"\")\n self.outdir += (\"_\"+str(i))\n i += 1\n cmd = \"mkdir -p \"+ self.outdir\n os.system(cmd)\n\n cont = pc.particleController()\n cont.generateEvents(self.numevents, self.hpe, self.detectors)\n\n self.generateHits(cont)\n self.generateTruths(cont)\n self.generateSolution(cont)", "def __init__(self, opt):\n BaseDataset.__init__(self, opt)\n\n self.data_folder = os.path.join(opt.dataroot, opt.phase)\n self.image_file_names = sorted(os.listdir(self.data_folder))\n self.batch_size = 
opt.batch_size\n self.z_dim = opt.z_dim\n self.imsize = opt.crop_size\n\n self.transform = self.get_transform(True, True, True, opt.center_crop)", "def build(opt):\n version = '1.1'\n dpath = os.path.join(opt['datapath'], 'ner')\n\n # check if data had been previously built\n raw_path = os.path.abspath(opt['raw_dataset_path'] or \".\")\n if len([f for f in os.listdir(raw_path) if f.endswith(\".iob\")]) == 0:\n if not build_data.built(dpath, version_string=version):\n print('[target data path: ' + dpath + ']')\n # make a clean directory if needed\n if build_data.built(dpath):\n # an older version exists, so remove these outdated files.\n build_data.remove_dir(dpath)\n build_data.make_dir(dpath)\n\n ds_path = os.environ.get('DATASETS_URL')\n file_name = 'gareev.tar.gz'\n if not ds_path:\n raise RuntimeError(\"Looks like the `DATASETS_URL` variable is set incorrectly\")\n print('Trying to download a dataset %s from the repository' % file_name)\n url = urllib.parse.urljoin(ds_path, file_name)\n build_data.download(url, dpath, file_name)\n build_data.untar(dpath, file_name)\n print('Downloaded a %s dataset' % file_name)\n # mark the data as built\n build_data.mark_done(dpath, version_string=version)\n opt['raw_dataset_path']=dpath\n create_heap_file(opt['raw_dataset_path'])\n else:\n print('Use raw data for {}'.format(opt['raw_dataset_path']))\n create_heap_file(dpath, opt['raw_dataset_path'])\n build_data.mark_done(dpath, version_string=version)\n print(\"Use dataset from path: %s\" % repr(opt['raw_dataset_path']))", "def prep_dataset(settings):\n train_dims = settings[\"train_dims\"]\n # Open HDF store. This is usually a soft link to our filtered dataset\n input_df, target_df, const = load_from_store(settings[\"dataset_path\"], columns=train_dims)\n\n try:\n del input_df[\"nions\"] # Delete leftover artifact from dataset split\n except KeyError:\n pass\n\n target_df = drop_outliers(target_df, settings)\n target_df = drop_nans(target_df)\n\n data_df = filter_input(input_df, target_df)\n del target_df, input_df\n data_df = convert_dtype(data_df, settings)\n\n return data_df", "def main():\n parser = optparse.OptionParser()\n parser.add_option(\"-c\", \"--clear\", action=\"store_true\", dest=\"clear\",\n help=\"clear out all generated reports\")\n parser.add_option(\"-n\", \"--num\", action=\"store\", type=\"int\", dest=\"num\",\n help=\"number of data points to generate\")\n parser.add_option(\"-m\", \"--min\", action=\"store\", type=\"float\", dest=\"min\",\n help=\"minimum of polynomial range\")\n parser.add_option(\"-f\", \"--fun\", action=\"store\", type=\"string\", dest=\"fun\",\n help=(\"Python expression (function of x)\"))\n (options, _) = parser.parse_args()\n if options.clear:\n clear_data()\n else:\n report_id = generate_id()\n if report_id is None:\n print \"Too many tests exist already\"\n else:\n gen = DataGen(options.min, options.fun, options.num)\n gen.generate_data()\n gen.write_ref(report_id)\n gen.write_rand(report_id)", "def test_empty_args(self):\n env = make(\"Pendulum-v0\")\n test_env = make(\"Pendulum-v0\")\n policy = DDPG(state_shape=env.observation_space.shape,\n action_dim=env.action_space.high.size,\n gpu=-1,\n memory_capacity=1000,\n max_action=env.action_space.high[0],\n batch_size=32,\n n_warmup=10)\n Trainer(policy, env, {}, test_env=test_env)", "def graphsetup(self):\n gopts = ['-F', '-E', '--disable-rrdtool-tag']\n if self.height:\n gopts.append('--height=%d' % int(self.height))\n if self.width:\n gopts.append('--width=%d' % int(self.width))\n if self.log:\n 
gopts.append('--logarithmic')\n if self.maxy > -1:\n gopts.append('--upper-limit=%d' % int(self.maxy))\n gopts.append('--rigid')\n if self.miny > -1:\n gopts.append('--lower-limit=%d' % int(self.miny))\n gopts.append('--rigid')\n # Always include a vertical label so that multiple graphs on page\n # align correctly.\n gopts.append('--vertical-label=%s' % (self.units or ' '))\n if self.units == 'percentage':\n if not self.maxy > -1:\n gopts.append('--upper-limit=100')\n if not self.miny > -1:\n gopts.append('--lower-limit=0')\n if self.base:\n gopts.append('--base=1024')\n gopts = [str(o) for o in gopts]\n return gopts", "def __init__(self, opt):\n BaseDataset.__init__(self, opt)\n self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size))\n input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc\n self.transform = get_transform(opt, grayscale=(input_nc == 1))", "def makecldf(args):\n with_dataset(args, Dataset._install)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'DataSet':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = DataSetArgs.__new__(DataSetArgs)\n\n __props__.__dict__[\"arn\"] = None\n __props__.__dict__[\"aws_account_id\"] = None\n __props__.__dict__[\"column_groups\"] = None\n __props__.__dict__[\"column_level_permission_rules\"] = None\n __props__.__dict__[\"consumed_spice_capacity_in_bytes\"] = None\n __props__.__dict__[\"created_time\"] = None\n __props__.__dict__[\"data_set_id\"] = None\n __props__.__dict__[\"data_set_refresh_properties\"] = None\n __props__.__dict__[\"data_set_usage_configuration\"] = None\n __props__.__dict__[\"dataset_parameters\"] = None\n __props__.__dict__[\"field_folders\"] = None\n __props__.__dict__[\"import_mode\"] = None\n __props__.__dict__[\"ingestion_wait_policy\"] = None\n __props__.__dict__[\"last_updated_time\"] = None\n __props__.__dict__[\"logical_table_map\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"output_columns\"] = None\n __props__.__dict__[\"permissions\"] = None\n __props__.__dict__[\"physical_table_map\"] = None\n __props__.__dict__[\"row_level_permission_data_set\"] = None\n __props__.__dict__[\"row_level_permission_tag_configuration\"] = None\n __props__.__dict__[\"tags\"] = None\n return DataSet(resource_name, opts=opts, __props__=__props__)", "def generate_dataset(self):\n\t\timg_set = []\n\t\tqa_set = []\n\t\tfor i in range(self.config.dataset_size):\n\t\t\timg, r = self.generate_image()\n\t\t\tq = self.generate_question()\n\t\t\ta = self.generate_answer(r, q)\n\t\t\timg_sample = {\n\t\t\t\t'id': i,\n\t\t\t\t'image': img.tolist()\n\t\t\t}\n\t\t\timg_set.append(img_sample)\n\t\t\tfor j in range(len(q)):\n\t\t\t\tqa_sample = {\n\t\t\t\t\t'id': i,\n\t\t\t\t\t'question': q[j].tolist(),\n\t\t\t\t\t'answer': a[j].tolist()\n\t\t\t\t}\n\t\t\t\tqa_set.append(qa_sample)\n\t\tprint('Finished creating smaples')\n\t\tdataset = {\n\t\t\t'image':\timg_set,\n\t\t\t'qa':\tqa_set\n\t\t}\n\t\twith open(self.path, 'w') as f:\n\t\t\tjson.dump(dataset, f)", "def task_dataset(representations, features):\n return FakeTaskDataset(representations, features)", "def __init__(self, opt):\n BaseDataset.__init__(self, opt)\n self.SR_factor = opt.SR_factor\n\n assert util_dataset.check_whether_last_dir(opt.dataroot), 'when SingleDataset, opt.dataroot:{} should be dir and contains only image files'.format(opt.dataroot)\n self.dir_A = opt.dataroot\n self.A_paths = 
sorted(make_images_dataset(self.dir_A, opt.max_dataset_size)) # get image paths\n\n self.input_nc = self.opt.input_nc\n self.output_nc = self.opt.output_nc", "def main(opts):\n\n # Create a dataloader for the training images\n train_dataloader, _ = get_emoji_loader(opts.emoji, opts)\n\n # Create checkpoint and sample directories\n utils.create_dir(opts.checkpoint_dir)\n utils.create_dir(opts.sample_dir)\n\n train(train_dataloader, opts)" ]
[ "0.6061263", "0.56916666", "0.55919266", "0.54698056", "0.5453293", "0.53809595", "0.53586495", "0.5279866", "0.5200752", "0.5178504", "0.51729673", "0.5166292", "0.5101323", "0.5015216", "0.50128824", "0.49636617", "0.48968497", "0.4892959", "0.4892534", "0.48797497", "0.48761037", "0.48388648", "0.48150504", "0.4809967", "0.4806764", "0.4795033", "0.4789418", "0.47876492", "0.4783712", "0.47833097" ]
0.6708323
0
Generate the appropriate calculation engine for the given datatype. Datatypes with '!' appended are evaluated using external C DLLs rather than OpenCL.
def make_engine(model_info, data, dtype, cutoff): if dtype == 'sasview': return eval_sasview(model_info, data) elif dtype.endswith('!'): return eval_ctypes(model_info, data, dtype=dtype[:-1], cutoff=cutoff) else: return eval_opencl(model_info, data, dtype=dtype, cutoff=cutoff)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eval_ctypes(model_info, data, dtype='double', cutoff=0.):\n if dtype == 'quad':\n dtype = 'longdouble'\n model = core.build_model(model_info, dtype=dtype, platform=\"dll\")\n calculator = DirectModel(data, model, cutoff=cutoff)\n calculator.engine = \"OMP%s\"%DTYPE_MAP[dtype]\n return calculator", "def eval_opencl(model_info, data, dtype='single', cutoff=0.):\n try:\n model = core.build_model(model_info, dtype=dtype, platform=\"ocl\")\n except Exception as exc:\n print(exc)\n print(\"... trying again with single precision\")\n model = core.build_model(model_info, dtype='single', platform=\"ocl\")\n calculator = DirectModel(data, model, cutoff=cutoff)\n calculator.engine = \"OCL%s\"%DTYPE_MAP[dtype]\n return calculator", "def makeCalc(self, dataSet):\n\n #cyl = sasmodels.core.load_model_info('cylinder')\n #hs = sasmodels.core.load_model_info('hardsphere')\n #cylhs = sasmodels.core.load_model_info('cylinder@hardsphere')\n cylhmsa = sasmodels.core.load_model_info('cylinder@hayter_msa')\n\n # Build using c version instead of python. Avoids pyopencl\n model = sasmodels.core.build_model(cylhmsa, platform='dll')\n self.calculator = sasmodels.direct_model.DirectModel(dataSet, model)\n\n return", "def const(math_engine, shape, data):\n if not isinstance(math_engine, MathEngine):\n raise ValueError('The `math_engine` should be neoml.MathEngine.')\n\n np_shape = numpy.array(shape, dtype=numpy.int32, copy=False)\n\n if len(np_shape.shape) > 7:\n raise ValueError('The `shape` should have not more than 7 dimensions.')\n\n if numpy.isscalar(data):\n return Blob(PythonWrapper.blob_const(math_engine._internal, np_shape, float(data)))\n\n np_data = numpy.array(data, copy=False, order='C')\n\n if len(np_data.shape) > 7:\n raise ValueError('The `shape` should have not more than 7 dimensions.')\n\n return Blob(PythonWrapper.blob_const(math_engine._internal, np_shape, np_data))", "def load_standard_operators():\n #\n # Compute scripts\n #\n \n cscript('select' ,scriptpath+'mcdo.sh \"${operator}\" \"${out}\" \"${var}\" \"${period_iso}\" \"${domain}\" \"${alias}\" \"${units}\" \"${missing}\" ${ins} ',\n commuteWithTimeConcatenation=True, commuteWithSpaceConcatenation=True)\n #\n cscript('ccdo',\n scriptpath+'mcdo.sh \"${operator}\" \"${out}\" \"${var}\" \"${period_iso}\" \"${domain}\" \"${alias}\" \"${units}\" \"${missing}\" ${ins}')\n #\n cscript('minus', 'cdo sub ${in_1} ${in_2} ${out}',\n commuteWithTimeConcatenation=True, commuteWithSpaceConcatenation=True)\n #\n cscript('plus', 'cdo add ${in_1} ${in_2} ${out}',\n commuteWithTimeConcatenation=True, commuteWithSpaceConcatenation=True)\n #\n cscript('space_average',\n scriptpath+'mcdo.sh fldmean \"${out}\" \"${var}\" \"${period_iso}\" \"${domain}\" \"${alias}\" \"${units}\" \"${missing}\" ${ins}', \n commuteWithTimeConcatenation=True)\n #\n cscript('time_average' ,\n scriptpath+'mcdo.sh timmean \"${out}\" \"${var}\" \"${period_iso}\" \"${domain}\" \"${alias}\" \"${units}\" \"${missing}\" ${ins}' ,\n commuteWithSpaceConcatenation=True)\n #\n cscript('llbox' ,\n scriptpath+'mcdo.sh \"\" \"${out}\" \"${var}\" \"${period_iso}\" \"${latmin},${latmax},${lonmin},${lonmax}\" \"${alias}\" \"${units}\" \"${missing}\" ${ins}',\n commuteWithTimeConcatenation=True, commuteWithSpaceConcatenation=True)\n #\n cscript('regrid' ,\n scriptpath+'regrid.sh ${in} ${in_2} ${out} ${option}',\n commuteWithTimeConcatenation=True, commuteWithSpaceConcatenation=True)\n #\n cscript('regridn' ,\n scriptpath+'regrid.sh ${in} ${cdogrid} ${out} ${option}',\n 
commuteWithTimeConcatenation=True, commuteWithSpaceConcatenation=True)\n #\n cscript('rescale' ,\n 'cdo expr,\\\"${var}=${scale}*${var}+${offset};\\\" ${in} ${out}',\n commuteWithTimeConcatenation=True, commuteWithSpaceConcatenation=True)\n #\n cscript('mean_and_std',\n scriptpath+'mean_and_std.sh ${in} ${var} ${out} ${out_sdev}', \n # This tells CliMAF how to compute varname for name output 'sdev' \n\t # using input varname\n sdev_var=\"std(%s)\" , \n commuteWithTimeConcatenation=True)\n #\n # Declare plot scripts\n cscript('ncview' ,'ncview ${in} 1>/dev/null 2>&1&' )\n #\n cscript('timeplot', 'ncl '+scriptpath+'timeplot.ncl infile=\\'\\\"${in}\\\"\\' outfile=\\'\\\"${out}\\\"\\' '\n 'var=\\'\\\"${var}\\\"\\' title=\\'\\\"${title}\\\"\\'',format=\"png\")\n #\n # plot: main field (main_file) + auxiliary field (aux_file, optional) + vectors (u_file & v_file, optionals)\n #\n cscript('plot' , '(ncl -Q '+ scriptpath +'gplot.ncl main_file=\\'\\\"${in}\\\"\\' aux_file=\\'\\\"${in_2}\\\"\\' '\n 'u_file=\\'\\\"${in_3}\\\"\\' v_file=\\'\\\"${in_4}\\\"\\' rotation=${rotation} '\n 'plotname=\\'\\\"${out}\\\"\\' cmap=\\'\\\"${color}\\\"\\' vmin=${min} vmax=${max} vdelta=${delta} '\n 'main_var=\\'\\\"${var}\\\"\\' aux_var=\\'\\\"${var_2}\\\"\\' u_var=\\'\\\"${var_3}\\\"\\' v_var=\\'\\\"${var_4}\\\"\\' '\n 'title=\\'\\\"${title}\\\"\\' scale=${scale} offset=${offset} mpCenterLonF=${mpCenterLonF} '\n 'vcRefMagnitudeF=${vcRefMagnitudeF} vcRefLengthF=${vcRefLengthF} vcMinDistanceF=${vcMinDistanceF} '\n 'vcGlyphStyle=\\'\\\"${vcGlyphStyle}\\\"\\' vcLineArrowColor=\\'\\\"${vcLineArrowColor}\\\"\\' '\n 'units=\\'\\\"${units}\\\"\\' linp=${linp} colors=\\'\\\"${colors}\\\"\\' level=${level} time=${time} '\n 'proj=\\'\\\"${proj}\\\"\\' contours=\\'\\\"${contours}\\\"\\' focus=\\'\\\"${focus}\\\"\\' '\n 'type=\\'\\\"${format}\\\"\\' resolution=\\'\\\"${resolution}\\\"\\' trim=${trim} '\n 'vcb=${vcb} lbLabelFontHeightF=${lbLabelFontHeightF} invXY=${invXY} '\n 'tmYLLabelFontHeightF=${tmYLLabelFontHeightF} tmXBLabelFontHeightF=${tmXBLabelFontHeightF} '\n 'tmYRLabelFontHeightF=${tmYRLabelFontHeightF} tiXAxisFontHeightF=${tiXAxisFontHeightF} '\n 'tiYAxisFontHeightF=${tiYAxisFontHeightF} gsnPolarLabelFontHeightF=${gsnPolarLabelFontHeightF} '\n 'tiMainFont=\\'\\\"${tiMainFont}\\\"\\' tiMainFontHeightF=${tiMainFontHeightF} '\n 'tiMainPosition=\\'\\\"${tiMainPosition}\\\"\\' gsnLeftString=\\'\\\"${gsnLeftString}\\\"\\' '\n 'gsnRightString=\\'\\\"${gsnRightString}\\\"\\' gsnCenterString=\\'\\\"${gsnCenterString}\\\"\\' '\n 'gsnStringFont=\\'\\\"${gsnStringFont}\\\"\\' gsnStringFontHeightF=${gsnStringFontHeightF} )', format=\"graph\") \n # \n cscript('lines' , '(ncl -Q '+ scriptpath +'lineplot.ncl infile=\\'\\\"${mmin}\\\"\\' '\n 'plotname=\\'\\\"${out}\\\"\\' var=\\'\\\"${var}\\\"\\' title=\\'\\\"${title}\\\"\\' '\n 'linp=${linp} labels=\\'\\\"${labels}\\\"\\' colors=\\'\\\"${colors}\\\"\\' thickness=${thickness}'\n 'T_axis=\\'\\\"${T_axis}\\\"\\' fmt=\\'\\\"${fmt}\\\"\\' && '\n 'convert ${out} -trim ${out}) ', format=\"png\")\n #\n cscript('curves' , '(ncl -Q '+ scriptpath +'curves.ncl infile=\\'\\\"${mmin}\\\"\\' '\n 'plotname=\\'\\\"${out}\\\"\\' var=\\'\\\"${var}\\\"\\' title=\\'\\\"${title}\\\"\\' '\n 'labels=\\'\\\"${labels}\\\"\\' colors=\\'\\\"${colors}\\\"\\' thickness=${thickness} && '\n 'convert ${out} -trim ${out}) ', format=\"png\")\n #\n # cpdfcrop : pdfcrop by preserving metadata\n #\n cscript('cpdfcrop' , 'pdfcrop ${in} ${out} ', format=\"pdf\")\n # \n cscript('ncdump' , 'ncdump -h ${in} ', 
format=\"txt\")\n #\n if (os.system(\"type cdfmean >/dev/null 2>&1\")== 0 ) :\n load_cdftools_operators()\n else :\n clogger.warning(\"No Cdftool available\")", "def compile_kernel(context, queue, source_code, function_name,\n compiler_flags=None):\n if cdouble(queue)(42).nbytes >= 8:\n type_definitions = \"\"\"\n #define cdouble double\n \"\"\"\n else:\n print('WARNING: no 64bit float support available for this device.')\n type_definitions = \"\"\"\n #define cdouble float\n \"\"\"\n # The definition of cfloat and cshort is fixed for now since I do\n # not know of any cases where these are not available. If this\n # happens to be the case, we can add a check as for double here.\n type_definitions += \"\"\"\n #define cfloat float\n #define cshort short\n \"\"\"\n flags = BUILD_OPTIONS[:]\n if compiler_flags is not None:\n flags.extend(compiler_flags)\n full_source = type_definitions + basic_code() + source_code\n program = ocl.Program(context, full_source).build(flags)\n return getattr(program, function_name)", "def build_algorithm(self, algorithm_type):\n distance_matrix = self.matrix_handler.distance_matrix\n algorithm_execution_parameters = {}\n if algorithm_type == \"spectral\":\n # We need to set number of clusters for performance and we get sigma if defined\n algorithm_execution_parameters[\"max_clusters\"] = self.evaluation_parameters[\"maximum_clusters\"]\n if \"sigma\" in self.clustering_parameters[\"algorithms\"][\"spectral\"]:\n algorithm_execution_parameters[\"sigma_sq\"] = self.clustering_parameters[\"algorithms\"][\"spectral\"][\"sigma\"]\n # else it calculates its own sigma\n\n if algorithm_type in [\"spectral\",\"dbscan\",\"gromos\",\"kmedoids\",\"random\",\"hierarchical\"] :\n return ClusteringExplorer.get_clustering_algorithm_class()[algorithm_type](distance_matrix, **algorithm_execution_parameters)\n else:\n print \"[ERROR][ClusteringExplorer::build_algorithms] Not known algorithm type ( %s )\"%(algorithm_type)\n self.notify(\"SHUTDOWN\", \"Not known algorithm type ( %s )\"%(algorithm_type))\n exit()", "def mol_kern_factory(kernel_type: str, *args, **kwargs):\n kernel_to_kernel_type = {\n MolGraphKernel: MOL_GRAPH_CONT_KERNEL_TYPES + MOL_GRAPH_INT_KERNEL_TYPES,\n MolFingerprintKernel: MOL_FINGERPRINT_KERNEL_TYPES,\n MolDistanceKernel: MOL_DISTANCE_KERNEL_TYPES,\n MolSimilarityKernel: MOL_SIMILARITY_KERNEL_TYPES\n }\n kernel_type_to_kernel = {\n kernel_type: kernel\n for kernel, kernel_type_list in kernel_to_kernel_type.items()\n for kernel_type in kernel_type_list\n }\n if kernel_type not in kernel_type_to_kernel:\n raise ValueError(\"Not recognized kernel type: {}\".format(kernel_type))\n kernel = kernel_type_to_kernel[kernel_type]\n return kernel(kernel_type, *args, **kwargs)", "def render(self, algorithm_type, **kwargv):\n\n\t\tlogging.info('[Compiler]:this is debug')\n\t\tfid = kwargv['fid']\n\t\tdata_key = kwargv['data_key']\n\t\t# 用户录入值/测试输入值\n\t\tif 'field_value' in kwargv:\n\t\t\tfield_value = kwargv['field_value']\n\t\telse:\n\t\t\tfield_value = ''\n\n\t\talgorithm_type = 'script' if algorithm_type == 'script' else 'input'\n\t\tlogging.info('[Compiler]:start render')\n\t\t# print \"Source Code:\",field_algorithm\n\t\t# print \"ALGORITHM:\",_algorithm\n\t\t_lang_cfg = Common.collection_find(self.lang_cfg, lambda s: s['lang'] == self.algorithm[algorithm_type]['lang'])\n\t\tself.cms_assert((_lang_cfg is None and self.algorithm[algorithm_type]['lang'] != 'raw'), 500,\n\t\t\t\t\t\t'not support ' + self.algorithm[algorithm_type]['lang'])\n\n\t\tprint('step-1', 
_lang_cfg)\n\t\tif self.algorithm[algorithm_type]['lang'] == 'raw':\n\t\t\t# 判断算法类型,input则只读取raw配套的data,script则需要优先使用raw,如raw配套为空,则使用用户数据field_value\n\t\t\tif self.algorithm[algorithm_type]['data'].strip() != '':\n\t\t\t\tprint('direct raw')\n\t\t\t\treturn self.algorithm[algorithm_type]['data']\n\t\t\telif algorithm_type == 'script':\n\t\t\t\tprint('script mode null raw replaced by user data')\n\t\t\t\treturn field_value\n\t\t\telse:\n\t\t\t\tprint('input mode null raw convert to \"\" ')\n\t\t\t\treturn ''\n\t\tprint('step-2')\n\t\troot = os.path.split(os.path.realpath(__file__))[0]\n\t\tcmd = _lang_cfg['cfg']['run'].replace('{$root}', root)\n\t\tcmd = cmd.replace('%1', data_key)\n\t\t# 构建算法文件input类型前缀为developer_ script类型前缀为user_\n\t\t# prefix = 'user_' if algorithm_type=='script' else 'devloper_'\n\t\tprefix = algorithm_type + '_'\n\t\tpath = root + '/plugins/script/' + _lang_cfg['lang'] + '/usr/' + prefix + str(self.pid) + '_' + str(\n\t\t\tself.tid) + '_' + str(fid) + '.' + _lang_cfg['cfg']['extname']\n\t\tlogging.info(\"Script:\" + path)\n\t\talgo = Algorithm()\n\t\t_ready = False\n\t\tif os.path.exists(path):\n\t\t\tfp = open(path, 'r')\n\t\t\t_code = fp.read()\n\t\t\tfp.close()\n\t\t\ttarget_hash = Common.md5(_code)\n\t\t\tsource_hash = Common.md5(self.algorithm[algorithm_type]['data'])\n\t\t\tif target_hash == source_hash:\n\t\t\t\t_ready = True\n\t\t# return algo.execute(cmd)\n\t\tif not _ready:\n\t\t\tfp = open(path, 'w')\n\t\t\tfp.write(self.algorithm[algorithm_type]['data'])\n\t\t\tfp.close()\n\t\t\t_ready = True\n\t\tlogging.info('[Compiler-Debug]:end render')\n\t\tself.__debug_performance()\n\t\tlogging.info('[Compiler-Debug]:CMD:' + cmd)\n\t\trender_data = algo.execute(cmd)\n\t\tself.__debug_performance()\n\t\tlogging.info('[Compiler]:' + str(render_data))\n\t\tself.cms_assert(render_data == 'algorithm time out', 500, 'algorith time out!!!')\n\t\t# 分析处理渲染结果\n\t\t# 错误处理\n\t\t_p = re.compile(r\"\\[CMSERRKEY=.*?\\]\")\n\t\t_errkey = _p.findall(render_data)\n\t\tif len(_errkey) >= 1:\n\t\t\t_errkey = _errkey[0].replace('[CMSERRKEY=', '').replace(']', '')\n\t\t\t_errdata = str(self.share_memory.get(_errkey),encoding='utf-8')\n\t\t\t_script_errinfo = json.loads(_errdata)\n\t\t\tself.cms_assert(_script_errinfo['errcode'] == Error.ALGORITHMABORT['code'], 500,\n\t\t\t\t\t\t\tError.ALGORITHMABORT['errmsg'] + \" Detail:\" + _script_errinfo['errmsg'])\n\n\t\t# 结果分析\n\t\t_p = re.compile(r\"\\[CMSDATAKEY=.*?\\]\")\n\t\t_key = _p.findall(render_data)\n\t\tif len(_key) == 1:\n\t\t\treturn str(self.share_memory.get(_key[0].replace('[CMSDATAKEY=', '').replace(']', '')), encoding='utf-8')\n\t\telse:\n\t\t\treturn render_data", "def as_tensorflow(self, cuda_threads_per_block=_default_cuda_threads_per_block):\n tf.logging.log(tf.logging.DEBUG, 'Compiling generic C++ for Op ' + self.__class__.__name__)\n cpu_op_lib = Operator._make_generic_c(self.op_c_generic, self.op_name)\n if cuda_enabled:\n tf.logging.log(tf.logging.DEBUG, 'Compiling generic CUDA for Op ' + self.__class__.__name__)\n cuda_op_lib = Operator._make_generic_cuda(self.op_cuda_generic, self.op_name)\n else:\n cuda_op_lib = ''\n\n if self.grad_name is None:\n gpu_grad_name = ''\n gpu_grad_lib = ''\n cpu_grad_name = ''\n cpu_grad_lib = ''\n else:\n tf.logging.log(tf.logging.DEBUG, 'Compiling generic C++ for gradient of Op ' + self.__class__.__name__)\n cpu_grad_lib = Operator._make_generic_c(self.grad_c_generic, self.grad_name)\n cpu_grad_name = self.grad_name + '_generic_cpp'\n if cuda_enabled:\n tf.logging.log(tf.logging.DEBUG, 
'Compiling generic CUDA for gradient of Op ' + self.__class__.__name__)\n gpu_grad_lib = Operator._make_generic_cuda(self.grad_cuda_generic, self.grad_name)\n gpu_grad_name = self.grad_name + '_generic_cuda'\n else:\n gpu_grad_name = ''\n gpu_grad_lib = ''\n\n out_shapes = []\n out_types = []\n for cur_type in self.output_types:\n if cur_type.dtype == float32:\n tf_type = 'float'\n elif cur_type.dtype == float64:\n tf_type = 'double'\n else:\n raise NotImplementedError('Only floats and doubles currently supported.')\n\n out_types.append(tf_type)\n out_shapes.append(cur_type.shape)\n\n Operator._register_shape_inference()\n Operator._load_dynamiclib_module()\n Operator._register_gradient()\n tf_op = Operator._dynamiclibop_module.dynamic_lib(inputs=self._inputs,\n out_shapes=out_shapes,\n out_types=out_types,\n cpu_lib_path=cpu_op_lib,\n cpu_func_name=self.op_name + '_generic_cpp',\n gpu_lib_path=cuda_op_lib,\n gpu_func_name=self.op_name + '_generic_cuda',\n gpu_grad_func_name=gpu_grad_name,\n gpu_grad_lib_path=gpu_grad_lib,\n cpu_grad_func_name=cpu_grad_name,\n cpu_grad_lib_path=cpu_grad_lib,\n cuda_threads_per_block=cuda_threads_per_block)\n if len(out_shapes) == 1:\n return tf_op[0]\n else:\n return tf_op", "def map_latencydata_types(type_name):\n\n if type_name in ['0', '1']:\n return Unknown(type_name)\n\n # cl is register that is used for certain instructions\n if type_name == 'cl':\n return Register(type_name)\n\n # TODO make this its own type?\n if type_name == \"stack pointer\":\n return Register(type_name)\n\n if type_name == \"[r+s*x]\" or type_name == \"[r+s*y]\":\n return Unknown(type_name)\n\n if type_name[:1] == 'r':\n if type_name[-1] == 'l' or type_name[-1] == 'h':\n # h, l refer to high, low? get rid of these and continnue as normally\n type_name = type_name[:-1] # mistake in the document? 
get rid of the trailing l?\n size = int(type_name[1:]) if len(type_name) > 1 else None\n return Register(type_name, size)\n\n # vector registers (I think)\n if type_name in [\"xmm\", \"mmx\", \"ymm\", \"mmy\"]:\n return Register(type_name)\n\n if type_name == 'i':\n return Immediate(type_name)\n if type_name == \"v\":\n return Register(type_name)\n\n if type_name[:3] == \"xmm\":\n return Register(type_name)\n\n if type_name[:2] == 'mm':\n size = int(type_name[2:]) if len(type_name) > 2 else None\n return Memory(type_name, size)\n\n if type_name[0] == 'm':\n size = int(type_name[1:]) if len(type_name) > 1 else None\n return Memory(type_name, size)\n\n if type_name == \"x\":\n return Register(type_name)\n\n if type_name == \"y\":\n return Register(type_name)\n\n if type_name == \"near\" or type_name == \"short\":\n return Unknown(type_name)\n raise ValueError(f\"uknown type {type_name}\")", "def build_and_run(mod: IRModule, target: Target, dev_type: str) -> np.ndarray:\n rt_mod = tvm.build(mod, target=target)\n return run_module_via_rpc(\n rpc_config=rpc_config,\n lib=rt_mod,\n dev_type=dev_type,\n args={i: v for i, v in enumerate(inputs)}, # pylint: disable=unnecessary-comprehension\n continuation=create_calculator(backend=\"tir\"),\n backend=\"tir\",\n )", "def run_op_numeric_data(input_data, op_fun, *args):\n runtime = get_runtime()\n node = op_fun(input_data, *args)\n computation = runtime.computation(node)\n return computation()", "def _compile_ops(self, parameters, space_group):\n ke = self.wave_numbers[0]\n kw = self.wave_numbers[1]\n ki = self.wave_numbers[2:]\n mu = 1\n cavities = self.cavities\n ops = {}\n def add(i, j, op, key='default'):\n if (i, j) not in ops:\n ops[(i, j)] = {key: op}\n else:\n if key in ops[(i, j)]:\n raise ValueError(\"Duplicate key value provided in operator construction\")\n else:\n ops[(i, j)][key] = op\n\n # cavities\n for row, _ in enumerate(cavities):\n for col, _ in enumerate(cavities):\n if row == col:\n add(\n row, col,\n -1 * self.multitrace_operator(ki[row], mu, cavities[row], parameters=parameters, space_group=space_group)\n )\n add(\n row, col,\n -1 * self.multitrace_operator(kw, mu, cavities[row], parameters=parameters, space_group=space_group),\n key='wall'\n )\n else:\n add(\n row, col,\n -1 * self.multitrace_operator(kw, mu, cavities[col], target=cavities[row], parameters=parameters, space_group=space_group)\n ),\n # # self to wall\n add(\n row, col+1,\n self.multitrace_operator(kw, mu, self.main, target=cavities[row], parameters=parameters, space_group=space_group)\n )\n \n for col, cavity in enumerate(cavities):\n add(\n row+1, col,\n -1 * self.multitrace_operator(kw, mu, cavity, target=self.main, parameters=parameters, space_group=space_group)\n )\n \n # external boundary\n add(\n row+1, col+1,\n self.multitrace_operator(kw, mu, self.main, parameters=parameters, space_group=space_group),\n key='wall'\n\n )\n add(\n row+1, col+1,\n self.multitrace_operator(ke, mu, self.main, parameters=parameters, space_group=space_group),\n key='exterior'\n )\n # finished\n return ops", "async def _calc(self, ctx, *, m):\r\n m = \"\".join(m)\r\n math_filter = re.findall(\r\n r\"[\\[\\]\\-()*+/0-9=.,% ]|>|<|==|>=|<=|\\||&|~|!=|^|sum\"\r\n + \"|range|random|randint|choice|randrange|True|False|if|and|or|else\"\r\n + \"|is|not|for|in|acos|acosh|asin|asinh|atan|atan2|atanh|ceil\"\r\n + \"|copysign|cos|cosh|degrees|e|erf|erfc|exp|expm1|fabs|factorial\"\r\n + \"|floor|fmod|frexp|fsum|gamma|gcd|hypot|inf|isclose|isfinite\"\r\n + 
\"|isinf|isnan|ldexp|lgamma|log|log10|log1p|log2|modf|nan|pi\"\r\n + \"|pow|radians|sin|sinh|sqrt|tan|tanh|round\",\r\n m,\r\n )\r\n calculate_stuff = eval(\"\".join(math_filter))\r\n if len(str(calculate_stuff)) > 0:\r\n em = await Embed.create(\r\n ctx,\r\n title=\"CollectorDevTeam Calculator\",\r\n thumbnail=self.thumbnail,\r\n description=\"**Input**\\n`{}`\\n\\n**Result**\\n`{}`\".format(m, calculate_stuff),\r\n )\r\n em.add_field(name=\"Type Math\", value=\"Get Fun\")\r\n await ctx.send(embed=em)", "def dynamic_call(self, *args):\n expression = self.expression(*args)\n if not self.dimensionality_verified:\n bs_validation.raise_if_not(expression, self.units)\n self.dimensionality_verified = True\n return expression.to_base_units()", "def get_scalar(self, obs_type, record, db_manager):\n try:\n # Form the method name, then call it with arguments\n return getattr(self, 'calc_%s' % obs_type)(obs_type, record, db_manager)\n except AttributeError:\n raise weewx.UnknownType(obs_type)", "def do_math(cls, quad, type):\n\t\tdata = cls.get_address_value(quad.left_operand)\n\t\tval = 0.0\n\t\tif(type == \"sin\"):\n\t\t\tval = math.sin(data)\n\t\telif(type == \"cos\"):\n\t\t\tval = math.cos(data)\n\t\telif(type == \"tan\"):\n\t\t\tval = math.tan(data)\n\t\telif(type == \"exp\"):\n\t\t\tval = math.exp(data)\n\t\telif(type == \"log10\"):\n\t\t\tval = math.log10(data)\n\t\telif(type == \"sqrt\"):\n\t\t\tval = math.sqrt(data)\n\n\t\tcls.set_address_value(quad.result, val)", "def calculator(**pars):\n # paying for parameter conversion each time to keep life simple, if not fast\n pars = revert_pars(model_info, pars)\n for k, v in pars.items():\n parts = k.split('.') # polydispersity components\n if len(parts) == 2:\n model.dispersion[parts[0]][parts[1]] = v\n else:\n model.setParam(k, v)\n return theory()", "def autohard(equation):\n\n try:\n # Try to set a variable to an integer\n num1 = int(equation.split(\" \")[1])\n\n except ValueError:\n # Try to set a variable to a decimal\n num1 = float(equation.split(\" \")[1])\n\n # If the lowercase version of the operation equals 'log'\n if equation.split(\" \")[0].lower() == \"log\":\n # Return the answer\n return math.log(num1)\n\n # If the lowercase version of the operation equals 'acos'\n elif equation.split(\" \")[0].lower() == \"acos\":\n # Return the answer\n return math.acos(num1)\n\n # If the lowercase version of the operation equals 'asin'\n elif equation.split(\" \")[0].lower() == \"asin\":\n # Return the answer\n return math.asin(num1)\n\n # If the lowercase version of the operation equals 'atan'\n elif equation.split(\" \")[0].lower() == \"atan\":\n # Return the answer\n return math.atan(num1)\n\n # If the lowercase version of the operation equals 'cos'\n elif equation.split(\" \")[0].lower() == \"cos\":\n # Return the answer\n return math.cos(num1)\n\n # If the lowercase version of the operation equals 'hypot'\n elif equation.split(\" \")[0].lower() == \"hypot\":\n try:\n # Try to set a variable to an integer\n num2 = int(equation.split(\" \")[2])\n\n except ValueError:\n # Try to set a variable to an decimal\n num2 = float(equation.split(\" \")[2])\n\n # Return the answer\n return math.hypot(num1, num2)\n\n # If the lowercase version of the operation equals 'sin'\n elif equation.split(\" \")[0].lower() == \"sin\":\n # Return the answer\n return math.sin(num1)\n\n # If the lowercase version of the operation equals 'tan'\n elif equation.split(\" \")[0].lower() == \"tan\":\n # Return the answer\n return math.tan(num1)\n\n # Raise a warning\n raise 
ValueError(\"Invalid operation entered.\")", "def kernel_func(z, kernel_type):\n if kernel_type == \"uniform\":\n return 1.0 * (np.abs(z) <= 1)\n elif kernel_type == \"triangular\":\n return (1.0 - np.abs(z)) * (np.abs(z) <= 1)\n elif kernel_type == \"gaussian\":\n return np.exp(-np.power(z, 2.0) / 2.0)\n elif kernel_type == \"epanechnikov\":\n return (1.0 - np.power(z, 2.0)) * (np.abs(z) <= 1)\n elif kernel_type == \"quartic\":\n return np.power((1.0 - np.power(z, 2.0)), 2.0)*(np.abs(z) <= 1)\n elif kernel_type == \"triweight\":\n return np.power((1.0 - np.power(z, 2.0)), 3.0)*(np.abs(z) <= 1)\n elif kernel_type == \"tricube\":\n return np.power((1.0 - np.power(np.abs(z), 3.0)), 3.0)*(np.abs(z) <= 1)\n elif kernel_type == \"cosine\":\n return np.cos(z*np.pi/2)*(np.abs(z) <= 1)\n else:\n raise NotImplementedError(\"Unknown kernel type.\")", "def cmd_calc(self, event, command, usercommand):\n try:\n result = str(self.parser.eval(usercommand.arguments))\n response = '*** Calc: {}'.format(escape(result))\n except:\n fail = '*** Could not evaluate expression.'\n\n if self.wolfram:\n try:\n res = self.wolfram.query(usercommand.arguments)\n if len(res.pods) > 1:\n answer = res.pods[1].text\n\n # fix unicode\n answer = answer.encode('unicode-escape')\n answer = answer.replace(b'\\\\\\\\:', b'\\u')\n answer = answer.decode('unicode-escape')\n\n response = '*** W|A: {}'.format(escape(answer))\n else:\n response = fail\n except Exception as ex:\n if 'Computation error' in str(ex):\n response = fail\n else:\n print('exception:', ex)\n response = '*** Sorry, we ran into a problem. Please try again later'\n else:\n response = fail\n\n event['from_to'].msg(response)", "def eval(*args, **kwargs)->Any:\n pass", "def inp_kernel(r, ktype):\n \n if ktype == 'uniform':\n \n if r < 1.:\n return 1./((4./3.)*pi)\n else:\n return 0.\n \n elif ktype == 'sph-anarchy':\n \n if r <= 1.: return (21./(2.*pi)) * ((1. - r)*(1. - r)*(1. - r)*(1. - r)*(1. + 4.*r)) \n else: return 0. \n \n elif ktype == 'gadget-2':\n \n if r < 0.5: return (8./pi) * (1. - 6*(r*r) + 6*(r*r*r))\n elif r < 1.: return (8./pi) * 2 * ((1. - r)*(1. - r)*(1. - r))\n else: return 0.\n \n elif ktype == 'cubic':\n \n if r < 0.5: return (2.546479089470 + 15.278874536822 * (r - 1.0) * r * r)\n elif r < 1: return 5.092958178941 * (1.0 - r) * (1.0 - r) * (1.0 - r)\n else: return 0\n \n elif ktype == 'quintic':\n \n if r < 0.333333333: return 27.0*(6.4457752*r*r*r*r*(1.0-r) -1.4323945*r*r +0.17507044)\n elif r < 0.666666667: return 27.0*(3.2228876*r*r*r*r*(r-3.0) +10.7429587*r*r*r -5.01338071*r*r +0.5968310366*r +0.1352817016)\n elif r < 1: return 27.0*0.64457752*(-r*r*r*r*r +5.0*r*r*r*r -10.0*r*r*r +10.0*r*r -5.0*r +1.0)\n else: return 0\n \n else:\n \n print (\"Doesn't recognize the kernel. 
Input your own kernel in `inp_kernel`\")\n exit()", "def get_process_func(dataset_type, dsname):\n\n dsformat = 'VOL'\n if dataset_type == 'RAW':\n func_name = process_raw\n elif dataset_type == 'NCVOL':\n func_name = process_save_radar\n elif dataset_type == 'PWR':\n func_name = 'process_signal_power'\n elif dataset_type == 'SNR':\n func_name = 'process_snr'\n elif dataset_type == 'RHOHV_CORRECTION':\n func_name = 'process_correct_noise_rhohv'\n elif dataset_type == 'BIAS_CORRECTION':\n func_name = 'process_correct_bias'\n elif dataset_type == 'L':\n func_name = 'process_l'\n elif dataset_type == 'CDR':\n func_name = 'process_cdr'\n elif dataset_type == 'SAN':\n func_name = 'process_echo_id'\n elif dataset_type == 'ECHO_FILTER':\n func_name = 'process_echo_filter'\n elif dataset_type == 'SNR_FILTER':\n func_name = 'process_filter_snr'\n elif dataset_type == 'VIS_FILTER':\n func_name = 'process_filter_visibility'\n elif dataset_type == 'OUTLIER_FILTER':\n func_name = 'process_outlier_filter'\n elif dataset_type == 'PHIDP0_CORRECTION':\n func_name = 'process_correct_phidp0'\n elif dataset_type == 'PHIDP_SMOOTH_1W':\n func_name = 'process_smooth_phidp_single_window'\n elif dataset_type == 'PHIDP_SMOOTH_2W':\n func_name = 'process_smooth_phidp_double_window'\n elif dataset_type == 'PHIDP_KDP_MAESAKA':\n func_name = 'process_phidp_kdp_Maesaka'\n elif dataset_type == 'PHIDP_KDP_LP':\n func_name = 'process_phidp_kdp_lp'\n elif dataset_type == 'KDP_LEASTSQUARE_1W':\n func_name = 'process_kdp_leastsquare_single_window'\n elif dataset_type == 'KDP_LEASTSQUARE_2W':\n func_name = 'process_kdp_leastsquare_double_window'\n elif dataset_type == 'ATTENUATION':\n func_name = 'process_attenuation'\n elif dataset_type == 'RAINRATE':\n func_name = 'process_rainrate'\n elif dataset_type == 'WIND_VEL':\n func_name = 'process_wind_vel'\n elif dataset_type == 'WINDSHEAR':\n func_name = 'process_windshear'\n elif dataset_type == 'HYDROCLASS':\n func_name = 'process_hydroclass'\n elif dataset_type == 'PHIDP0_ESTIMATE':\n func_name = 'process_estimate_phidp0'\n elif dataset_type == 'RHOHV_RAIN':\n func_name = 'process_rhohv_rain'\n elif dataset_type == 'ZDR_RAIN':\n func_name = 'process_zdr_rain'\n elif dataset_type == 'SELFCONSISTENCY_KDP_PHIDP':\n func_name = 'process_selfconsistency_kdp_phidp'\n elif dataset_type == 'SELFCONSISTENCY_BIAS':\n func_name = 'process_selfconsistency_bias'\n elif dataset_type == 'TIME_AVG':\n func_name = 'process_time_avg'\n dsformat = 'TIMEAVG'\n elif dataset_type == 'WEIGHTED_TIME_AVG':\n func_name = 'process_weighted_time_avg'\n dsformat = 'TIMEAVG'\n elif dataset_type == 'FLAG_TIME_AVG':\n func_name = 'process_time_avg_flag'\n dsformat = 'TIMEAVG'\n elif dataset_type == 'COLOCATED_GATES':\n func_name = 'process_colocated_gates'\n dsformat = 'COLOCATED_GATES'\n elif dataset_type == 'INTERCOMP':\n func_name = 'process_intercomp'\n dsformat = 'INTERCOMP'\n elif dataset_type == 'INTERCOMP_TIME_AVG':\n func_name = 'process_intercomp_time_avg'\n dsformat = 'INTERCOMP'\n elif dataset_type == 'MONITORING':\n func_name = 'process_monitoring'\n dsformat = 'MONITORING'\n elif dataset_type == 'SUN_HITS':\n func_name = 'process_sun_hits'\n dsformat = 'SUN_HITS'\n elif dataset_type == 'POINT_MEASUREMENT':\n func_name = process_point_measurement\n dsformat = 'TIMESERIES'\n elif dataset_type == 'TRAJ':\n func_name = process_trajectory\n dsformat = 'TRAJ_ONLY'\n elif dataset_type == 'TRAJ_ATPLANE':\n func_name = process_traj_atplane\n dsformat = 'TIMESERIES'\n elif dataset_type == 
'TRAJ_ANTENNA_PATTERN':\n func_name = process_traj_antenna_pattern\n dsformat = 'TIMESERIES'\n else:\n raise ValueError(\"ERROR: Unknown dataset type '%s' of dataset '%s'\"\n % (dataset_type, dsname))\n\n return func_name, dsformat", "def make_divergence(bcs: Boundaries) -> OperatorType:\n assert isinstance(bcs.grid, CylindricalSymGrid)\n bcs.check_value_rank(0)\n boundary_r, boundary_z = bcs\n\n # calculate preliminary quantities\n dim_r, dim_z = bcs.grid.shape\n dr = bcs.grid.discretization[0]\n scale_r, scale_z = 1 / (2 * bcs.grid.discretization)\n\n value_outer = boundary_r.high.make_virtual_point_evaluator()\n region_z = boundary_z.make_region_evaluator()\n\n # use processing for large enough arrays\n parallel = dim_r * dim_z >= config[\"numba.parallel_threshold\"]\n\n @jit_allocate_out(parallel=parallel, out_shape=(dim_r, dim_z))\n def divergence(arr, out=None):\n \"\"\"apply divergence operator to array `arr`\"\"\"\n for j in nb.prange(0, dim_z): # iterate axial points\n # inner radial boundary condition\n i = 0\n arr_z_l, _, arr_z_h = region_z(arr[1], (i, j))\n d_r = (arr[0, 1, j] + 3 * arr[0, 0, j]) * scale_r\n d_z = (arr_z_h - arr_z_l) * scale_z\n out[i, j] = d_r + d_z\n\n for i in range(1, dim_r - 1): # iterate radial points\n arr_z_l, _, arr_z_h = region_z(arr[1], (i, j))\n d_r = (arr[0, i + 1, j] - arr[0, i - 1, j]) * scale_r\n d_r += arr[0, i, j] / ((i + 0.5) * dr)\n d_z = (arr_z_h - arr_z_l) * scale_z\n out[i, j] = d_r + d_z\n\n # outer radial boundary condition\n i = dim_r - 1\n arr_z_l, _, arr_z_h = region_z(arr[1], (i, j))\n arr_r_h = value_outer(arr[0], (i, j))\n d_r = (arr_r_h - arr[0, i - 1, j]) * scale_r\n d_r += arr[0, i, j] / ((i + 0.5) * dr)\n d_z = (arr_z_h - arr_z_l) * scale_z\n out[i, j] = d_z + d_r\n\n return out\n\n return divergence # type: ignore", "def evaluate(cfg: DictConfig):\n\n # suppress TensorFlow and DALI warnings\n suppress_warnings()\n\n if cfg.USE_MULTI_GPUS.VALUE:\n # change number of visible gpus for evaluation\n set_gpus(cfg.USE_MULTI_GPUS.GPU_IDS)\n # update batch size according to available gpus\n data_generator.update_batch_size(cfg)\n\n if cfg.OPTIMIZATION.AMP:\n print(\"Enabling Automatic Mixed Precision(AMP) training\")\n policy = mixed_precision.Policy('mixed_float16')\n mixed_precision.set_global_policy(policy)\n\n if cfg.OPTIMIZATION.XLA:\n print(\"Enabling Automatic Mixed Precision(XLA) training\")\n tf.config.optimizer.set_jit(True)\n\n # create model\n strategy = None\n if cfg.USE_MULTI_GPUS.VALUE:\n # multi gpu training using tensorflow mirrored strategy\n strategy = tf.distribute.MirroredStrategy(\n cross_device_ops=tf.distribute.HierarchicalCopyAllReduce()\n )\n print('Number of visible gpu devices: {}'.format(strategy.num_replicas_in_sync))\n with strategy.scope():\n optimizer = tf.keras.optimizers.Adam(\n learning_rate=cfg.HYPER_PARAMETERS.LEARNING_RATE\n ) # optimizer\n if cfg.OPTIMIZATION.AMP:\n optimizer = mixed_precision.LossScaleOptimizer(\n optimizer,\n dynamic=True\n )\n dice_coef = DiceCoefficient(post_processed=True, classes=cfg.OUTPUT.CLASSES)\n dice_coef = tf.keras.metrics.MeanMetricWrapper(name=\"dice_coef\", fn=dice_coef)\n model = prepare_model(cfg, training=True)\n else:\n optimizer = tf.keras.optimizers.Adam(\n learning_rate=cfg.HYPER_PARAMETERS.LEARNING_RATE\n ) # optimizer\n if cfg.OPTIMIZATION.AMP:\n optimizer = mixed_precision.LossScaleOptimizer(\n optimizer,\n dynamic=True\n )\n dice_coef = DiceCoefficient(post_processed=True, classes=cfg.OUTPUT.CLASSES)\n dice_coef = 
tf.keras.metrics.MeanMetricWrapper(name=\"dice_coef\", fn=dice_coef)\n model = prepare_model(cfg, training=True)\n\n model.compile(\n optimizer=optimizer,\n loss=unet3p_hybrid_loss,\n metrics=[dice_coef],\n )\n\n # weights model path\n checkpoint_path = join_paths(\n cfg.WORK_DIR,\n cfg.CALLBACKS.MODEL_CHECKPOINT.PATH,\n f\"{cfg.MODEL.WEIGHTS_FILE_NAME}.hdf5\"\n )\n\n assert os.path.exists(checkpoint_path), \\\n f\"Model weight's file does not exist at \\n{checkpoint_path}\"\n\n # TODO: verify without augment it produces same results\n # load model weights\n model.load_weights(checkpoint_path, by_name=True, skip_mismatch=True)\n model.summary()\n\n # data generators\n val_generator = data_generator.get_data_generator(cfg, \"VAL\", strategy)\n validation_steps = data_generator.get_iterations(cfg, mode=\"VAL\")\n\n # evaluation metric\n evaluation_metric = \"dice_coef\"\n if len(model.outputs) > 1:\n evaluation_metric = f\"{model.output_names[0]}_dice_coef\"\n\n result = model.evaluate(\n x=val_generator,\n steps=validation_steps,\n workers=cfg.DATALOADER_WORKERS,\n return_dict=True,\n )\n\n # return computed loss, validation accuracy, and it's metric name\n return result, evaluation_metric", "def main():\n model = Calculator()", "def Calc():\n print('Please type a maths expression with 2 intergers or floats and an operator \"+\", \"-\", \"*\" or \"/\"')\n inp = (input())\n for char in inp:\n if char not in '1234567890.-+*/':\n print('Please restart the program and only type valid characters')\n return\n operators = [\"+\", \"-\", \"*\", \"/\"]\n buf = ''\n operand1 = 0.0\n operand2 = 0.0\n for char in inp:\n if char not in operators:\n buf += char\n else:\n operator = char\n operand1 = float(buf)\n buf = ''\n operand2 = float(buf)\n res = 0.0\n if operator == '+':\n res = su(operand1, operand2)\n elif operator == '-':\n res = sub(operand1, operand2)\n elif operator == '*':\n res = mu(operand1, operand2)\n elif operand2==0:\n return \"Can not divide by 0\"\n else:\n res = di(operand1, operand2)\n print(res)\n return res", "def eval(*args, **kwargs):\n\n pass" ]
[ "0.6674885", "0.63499874", "0.61020803", "0.52706206", "0.49951327", "0.49773255", "0.49654198", "0.4941015", "0.48718393", "0.48411128", "0.4809473", "0.47807318", "0.47564074", "0.47495553", "0.47205937", "0.47169065", "0.47130096", "0.46978438", "0.4689692", "0.46767172", "0.46571094", "0.46525377", "0.46451446", "0.4640526", "0.4635012", "0.4626446", "0.46217278", "0.46211442", "0.46132275", "0.461249" ]
0.71022725
0
Perform a comparison using options from the command line. limits are the limits on the values to use, either to set the y-axis for 1D or to set the colormap scale for 2D. If None, then they are inferred from the data and returned. When exploring using Bumps, the limits are set when the model is initially called, and maintained as the values are adjusted, making it easier to see the effects of the parameters.
def compare(opts, limits=None):
    Nbase, Ncomp = opts['n1'], opts['n2']
    pars = opts['pars']
    data = opts['data']

    # Base calculation
    if Nbase > 0:
        base = opts['engines'][0]
        try:
            base_value, base_time = time_calculation(base, pars, Nbase)
            base_value = np.ma.masked_invalid(base_value)
            print("%s t=%.2f ms, intensity=%.0f" % (base.engine, base_time, base_value.sum()))
            _show_invalid(data, base_value)
        except ImportError:
            traceback.print_exc()
            Nbase = 0

    # Comparison calculation
    if Ncomp > 0:
        comp = opts['engines'][1]
        try:
            comp_value, comp_time = time_calculation(comp, pars, Ncomp)
            comp_value = np.ma.masked_invalid(comp_value)
            print("%s t=%.2f ms, intensity=%.0f" % (comp.engine, comp_time, comp_value.sum()))
            _show_invalid(data, comp_value)
        except ImportError:
            traceback.print_exc()
            Ncomp = 0

    # Compare, but only if computing both forms
    if Nbase > 0 and Ncomp > 0:
        resid = (base_value - comp_value)
        relerr = resid/comp_value
        _print_stats("|%s-%s|" % (base.engine, comp.engine) + (" "*(3+len(comp.engine))), resid)
        _print_stats("|(%s-%s)/%s|" % (base.engine, comp.engine, comp.engine), relerr)

    # Plot if requested
    if not opts['plot'] and not opts['explore']: return
    view = opts['view']
    import matplotlib.pyplot as plt
    if limits is None:
        vmin, vmax = np.Inf, -np.Inf
        if Nbase > 0:
            vmin = min(vmin, base_value.min())
            vmax = max(vmax, base_value.max())
        if Ncomp > 0:
            vmin = min(vmin, comp_value.min())
            vmax = max(vmax, comp_value.max())
        limits = vmin, vmax

    if Nbase > 0:
        if Ncomp > 0: plt.subplot(131)
        plot_theory(data, base_value, view=view, use_data=False, limits=limits)
        plt.title("%s t=%.2f ms"%(base.engine, base_time))
        #cbar_title = "log I"
    if Ncomp > 0:
        if Nbase > 0: plt.subplot(132)
        plot_theory(data, comp_value, view=view, use_data=False, limits=limits)
        plt.title("%s t=%.2f ms"%(comp.engine, comp_time))
        #cbar_title = "log I"
    if Ncomp > 0 and Nbase > 0:
        plt.subplot(133)
        if not opts['rel_err']:
            err, errstr, errview = resid, "abs err", "linear"
        else:
            err, errstr, errview = abs(relerr), "rel err", "log"
        #err,errstr = base/comp,"ratio"
        plot_theory(data, None, resid=err, view=errview, use_data=False)
        if view == 'linear':
            plt.xscale('linear')
        plt.title("max %s = %.3g"%(errstr, abs(err).max()))
        #cbar_title = errstr if errview=="linear" else "log "+errstr
    #if is2D:
    #    h = plt.colorbar()
    #    h.ax.set_title(cbar_title)

    if Ncomp > 0 and Nbase > 0 and '-hist' in opts:
        plt.figure()
        v = relerr
        v[v == 0] = 0.5*np.min(np.abs(v[v != 0]))
        plt.hist(np.log10(np.abs(v)), normed=1, bins=50)
        plt.xlabel('log10(err), err = |(%s - %s) / %s|' % (base.engine, comp.engine, comp.engine))
        plt.ylabel('P(err)')
        plt.title('Distribution of relative error between calculation engines')

    if not opts['explore']:
        plt.show()

    return limits
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def determinePlotLimits(self):\n max_str = \"up99\"\n min_str = \"dn99\"\n if self.keywords.get(\"limit_type\",\"99per\") == \"minmax\":\n max_str = \"max\"\n min_str = \"min\"\n \n # Determine the min/max of variables over all models\n limits = {}\n prune = False\n for fname in glob.glob(os.path.join(self.output_path,\"*.nc\")):\n with Dataset(fname) as dataset:\n if \"MeanState\" not in dataset.groups: continue\n group = dataset.groups[\"MeanState\"]\n variables = [v for v in group.variables.keys() if v not in group.dimensions.keys()]\n for vname in variables:\n var = group.variables[vname]\n pname = vname.split(\"_\")[0]\n region = vname.split(\"_\")[-1]\n if var[...].size <= 1: continue\n if space_opts.has_key(pname):\n if not limits.has_key(pname):\n limits[pname] = {}\n limits[pname][\"min\"] = +1e20\n limits[pname][\"max\"] = -1e20\n limits[pname][\"unit\"] = post.UnitStringToMatplotlib(var.getncattr(\"units\"))\n limits[pname][\"min\"] = min(limits[pname][\"min\"],var.getncattr(min_str))\n limits[pname][\"max\"] = max(limits[pname][\"max\"],var.getncattr(max_str))\n elif time_opts.has_key(pname):\n if not limits.has_key(pname): limits[pname] = {}\n if not limits[pname].has_key(region):\n limits[pname][region] = {}\n limits[pname][region][\"min\"] = +1e20\n limits[pname][region][\"max\"] = -1e20\n limits[pname][region][\"unit\"] = post.UnitStringToMatplotlib(var.getncattr(\"units\"))\n limits[pname][region][\"min\"] = min(limits[pname][region][\"min\"],var.getncattr(\"min\"))\n limits[pname][region][\"max\"] = max(limits[pname][region][\"max\"],var.getncattr(\"max\"))\n if not prune and \"Benchmark\" in fname and pname == \"timeint\":\n prune = True\n self.pruneRegions(Variable(filename = fname,\n variable_name = vname,\n groupname = \"MeanState\"))\n \n # Second pass to plot legends (FIX: only for master?)\n for pname in limits.keys():\n\n try:\n opts = space_opts[pname]\n except:\n continue\n \n # Determine plot limits and colormap\n if opts[\"sym\"]:\n vabs = max(abs(limits[pname][\"min\"]),abs(limits[pname][\"min\"]))\n limits[pname][\"min\"] = -vabs\n limits[pname][\"max\"] = vabs\n\n # if a score, force to be [0,1]\n if \"score\" in pname:\n limits[pname][\"min\"] = 0\n limits[pname][\"max\"] = 1\n\n limits[pname][\"cmap\"] = opts[\"cmap\"]\n if limits[pname][\"cmap\"] == \"choose\": limits[pname][\"cmap\"] = self.cmap\n\n # Plot a legend for each key\n if opts[\"haslegend\"]:\n fig,ax = plt.subplots(figsize=(6.8,1.0),tight_layout=True)\n label = opts[\"label\"]\n if label == \"unit\": label = limits[pname][\"unit\"]\n post.ColorBar(ax,\n vmin = limits[pname][\"min\"],\n vmax = limits[pname][\"max\"],\n cmap = limits[pname][\"cmap\"],\n ticks = opts[\"ticks\"],\n ticklabels = opts[\"ticklabels\"],\n label = label)\n fig.savefig(os.path.join(self.output_path,\"legend_%s.png\" % (pname))) \n plt.close()\n\n # Determine min/max of relationship variables\n for fname in glob.glob(os.path.join(self.output_path,\"*.nc\")):\n with Dataset(fname) as dataset:\n for g in dataset.groups.keys():\n if \"relationship\" not in g: continue\n grp = dataset.groups[g]\n if not limits.has_key(g):\n limits[g] = {}\n limits[g][\"xmin\"] = +1e20\n limits[g][\"xmax\"] = -1e20\n limits[g][\"ymin\"] = +1e20\n limits[g][\"ymax\"] = -1e20\n limits[g][\"xmin\"] = min(limits[g][\"xmin\"],grp.variables[\"ind_bnd\"][ 0, 0])\n limits[g][\"xmax\"] = max(limits[g][\"xmax\"],grp.variables[\"ind_bnd\"][-1,-1])\n limits[g][\"ymin\"] = min(limits[g][\"ymin\"],grp.variables[\"dep_bnd\"][ 0, 0])\n 
limits[g][\"ymax\"] = max(limits[g][\"ymax\"],grp.variables[\"dep_bnd\"][-1,-1])\n\n \n self.limits = limits", "def set_axis_limits(*args):\n robots = get_robot_roots()\n if not robots:\n raise MimicError('Nothing Selected; Select a valid robot')\n return\n\n current_tab = pm.tabLayout('limits_tab_layout',\n query=True,\n selectTab=True)\n\n if current_tab == 'position_limits_tab':\n set_position_limits()\n elif current_tab == 'velocity_limits_tab':\n set_deriv_limits('Velocity')\n elif current_tab == 'accel_limits_tab':\n set_deriv_limits('Accel')\n elif current_tab == 'jerk_limits_tab':\n set_deriv_limits('Jerk')", "def output_limits(self, limits):\n if limits is None:\n self._min_output, self._max_output = None, None\n return\n\n min_output, max_output = limits\n\n if None not in limits and max_output < min_output:\n raise ValueError('lower limit must be less than upper limit')\n\n self._min_output = min_output\n self._max_output = max_output\n\n self._integral = self._clamp(self._integral, self.output_limits)\n self._last_output = self._clamp(self._last_output, self.output_limits)", "def setlimits(self, Xlim=[], Ylim=[]):\n self.data['Xmin'] = Xlim[0]\n self.data['Xmax'] = Xlim[1]\n self.data['Ymin'] = Ylim[0]\n self.data['Ymax'] = Ylim[1]", "def set_output_limits(self, min_value, max_value):\n self.out_min = min_value\n self.out_max = max_value\n if self.out_min > self.out_max:\n print(\"set_output_limits(): min must be smaller than max.\")\n self.iterm = self.clip_to_output_limits(self.iterm)\n self.output = self.clip_to_output_limits(self.output)", "def view_limits(self, dmin, dmax):\n base = self._select_base(dmin, dmax)\n if mpl.rcParams['axes.autolimit_mode'] == 'round_numbers':\n vmin = base.le(dmin)\n vmax = base.ge(dmax)\n if vmin == vmax:\n vmin -= 1\n vmax += 1\n else:\n vmin = dmin\n vmax = dmax\n\n return mtransforms.nonsingular(vmin, vmax)", "def compare_profile(prof_ref, prof_2, feature_cols, direction = 'h'): \n \n plt.style.use('ggplot')\n \n n_dim = len(feature_cols) \n \n fig = plt.figure()\n \n if (direction == 'h'):\n \n x_vals = np.arange(1, n_dim + 1)\n plt.bar(x_vals, prof_ref * 100.0, color = '#273746', label='Minimum Case')\n plt.bar(x_vals, prof_2 * 100.0, color = '#D81B60', alpha = 0.5, label='Maximum Case')\n plt.xticks(x_vals, feature_cols, rotation='vertical')\n plt.ylim([0,100])\n # plt.ylabel('A' + str(i + 1))\n plt.rcParams.update({'font.size': 10})\n plt.tight_layout()\n plt.legend(loc='upper left') \n \n elif (direction == 'v'):\n \n y_vals = np.arange(1, n_dim + 1) \n \n plt.barh(y_vals, prof_ref * 100.0, color = '#273746', label='Archetype')\n plt.barh(y_vals, prof_2 * 100.0, color = '#D81B60', alpha = 0.5, label='Closet Data') \n plt.yticks(y_vals, feature_cols)\n plt.xlim([0,100])\n plt.rcParams.update({'font.size': 10})\n plt.tight_layout()\n plt.legend(loc='upper left') \n \n else:\n \n raise ValueError('acceptable direction values are \"h\" and \"v\"!')\n\n return fig", "def set_limits_minmax(self, zmin, zmax):\n self._color_mapper.update(low=zmin, high=zmax)\n self.update()", "def py_apply_limits(self, plot):\n if any(x is not None for x in self.x_lim):\n if self.x_lim[0] is not None: # at least left?\n if self.x_lim[1] is not None: # left and right?\n plot.set_xlim(left=self.x_lim[0], right=self.x_lim[1])\n else:\n plot.set_xlim(left=self.x_lim[0])\n else: # just right\n plot.set_xlim(rigt=self.x_lim[1])\n if any(y is not None for y in self.y_lim):\n if self.y_lim[0] is not None: # at least bottom?\n if self.y_lim[1] is not None:\n 
plot.set_ylim(bottom=self.y_lim[0], top=self.y_lim[1])\n else:\n plot.set_ylim(bottom=self.y_lim[0])\n else:\n plot.set_ylim(top=self.y_lim[1])", "def __init__(self, values: dict):\n self.options = [\n \"lower\",\n \"regression\",\n \"upper\"\n ]", "def test_min_vs_max(self, fig_test, fig_ref):\n ax = fig_test.add_subplot(projection=\"ternary\")\n ax.set_ternary_min(0.1, 0.2, 0.3)\n\n ax = fig_ref.add_subplot(projection=\"ternary\")\n ax.set_ternary_max(0.5, 0.6, 0.7)", "def view_limits(self, vmin, vmax):\n return vmin, vmax\n # return nonsingular(vmin, vmax)", "def parse_opts():\n MODELS = core.list_models()\n flags = [arg for arg in sys.argv[1:]\n if arg.startswith('-')]\n values = [arg for arg in sys.argv[1:]\n if not arg.startswith('-') and '=' in arg]\n args = [arg for arg in sys.argv[1:]\n if not arg.startswith('-') and '=' not in arg]\n models = \"\\n \".join(\"%-15s\"%v for v in MODELS)\n if len(args) == 0:\n print(USAGE)\n print(\"\\nAvailable models:\")\n print(columnize(MODELS, indent=\" \"))\n sys.exit(1)\n if len(args) > 3:\n print(\"expected parameters: model N1 N2\")\n\n name = args[0]\n try:\n model_info = core.load_model_info(name)\n except ImportError as exc:\n print(str(exc))\n print(\"Could not find model; use one of:\\n \" + models)\n sys.exit(1)\n\n invalid = [o[1:] for o in flags\n if o[1:] not in NAME_OPTIONS\n and not any(o.startswith('-%s='%t) for t in VALUE_OPTIONS)]\n if invalid:\n print(\"Invalid options: %s\"%(\", \".join(invalid)))\n sys.exit(1)\n\n\n # pylint: disable=bad-whitespace\n # Interpret the flags\n opts = {\n 'plot' : True,\n 'view' : 'log',\n 'is2d' : False,\n 'qmax' : 0.05,\n 'nq' : 128,\n 'res' : 0.0,\n 'accuracy' : 'Low',\n 'cutoff' : 0.0,\n 'seed' : -1, # default to preset\n 'mono' : False,\n 'show_pars' : False,\n 'show_hist' : False,\n 'rel_err' : True,\n 'explore' : False,\n 'use_demo' : True,\n 'zero' : False,\n }\n engines = []\n for arg in flags:\n if arg == '-noplot': opts['plot'] = False\n elif arg == '-plot': opts['plot'] = True\n elif arg == '-linear': opts['view'] = 'linear'\n elif arg == '-log': opts['view'] = 'log'\n elif arg == '-q4': opts['view'] = 'q4'\n elif arg == '-1d': opts['is2d'] = False\n elif arg == '-2d': opts['is2d'] = True\n elif arg == '-exq': opts['qmax'] = 10.0\n elif arg == '-highq': opts['qmax'] = 1.0\n elif arg == '-midq': opts['qmax'] = 0.2\n elif arg == '-lowq': opts['qmax'] = 0.05\n elif arg == '-zero': opts['zero'] = True\n elif arg.startswith('-nq='): opts['nq'] = int(arg[4:])\n elif arg.startswith('-res='): opts['res'] = float(arg[5:])\n elif arg.startswith('-accuracy='): opts['accuracy'] = arg[10:]\n elif arg.startswith('-cutoff='): opts['cutoff'] = float(arg[8:])\n elif arg.startswith('-random='): opts['seed'] = int(arg[8:])\n elif arg == '-random': opts['seed'] = np.random.randint(1e6)\n elif arg == '-preset': opts['seed'] = -1\n elif arg == '-mono': opts['mono'] = True\n elif arg == '-poly': opts['mono'] = False\n elif arg == '-pars': opts['show_pars'] = True\n elif arg == '-nopars': opts['show_pars'] = False\n elif arg == '-hist': opts['show_hist'] = True\n elif arg == '-nohist': opts['show_hist'] = False\n elif arg == '-rel': opts['rel_err'] = True\n elif arg == '-abs': opts['rel_err'] = False\n elif arg == '-half': engines.append(arg[1:])\n elif arg == '-fast': engines.append(arg[1:])\n elif arg == '-single': engines.append(arg[1:])\n elif arg == '-double': engines.append(arg[1:])\n elif arg == '-single!': engines.append(arg[1:])\n elif arg == '-double!': engines.append(arg[1:])\n elif arg == 
'-quad!': engines.append(arg[1:])\n elif arg == '-sasview': engines.append(arg[1:])\n elif arg == '-edit': opts['explore'] = True\n elif arg == '-demo': opts['use_demo'] = True\n elif arg == '-default': opts['use_demo'] = False\n # pylint: enable=bad-whitespace\n\n if len(engines) == 0:\n engines.extend(['single', 'sasview'])\n elif len(engines) == 1:\n if engines[0][0] != 'sasview':\n engines.append('sasview')\n else:\n engines.append('single')\n elif len(engines) > 2:\n del engines[2:]\n\n n1 = int(args[1]) if len(args) > 1 else 1\n n2 = int(args[2]) if len(args) > 2 else 1\n use_sasview = any(engine=='sasview' and count>0\n for engine, count in zip(engines, [n1, n2]))\n\n # Get demo parameters from model definition, or use default parameters\n # if model does not define demo parameters\n pars = get_pars(model_info, opts['use_demo'])\n\n\n # Fill in parameters given on the command line\n presets = {}\n for arg in values:\n k, v = arg.split('=', 1)\n if k not in pars:\n # extract base name without polydispersity info\n s = set(p.split('_pd')[0] for p in pars)\n print(\"%r invalid; parameters are: %s\"%(k, \", \".join(sorted(s))))\n sys.exit(1)\n presets[k] = float(v) if not k.endswith('type') else v\n\n # randomize parameters\n #pars.update(set_pars) # set value before random to control range\n if opts['seed'] > -1:\n pars = randomize_pars(pars, seed=opts['seed'])\n print(\"Randomize using -random=%i\"%opts['seed'])\n if opts['mono']:\n pars = suppress_pd(pars)\n pars.update(presets) # set value after random to control value\n #import pprint; pprint.pprint(model_info)\n constrain_pars(model_info, pars)\n if use_sasview:\n constrain_new_to_old(model_info, pars)\n if opts['show_pars']:\n print(str(parlist(model_info, pars, opts['is2d'])))\n\n # Create the computational engines\n data, _ = make_data(opts)\n if n1:\n base = make_engine(model_info, data, engines[0], opts['cutoff'])\n else:\n base = None\n if n2:\n comp = make_engine(model_info, data, engines[1], opts['cutoff'])\n else:\n comp = None\n\n # pylint: disable=bad-whitespace\n # Remember it all\n opts.update({\n 'name' : name,\n 'def' : model_info,\n 'n1' : n1,\n 'n2' : n2,\n 'presets' : presets,\n 'pars' : pars,\n 'data' : data,\n 'engines' : [base, comp],\n })\n # pylint: enable=bad-whitespace\n\n return opts", "def ic_compare(args):\n\n config_file = args.setupfn\n conf_base = os.path.basename(config_file).split('.')[0]\n statfile = os.path.join(args.outputdir,\n \"{}_radvel.stat\".format(conf_base))\n\n status = load_status(statfile)\n\n P, post = radvel.utils.initialize_posterior(config_file,\n decorr=args.decorr)\n\n assert status.getboolean('fit', 'run'), \\\n \"Must perform max-liklihood fit before running Information Criteria comparisons\"\n post = radvel.posterior.load(status.get('fit', 'postfile'))\n\n choices = ['nplanets', 'e', 'trend', 'jit', 'gp']\n statsdictlist = []\n paramlist = []\n compareparams = args.type\n\n ipost = copy.deepcopy(post)\n\n if args.simple:\n statsdictlist += radvel.fitting.model_comp(ipost, params=[], verbose=args.verbose)\n else:\n if hasattr(args, 'fixjitter') and args.fixjitter:\n for param in ipost.params:\n if len(param) >= 3 and param[0:3] == 'jit':\n ipost.params[param].vary = False\n\n for compareparam in compareparams:\n assert compareparam in choices, \\\n \"Valid parameter choices for 'ic -t' are combinations of: \"\\\n + \" \".join(choices)\n paramlist.append(compareparam)\n if hasattr(args, 'mixed') and not args.mixed:\n statsdictlist += radvel.fitting.model_comp(ipost, 
params=[compareparam], verbose=args.verbose)\n if hasattr(args, 'mixed') and not args.mixed:\n new_statsdictlist = []\n for dicti in statsdictlist:\n anymatch = False\n for seendict in new_statsdictlist:\n if collections.Counter(dicti['Free Params'][0]) == \\\n collections.Counter(seendict['Free Params'][0]):\n anymatch = True\n continue\n if not anymatch:\n new_statsdictlist.append(dicti)\n statsdictlist = new_statsdictlist\n\n if not hasattr(args, 'mixed') or (hasattr(args, 'mixed') and args.mixed):\n statsdictlist += radvel.fitting.model_comp(ipost, params=paramlist, verbose=args.verbose)\n\n savestate = {'ic': statsdictlist}\n save_status(statfile, 'ic_compare', savestate)", "def set_colorbar_limits(fld,cmin,cmax):\n\n # handle input\n if (cmin is None) and (cmax is not None):\n raise RuntimeError('Only cmax given, must provide both cmin and cmax')\n elif (cmin is not None) and (cmax is None):\n raise RuntimeError('Only cmin given, must provide both cmin and cmax')\n else:\n # handle colorbar limits accidentally passed as with xarray functions\n if type(cmin) is xr.DataArray:\n cmin = cmin.values()\n elif cmin is not None:\n raise TypeError('Unsure of cmin type: ',type(cmin))\n if type(cmax) is xr.DataArray:\n cmax = cmax.values()\n elif cmax is not None:\n raise TypeError('Unsure of cmax type: ',type(cmax))\n\n # compute fld limits\n fld_min = fld.min(skipna=True).values\n fld_max = fld.max(skipna=True).values\n\n # if cmin/cmax not set, compute\n if (cmin is None) and (cmax is None):\n\n cmin = fld_min\n cmax = fld_max\n\n # determine if divergent colorbar \n # Note: Not making divergent colorbar for temperature\n # in degC because still sequential even though +/-\n if (fld_max*fld_min < 0) and (fld.name is not 'THETA'):\n cmax = np.nanmax(np.abs(fld.values))\n cmin = -cmax\n\n # determine if colorbar needs to be extended\n if (cmin > fld_min) and (cmax < fld_max):\n extend_cbar = \"both\"\n elif cmin > fld_min:\n extend_cbar = \"min\"\n elif cmax < fld_max:\n extend_cbar = \"max\"\n else:\n extend_cbar = \"neither\"\n\n return cmin, cmax, extend_cbar", "def add_limits_args(parser: argparse.ArgumentParser):\n parser.add_argument(\"north_lim\", help=\"northern limit of the search inside the area.\", type=float)\n parser.add_argument(\"south_lim\", help=\"southern limit of the search inside the area.\", type=float)\n parser.add_argument(\"east_lim\", help=\"eastern limit of the search inside the area.\", type=float)\n parser.add_argument(\"west_lim\", help=\"western limit of the search inside the area.\", type=float)", "def compare_curves(target_data, source_data, q_min, q_max, chi2):\n\n if chi2:\n rfactor, scale = calculate_chi2(target_data, source_data, q_min, q_max)\n else:\n rfactor, scale = calculate_rfactor(\n target_data, source_data, q_min, q_max)\n\n return rfactor, scale", "def plot(self, view='log'):\n pars = dict((k, v.value) for k, v in self.pars.items())\n pars.update(self.pd_types)\n self.opts['pars'] = pars\n limits = compare(self.opts, limits=self.limits)\n if self.limits is None:\n vmin, vmax = limits\n vmax = 1.3*vmax\n vmin = vmax*1e-7\n self.limits = vmin, vmax", "def _plot_comparison(xs, pan, other_program_name, **kw):\n\n pans = ['Bmax', 'Emax']\n units = ['(mG)', '(kV/m)']\n title_app = [', Max Magnetic Field', ', Max Electric Field']\n save_suf = ['-%s-comparison-Bmax' % other_program_name,\n '-%s-comparison-Emax' % other_program_name]\n\n for p,u,t,s in zip(pans, units, title_app, save_suf):\n #figure object and axes\n fig = plt.figure()\n ax_abs = 
fig.add_subplot(2,1,1)\n ax_per = ax_abs.twinx()\n ax_mag = fig.add_subplot(2,1,2)\n #Bmax\n #init handles and labels lists for legend\n kw['H'], kw['L'] = [], []\n _plot_comparison_repeatables(ax_abs, ax_per, ax_mag, pan, p, u,\n other_program_name, **kw)\n _plot_wires(ax_mag, xs.hot, xs.gnd, pan['emf.fields-results'][p], **kw)\n _check_und_conds([xs], [ax_mag], **kw)\n ax_abs.set_title('Absolute and Percent Difference' + t)\n ax_mag.set_ylabel(p + ' ' + u)\n ax_mag.set_title('Model Results' + t)\n ax_mag.legend(kw['H'], kw['L'], **_leg_kw)\n _color_twin_axes(ax_abs, mpl.rcParams['axes.labelcolor'], ax_per, 'firebrick')\n _format_line_axes_legends(ax_abs, ax_per, ax_mag)\n #_format_twin_axes(ax_abs, ax_per)\n _save_fig(xs.sheet + s, fig, **kw)", "def _update_limits(self):\n if self.pos_x > self.max_x:\n self.max_x = self.pos_x\n if self.pos_y > self.max_y:\n self.max_y = self.pos_y\n if self.pos_x < self.min_x:\n self.min_x = self.pos_x\n if self.pos_y < self.min_y:\n self.min_y = self.pos_y", "def parameter_compare(regressions,colors=['m','c'],upper_q=75,lower_q=25,ci_alpha = 0.2, bound_alpha = 0.0,\n labels = None,vertical_bbox_position = 1.4,width = 6,height = 5,draw_samples=True,num_samples =500):\n\n assert type(regressions) is dict\n \n # If no labels are provided, we take them from the first DynamicRegression object\n if labels is None:\n labels = regressions[regressions.keys()[0]].predictor_columns\n \n # this is the number of subplots in this figure\n n_predictors = regressions[regressions.keys()[0]].design.shape[1]\n figure, axes = plt.subplots(n_predictors,figsize = (width,height),sharex=True)\n \n for i,key in enumerate(regressions.keys()):\n \n if draw_samples:\n samples = regressions[key].ffbs.backward_sample(num_samples = num_samples)\n else:\n samples = regressions[key].ffbs.theta\n x = regressions[key].design.index\n \n for j in range(n_predictors):\n \n # Calculate and plot the confidence interval plus median\n lower = np.percentile(samples[:,j,:],lower_q,axis=1)\n upper = np.percentile(samples[:,j,:],upper_q,axis=1)\n median = np.percentile(samples[:,j,:],50,axis=1)\n axes[j].fill_between(x,upper,lower,color=colors[i],alpha = ci_alpha,\n label = '{0}%-{1}% range for {2}'.format(lower_q,upper_q,key))\n axes[j].plot(x,lower,color=colors[i],linestyle='--',alpha = bound_alpha)\n axes[j].plot(x,upper,color=colors[i],linestyle='--',alpha = bound_alpha)\n axes[j].plot(x,median,color=colors[i])\n axes[j].tick_params(direction = 'in')\n\n # a twin axis is made so we can label it easily on the right hand side of the plot\n twin = plt.twinx(axes[j])\n twin.set_ylabel(labels[j])\n \n # hide the tick labels and ticks because we only want the axis label\n twin.set_yticks([])\n \n axes[0].legend(ncol=len(list(regressions.keys())),bbox_to_anchor=(1.00, vertical_bbox_position), borderaxespad=0.,frameon=True,edgecolor='k',fancybox=False)\n return figure", "def onselect(xmin, xmax): \n # convert matplotlib float dates to a datetime format\n date_min = mdates.num2date(xmin)\n date_max = mdates.num2date(xmax) \n \n # put the xmin and xmax in datetime format to compare\n date_min = datetime.datetime(date_min.year, date_min.month, date_min.day, date_min.hour, date_min.minute) \n date_max = datetime.datetime(date_max.year, date_max.month, date_max.day, date_max.hour, date_max.minute)\n \n # find the indices that were selected \n indices = np.where((comp_data['dates'] >= date_min) & (comp_data['dates'] <= date_max))\n indices = indices[0]\n \n # set the data in ax2 plot\n 
plot2a.set_data(comp_data['dates'][indices], comp_data['observed_parameter'][indices])\n plot2b.set_data(comp_data['dates'][indices], comp_data['modeled_parameter'][indices])\n \n # calculate updated stats \n updated_r_squared_coeff = statistics.r_squared(modeled = comp_data['modeled_parameter'][indices], observed = comp_data['observed_parameter'][indices])\n updated_nash_sutcliffe_coeff = statistics.nash_sutcliffe(modeled = comp_data['modeled_parameter'][indices], observed = comp_data['observed_parameter'][indices])\n \n ax2.set_xlim(comp_data['dates'][indices][0], comp_data['dates'][indices][-1])\n param_max = np.max((comp_data['observed_parameter'][indices], comp_data['modeled_parameter'][indices]))\n param_min = np.min((comp_data['observed_parameter'][indices], comp_data['modeled_parameter'][indices]))\n ax2.set_ylim(param_min, param_max)\n \n # show text of mean, max, min values on graph; use matplotlib.patch.Patch properies and bbox\n text2 = 'R_squared = %.2f\\nNash sutcliffe = %.2f' % (updated_r_squared_coeff, updated_nash_sutcliffe_coeff)\n \n ax2_text.set_text(text2)\n \n # set the data in ax4 plot\n plot4a.set_data(comp_data['dates'][indices], comp_data['stats']['relative_error'][indices])\n plot4b.set_data(comp_data['dates'][indices], comp_data['stats']['relative_error'][indices])\n \n # calculate updated mean, max, min for stats data\n stat_mean = np.mean(comp_data['stats']['relative_error'][indices])\n stat_max = np.max(comp_data['stats']['relative_error'][indices])\n stat_min = np.min(comp_data['stats']['relative_error'][indices])\n \n ax4.set_xlim(comp_data['dates'][indices][0], comp_data['dates'][indices][-1])\n ax4.set_ylim(stat_min, stat_max)\n \n # show text of mean, max, min values on graph; use matplotlib.patch.Patch properies and bbox\n text4 = 'Mean = %.2f\\nMax = %.2f\\nMin = %.2f' % (stat_mean, stat_max, stat_min)\n \n ax4_text.set_text(text4) \n \n fig.canvas.draw()", "def act_limits(self, act_limits):\n\n # Validate input\n missing_keys = [key for key in [\"low\", \"high\"] if key not in act_limits.keys()]\n if missing_keys:\n warn_string = \"WARN: act_limits could not be set as {} not found.\".format(\n f\"keys {missing_keys} were\"\n if len(missing_keys) > 1\n else f\"key {missing_keys} was\"\n )\n print(colorize(warn_string, \"yellow\"))\n invalid_length = [\n key for key, val in act_limits.items() if len(val) != self._a_dim\n ]\n if invalid_length:\n warn_string = (\n f\"WARN: act_limits could not be set as the length of {invalid_length} \"\n + \"{}\".format(\"were\" if len(invalid_length) > 1 else \"was\")\n + f\" unequal to the dimension of the action space (dim={self._a_dim}).\"\n )\n print(colorize(warn_string, \"yellow\"))\n\n # Set action limits\n self._act_limits = {\"low\": act_limits[\"low\"], \"high\": act_limits[\"high\"]}\n self.ga.act_limits = self._act_limits", "def test_order_limits(self, fig_test, fig_ref):\n ax = fig_test.add_subplot(projection=\"ternary\")\n ax.set_ternary_lim(0.1, 0.7, 0.1, 0.6, 0.1, 0.5)\n\n ax = fig_ref.add_subplot(projection=\"ternary\")\n ax.set_ternary_lim(0.7, 0.1, 0.6, 0.1, 0.5, 0.1)", "def set_glidein_config_limits(self, limits_data):\n self.glidein_config_limits = limits_data", "def set_limits_minmax(self, zmin, zmax):\n self.camera.set_clim(zmin, zmax)\n self.autoscale = False", "def set_limits(xlim=None, ylim=None, ax=None):\n if ax is None:\n ax = plt.gca()\n if ylim is not None:\n ax.set_ylim(ylim)\n if xlim is not None:\n ax.set_xlim(xlim)", "def _set_axes_limits(ax, parameter, axis=\"x\"):\n\n lims = 
list(ax.get_xlim()) if axis == \"x\" else list(ax.get_ylim())\n\n if \"low\" in DEFAULT_BOUNDS[parameter]:\n low = DEFAULT_BOUNDS[parameter][\"low\"]\n if lims[0] < low:\n lims[0] = DEFAULT_BOUNDS[parameter][\"low\"]\n if \"high\" in DEFAULT_BOUNDS[parameter]:\n high = DEFAULT_BOUNDS[parameter][\"high\"]\n if lims[1] > high:\n lims[1] = DEFAULT_BOUNDS[parameter][\"high\"]\n\n if axis == \"x\":\n ax.set_xlim(lims)\n else:\n ax.set_ylim(lims)", "def minmin_maxmax( *args ):\n rmin = min( [ mv.min() for mv in args ] )\n rmax = max( [ mv.max() for mv in args ] )\n rmv = cdms2.createVariable( [rmin,rmax] )\n return rmv", "def main():\n \n try:\n opts, _ = getopt.getopt(sys.argv[1:]\n , \"b:c:d:e:f:i:l:m:o:r:s:t:x:y:\"\n , \"default-setting=\")\n except getopt.GetoptError, err:\n print str(err) \n usage()\n sys.exit(2)\n \n directory = None\n config_of_interest = None\n bench_of_interest = None\n time_of_interest = None\n time_to_ignore = None\n bench_expectations = {}\n rep = None # bench representation algorithm\n revision_range = '0:'\n regression_range = '0:'\n latest_revision = None\n requested_height = None\n requested_width = None\n title = 'Bench graph'\n settings = {}\n default_settings = {}\n\n def parse_range(range):\n \"\"\"Takes '<old>[:<new>]' as a string and returns (old, new).\n Any revision numbers that are dependent on the latest revision number\n will be filled in based on latest_revision.\n \"\"\"\n old, _, new = range.partition(\":\")\n old = int(old)\n if old < 0:\n old += latest_revision;\n if not new:\n new = latest_revision;\n new = int(new)\n if new < 0:\n new += latest_revision;\n return (old, new)\n\n def add_setting(settings, setting):\n \"\"\"Takes <key>[=<value>] adds {key:value} or {key:True} to settings.\"\"\"\n name, _, value = setting.partition('=')\n if not value:\n settings[name] = True\n else:\n settings[name] = value\n\n def read_expectations(expectations, filename):\n \"\"\"Reads expectations data from file and put in expectations dict.\"\"\"\n for expectation in open(filename).readlines():\n elements = expectation.strip().split(',')\n if not elements[0] or elements[0].startswith('#'):\n continue\n if len(elements) != 5:\n raise Exception(\"Invalid expectation line format: %s\" %\n expectation)\n bench_entry = elements[0] + ',' + elements[1]\n if bench_entry in expectations:\n raise Exception(\"Dup entries for bench expectation %s\" %\n bench_entry)\n # [<Bench_BmpConfig_TimeType>,<Platform-Alg>] -> (LB, UB)\n expectations[bench_entry] = (float(elements[-2]),\n float(elements[-1]))\n\n def check_expectations(lines, expectations, newest_revision, key_suffix):\n \"\"\"Check if there are benches in latest rev outside expected range.\"\"\"\n exceptions = []\n for line in lines:\n line_str = str(line)\n bench_platform_key = (line_str[ : line_str.find('_{')] + ',' +\n key_suffix)\n this_revision, this_bench_value = lines[line][-1]\n if (this_revision != newest_revision or\n bench_platform_key not in expectations):\n # Skip benches without value for latest revision.\n continue\n this_min, this_max = expectations[bench_platform_key]\n if this_bench_value < this_min or this_bench_value > this_max:\n exceptions.append('Bench %s value %s out of range [%s, %s].' 
%\n (bench_platform_key, this_bench_value, this_min, this_max))\n if exceptions:\n raise Exception('Bench values out of range:\\n' +\n '\\n'.join(exceptions))\n\n try:\n for option, value in opts:\n if option == \"-b\":\n bench_of_interest = value\n elif option == \"-c\":\n config_of_interest = value\n elif option == \"-d\":\n directory = value\n elif option == \"-e\":\n read_expectations(bench_expectations, value)\n elif option == \"-f\":\n regression_range = value\n elif option == \"-i\":\n time_to_ignore = value\n elif option == \"-l\":\n title = value\n elif option == \"-m\":\n rep = value\n elif option == \"-o\":\n redirect_stdout(value)\n elif option == \"-r\":\n revision_range = value\n elif option == \"-s\":\n add_setting(settings, value)\n elif option == \"-t\":\n time_of_interest = value\n elif option == \"-x\":\n requested_width = int(value)\n elif option == \"-y\":\n requested_height = int(value)\n elif option == \"--default-setting\":\n add_setting(default_settings, value)\n else:\n usage()\n assert False, \"unhandled option\"\n except ValueError:\n usage()\n sys.exit(2)\n\n if directory is None:\n usage()\n sys.exit(2)\n\n if time_of_interest:\n time_to_ignore = None\n\n # The title flag (-l) provided in buildbot slave is in the format\n # Bench_Performance_for_Skia_<platform>, and we want to extract <platform>\n # for use in platform_and_alg to track matching benches later. If title flag\n # is not in this format, there may be no matching benches in the file\n # provided by the expectation_file flag (-e).\n platform_and_alg = title\n if platform_and_alg.startswith(TITLE_PREAMBLE):\n platform_and_alg = (\n platform_and_alg[TITLE_PREAMBLE_LENGTH:] + '-' + rep)\n title += ' [representation: %s]' % rep\n\n latest_revision = get_latest_revision(directory)\n oldest_revision, newest_revision = parse_range(revision_range)\n oldest_regression, newest_regression = parse_range(regression_range)\n\n unfiltered_revision_data_points = parse_dir(directory\n , default_settings\n , oldest_revision\n , newest_revision\n , rep)\n\n # Filter out any data points that are utterly bogus... make sure to report\n # that we did so later!\n (allowed_revision_data_points, ignored_revision_data_points) = filter_data_points(\n unfiltered_revision_data_points)\n\n # Update oldest_revision and newest_revision based on the data we could find\n all_revision_numbers = allowed_revision_data_points.keys()\n oldest_revision = min(all_revision_numbers)\n newest_revision = max(all_revision_numbers)\n\n lines = create_lines(allowed_revision_data_points\n , settings\n , bench_of_interest\n , config_of_interest\n , time_of_interest\n , time_to_ignore)\n\n regressions = create_regressions(lines\n , oldest_regression\n , newest_regression)\n\n output_xhtml(lines, oldest_revision, newest_revision, ignored_revision_data_points,\n regressions, requested_width, requested_height, title)\n\n check_expectations(lines, bench_expectations, newest_revision,\n platform_and_alg)" ]
[ "0.5663375", "0.53814524", "0.5351617", "0.5318616", "0.5304712", "0.5269855", "0.518687", "0.5080418", "0.5079761", "0.5076662", "0.5063355", "0.50254184", "0.50113034", "0.50095356", "0.49927393", "0.49918455", "0.4990025", "0.49890125", "0.4981406", "0.4974337", "0.49381927", "0.4936551", "0.49147564", "0.4910177", "0.4900885", "0.49001008", "0.487543", "0.48712176", "0.48648912", "0.48577294" ]
0.64452285
0