query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, lengths 4–10) | document_rank (2 classes) |
---|---|---|---|---|---|---|
set the sales rep for this Account | def set_sales_rep(self, sales_rep):
self._sales_rep = sales_rep | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sales(self, sales):\n\n self._sales = sales",
"def get_sales_rep(self):\r\n return self._sales_rep",
"def sales_rep_code(self, sales_rep_code):\n if sales_rep_code is not None and len(sales_rep_code) > 10:\n raise ValueError(\"Invalid value for `sales_rep_code`, length must be less than or equal to `10`\")\n\n self._sales_rep_code = sales_rep_code",
"def sales_rep_code(self):\n return self._sales_rep_code",
"def sells(self, sells):\n\n self._sells = sells",
"def setNumPurchased(self, val):\n self.numberPurchased = val",
"def rental_offers(self, rental_offers):\n\n self._rental_offers = rental_offers",
"def UpdateInvoice(self):\n self.builder.get_object('GuiInvProd').get_buffer().set_text(\"Product:\\n\")\n self.builder.get_object('GuiInvPrice').get_buffer().set_text(\"Price:\\n\")\n self.amount = 0\n for items in self.prod_list:\n self.builder.get_object('GuiInvProd').get_buffer().insert_at_cursor(\n u\"%s\\n\" % items['name'])\n if self.is_member:\n self.builder.get_object('GuiInvPrice').get_buffer().insert_at_cursor(\n config.CURRENCY_SYMBOL + u\"%.2f\\n\" % items[2])\n self.amount = self.amount + items[2]\n else:\n self.builder.get_object('GuiInvPrice').get_buffer().insert_at_cursor(\n config.CURRENCY_SYMBOL + u\"%.2f\\n\" % items[3])\n self.amount = self.amount + items[3]\n if self.is_member:\n self.builder.get_object('GuiInvProd').get_buffer().insert_at_cursor(\n u\"\\nYou are a member.\")\n self.builder.get_object('GuiTotal').set_text(config.CURRENCY_SYMBOL + u\"%.2f\" % self.amount)\n self.builder.get_object('GuiInput').set_text(\"\")",
"def set_Srs(self, x):\n x = float(x)\n if self.Srs != x:\n self.Srs = x",
"def assign(self, prod1_name, prod2_name, times):\n try:\n self._purchased.update({PROD1: prod1_name, PROD2: prod2_name},\n {'$set': {TIMES: times}},\n True\n )\n self._purchased.update({PROD1: prod2_name, PROD2: prod1_name},\n {'$set': {TIMES: times}},\n True\n )\n print('assign: succeeded')\n return True\n except pyerrors.OperationFailure as ex:\n print(ex.value)\n except pyerrors.PyMongoError as ex:\n print(ex.value)\n print('assign: failed')\n return False",
"def save(self, *args, **kwargs):\n self.order_total = self.membership.price\n if not self.order_number:\n self.order_number = self._generate_order_number()\n super().save(*args, **kwargs)",
"def pay_rolls(self, pay_rolls):\n\n self._pay_rolls = pay_rolls",
"def test_access_sales_person(self):\n # Salesperson can see only their own sales order\n with self.assertRaises(AccessError):\n self.order.with_user(self.company_data['default_user_salesman_2']).read()\n # Now assign the SO to themselves\n self.order.write({'user_id': self.company_data['default_user_salesman_2'].id})\n self.order.with_user(self.company_data['default_user_salesman_2']).read()\n # Salesperson can change a Sales Team of SO\n self.order.with_user(self.company_data['default_user_salesman_2']).write({'team_id': self.company_data['default_sale_team'].id})\n # Salesperson can't create the SO of other salesperson\n with self.assertRaises(AccessError):\n self.env['sale.order'].with_user(self.company_data['default_user_salesman_2']).create({\n 'partner_id': self.partner_a.id,\n 'user_id': self.company_data['default_user_salesman'].id\n })\n # Salesperson can't delete the SO\n with self.assertRaises(AccessError):\n self.order.with_user(self.company_data['default_user_salesman_2']).unlink()\n # Salesperson can confirm the SO\n self.order.with_user(self.company_data['default_user_salesman_2']).action_confirm()",
"def customer(self, customer):\n\n self._customer = customer",
"def customer(self, customer):\n\n self._customer = customer",
"def sales_price(book):\n book = copy(book)\n book.price = round(book.price - book.price * .2, 2)\n return book",
"def sell(self):\n self.status = \"sold\"\n return self",
"def _setbeneficiary_customer_59A(self, val):\n self.swift_obj.BeneficiaryCustomer_A = val\n self.swift_obj.BeneficiaryCustomer_A.swiftTag = '59A'",
"def reviews(self, reviews: object):\n\n self._reviews = reviews",
"def selling_rate(self, selling_rate):\n\n self._selling_rate = selling_rate",
"def setCrystal(self,crystal,nn): \n\t\tself.crystal=crystal\n\t\t#self.nn=nn\n\t\tself.offset7(nn)\n\t\tself.offset6(self.analyser.keys().index(self.crystal))\n\t\tself.dspace = self.analyser[crystal]/nn",
"def sales_price(book):\n book = copy(book)\n book.price = round(book.price-book.price*.2, 2)\n return book",
"def product(self, product):\n self._product = product",
"def accounts(self, accounts):\n\n self._accounts = accounts",
"def repaid(self, repaid):\n\n self._repaid = repaid",
"def repaid(self, repaid):\n\n self._repaid = repaid",
"def tax(self):\n\n self.x = self.a\n self.set_zn(self.x)",
"def test_set_plan_renewal(self):\n up = baker.make(\"UserPlan\")\n o = baker.make(\"Order\", amount=10)\n up.set_plan_renewal(order=o, card_masked_number=\"1234\")\n self.assertEqual(up.recurring.amount, 10)\n self.assertEqual(up.recurring.card_masked_number, \"1234\")\n old_id = up.recurring.id\n\n # test setting new values\n up.set_plan_renewal(order=o)\n self.assertEqual(up.recurring.amount, 10)\n self.assertEqual(up.recurring.card_masked_number, None)\n self.assertEqual(up.recurring.id, old_id)",
"def set_repin(self, repin):\n self.repin = repin",
"def set_deals(self, deals):\n\n\t\tif deals is not None and not isinstance(deals, list):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: deals EXPECTED TYPE: list', None, None)\n\t\t\n\t\tself.__deals = deals\n\t\tself.__key_modified['Deals'] = 1"
] | [
"0.71684736",
"0.60573095",
"0.5699982",
"0.55540127",
"0.5448541",
"0.5195966",
"0.5159155",
"0.51239115",
"0.5077749",
"0.5022728",
"0.49740148",
"0.49725026",
"0.49634492",
"0.4955389",
"0.4955389",
"0.49489853",
"0.49296564",
"0.49223012",
"0.49128014",
"0.49119002",
"0.49087504",
"0.49039358",
"0.49009177",
"0.48987812",
"0.48929596",
"0.48929596",
"0.48822436",
"0.48681462",
"0.48633826",
"0.48615348"
] | 0.81403697 | 0 |
replaces the list of market segments for this Account | def set_market_segments(self, segments):
"""
Q1-2. Implement this method, which takes an iterable of MarketSegments
to which this Account will be attached. This method REPLACES all
MarketSegment associations, so be sure to update each
MarketSegment's internal representation of associated Accounts
appropriately.
"""
for existing_segment in self._market_segments:
# only need to remove the ones that aren't in the new list
if existing_segment not in segments:
existing_segment.remove_account(self)
for segment in segments:
# add segments, catching ValueError, which means the segment was
# already part of this account, therefore no follow-up action is
# needed
try:
self._market_segments.append(segment)
# add_ms_to_account needs to be False because we've already
# added the segment to this account
segment.add_account(self, add_ms_to_account=False)
except ValueError:
# this account was already associated to that segment,
# continue on
continue | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_from_market_segment(self, market_segment):\r\n if market_segment in self._market_segments:\r\n self._market_segments.remove(market_segment)\r\n market_segment.remove_account(self)\r\n else:\r\n # nothing to do, the market segment was already\r\n # not in the account market segments\r\n pass",
"async def _update_balances(self):\n local_asset_names = set(self._account_balances.keys())\n remote_asset_names = set()\n resp_json = await self._api_request(\"post\",\n \"terra/balances\",\n {\"address\": self._terra_wallet_address})\n for token, bal in resp_json[\"balances\"].items():\n self._account_available_balances[token] = Decimal(str(bal))\n self._account_balances[token] = Decimal(str(bal))\n remote_asset_names.add(token)\n\n asset_names_to_remove = local_asset_names.difference(remote_asset_names)\n for asset_name in asset_names_to_remove:\n del self._account_available_balances[asset_name]\n del self._account_balances[asset_name]\n\n self._in_flight_orders_snapshot = {k: copy.copy(v) for k, v in self._in_flight_orders.items()}\n self._in_flight_orders_snapshot_timestamp = self.current_timestamp",
"def __init__(self, name, accounts=None):\r\n self.name = name\r\n if accounts:\r\n self._accounts = accounts\r\n for account in accounts:\r\n # add_account_to_ms is False because we've already added the\r\n # account to this segment, don't want to do it again\r\n account.add_to_market_segment(self, add_account_to_ms=False)\r\n else:\r\n self._accounts = []\r\n check_for_existing_market_segment(self)",
"def update_my_contracts(self):\n my_shares = self.browser.get('https://www.predictit.org/Profile/GetSharesAjax')\n for market in my_shares.soup.find_all('table', class_='table table-striped table-center'):\n market_title = market.previous_element.previous_element.find('div', class_='outcome-title').find('a').get(\n 'title')\n for contract in self.my_contracts:\n if market_title == contract.market:\n market_data = [i.text.strip().replace(\n \"\\n\", \"\").replace(\" \", \"\").replace('\\r', '') for i in market.find_all('td')]\n market_data_lists = [market_data[x:x + 10] for x in range(0, len(market_data), 10)]\n cid = None\n for list_ in market_data_lists:\n parsed_market_data = [market_title]\n for string in list_:\n try:\n cid = re.search(\n pattern='#\\w+\\-(\\d+)', string=string\n ).group(1)\n string = re.search(\n pattern='(.*)\\$\\(.*\\)\\;', string=string\n ).group(1)\n except AttributeError:\n pass\n parsed_market_data.append(string)\n parsed_market_data.insert(1, cid)\n self.timestamp = datetime.datetime.now()\n self.avg_price = parsed_market_data[5]\n self.gain_loss = parsed_market_data[8]\n self.latest = parsed_market_data[9]\n self.buy = parsed_market_data[-2]\n self.sell = parsed_market_data[-1]\n else:\n continue",
"def update_list(self):\n\t\tAsset.update_list(self, uri_keys=('airport', 'list'), uri_args=self._ems_id, colsort=False)",
"def get_market_segments(self):\r\n return self._market_segments",
"def set_markets(self, markets=None):\n if markets and isinstance(markets, str):\n if markets.find(',') != -1:\n market_list = markets.split(',')\n for item in market_list:\n self.markets.append(item.strip())\n else:\n self.markets.append(markets)\n else:\n self.markets = [\"Nasdaq\", \"Dow Jones & Company\",\n \"Standard & Poor's\", \"EURO STOXX 50\",\n \"OMX Vilnius\", \"MICEX\"]",
"def __update_accounts(self):\n\t\tfor acct in self.wallet:\n\t\t\tif len(get_unspent(acct[\"address\"], self.testnet))!=0:\n\t\t\t\tacct[\"status\"] = \"in use\"\n\t\t\telse:\n\t\t\t\tspent = get_spent(acct[\"address\"], self.testnet)\n\t\t\t\tconfirm = (s[\"confirmations\"] >= 6 for s in spent)\n\t\t\t\tif len(spent) > 0 and all(confirm):\n\t\t\t\t\tacct[\"status\"] = \"used\"\n\t\t\t\telif len(spent) > 0:\n\t\t\t\t\tacct[\"status\"] = \"in use\"\n\t\tself.header[\"LAST_UPDATE_TIME\"] = str(round(time.time()))\n\t\toutput = [self.header, *self.wallet]\n\t\twith open(self.filepath, 'w+') as f:\n\t\t\tjson.dump(output, f)",
"def updatedPotentialPartnerNames(self):\n for i in range(0, len(self._potential_partner)):\n ## If the name of the _potential_partner isn't in the list\n # of plant names anymore it will be removed from the partner\n # list\n if (self._potential_partner[i]) and (self._potential_partner[i]\n not in self._plant_names):\n self._potential_partner[i] = []\n self._rgf_counter[i] = -1",
"def updatedPartnerNames(self):\n for i in range(0, len(self._partner_names)):\n partners_delete = []\n for j in range(0, len(self._partner_names[i])):\n ## If the name of the partner isn't in the list of plant\n # names anymore it will be removed from the partner list\n if self._partner_names[i][j] not in self._plant_names:\n partners_delete.append(self._partner_names[i][j])\n if partners_delete:\n for p in partners_delete:\n self._partner_names[i].remove(p)",
"def setSegments(self, segments):\n for point, segment in zip(self.points, segments):\n point.set(segment.p1)",
"def custom_reset(self):\n if self.similar:\n return [\"stocks\", \"ca\", f\"set {','.join(self.similar)}\"]\n return []",
"def replaceChain(self, newbc):\n if (Blockchain.validChain(newbc) == False):\n print(\"New Blockchain is invalid\")\n return\n elif (len(newbc.chain) < len(self.chain)):\n print(\"Not enough blocks on new Blockchain\")\n return\n \n print(\"Updating blockchain to newest version\")\n self.chain = newbc",
"def sort_currency_list_if_changed(self):\r\n currency_list = self.gox.wallet.keys()\r\n if len(currency_list) == len(self.sorted_currency_list):\r\n return\r\n\r\n # now we will bring base and quote currency to the front and sort the\r\n # the rest of the list of names by acount balance in descending order\r\n if self.gox.curr_base in currency_list:\r\n currency_list.remove(self.gox.curr_base)\r\n if self.gox.curr_quote in currency_list:\r\n currency_list.remove(self.gox.curr_quote)\r\n currency_list.sort(key=lambda name: -self.gox.wallet[name])\r\n currency_list.insert(0, self.gox.curr_quote)\r\n currency_list.insert(0, self.gox.curr_base)\r\n self.sorted_currency_list = currency_list",
"def extend(self):\n # -1 in the segments means that starts counting in the end of the list\n self.add_segment(self.segments[-1].position())",
"def _fix_chainID(self):\n\n from string import ascii_uppercase\n\n # get the current names\n data = self.get('chainID')\n natom = len(data)\n\n #get uniques\n chainID = []\n for c in data:\n if c not in chainID:\n chainID.append(c)\n\n if chainID == ['A','B']:\n return\n\n if len(chainID)>26:\n print(\"Warning more than 26 chains have been detected. This is so far not supported\")\n sys.exit()\n\n # declare the new names\n newID = [''] * natom\n\n # fill in the new names\n for ic,chain in enumerate(chainID):\n index = self.get('rowID',chainID=chain)\n for ind in index:\n newID[ind] = ascii_uppercase[ic]\n\n # update the new name\n self.update_column('chainID',newID)",
"def update_nets_with_segments(pcb_data: List[Dict[str, Any]], nets: List[Net]):\n segments = get_all_dicts_by_key(pcb_data, 'segment')\n for segment in segments:\n start: Coords = get_dict_by_key(segment['segment'], 'start')['start']\n start[1] = str(-1*float(start[1]))\n end: Coords = get_dict_by_key(segment['segment'], 'end')['end']\n end[1] = str(-1 * float(end[1]))\n width: str = get_dict_by_key(segment['segment'], 'width')['width']\n layer_data: str = get_dict_by_key(segment['segment'], 'layer')['layer']\n layers: List[Layer] = convert_to_layers(layer_data)\n new_segment: Segment = Segment(start=start, end=end, width=width, layers=layers)\n net_id: str = get_dict_by_key(segment['segment'], 'net')['net']\n for net in nets:\n if float(net.net_id) == float(net_id):\n net.segments.append(new_segment)",
"def updateCurveList(self):\n self.curvelist = []\n for i, cinfo in enumerate(self.pltw.curvelist):\n if cinfo.yvinfo.blkpos == self.blkno:\n self.curvelist.append(cinfo)\n if i > 2:\n break",
"def clear_augmented_sentences(self):\n self.augmented_sentence_list = list()",
"def _refresh_common(self):\n if self._markets_map is None or (time.time() - self._markets_age) > self.market_update_interval:\n # Index our market information by market string\n common = self.get(\"/v1/common\")\n self._currencies_map = {c['code']: c for c in common['currencies']}\n # Set some convenience keys so we can pass around just the dict\n for m in common['markets']:\n m['string'] = \"{market_currency}_{base_currency}\".format(**m)\n m['base_currency'] = self._currencies_map[m['base_currency']]\n m['market_currency'] = self._currencies_map[m['market_currency']]\n self._markets_map = {m['string']: m for m in common['markets']}\n self._markets_map.update({m['id']: m for m in common['markets']})\n self._markets_age = time.time()",
"def reset(self):\n self.book = {}\n self.book[Trade.WAY_BUY] = []\n self.book[Trade.WAY_SELL] = []",
"def betting_market_splits(self, betting_market_splits):\n\n self._betting_market_splits = betting_market_splits",
"def refresh(self):\n self._accounts = None",
"def cleanup_regions(self, timestamp, bid, ofr):\n regions = []\n\n for region in self.regions:\n if not region.can_delete(timestamp, bid, ofr):\n regions.append(region)\n\n # replace the regions list\n self.regions = regions",
"async def refresh_pairs(self):\n\n summaries = await self.api.get_market_summaries()\n if summaries is None:\n self.log.error('Could not get market summaries data.')\n return None\n\n pairs = []\n pair_count = 0\n changes, volumes, min_trade_qtys, min_trade_sizes = await self._extract_filtered_summaries(summaries)\n bases = list(config['min_base_volumes'].keys())\n\n for pair in sorted(volumes, key=volumes.get, reverse=True):\n if await Market.apply_pair_prefer_filter(pair, bases, volumes.keys()):\n continue\n if await self._handle_greylisted(pair):\n continue\n\n pairs.append(pair)\n self.log.debug('Added pair {}: volume {}, change {}.', pair, volumes[pair], changes[pair], verbosity=1)\n\n pair_count += 1\n if config['max_pairs'] and pair_count >= config['max_pairs']:\n break\n\n if config['app_node_index'] is not None:\n pair_splits = list(utils.split(pairs, config['app_node_max']))\n self.pairs = pair_splits[config['app_node_index']] # pylint: disable=E1126\n else:\n self.pairs = pairs\n\n self.extra_base_pairs = [pair for pair in config['base_pairs'] if pair not in pairs]\n self.min_trade_qtys = min_trade_qtys\n self.min_trade_sizes = min_trade_sizes",
"def _group_by_bank(self):\n rslt = {}\n for company in self:\n if not company.indexa_currency_provider:\n continue\n\n if rslt.get(company.indexa_currency_provider):\n rslt[company.indexa_currency_provider] += company\n else:\n rslt[company.indexa_currency_provider] = company\n return rslt",
"def add_to_market_segment(self, market_segment, add_account_to_ms=True):\r\n if market_segment in self._market_segments:\r\n raise ValueError(\"{name} already part of {ms_name}\"\r\n .format(name=self.name,\r\n ms_name=market_segment.name))\r\n self._market_segments.append(market_segment)\r\n if add_account_to_ms:\r\n # add_ms_to_account needs to be False since this account already\r\n # knows about the market segment\r\n market_segment.add_account(self, add_ms_to_account=False)",
"def __init__(self, name, sales_rep=None, market_segments=None):\r\n self.name = name\r\n self._sales_rep = sales_rep\r\n self._children = []\r\n if market_segments:\r\n self._market_segments = market_segments\r\n for market_segment in market_segments:\r\n # add_ms_to_account needs to be False so we don't try to add\r\n # the market segment to the account again\r\n market_segment.add_account(self, add_ms_to_account=False)\r\n else:\r\n self._market_segments = []",
"def remove_redundant_regions(self):\r\n self.flanking_region.attributes.id = self._flanking_region.attributes.id\r\n self.flanking_region.attributes.parent = ''\r\n for feature in self.pcr_product:\r\n feature.attributes.id = feature.attributes.parent\r\n feature.attributes.parent = ''\r\n self._flanking_region = None\r\n self.gt_seq_region = []\r\n if self.pcr_product:\r\n snp_parent = self.pcr_product[0].attributes.id\r\n else:\r\n snp_parent = self.flanking_region.attributes.id\r\n for snp in self.snp:\r\n snp.attributes.parent = snp_parent",
"def update_list(self):\n Asset.update_list(self, uri_keys=('ems_sys', 'list'))"
] | [
"0.5418846",
"0.5301822",
"0.4919815",
"0.48922998",
"0.48700333",
"0.4845712",
"0.48402044",
"0.479326",
"0.47908777",
"0.4755174",
"0.47279075",
"0.47138822",
"0.46888635",
"0.46419635",
"0.46125323",
"0.45995706",
"0.45915312",
"0.4556438",
"0.45542613",
"0.45532054",
"0.45474678",
"0.4540162",
"0.45293343",
"0.4503405",
"0.45026875",
"0.44925007",
"0.44907868",
"0.44567686",
"0.44550842",
"0.44347394"
] | 0.7383106 | 0 |
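The `set_market_segments` record above, together with the `add_to_market_segment` and `remove_from_market_segment` records that follow, describes a two-way Account↔MarketSegment association. Below is a minimal, self-contained sketch of that pattern; the class bodies are simplified stand-ins (the corpus constructors also take sales reps and initial account/segment lists), so treat the constructor signatures and the membership checks here as assumptions rather than the corpus implementation.

```python
# Minimal sketch of the bidirectional Account <-> MarketSegment association
# that set_market_segments keeps in sync. Simplified stand-in classes: the
# corpus versions carry extra constructor arguments and registry checks.

class MarketSegment:
    def __init__(self, name):
        self.name = name
        self._accounts = []

    def add_account(self, account, add_ms_to_account=True):
        if account in self._accounts:
            raise ValueError(f"{account.name} already associated to {self.name}")
        self._accounts.append(account)
        if add_ms_to_account:
            account.add_to_market_segment(self, add_account_to_ms=False)

    def remove_account(self, account):
        if account in self._accounts:
            self._accounts.remove(account)


class Account:
    def __init__(self, name):
        self.name = name
        self._market_segments = []

    def add_to_market_segment(self, market_segment, add_account_to_ms=True):
        if market_segment in self._market_segments:
            raise ValueError(f"{self.name} already part of {market_segment.name}")
        self._market_segments.append(market_segment)
        if add_account_to_ms:
            market_segment.add_account(self, add_ms_to_account=False)

    def set_market_segments(self, segments):
        # Replace all associations, updating both sides of the relationship.
        for existing in list(self._market_segments):
            if existing not in segments:
                existing.remove_account(self)
                self._market_segments.remove(existing)
        for segment in segments:
            if segment not in self._market_segments:
                self._market_segments.append(segment)
                segment.add_account(self, add_ms_to_account=False)


aerospace = MarketSegment("Aerospace")
defense = MarketSegment("Defense")
acct = Account("GE")
acct.set_market_segments([aerospace])
acct.set_market_segments([defense])   # Aerospace link dropped on both sides

# Peeking at the internals only to verify that both sides stayed in sync.
assert acct not in aerospace._accounts
assert acct in defense._accounts
assert aerospace not in acct._market_segments
```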
add a market segment to this account | def add_to_market_segment(self, market_segment, add_account_to_ms=True):
if market_segment in self._market_segments:
raise ValueError("{name} already part of {ms_name}"
.format(name=self.name,
ms_name=market_segment.name))
self._market_segments.append(market_segment)
if add_account_to_ms:
# add_ms_to_account needs to be False since this account already
# knows about the market segment
market_segment.add_account(self, add_ms_to_account=False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_market_segments(self, segments):\r\n \"\"\"\r\n Q1-2. Implement this method, which takes an iterable of MarketSegments\r\n to which this Account will be attached. This method REPLACES all\r\n MarketSegment associations, so be sure to update each\r\n MarketSegment's internal representation of associated Accounts\r\n appropriately.\r\n \"\"\"\r\n for existing_segment in self._market_segments:\r\n # only need to remove the ones that aren't in the new list\r\n if existing_segment not in segments:\r\n existing_segment.remove_account(self)\r\n for segment in segments:\r\n # add segments, catch ValueErrors which means the segment was\r\n # already part of this account, therefor no followup action is\r\n # needed\r\n try:\r\n self._market_segments.append(segment)\r\n # add_ms_to_account needs to be False because we've already\r\n # added the segment to this account\r\n segment.add_account(self, add_ms_to_account=False)\r\n except ValueError:\r\n # this account was already associated to that segment,\r\n # continue on\r\n continue",
"def add_segment(self, segment):\n self.segments.append(segment)",
"def add_market_street(market, start):\r\n market.append(make_market_street(start))",
"def addCurveSegment(self, *args):\n return _libsbml.Curve_addCurveSegment(self, *args)",
"def add_segment(self):\n copy = self.segments[-1]\n segment = Segment(copy.radius, copy.position.copy(),\n copy.heading_vector.copy())\n self.segments.append(segment)",
"def add_segment(self):\n last_seg = c.coords(self.segments[0].instance)\n x = last_seg[2] - SEG_SIZE\n y = last_seg[3] - SEG_SIZE\n self.segments.insert(0, Segment(x, y))",
"def do_add(self, line):\n if self.bootstrap() != 0:\n return self.return_code(1, True)\n\n # Get logs\n logs_start = self.card.get_logs()\n logs_start_max_idx = logs_start.max_idx\n logs_start_max_id = logs_start.max_id\n\n # Show last N logs\n logs_to_show = [x for x in logs_start.lines if x.used]\n if len(logs_to_show) > self.last_n_logs and logs_start_max_idx is not None:\n logs_to_show = logs_start.lines[logs_start_max_idx - self.last_n_logs + 1: logs_start_max_idx+1]\n\n if len(logs_to_show) > 0:\n print('\\nLast %d log lines: ' % len(logs_to_show))\n for msg in logs_to_show:\n self.dump_log_line(msg)\n else:\n print('\\nThere are no logs on the card')\n\n # Show all shares\n shares_start = self.card.get_shares()\n free_shares = []\n for idx, share in enumerate(shares_start):\n if not share.used and idx != 3:\n free_shares.append(idx+1)\n # self.dump_share(idx, share)\n print('\\n')\n\n if len(free_shares) == 0:\n print(self.t.red('Cannot add a new share, all are set'))\n return self.return_code(1, True)\n\n # Add a new share\n try:\n code, res, sw = self.add_share(free_shares=free_shares)\n if code == 0:\n print(self.t.green('New share added successfully!'))\n elif code == 2:\n print(self.t.yellow('Not adding the key share'))\n else:\n print(self.t.red('Key share was not added'))\n except Exception as e:\n logger.error('Exception: %s' % e)\n\n # Dump shares again\n shares_end = self.card.get_shares()\n for idx, share in enumerate(shares_end):\n self.dump_share(idx, share)\n\n\n with self.t.location():\n with self.t.cbreak():\n sys.stdout.write('\\n\\n Press any key to continue ...')\n sys.stdout.flush()\n self.t.inkey()\n self.t.clear_bol()\n self.t.clear_eol()\n # Logs since last dump\n logs_end = self.card.get_logs()\n if len(logs_end.lines) > 0 and logs_end.max_idx is not None:\n logs_end_max_id = logs_end.max_id\n print('\\nNew log entries. Latest log entry: %X' % logs_end_max_id)\n for msg in logs_end.lines:\n if not msg.used:\n continue\n if logs_start_max_id is not None and logs_start_max_id > 0 and msg.id <= logs_start_max_id:\n continue\n self.dump_log_line(msg)\n else:\n print('There are no logs on the card')\n\n return self.return_code(0)",
"def add_merchant_to_market(market, merchant):\r\n poss = get_possible_addition(market)\r\n if merchant in poss:\r\n add_merchant(get_active_market_street(market), merchant)\r\n else:\r\n add_market_street(market, merchant)",
"def add_segment(self, segment):\n assert segment is None or isinstance(segment, Segment)\n\n self.segment = segment\n if segment is None:\n return\n\n ## reset Strand description with the description derived\n ## from the new Segment\n try:\n frag1 = segment[0]\n frag2 = segment[-1]\n except IndexError:\n return\n\n self.chain_id1 = frag1.chain_id\n self.fragment_id1 = frag1.fragment_id\n self.res_name1 = frag1.res_name\n\n self.chain_id2 = frag2.chain_id\n self.fragment_id2 = frag2.fragment_id\n self.res_name2 = frag2.res_name",
"def add_segment(self, curve, start_y=0, end_y=0):\n palette = \"dark\" if (len(self.segments) / s.RUMBLE_LENGTH) % 2 == 0 else \"light\"\n segment = seg.Segment(palette, len(self.segments), curve, start_y, end_y)\n\n self.segments.append(segment)",
"def add_deposit(self, tx_id: str, insert_time: int, amount: float, asset: str, auto_commit=True):\n row = (tx_id, insert_time, asset, amount)\n self.add_row(tables.SPOT_DEPOSIT_TABLE, row, auto_commit)",
"def seg_add(self, remote_path, corpus_id, segments, storage_id=None):\n\n client, remote_path = self._get_storage(remote_path, storage_id=storage_id)\n return client.seg_add(corpus_id, segments)",
"def add_withdraw(self, withdraw_id: str, tx_id: str, apply_time: int, asset: str, amount: float, fee: float,\n auto_commit: bool = True):\n row = (withdraw_id, tx_id, apply_time, asset, amount, fee)\n self.add_row(tables.SPOT_WITHDRAW_TABLE, row, auto_commit=auto_commit)",
"def add_account(self, account, add_ms_to_account=True):\r\n # check if name already exists and throw ValueError if it does\r\n # it doesn't make sense to add an account twice -- this could be\r\n # refactored to use a set instead\r\n # check for accounts by name per Q2 bonus below\r\n if account.name in [account.name for account in self._accounts]:\r\n raise ValueError(\"{} already associated to {}\".format(account.name,\r\n self.name))\r\n self._accounts.append(account)\r\n if add_ms_to_account:\r\n # add_account_to_ms is False because we've already added the\r\n # account to this segment, don't want to do it again\r\n account.add_to_market_segment(self, add_account_to_ms=False)",
"def addSegment(self, p1, p2, a, b):\n\n self.segments.append((p1,p2,a,b))",
"def remove_from_market_segment(self, market_segment):\r\n if market_segment in self._market_segments:\r\n self._market_segments.remove(market_segment)\r\n market_segment.remove_account(self)\r\n else:\r\n # nothing to do, the market segment was already\r\n # not in the account market segments\r\n pass",
"def __init__(self, name, accounts=None):\r\n self.name = name\r\n if accounts:\r\n self._accounts = accounts\r\n for account in accounts:\r\n # add_account_to_ms is False because we've already added the\r\n # account to this segment, don't want to do it again\r\n account.add_to_market_segment(self, add_account_to_ms=False)\r\n else:\r\n self._accounts = []\r\n check_for_existing_market_segment(self)",
"def add(self, offer):\n other_offer = self.get(offer.get_price(), offer.get_way())\n if other_offer:\n other_offer.add_quote_amount(offer.get_quote_amount())\n other_offer.add_base_amount(offer.get_base_amount())\n return\n self.book[offer.get_way()].append(offer)\n self.book[offer.get_way()] = sorted(self.book[offer.get_way()], key=lambda entry: entry.get_price(),\n reverse=(offer.get_way() == Trade.WAY_BUY))",
"def add_segment(self, segment):\n assert segment is None or isinstance(segment, Segment)\n self.segment = segment\n\n ## just return if the segment is None\n if segment is None:\n return\n\n ## reset AlphaHelix description with the description derived\n ## from the new Segment\n try:\n frag1 = segment[0]\n frag2 = segment[-1]\n except IndexError:\n return\n\n self.chain_id1 = frag1.chain_id\n self.fragment_id1 = frag1.fragment_id\n self.res_name1 = frag1.res_name\n\n self.chain_id2 = frag2.chain_id\n self.fragment_id2 = frag2.fragment_id\n self.res_name2 = frag2.res_name\n\n self.helix_length = len(segment)",
"def add_instrument(self,par,T,coup,price,compounding_freq=2):\r\n self.instruments[T]=(par,coup,price,compounding_freq)",
"def add_dividend(self, div_id: int, div_time: int, asset: str, amount: float, auto_commit: bool = True):\n row = (div_id, div_time, asset, amount)\n self.add_row(tables.SPOT_DIVIDEND_TABLE, row, auto_commit=auto_commit)",
"def new_segment(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n proxy = kwargs['proxy']\n if kwargs['objectname'] is None or kwargs['gateway'] is None:\n print(\"Please specify a name for the segment, and the gateway/network.\")\n sys.exit(1)\n if kwargs['segment_type'] == \"flexible\" and kwargs['tier1_id'] is None:\n print(\"Please specify either the segment type as 'fixed' (-st fixed) OR segment type as 'flexible' as well as the ID of the Tier1 for connectivity (-t1id TIER1ID). Use pyVMC -h for additional options.\")\n sys.exit(1)\n if kwargs['segment_type'] == \"fixed\" and kwargs['tier1_id'] is not None:\n print(\"Invalid configuration - 'fixed' segments may only be connected to the default CGW. To attach to a customer Tier1, please create a 'flexible' segment.\")\n sys.exit(1)\n rt_set = [None, \"ROUTED\", \"DISCONNECTED\"]\n if kwargs['segment_type'] == \"fixed\" and kwargs['routing_type'] not in rt_set:\n print(\"Invalid configuration. For a 'fixed' segment, the routing type must be left blank or set explicitly to 'ROUTED' or 'DISCONNECTED.'\")\n sys.exit(1)\n\n segment_name = kwargs[\"objectname\"]\n gateway = kwargs['gateway']\n\n # Search for segment to determine if it already exists\n segment=search_nsx_json(proxy, sessiontoken, \"Segment\", segment_name)\n if len(segment['results']) > 0:\n print(\"The segment already appears to exist.\")\n sys.exit(1)\n\n\n # Establish baseline json payload\n json_data = {\n \"display_name\":segment_name,\n \"id\":segment_name,\n \"advanced_config\":{\"connectivity\":\"ON\"},\n \"subnets\":[\n {\n \"gateway_address\": gateway\n }\n ]\n }\n #set segment type as either \"fixed\" or \"flexible\"\n segment_type = kwargs['segment_type']\n tier1_id = kwargs['tier1_id']\n\n if segment_type == \"fixed\":\n json_data[\"connectivity_path\"] = \"/infra/tier-1s/cgw\"\n if kwargs['routing_type'] == \"DISCONNECTED\":\n json_data[\"advanced_config\"][\"connectivity\"] = \"OFF\"\n else:\n json_data[\"advanced_config\"][\"connectivity\"] = \"ON\"\n elif segment_type == \"flexible\" and tier1_id is not None:\n json_data[\"connectivity_path\"] = f'/infra/tier-1s/{tier1_id}'\n else:\n print(\"Please specify either the segment type as 'fixed' (-st fixed) OR segment type as 'flexible' as well as the ID of the Tier1 for connectivity (-t1id TIER1ID). Use pyVMC -h for additional options.\")\n if kwargs['dhcp_range'] is not None:\n json_data[\"subnets\"][0][\"dhcp_ranges\"] = [f'{kwargs[\"dhcp_range\"]}']\n if kwargs['domain_name'] is not None:\n json_data[\"domain_name\"] = kwargs[\"domain_name\"]\n\n print(json.dumps(json_data, indent = 2))\n\n status = new_segment_json(proxy, sessiontoken, segment_name, segment_type, json_data)\n if status == 200:\n print(f'The following network has been created: {segment_name}')\n vars = {\"proxy\":proxy, \"sessiontoken\":sessiontoken, \"object_type\":\"Segment\", \"object_id\":segment_name}\n search_nsx(**vars)\n else:\n print(\"The segment was not created. Please check your syntax and try again.\")\n sys.exit(1)",
"def add_merchant(street, merchant):\r\n street.append(merchant)",
"def add_fund(self):\n pass",
"def add_spot_dust(self, tran_id: str, time: int, asset: str, asset_amount: float, bnb_amount: float, bnb_fee: float,\n auto_commit: bool = True):\n\n row = (tran_id, time, asset, asset_amount, bnb_amount, bnb_fee)\n self.add_row(tables.SPOT_DUST_TABLE, row, auto_commit=auto_commit)",
"def api_asset_add(char_code: str, name: str, capital: str, interest: str):\n capital, interest = float(capital), float(interest)\n asset = Asset(char_code=char_code, name=name, capital=capital, interest=interest)\n\n if app.bank.contains(asset):\n return f\"Asset '{name}' already exists\", 403\n\n app.bank.add(asset)\n return f\"Asset '{name}' was successfully added\", 200",
"def add_station(self, station):\n self.__stations.append(station)",
"def add_price(self, price, date, shares):\n\t\tvalue = price * shares\n\t\tself.price_list.append(value)\n\t\tself.date_priced.append(date)",
"def insert_orderbook(self, instrument, market_place, market_segment, market_capability, tick_size_list, \\\n round_lot, day_counting, orderbook_name, commit_orderbook, tiering_level, orderbook_curr=None):\n logger.DLOG(\"Insert orderbook...\") \n try:\n new_ob_obj = acm.FOrderBook()\n new_ob_obj.Instrument = instrument\n if orderbook_curr:\n new_ob_obj.Currency = orderbook_curr\n else:\n new_ob_obj.Currency = instrument.Currency()\n \n new_ob_obj.Quotation = instrument.Quotation() \n new_ob_obj.TickSizeList = self.get_tick_size_list(tick_size_list, market_capability)\n new_ob_obj.RoundLot = self.get_round_lot(instrument, round_lot)\n new_ob_obj.DayCounting = day_counting\n new_ob_obj.MarketPlace = market_place\n new_ob_obj.PhysicalMarketSegment(market_segment)\n new_ob_obj.Cid = 504\n new_ob_obj.QuoteFactor = orderbook_quotefactor\n new_ob_obj.TradingStatus = orderbook_tradingstatus\n new_ob_obj.ValueDate = orderbook_valuedate\n new_ob_obj.MigrationStatus = orderbook_migrationstatus\n new_ob_obj.FeedName = orderbook_feedname\n new_ob_obj.ExternalId = orderbook_name\n new_ob_obj.ExternalType = market_capability\n if str(tiering_level):\n new_ob_obj.ExternalType = tiering_level\n \n if commit_orderbook:\n new_ob_obj.Commit()\n group_map = self.get_list_leaf(new_ob_obj, market_segment)\n new_ob_obj.GroupMaps().Add(group_map) \n new_ob_obj.GroupMaps().Commit()\n \n logger.LOG(\"**Successfully** commited orderbook <%s> for Instrument <%s>\"%(orderbook_name, instrument.Name()))\n except Exception as e:\n logger.ELOG(\"**Cannot commit** orderbook for Instrument <%s>\"%instrument.Name())\n logger.ELOG(\"**Error**:%s\"%str(e), exc_info=1)",
"def add(self):\n d = {}\n\n self.ok_signal.emit(d, 'account')"
] | [
"0.65802324",
"0.6395969",
"0.61662567",
"0.6164237",
"0.5980403",
"0.5974769",
"0.5835148",
"0.5825939",
"0.5788526",
"0.5769048",
"0.5765961",
"0.5738792",
"0.572644",
"0.57032543",
"0.5656395",
"0.5605565",
"0.54737425",
"0.5466607",
"0.543837",
"0.5411004",
"0.5402753",
"0.5354625",
"0.5349499",
"0.5269454",
"0.52681553",
"0.5264006",
"0.52173644",
"0.5191599",
"0.5190039",
"0.517709"
] | 0.73550326 | 0 |
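A small, self-contained illustration of the duplicate guard above: attaching the same market segment twice raises `ValueError`, which is the error path the `set_market_segments` record catches. The stubs below keep only what the error path needs and are illustrative, not the full corpus classes.

```python
# Focused stub showing the duplicate guard in add_to_market_segment.
# Only the pieces needed for the error path are kept; the corpus classes
# also synchronize the MarketSegment side of the relationship.

class MarketSegment:
    def __init__(self, name):
        self.name = name


class Account:
    def __init__(self, name):
        self.name = name
        self._market_segments = []

    def add_to_market_segment(self, market_segment, add_account_to_ms=True):
        if market_segment in self._market_segments:
            raise ValueError("{name} already part of {ms_name}"
                             .format(name=self.name,
                                     ms_name=market_segment.name))
        self._market_segments.append(market_segment)


acct = Account("GE")
aerospace = MarketSegment("Aerospace")
acct.add_to_market_segment(aerospace, add_account_to_ms=False)
try:
    acct.add_to_market_segment(aerospace, add_account_to_ms=False)
except ValueError as err:
    print(err)   # GE already part of Aerospace
```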
remove the market segment from this account | def remove_from_market_segment(self, market_segment):
if market_segment in self._market_segments:
self._market_segments.remove(market_segment)
market_segment.remove_account(self)
else:
# nothing to do, the market segment was already
# not in the account market segments
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_account(self, account, remove_ms_from_account=True):\r\n # check for accounts by name per Q2 bonus below\r\n if account.name in [account.name for account in self._accounts]:\r\n self._accounts.remove(account)\r\n if remove_ms_from_account:\r\n account.remove_from_market_segment(self)\r\n else:\r\n # nothing to do, the account wasn't part of the market\r\n # segment so we're done\r\n pass",
"def do_erase(self, line):\n if self.bootstrap() != 0:\n return self.return_code(1, True)\n\n # Warning\n print('')\n print(self.t.underline_red('! WARNING !'))\n print('This is a destructive operation, all shares will be unrecoverably deleted from the card')\n if not self.ask_proceed('Do you really want to remove all key shares? (y/n): ', support_non_interactive=True):\n return self.return_code(0)\n\n # Erase\n resp, sw = self.card.send_erase_shares()\n if sw != 0x9000:\n logger.error('Could not erase all shares, code: %04X' % sw)\n return self.return_code(1)\n\n print('All shares erased successfully')\n return self.return_code(0)",
"def remove(ctx, schain_name):\n skale = ctx.obj['skale']\n skale.manager.delete_schain(schain_name, wait_for=True,\n gas_price=4500000000)\n print(f'sChain {schain_name} removed!')",
"def remove(self):\n self._switch.odlclient._request(self._path, method=\"delete\")",
"def remove_fragment(self, fragment):\n Segment.remove_fragment(self, fragment)\n fragment.chain = None",
"def remove_segment(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n proxy = kwargs['proxy']\n segment_name = kwargs[\"objectname\"]\n segment=search_nsx_json(proxy, sessiontoken, \"Segment\", segment_name)\n if len(segment['results']) > 0:\n segment_path = segment['results'][0]['path']\n status = remove_segment_json(proxy, sessiontoken, segment_path)\n if status == 200:\n print(f'The following network has been removed: {segment_name}')\n else:\n print(\"The segment was not removed. Please check your syntax and try again.\")\n sys.exit(1)\n else:\n print(\"The segment does not exist.\")",
"def remove_chain(self, chain):\n assert isinstance(chain, Chain)\n self.chain_list.remove(chain)\n del self.chain_dict[chain.chain_id]\n chain.model = None",
"def delete_account(self):\n Credential.account_list.remove(self)",
"def remove_chain(self, chain):\n assert isinstance(chain, Chain)\n self.model_dict[chain.model_id].remove_chain(chain)",
"def remove_card(self, slot):\n del self._starting_card[slot]",
"def removeChain(self, mychain):\n\n\t\tichain = self.getChain(mychain)\t\n\t\tif ichain == None:\n\t\t\treturn\n\n\t\tself.chain.remove(ichain)",
"def delete_segment(self, n):\n self.get_segment(n).delete()",
"def delete_account(self, account):\n \n pass",
"def remove_account(self, account_name):\n del self._accounts[account_name]",
"def remove_segment(self):\n selected_segment = \\\n self.controller.shared_data.obj_track.selected_segment_idx\n\n if len(selected_segment) == 1:\n segment_idx = selected_segment[0]\n\n msg = 'Do you want to remove the selected segment?'\n proceed = tk.messagebox.askyesno(title='Remove segment',\n message=msg)\n\n if proceed:\n size = self.controller.shared_data.obj_track.remove_segment(\n segment_idx)\n\n if size > 0:\n plots.update_plots(\n self.controller.shared_data.obj_track,\n self.controller.shared_data.ax_track,\n self.controller.shared_data.ax_ele,\n self.controller.shared_data.ax_track_info,\n canvas=self.controller.shared_data.canvas)\n\n else:\n plots.initial_plots(\n self.controller.shared_data.ax_track,\n self.controller.shared_data.ax_ele,\n self.controller.shared_data.ax_track_info)\n\n tk.messagebox.showwarning(\n title='No segment',\n message='Last segment has been removed.')\n\n self.controller.shared_data.canvas.draw()\n\n elif len(selected_segment) > 1:\n messagebox.showerror('Warning',\n 'More than one segment is selected')\n elif len(selected_segment) == 0:\n messagebox.showerror('Warning',\n 'No segment is selected')",
"def destroy(self):\n\t\tos.remove(self.account_file)",
"def remove(self):\n traci.vehicle.remove(self.id)",
"def remove(self):\n traci.vehicle.remove(self.id)",
"def remove():",
"def delete_segment(self, name: str) -> None:\n self._status.check_authority_for_draft()\n\n delete_data: Dict[str, Any] = {\"segmentName\": name}\n delete_data.update(self._status.get_status_info())\n\n self._client.open_api_do(\"DELETE\", \"segments\", self.dataset_id, json=delete_data)",
"def remove_atom(self, atom):\n assert isinstance(atom, Atom)\n assert atom.model_id == self.model_id \n self.chain_dict[atom.chain_id].remove_atom(atom)",
"def eraseLastSeg(self):\n self.can.delete(self.segs.pop().getGraphicObject())",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()"
] | [
"0.6916367",
"0.6139307",
"0.6004958",
"0.5911972",
"0.58914524",
"0.5845003",
"0.58389115",
"0.57946837",
"0.5777495",
"0.57668144",
"0.57654405",
"0.5762761",
"0.5756127",
"0.57521224",
"0.57489353",
"0.5699705",
"0.56938523",
"0.56938523",
"0.5637024",
"0.5632075",
"0.5620863",
"0.5600481",
"0.5586753",
"0.5586753",
"0.5586753",
"0.5586753",
"0.5586753",
"0.5586753",
"0.5586753",
"0.5586753"
] | 0.8205841 | 0 |
helper function that returns market segments in a list | def get_market_segments(self):
return self._market_segments | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def segments(self):\n return (self._subset((i,i+1)) for i in range(len(self)-1))",
"def getSegments(self) -> List[int]:\n ...",
"def segment(raw_sents:List[str], segment=\"jieba\") -> List[List[str]]:\n\t# segment_list = [\"pkuseg\", \"jieba\"]\n\t# if segment.strip() not in segment_list:\n\t# \treturn []\n\n\tseg_sents = []\n\tif segment == \"pkuseg\":\n\t\timport pkuseg\n\n\t\t## init the seg\n\t\tseg = pkuseg.pkuseg()\n\n\t\t## segment the sentence by pkuseg\n\t\tfor sent in raw_sents:\n\t\t\tres_seg = seg.cut(sent)\n\t\t\tseg_sents.append(res_seg)\n\t\t# print(seg_sents)\n\telif segment == \"jieba\":\n\t\timport jieba\n\t\tfor sent in raw_sents:\n\t\t\tres_seg = jieba.lcut(sent)\n\t\t\tsentence = \" \".join(res_seg)\n\t\t\tpattern4 = re.compile(\" +\", re.S)\n\t\t\tsentence = pattern4.sub(\" \", sentence)\n\t\t\tres_seg = sentence.split(\" \")\n\t\t\tseg_sents.append(res_seg)\n\n\treturn seg_sents",
"def get_segments(self, sets=None):\n if sets is None:\n if self.sets is not None:\n sets = self.sets\n else:\n raise ValueError(\"sets and self.sets attributes are None, \\\n you need either to pass an origin argument to get_segments or \\\n to use get_filtration method before\")\n segments = []\n for s in sets:\n if self.epsilon <= s.getRelevance():\n t, a, b = s.getPosition()\n for i, seg in enumerate(segments):\n tp, ap, bp = seg\n if t >= tp and bp > a:\n bp = a\n elif t <= tp and ap < b:\n ap = b\n segments[i] = (tp, ap, bp)\n segments.append((t, a, b))\n return segments",
"def make_market_street(start):\r\n return [start]",
"def get_kmers(seq,k=2):\n pair_list = []\n for i in range(0,len(seq),k):\n pair_list.append(str(seq)[i:i+k])\n return pair_list",
"def label_to_segments(utters, labels):\n segment_list = []\n for i, utterence in enumerate(utters):\n segments = []\n seg = \"\"\n for j, char in enumerate(utterence):\n if labels[i][j] >= 0.5:\n if len(seg) > 0:\n segments.append(seg)\n seg = \"\"\n seg = seg + char\n else:\n seg = seg + char\n if j == (len(utterence) - 1):\n segments.append(seg)\n segment_list.append(segments)\n return segment_list",
"def slice(list, point):\n index = list.index(point)\n slices = []\n \n slices.append(list[:index])\n slices.append(list[index + 1:])\n \n return slices",
"def segments(seg_type=None):\n\n for index in xrange(idaapi.get_segm_qty()):\n seg = Segment(index=index)\n if (seg_type is None) or (seg.type == seg_type):\n yield Segment(index=index)",
"def get_kmers(seq, k):\n\n return [seq[i:i+k] for i in range(len(seq)-k+1)]",
"def getSegments(self):\n l = len(self.points)\n return [Segment(self.points[i % l], self.points[(i + 1) % l], \\\n color=self.side_color, width=self.side_width) for i in range(l)]",
"def strech_list(sector, subgraphs_):\n\n strechs=[]\n subs=conv_sub(subgraphs_)\n for j in range(len(subs)):\n si=len(set(sector)&set(subs[j]))-subgraphs_[j].NLoopSub()\n strechs+=[1000+j]*si\n return list(set(strechs))",
"def make_market(start):\r\n market = [];\r\n market.append(make_market_street(start));\r\n return market;",
"async def fetch_markets(self, params={}):\n spotMarketsInfo = await self.publicGetConfPubInfoPair(params)\n futuresMarketsInfo = await self.publicGetConfPubInfoPairFutures(params)\n spotMarketsInfo = self.safe_value(spotMarketsInfo, 0, [])\n futuresMarketsInfo = self.safe_value(futuresMarketsInfo, 0, [])\n markets = self.array_concat(spotMarketsInfo, futuresMarketsInfo)\n marginIds = await self.publicGetConfPubListPairMargin(params)\n marginIds = self.safe_value(marginIds, 0, [])\n #\n # [\n # \"1INCH:USD\",\n # [\n # null,\n # null,\n # null,\n # \"2.0\",\n # \"100000.0\",\n # null,\n # null,\n # null,\n # null,\n # null,\n # null,\n # null\n # ]\n # ]\n #\n result = []\n for i in range(0, len(markets)):\n pair = markets[i]\n id = self.safe_string_upper(pair, 0)\n market = self.safe_value(pair, 1, {})\n spot = True\n if id.find('F0') >= 0:\n spot = False\n swap = not spot\n baseId = None\n quoteId = None\n if id.find(':') >= 0:\n parts = id.split(':')\n baseId = parts[0]\n quoteId = parts[1]\n else:\n baseId = id[0:3]\n quoteId = id[3:6]\n base = self.safe_currency_code(baseId)\n quote = self.safe_currency_code(quoteId)\n splitBase = base.split('F0')\n splitQuote = quote.split('F0')\n base = self.safe_string(splitBase, 0)\n quote = self.safe_string(splitQuote, 0)\n symbol = base + '/' + quote\n baseId = self.get_currency_id(baseId)\n quoteId = self.get_currency_id(quoteId)\n settle = None\n settleId = None\n if swap:\n settle = quote\n settleId = quote\n symbol = symbol + ':' + settle\n minOrderSizeString = self.safe_string(market, 3)\n maxOrderSizeString = self.safe_string(market, 4)\n margin = False\n if spot and self.in_array(id, marginIds):\n margin = True\n result.append({\n 'id': 't' + id,\n 'symbol': symbol,\n 'base': base,\n 'quote': quote,\n 'settle': settle,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'settleId': settleId,\n 'type': 'spot' if spot else 'swap',\n 'spot': spot,\n 'margin': margin,\n 'swap': swap,\n 'future': False,\n 'option': False,\n 'active': True,\n 'contract': swap,\n 'linear': True if swap else None,\n 'inverse': False if swap else None,\n 'contractSize': self.parse_number('1') if swap else None,\n 'expiry': None,\n 'expiryDatetime': None,\n 'strike': None,\n 'optionType': None,\n 'precision': {\n 'amount': int('8'), # https://github.com/ccxt/ccxt/issues/7310\n 'price': int('5'),\n },\n 'limits': {\n 'leverage': {\n 'min': None,\n 'max': None,\n },\n 'amount': {\n 'min': self.parse_number(minOrderSizeString),\n 'max': self.parse_number(maxOrderSizeString),\n },\n 'price': {\n 'min': self.parse_number('1e-8'),\n 'max': None,\n },\n 'cost': {\n 'min': None,\n 'max': None,\n },\n },\n 'info': market,\n })\n return result",
"def get_segments(weights, threshold):\n marker_list = [True if i >= threshold else False for i in weights]\n i = 0\n final_pairs = []\n while i < len(weights):\n if marker_list[i]:\n start = i\n while i < len(weights) and marker_list[i]:\n i = i + 1\n end = i - 1\n if end-start > 1:\n final_pairs.append(start)\n final_pairs.append(end)\n i = i + 1\n return np.array(final_pairs)",
"def getTimeSegments(segments,bounds,radius,starttime,endtime,magrange,catalog,contributor):\n stime = starttime\n etime = endtime\n \n dt = etime - stime\n dtseconds = dt.days*86400 + dt.seconds\n #segment 1\n newstime = stime\n newetime = stime + timedelta(seconds=dtseconds/2)\n nevents,maxevents = getEventCount(bounds=bounds,radius=radius,starttime=newstime,endtime=newetime,\n magrange=magrange,catalog=catalog,contributor=contributor)\n if nevents < maxevents:\n segments.append((newstime,newetime))\n else:\n segments = getTimeSegments(segments,bounds,radius,newstime,newetime,\n magrange,catalog,contributor)\n #segment 2\n newstime = newetime\n newetime = etime\n nevents,maxevents = getEventCount(bounds=bounds,radius=radius,\n starttime=newstime,endtime=newetime,\n magrange=magrange,catalog=catalog,\n contributor=contributor)\n if nevents < maxevents:\n segments.append((newstime,newetime))\n else:\n segments = getTimeSegments(segments,bounds,radius,newstime,newetime,\n magrange,catalog,contributor)\n\n return segments",
"def segments(self):\n L = len(self.vertices)\n return itertools.chain((self._subset((i,i+1)) for i in range(len(self)-1)),\n (self._subset((L-1,0)),))",
"def get_station_graph(start_station_id, end_station_list):\n start_station_graph = []\n for i in range(10):\n if end_station_list[i] is not None:\n start_station_graph.append((start_station_id, end_station_list[i]))\n return start_station_graph",
"def get_segments(cst):\n assert isinstance(cst, ChromStruct)\n\n # create a set of coordinates for the start and end of segments\n segs = np.load(cst.sg_files)['sg']\n end = np.cumsum(segs)\n start = np.concatenate(([0], end[:-1]))\n\n return np.column_stack((start, end)).astype(int)",
"def getSegments(points):\n return _identifyStrokes(points)[1]",
"def lists_and_segments(self):\n response = self._get(self.uri_for(\"listsandsegments\"))\n return json_to_py(response)",
"def getSegments(source=None, episode=None):\n return None",
"def split_list_by(lst, sepfunc, includesep):\n\tblocks = []\n\tblock = []\n\tfor elem in lst:\n\t\tif sepfunc(elem):\n\t\t\tif includesep:\n\t\t\t\tblock.append(elem)\n\t\t\tblocks.append(block)\n\t\t\tblock = []\n\t\telse:\n\t\t\tblock.append(elem)\n\tif len(block):\n\t\tblocks.append(block)\n\treturn blocks",
"def segment(data):",
"def kmer_list(s, k):\n kmer = []\n n = len(s)\n # n-k+1 is the available range of values or probablities.\n for x in range(0, n-k+1):\n kmer.append(s[x:x+k])\n return kmer",
"def get_exons(chromStart, chromEnd, blockSizes, blockStarts):\n blockSizes = [int(i) for i in blockSizes.split(\",\") if not i == \"\" ]\n blockStarts = [int(i) for i in blockStarts.split(\",\") if not i == \"\" ]\n n = len(blockSizes)\n exons = []\n #print(\"block: \" + str(n))\n #print(blockSizes, blockStarts)\n for i in range(n):\n #print(i)\n blockStart = blockStarts[i]\n blockSize = blockSizes[i]\n exonStart = chromStart + blockStart\n exonEnd = exonStart + blockSize\n exons.append([exonStart, exonEnd])\n return(exons)",
"def seperate_list(list, division_part):\n avg = len(list) / float(division_part)\n out = []\n last = 0.0\n\n while last < len(list):\n out.append(list[int(last):int(last + avg)])\n last += avg\n return out",
"def extract_segments(results):\n tt = [ ( parse_date(x[\"t1\"]), parse_date(x[\"t2\"]) ) for x in results[\"labels\"]+results[\"detected\"] ]\n ts = sorted(itertools.chain.from_iterable( tt ))\n t1 = parse_date(results[\"t1\"])\n if t1 < ts[0]:\n ts.insert(0, t1)\n t2 = parse_date(results[\"t2\"])\n if t2 > ts[-1]:\n ts.append(t2)\n return [ dict(t1=x[0].isoformat(), t2=x[1].isoformat()) for x in list(sliding_window(ts, 2)) ]",
"def stations():\n\n return station_list",
"def get_segments(input_path):\n with open(input_path, 'r') as segments_file:\n segments = []\n for line in segments_file:\n words = line.split('\\t')\n sg_dict = {}\n sg_dict['start'] = float(words[0].replace(',', '.'))\n sg_dict['end'] = float(words[1].replace(',', '.'))\n sg_dict['class'] = words[2][:-1]\n segments.append(sg_dict)\n return segments"
] | [
"0.61199504",
"0.6006646",
"0.58567095",
"0.58383083",
"0.5830448",
"0.5721447",
"0.5628691",
"0.5575491",
"0.55676895",
"0.5562992",
"0.55484986",
"0.5547066",
"0.55222625",
"0.5482955",
"0.54704505",
"0.54665774",
"0.54454684",
"0.54365534",
"0.5432282",
"0.5424745",
"0.54100686",
"0.5408604",
"0.5403384",
"0.5400169",
"0.5393382",
"0.5391886",
"0.53524774",
"0.5338479",
"0.533103",
"0.5309259"
] | 0.6885291 | 0 |
associates an instance of ChildAccount to this Account | def add_child(self, child_account):
self._children.append(child_account) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_account(self):\n Credential.account_list.append(self)",
"def add(self, account):\n if isinstance(account, Account) and account not in self.account:\n self.account.append(account)",
"def put_account(self, account):\n \n pass",
"def account(self, account):\n\n self._account = account",
"def account(self, account):\n\n self._account = account",
"def account(self, account):\n\n self._account = account",
"def account(self, account):\n\n self._account = account",
"def add_account(self, account):\n self.accounts[account.account_number] = account.json()\n # We should save in database the new account using self.di, but not now in order to get our tests passed",
"def add_account(self, account, replace=False):\n for asset in account.assets():\n for asset_class in asset.class2ratio.keys():\n assert asset_class in self._leaf_asset_classes, (\n f'Unknown or non-leaf asset class: {asset_class}')\n\n assert replace or account.name() not in self._accounts, (\n f'Attempting to add duplicate account: {account.name()}')\n\n self._accounts[account.name()] = account\n return self",
"def account(self):\r\n return Account(self)",
"def account(self):\n return Account(self)",
"def account(self, account_id):\r\n return Account(self, account_id)",
"def associate_member_account(memberAccountId=None):\n pass",
"def save_account(self):\n Credentials.credentials_list.append(self)",
"def save_accounts(account):\n account.save_account()",
"def save_accounts(account):\n account.save_account()",
"def accounts(self, accounts):\n\n self._accounts = accounts",
"def account_id(self, account_id):\n self._account_id = account_id",
"def add_account(insert_dict):\n return ar.add_account(insert_dict)",
"def account_id(self, account_id):\n\n self._account_id = account_id",
"def account_id(self, account_id):\n\n self._account_id = account_id",
"def account_id(self, account_id):\n\n self._account_id = account_id",
"def account_id(self, account_id):\n\n self._account_id = account_id",
"def account_id(self, account_id):\n\n self._account_id = account_id",
"def account_id(self, account_id):\n\n self._account_id = account_id",
"def account(self, account: str):\n self._account = account",
"def test_add_account(self):\n person1 = self.owner\n person2 = Person(\n self.initial_year, \"Spouse\", self.initial_year - 20,\n retirement_date=self.retirement_date,\n gross_income=Money(50000),\n spouse=person1, tax_treatment=self.tax_treatment)\n # Add an account and confirm that the Person passed as owner is\n # updated.\n account1 = Account(owner=person1)\n account2 = Account(owner=person1)\n self.assertEqual(person1.accounts, {account1, account2})\n self.assertEqual(person2.accounts, set())",
"def add(self):\n d = {}\n\n self.ok_signal.emit(d, 'account')",
"def account_balance(self, account_balance):\n\n self._account_balance = account_balance",
"def account_balance(self, account_balance):\n\n self._account_balance = account_balance"
] | [
"0.6679163",
"0.6542428",
"0.6518352",
"0.64333874",
"0.64333874",
"0.64333874",
"0.64333874",
"0.6373562",
"0.63151264",
"0.62300396",
"0.6036437",
"0.6035886",
"0.599156",
"0.59576416",
"0.59400326",
"0.59400326",
"0.5933551",
"0.5873395",
"0.5868553",
"0.58684623",
"0.58684623",
"0.58684623",
"0.58684623",
"0.58684623",
"0.58684623",
"0.58433324",
"0.5830607",
"0.582966",
"0.57865196",
"0.57865196"
] | 0.6822193 | 0 |
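`add_child` and the `_children` list (initialised in the `Account` constructor shown among the negatives above) give accounts a simple parent/child hierarchy, which the `print_tree` record that follows walks via `get_children()`. A stripped-down sketch of building and traversing such a tree, with the class reduced to just those pieces, is shown below.

```python
# Stripped-down sketch of the account hierarchy that add_child builds and
# that print_tree (next record) traverses. The corpus Account also tracks
# a sales rep and market segments; only the tree pieces are kept here.

class Account:
    def __init__(self, name):
        self.name = name
        self._children = []

    def add_child(self, child_account):
        self._children.append(child_account)

    def get_children(self):
        return self._children


ge = Account("GE")
jet_engines = Account("Jet Engines")
appliances = Account("Appliances")
ge.add_child(jet_engines)
ge.add_child(appliances)
jet_engines.add_child(Account("DoD Contracts"))


def walk(account, level=0):
    # Depth-first traversal, mirroring print_tree's indentation scheme.
    print(2 * level * "-" + "> " + account.name)
    for child in account.get_children():
        walk(child, level + 1)


walk(ge)
# > GE
# --> Jet Engines
# ----> DoD Contracts
# --> Appliances
```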
print a hierarchical structure representing an account and all child accounts associated to it to the console | def print_tree(account, level=0):
""" In the example output below, "GE" is the root account, "Jet Engines"
and "Appliances" are first-degree ChildAccounts, and "DoD Contracts"
and "Washing Machines" are second-degree ChildAccounts.
> print_tree(general_electric)
GE (Manufacturing, R&D): Daniel Testperson
Jet Engines (Manufacturing, R&D, Aerospace): Daniel Testperson
DoD Contracts (Defense, R&D, Aerospace): William Testperson
Appliances (Manufacturing, Consumer Goods): Janet Testperson
Washing Machines (Consumer Goods): Janet Testperson
"""
markets_output = ""
# format the market segment names: strip the leading and trailing
# quotes from each name and join them with a separating comma
for market in account.get_market_segments():
markets_output += market.name.strip("\'") + ", "
markets_output = markets_output.strip("\'")
# print a row to console
print("{arrow}> {ac_name} ({markets}): {rep}"
.format(arrow=2*level*"-",
ac_name=account.name,
markets=markets_output[:-2],
rep=account.get_sales_rep()))
# recursively call print_tree on the children (if any); base case: no children
for child in account.get_children():
print_tree(child, level=level+1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_tree(self):\n\t\tprint(self.__print_tree('', True, ''))",
"def print_recursive(self, indents):\n\n\t\tind = \"\\t\"\n\t\toutput = indents * ind + self.name\n\t\tprint(output)\n\t\tfor i in self.children:\n\t\t\ti.print_recursive(indents+1)",
"def print_account(account):\r\n markets_output = \"\"\r\n for market in account.get_market_segments():\r\n markets_output += market.name.strip(\"\\'\") + \", \"\r\n markets_output = markets_output.strip(\"\\'\")\r\n print(f'{account.name} ({markets_output[:-2]}): {account.get_sales_rep()}')",
"def show_all_accounts(self, account_name=None, account_id=None, search=False,\n print_table=True):\n pt = PrettyTable(['ACCOUNT_NAME', 'ACCOUNT_ID'])\n pt.hrules = 1\n pt.align = 'l'\n list = self.get_all_accounts(account_name=account_name,\n account_id=account_id,\n search=search)\n for account in list:\n pt.add_row([account['account_name'], account['account_id']])\n if print_table:\n self.log.info(\"\\n\" + str(pt) + \"\\n\")\n else:\n return pt",
"def print_tree(tree):\n if not tree:\n print None\n return\n \n if tree.children:\n print 'Directory hash = {}'.format(base64.urlsafe_b64encode(tree.dmt_hash))\n print 'Contents:'\n for name, subtree in tree.children.iteritems():\n print\n print name\n print_tree(subtree)\n \n else:\n print 'File hash = {}'.format(base64.urlsafe_b64encode(tree.dmt_hash))",
"def print(self) -> None:\n\n print('')\n print(f\"{self.get_name()}, {self.get_description()}\")\n print('-------------')\n for child in self._children:\n child.print()",
"def pretty_print(self,depth=0):\n\t\tfor i in range(depth):\n\t\t\tprint \"\\t\",\n\t\t\t\t\n\t\tprint self.__str__()\n\t\t\n\t\tfor c in self.tree.children:\n\t\t\tc.viz.pretty_print(depth+1)",
"def printTree(self):\n print(printTreeF(self, 0, self))",
"def print_tree(tree, indent=0):\n for c in tree.children:\n print \" \" * indent, \"-->\", c.name\n \n if c.children != []:\n print_tree(c, indent+1)",
"def print_out_account_balances(list_of_all_accounts_known):\n for account in list_of_all_accounts_known:\n print('{0} {1}'.format(account.account_id, account.balance))",
"def print_tree(self):\n return \"\"",
"def show_tree(obj,d=0):\n print \"%s%s\" % (\"-\"*d,obj.__class__.__name__)\n if 'get_children' in dir(obj):\n for a in obj.get_children(): show_tree(a,d+1)",
"def print_output(tree):\n print_value(tree)\n print_tree(tree)",
"def print_tree(self):\n\t\tself.root.print_recursive(0)",
"def print_bi_tree(self):\n\n to_print = [self]\n # current = None\n\n while to_print:\n current = to_print.pop(0)\n if current:\n print(f'\\t{current.data}')\n to_print.append(current.left)\n to_print.append(current.right)",
"def print_children(self):\n print('\\nchildren:', end=\" \")\n if not self.children: print('None'); return ''\n else:\n s = ''\n for child in self.children:\n s += '\\n\\t'\n #s += f'\\n\\tmove: {child.last()} '\n s += f'N: {child.N}'\n s += f' Q: {(child.Q):.2f}'\n s += f' \\t U: {(child.U):.2f}'\n s += f' \\t policy: '\n s += f'{[\"{:.3f}\".format(pi) for pi in child.pi]}'\n #s += f' policy: ' + ' '.join(f\"{x:2.2f}\" for x in self.pi)\n print(s)",
"def print_private(self):\n print('Account Number : ', self.__Account)\n return \"\"",
"def show_tree(self, root_id):\n expands = ','.join(self.expands)\n accounts_pager = self.api.get_pager('accounts', expand=expands,\n page_size=10000)\n accounts = dict((x['resource_uri'], x) for x in accounts_pager)\n root_ref = root = {\"node\": shellish.TreeNode('root')}\n for uri, x in accounts.items():\n parent = accounts.get(x['account'], root)\n if 'node' not in parent:\n parent['node'] = shellish.TreeNode(parent)\n if 'node' not in x:\n x['node'] = shellish.TreeNode(x)\n parent['node'].children.append(x['node'])\n if root_id is not None and x['id'] == root_id:\n root_ref = x\n if root_ref == root:\n root_ref = root['node'].children\n else:\n root_ref = [root_ref['node']]\n formatter = lambda x: self.formatter(self.bundle(x.value))\n t = shellish.Tree(formatter=formatter,\n sort_key=lambda x: x.value['id'])\n for x in t.render(root_ref):\n print(x)",
"def print_tree(self):\n out = \"\"\n for i in range(self.level):\n out += ' |'\n out += '___'\n out += str(self.action)\n if self.action is None:\n print \"None\"\n else:\n print out\n for child in self.children:\n child.print_tree()",
"def print_tree(self):\n stack = [(self.root, 0, 0)] # (node, child no., tabs)\n ntabs = 0\n while len(stack):\n n, i, tabs = stack.pop()\n if len(n.branch):\n if i>=1 and i==len(n.children)-1:\n print(tabs*'\\t' + 'axis-' + str(n.axis) + ': >' + str(n.branch[i-1]))\n else:\n print(tabs*'\\t' + 'axis-' + str(n.axis) + ': <=' + str(n.branch[i]))\n stack.append((n, i+1, tabs))\n if i<len(n.children):\n stack.append((n.children[i], 0, tabs+1))\n else:\n avg = np.dot(n.probabilities[:,0], n.probabilities[:,1])\n print(tabs*'\\t' + 'Label: ' + str(avg) + '\\n')",
"def print_tree(tree, depth=0):\n print('+','--'*depth,tree[0])\n if isinstance(tree[1], str):\n print('|',' '*depth,'->',tree[1])\n return\n if isinstance(tree[1],Terminal):\n print('|',' '*depth,'->',repr(tree[1]))\n return\n for subtree in tree[1]:\n print_tree(subtree, depth+1)",
"def printout_all(self, indent_level):\n indent = \" \"*indent_level*INDENTATION_MULTIPLIER\n\n print(indent, \"Physical Resource ID:\", self.ID, sep='')\n print(indent, \"|-name:\", self.name, sep='')\n\n print(indent, \"|-info:\", self.info, sep='')\n print(indent, \"|-IP address:\", self.IP_address, sep='')\n print(indent, \"|-MAC address:\", self.MAC_address, sep='')",
"def printout_all(self, indent_level):\n indent = \" \"*indent_level*INDENTATION_MULTIPLIER\n\n print(indent, \"Recipient ID:\", self.ID, sep='')\n print(indent, \"|-name:\", self.name, sep='')\n\n print(indent, \"|-version info:\", self.version_info, sep='')\n print(indent, \"|-IP address:\", self.access_IP_address, sep='')\n print(indent, \"|-URL:\", self.access_URL, sep='')\n print(indent, \"|-username for user/pwd credentials:\", self.username_creds, sep='')\n print(indent, \"|-password for user/pwd credentials:\", self.password_creds, sep='')\n print(indent, \"|-key credentials:\", self.key_creds, sep='')\n print(indent, \"|-info about network:\", self.network_info, sep='')",
"def print_tree(node, depth=1):\n for child in node:\n print(\" \" * depth + child.get_name())\n print_tree(child, depth+1)",
"def show_accounts(conn, userid):\n print('\\n\\nAccount statment for user', (userid))\n with conn.cursor() as curs:\n curs.execute('SELECT id, type, balance FROM accounts WHERE owner_id=%s', (userid,))\n rows = curs.fetchall()\n print('Number of results:', curs.rowcount)\n for row in rows:\n print(row)",
"def print(self, root):\n\n depth = self.depth(root)\n for i in range(1, depth + 1):\n print(\"\\n***\", \"Level\", i, \"*********************************\")\n self.print_level(root, i)",
"def execute_print_chain(arg):\n blockchain = Blockchain()\n blockchain.read_blockchain()\n blockchain.print_blocks()",
"def print_tree(tree, indent=''):\n\n for branch in tree:\n if type(branch) == list and branch != []:\n print_tree(branch, indent + ' ')\n else:\n if branch != []:\n print(indent + str(branch))",
"def print_tree(node):\r\n if node is None:\r\n return\r\n print_tree(node.left)\r\n print node.key\r\n print_tree(node.right)",
"def recursive_print(root: Node, depth=0):\n if not root:\n return\n print(\n (\" \" * depth)\n + f\"({root.resource.order}, exec={root.resource.execution_ms:.3f}, \"\n + f\"ttfb={root.resource.time_to_first_byte_ms}, delay={root.resource.fetch_delay_ms:.3f}, \"\n + f\"size={root.resource.size} B, {ResourceType(root.resource.type).name}, {root.resource.url})\"\n )\n for next_node in root.children:\n recursive_print(next_node, depth + 1)"
] | [
"0.65483695",
"0.6483711",
"0.64607173",
"0.63137734",
"0.62994003",
"0.6192441",
"0.6160616",
"0.6061901",
"0.60491484",
"0.60221314",
"0.6008796",
"0.6002482",
"0.6001772",
"0.5992742",
"0.598194",
"0.59669787",
"0.59571195",
"0.59471905",
"0.59323215",
"0.589066",
"0.58874047",
"0.5872912",
"0.58591515",
"0.58496684",
"0.58480084",
"0.5833539",
"0.58228344",
"0.5812871",
"0.5810415",
"0.5805687"
] | 0.77148306 | 0 |
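A minimal usage sketch for the print_tree helper above. DemoAccount and DemoSegment are hypothetical stand-ins (only the name / get_market_segments() / get_sales_rep() / get_children() interface is taken from the snippet), and print_tree itself is assumed to be in scope:

class DemoSegment:
    def __init__(self, name):
        self.name = name

class DemoAccount:
    def __init__(self, name, markets, rep):
        self.name = name
        self._markets = markets   # list of DemoSegment
        self._rep = rep           # sales rep name
        self._children = []       # nested DemoAccount instances
    def get_market_segments(self):
        return self._markets
    def get_sales_rep(self):
        return self._rep
    def get_children(self):
        return self._children

ge = DemoAccount("GE", [DemoSegment("Manufacturing"), DemoSegment("R&D")], "Daniel Testperson")
jets = DemoAccount("Jet Engines", [DemoSegment("Aerospace")], "Daniel Testperson")
ge.get_children().append(jets)
print_tree(ge)
# > GE (Manufacturing, R&D): Daniel Testperson
# --> Jet Engines (Aerospace): Daniel Testperson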
utility function that checks the global scope for an object that matches the one passed in; if it doesn't exist, create the reference in the global scope. This allows for "anonymous" object creation while still being able to get the object back later. Note: the new object name will be the name property with special characters removed and spaces turned to _, appended with "_ms", so a name of "My Awesome Video Games!" becomes "My_Awesome_Video_Games_ms". This is only called from the MarketSegment constructor | def check_for_existing_market_segment(segment):
for var in list(globals().keys()):
if isinstance(eval("{var}".format(var=var)), MarketSegment):
if eval("{var}.name".format(var=var)) == segment.name:
return
# no matching segment found in globals, create it!
var_name = "{}_ms".format(segment.name.replace(" ", "_"))
regex = re.compile('[^a-zA-Z0-9_]')
var_name = regex.sub("", var_name)
globals()[var_name] = segment | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def default_object_scoper(object_name):\n return \"tag=\\\"{}\\\"\".format(object_name)",
"def create_object(object_name):\n if object_name == 'deathstar':\n return Deathstar()\n elif object_name == 'mercury':\n return Mercury()\n elif object_name == 'venus':\n return Venus()\n elif object_name == 'mars':\n return Mars()\n elif object_name == 'earth':\n return Earth()\n elif object_name == 'moon':\n return Moon()\n elif object_name == 'tatooine':\n return Tatooine()\n elif object_name == 'mordor':\n return Mordor()\n elif object_name == 'xwing':\n return Xwing()",
"def get_object_name(obj):\n\n namespace = dict(globals(), **locals()) \n return [name for name in namespace if namespace[name] is obj][0]",
"def replace(name, newobject):",
"def create_simplenamespace():\n obj1 = _(foo=1)\n obj1.random = \"Whoa\"\n print(obj1)\n obj2 = _(foo=2, bar=\"Yipee!\")\n print(obj2)\n obj3 = _(foo=5, bar=4.0, boo=[\"list\", \"with\", \"strings\"])\n print(obj3)",
"def _valid_object_with_name(ui_object):\n return ui_object.obj_name",
"def object_creator(object_name):\n obj = TemplateClass()\n print(f\"Name of object:{obj.get_object_name()}\")\n obj.set_object_name(\"NewObjectName\")\n print(f\"This is the new object name: {obj.get_object_name()}\")",
"def _set_name_scope(self):\n if self.name is None:\n self._name_scope = self.__class__.__name__\n elif self.name == '<lambda>':\n self._name_scope = 'lambda'\n else:\n # E.g. '_my_loss' => 'my_loss'\n self._name_scope = self.name.strip('_')",
"def reuse_func(list_url,url):\n object_name = \"\"\n # loop through the splitted Url list\n for element in list_url:\n # if element match with the key of dictionary. store it in variable\n if element in dictionary.keys():\n object_name = element\n break\n if object_name:\n # if element found. Pop this element and again append it so that it would be new object\n queue.pop(object_name)\n queue.append(object_name)\n dictionary[object_name] = url\n else:\n # if any word of the provided Url does not match with existing keys\n # pop from left and store the key name\n name = queue.popleft()\n # append the new object with the poped name and also store in dictionary\n queue.append(name)\n dictionary[name] = url\n # return the key name of newly created object\n return object_name",
"def name_utility(obj, event):\n\n locate(obj, None, event.object.name)",
"def add_object(sv, name):\r\n if name in sv.Object: \r\n return sv.Object[name] # do not create \r\n else:\r\n nod=nd.Node() # create object\r\n sv.Object[name]=nod # add name to object dict (not ordered) \r\n sv.Object_list.append(name) # add name to object list (ordered) \r\n nod.name=name # object name\r\n return nod",
"def test_namespaced_object_name(self, get_context_mock, get_library_instance_mock):\n with mock.patch.object(\n CumulusCI, \"get_namespace_prefix\", return_value=\"foobar__\"\n ):\n po = PageObjects(FOO_PATH)\n\n FooTestPage = importer.import_class_or_module_by_path(FOO_PATH)\n MockGetLibraryInstance.libs[\"FooTestPage\"] = _PageObjectLibrary(\n FooTestPage()\n )\n\n pobj = po.get_page_object(\"Test\", \"Foo__c\")\n self.assertEqual(pobj.object_name, \"foobar__Foo__c\")",
"def register(obj_name, obj):\n if obj_name not in ninja_globals['register']:\n ninja_globals['register'][obj_name] = obj",
"def test_non_namespaced_object_name(\n self, get_context_mock, get_library_instance_mock\n ):\n with mock.patch.object(CumulusCI, \"get_namespace_prefix\", return_value=\"\"):\n po = PageObjects(FOO_PATH)\n\n FooTestPage = importer.import_class_or_module_by_path(FOO_PATH)\n MockGetLibraryInstance.libs[\"FooTestPage\"] = _PageObjectLibrary(\n FooTestPage()\n )\n\n pobj = po.get_page_object(\"Test\", \"Foo__c\")\n self.assertEqual(pobj.object_name, \"Foo__c\")",
"def init_obj(obj_name):\n ret = type(obj_name, (object,), {})\n return ret",
"def car_object_scoper(object_name):\n return \"match(tag, \\\"dm-{}-.*\\\")\".format(object_name)",
"def resolve_name(obj, _):\n return obj.name.decode()",
"def _ensure_exists(self, name, shape):\n ident = name.lower()\n internal = self._internals.get(ident, None)\n if internal is None:\n internal = Internal(name, shape)\n self._internals[ident] = internal\n return internal",
"def get_valid_name_in_top_scope(self, name: str) -> str:\n while name in self.defined or name in self.undefined:\n name += \"_\"\n return name",
"def register(self, name, obj):\r\n self.eval_allowed_globals[name] = obj",
"def new_with_classname_in_global(classname, *args):\n try:\n c = globals()[classname]\n except KeyError:\n raise\n return c(*args)",
"def _fullname(obj):\n if obj is None:\n return None\n return _modname(obj, True)",
"def full_object_name(obj):\n\n try:\n module = obj.__module__\n if module is None or module == str.__class__.__module__:\n return obj.__name__ # Avoid reporting __builtin__\n else:\n return module + '.' + obj.__name__\n except Exception:\n return None",
"def ref(name):\n return { 'name': name } if name else None",
"def __init__(self, base):\n if isinstance(base, str):\n self._name = base\n else:\n raise TypeError(NAME_CREATE_ERROR)",
"def set_object_name(self, object_name = \"DefaultObject\"):\n self.obj_name = object_name",
"def checkObjectInNameSpace(objectName):\n if objectName is None or not isinstance(objectName, basestring) or objectName == u\"\": return False\n if objectName in globals(): return True\n return objectName in dir(builtins)",
"def new(name=None):",
"def _merge_scope(current, new):\n if new[2] is None:\n return current\n elif new[1] or not current:\n return new[2]\n return '%s.%s' % (current, new[2])",
"def _get_real_object(self, name):\n name = name if isinstance(name, str) else name.name\n for obj in self._objects:\n if name == obj.name:\n return obj\n else:\n raise ValueError(\"Cannot retrieve object. Unknown name {}. \".format(name))"
] | [
"0.58888537",
"0.58255064",
"0.57960576",
"0.5781576",
"0.5686544",
"0.5686237",
"0.5602739",
"0.55378485",
"0.5523215",
"0.54923826",
"0.544969",
"0.5409906",
"0.5392193",
"0.5361912",
"0.5337955",
"0.5288816",
"0.52435946",
"0.5224719",
"0.52194655",
"0.5201718",
"0.51977795",
"0.51968545",
"0.51823705",
"0.5151551",
"0.514181",
"0.51404124",
"0.5103547",
"0.5098619",
"0.50884897",
"0.5059741"
] | 0.6119238 | 0 |
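For illustration, the variable-name derivation used above can be exercised on its own; this sketch only mirrors that logic in isolation (it needs import re, and the sample name is the one from the docstring):

import re

def derive_global_name(segment_name):
    # spaces -> underscores, append "_ms", then drop anything outside [a-zA-Z0-9_]
    var_name = "{}_ms".format(segment_name.replace(" ", "_"))
    return re.compile('[^a-zA-Z0-9_]').sub("", var_name)

print(derive_global_name("My Awesome Video Games!"))  # My_Awesome_Video_Games_ms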
Basically any speaker id is valid. | def clean(self, value):
speakers = speaker_models.Speaker.objects.filter(pk__in=value)
if len(speakers) != len(value):
raise ValidationError(self.error_messages['invalid_choice'] % value)
return speakers | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_api_invalid_stream_id(self) -> None:\n user = self.example_user(\"hamlet\")\n self.login_user(user)\n result = self.api_patch(\n user,\n \"/api/v1/users/me/subscriptions/121\",\n {\"property\": \"is_muted\", \"value\": \"somevalue\"},\n )\n self.assert_json_error(result, \"Invalid stream ID\")",
"def check_id(self, id):",
"def validate_identifier(self, identifier):\n pass",
"def set_speaker(self, identifier):\n self.up_to_date = False\n self._speaker = identifier",
"def test_validate_party_info_id_is_none(self):\n self.party_test_data[\"id\"] = None\n response = validate_party_info(self.party_test_data)\n self.assertDictEqual(\n response, {\"message\": \"id is required\", \"code\": 400})",
"def test_id_nodata(self):\n self.assertEqual(jc.parsers.id.parse('', quiet=True), {})",
"def test_id_no_value(self):\n self.line._parse_event_swimmer_id(\" \")\n self.assertEqual(None, self.line.event_swimmer_id)",
"def validateID(id):\n\n if re.compile('[0-9]+').match(id) == None:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' is not a valid Id. ID should be numeric with Length = '%s' \" \n\t\t\t% (id, lib.constants._ATTR_ID_LENGHT)))\n return -1\n else:\n # Check for the lenght \n counter = 0\n for char in id:\n counter += 1\n print counter , lib.constants._ATTR_ID_LENGHT\n if counter > lib.constants._ATTR_ID_LENGHT :\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' exceeded the given length i.e Max Length = '%s'\" % \n\t\t\t(id, lib.constants._ATTR_ID_LENGHT)))\n return -1\n else:\n return 0\n return 0",
"def test_sounds_id_get(self):\n pass",
"def test_readable_id_valid(readable_id_value):\n program = ProgramFactory.build(readable_id=readable_id_value)\n program.save()\n assert program.id is not None\n course = CourseFactory.build(program=None, readable_id=readable_id_value)\n course.save()\n assert course.id is not None",
"def id_check(employee_id):\r\n# badge_pattern = re.compile('[A-Za-z]{2}-\\d{4}')\r\n# re.search(badge_pattern, employee_id)\r\n\r\n # if statement\r\n if not re.match('[A-Z]{2}-\\d{4}', employee_id):\r\n print(employee_id, 'is not a valid ID.')",
"def testValidateId(self):\n #create a different person and try to use their id\n self.directory.invokeFactory(type_name=\"FSDPerson\",id=\"def456\",firstName=\"Joe\",lastName=\"Blow\")\n self.failUnless('def456' in self.person.validate_id('def456'))\n #create a different content object and try to use its id\n self.directory.invokeFactory(\"Document\", \"mydoc\")\n self.failUnless('mydoc' in self.person.validate_id('mydoc'))",
"def test_plays_id_get(self):\n pass",
"def test_get_event_type_by_id_invalid_id(self):\n\t\trequest = self.client.get('/api/event_type/esper/0', follow=True)\n\t\tself.assertEqual(request.status_code, status.HTTP_400_BAD_REQUEST)",
"def check_player_id(self):\n if self.player_id == 'me':\n profile = self.profile\n self.player_id = profile['id']",
"def test_readable_id_invalid(readable_id_value):\n program = ProgramFactory.build(readable_id=readable_id_value)\n with pytest.raises(ValidationError):\n program.save()\n course = CourseFactory.build(program=None, readable_id=readable_id_value)\n with pytest.raises(ValidationError):\n course.save()",
"def test_id_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_event_swimmer_id(input_val)\n self.assertEqual(output_val, self.line.event_swimmer_id)",
"def validate_id(self, value):\n try:\n Tutor.objects.get(pk=value)\n except Tutor.DoesNotExist:\n raise serializers.ValidationError('Tutor object with id \\'{}\\' does not exist.'.format(value))\n return value",
"def _validate_call_id(self, call_id):\n\n self._validate_required_data(call_id, self.CALL_ID)\n\n query = CallRecord.objects.filter(call_id=call_id)\n\n if query.exists():\n raise NotAcceptable(\n detail='Call id is already in use. Please, choose another')",
"def id_check(self, message):\n matches = ID_SYNTAX.match(message)\n if matches:\n return matches.group(1)\n return None",
"def _validate(cls, pid_value):\n blop = re.compile('^[-\\w]+$')\n if not bool(blop.match(pid_value)):\n raise ValidationError(\n 'The ID should contain only letters with numbers or dashes.',\n field_name='id',\n )",
"def verify_sequencer(seq: 'Sequencer') -> Optional['Sequencer']:\n valid = [ch.isalpha() or ch.isdigit() or ch == '_' for ch in seq.Name]\n if all(valid):\n return seq\n return None",
"def test_id_bad_value(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_event_swimmer_id(val))",
"def test_validate_party_info_id_is_string(self):\n self.party_test_data[\"id\"] = \"1\"\n response = validate_party_info(self.party_test_data)\n self.assertDictEqual(\n response, {\"message\": \"id must be a number\", \"code\": 400})",
"def identify_id(id: str) -> bool:\n return validate_handle(id)",
"def test_unknown_identifier(self):\n # Lists are not supported and should cause an error\n with self.assertRaises(TypeError):\n avp.AVP([0, 3])",
"def test_p1(self):\n print 'test subject_identifier is uuid by default'\n registered_subject = RegisteredSubjectFactory()\n re_pk = re.compile('[\\w]{8}-[\\w]{4}-[\\w]{4}-[\\w]{4}-[\\w]{12}')\n self.assertTrue(re_pk.match(registered_subject.subject_identifier))",
"def validate_passport_id(passport_id: str) -> None:\n if RE_PID.match(passport_id) is None:\n raise ValueError(\"Passport ID is not nine decimal digits\")",
"def isValidInternalSId(*args):\n return _libsbml.SyntaxChecker_isValidInternalSId(*args)",
"def is_id_valid(id_code: str) -> bool:\n if id_code.isdigit():\n if len(str(id_code)) == 11:\n id_code = str(id_code)\n gender_number = int(id_code[0:1])\n day = int(id_code[5:7])\n month = int(id_code[3:5])\n year = id_code[1:3]\n birth_number = id_code[7:10]\n if is_valid_gender_number(gender_number) \\\n and is_valid_year_number(int(year)) \\\n and is_valid_month_number(int(month)) \\\n and is_valid_day_number(gender_number, int(year), int(month), int(day)) \\\n and is_valid_birth_number(int(birth_number)) \\\n and is_valid_control_number(str(id_code)):\n return True\n return False\n return False\n return False"
] | [
"0.5956092",
"0.59091747",
"0.5825499",
"0.56224096",
"0.5605519",
"0.5577106",
"0.5576042",
"0.5531081",
"0.5529223",
"0.5528064",
"0.55078095",
"0.55064124",
"0.5498113",
"0.5472017",
"0.54554164",
"0.54426545",
"0.54162496",
"0.54084516",
"0.53983134",
"0.5389476",
"0.53808933",
"0.53595173",
"0.534715",
"0.53419435",
"0.52975494",
"0.5282789",
"0.5214033",
"0.5213335",
"0.5211382",
"0.52035093"
] | 0.59182245 | 1 |
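The clean() method above boils down to a count check against the database; a framework-free sketch of the same idea (a plain ValueError stands in for Django's ValidationError, and existing_ids stands in for the Speaker queryset):

def clean_ids(value, existing_ids):
    # keep only submitted ids that actually exist
    matched = [pk for pk in value if pk in existing_ids]
    if len(matched) != len(value):
        raise ValueError("invalid choice(s) in %s" % value)
    return matched

print(clean_ids([1, 2], {1, 2, 3}))  # [1, 2]
# clean_ids([1, 99], {1, 2, 3}) would raise ValueError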
Extract names of categorical columns. This function accepts a dataframe and returns a categorical list containing the names of the categorical columns (categorical_var). | def categorical(df):
categorical_var=df.select_dtypes(include ='object').columns.tolist()
return categorical_var | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_categorical(X):\n return list(X.columns[X.dtypes == \"object\"])",
"def find_cats(column):\r\n return pd.Categorical(column).categories",
"def find_categorical(self, df):\n# print(type(df),df.ndim)\n categorical = [key for key in df.keys() if df.dtypes[key] == np.dtype('O')]\n numeric = [key for key in df.keys() if df.dtypes[key] != np.dtype('O')]\n # correct naive expectations\n actual_categoric = ['MSSubClass']\n numeric = list(set(numeric) - set(actual_categoric))\n categorical = list(set(categorical).union(set(actual_categoric)))\n return categorical",
"def find_categorical(self, df):\n# print(type(df),df.ndim)\n categorical = [key for key in df.keys() if df.dtypes[key] == np.dtype('O')]\n numeric = [key for key in df.keys() if df.dtypes[key] != np.dtype('O')]\n # correct naive expectations\n actual_categoric = ['MSSubClass']\n numeric = list(set(numeric) - set(actual_categoric))\n categorical = list(set(categorical).union(set(actual_categoric)))\n return categorical",
"def get_categorical_columns(\n data_frame: pd.DataFrame, uniqueness_thresshold: Optional[float] = None\n) -> list:\n categorical_columns = []\n for column in data_frame.columns:\n values = data_frame[column]\n\n if values.dtype.name == \"category\":\n categorical_columns.append(column)\n continue\n\n # This is a dirty way to check if it is non-numeric, but pandas thinks\n # all the columns are strings.\n try:\n float(values.iloc[0])\n except ValueError:\n categorical_columns.append(column)\n continue\n except TypeError:\n pass\n\n # If it is numeric, but lots of non-zero values are identical, consider it\n # categorical.\n if uniqueness_thresshold is not None:\n # Correct for sparseness, by ignoring zero values.\n if 0 in values.unique() and values.nunique() > 1:\n non_sparse_counts = len(values) - values.value_counts()[0]\n if (values.nunique() - 1) / non_sparse_counts <= uniqueness_thresshold:\n categorical_columns.append(column)\n elif values.nunique() / len(values) <= uniqueness_thresshold:\n categorical_columns.append(column)\n\n return categorical_columns",
"def categorical(df):\n\n # variables which need to be transformed to categorical\n categorical = [\"prop_country_id\", \"visitor_location_country_id\"]\n\n for var in categorical:\n df = pd.concat([df, pd.get_dummies(df[var], prefix=var)], axis=1)\n del df[var]\n\n return df",
"def infer_categorical_variables_in_place(df: pd.DataFrame):\n # infer which variables are categorical\n MAX_UNIQUE_VALUES = 10\n for column in df.columns:\n if df[column].nunique() <= MAX_UNIQUE_VALUES:\n df[column] = df[column].astype('category')",
"def get_categorical_columns() -> list:\n return [\n \"National Provider Identifier\",\n \"Last Name/Organization Name of the Provider\",\n \"First Name of the Provider\",\n \"Middle Initial of the Provider\",\n \"Credentials of the Provider\",\n \"Gender of the Provider\",\n \"Entity Type of the Provider\",\n \"Street Address 1 of the Provider\",\n \"Street Address 2 of the Provider\",\n \"City of the Provider\",\n \"Zip Code of the Provider\",\n \"State Code of the Provider\",\n \"Country Code of the Provider\",\n \"Provider Type\",\n \"Medicare Participation Indicator\",\n \"Place of Service\",\n \"HCPCS Code\",\n \"HCPCS Description\",\n \"HCPCS Drug Indicator\"\n ]",
"def convert_categorical(df):\n print(\" --- Converting Categories into binary features.\")\n columns = df.columns\n categorical = [x for x in columns if x.startswith('c_')]\n for col in categorical:\n print(\" ---- Converting: {}\".format(col))\n category_binary = pd.get_dummies(df[col], prefix=col)\n df = pd.concat([df, category_binary], axis=1)\n df = df.drop(categorical, axis=1)\n print(\" --- Finished converting Categories into binary features.\")\n return df",
"def cat_converter(df):\n \n categoricals = df.columns[df.dtypes == object] \n \n for column in categoricals: \n df[column] = pd.Categorical(df[column])\n new_column = column + '_new'\n df[new_column] = df[column].cat.codes\n df = df.drop([column], axis = 1)\n \n return df",
"def data_categorical(df, cat_features = [], cont_features = []):\n subset_cat = []\n subset_dict={}\n # Add all the object type features to config.cat_features \n for col in df.columns:\n if df[col].dtype == 'object' and col not in cont_features:\n subset_cat.append(col)\n if col not in cat_features :\n cat_features.append(col)\n if cat_features !=[]:\n print('Categorical features : ', ' '.join(cat_features))\n printmd('**Number of unique values for every feature:**')\n print(pd.DataFrame(df[cat_features].nunique(), columns = ['Unique values']).sort_values(by = 'Unique values', ascending=False))\n printmd(\"**5 uniques samples of every Categorical Features :**\")\n for col in cat_features :\n subset_dict[col]= df[col].unique()[:5]\n print(pd.DataFrame.from_dict(subset_dict, orient='index').transpose())\n return (cat_features)",
"def cat_labels(self):\n try:\n return list(self.cats.columns)\n except AttributeError:\n return []",
"def get_cols(df):\n meta = get_metafeatures(df)\n categorical_columns = meta.loc[meta['type'] == 'object', 'column'].tolist()\n cols_to_drop = meta.loc[meta['missing'] > 0.5, 'column'].tolist()\n logging.debug('%s categorical columns found', len(categorical_columns))\n logging.debug('%s columns will be dropped', len(cols_to_drop))\n return categorical_columns, cols_to_drop",
"def retrieve_names(self, categorical_column, num_list):\n return [self.num_to_name[categorical_column][i] for i in num_list]",
"def get_categorical_features(self, x: pd.DataFrame) -> pd.DataFrame:\n return x[self.categorical_features]",
"def process_categorical_data(data_df):\n return pd.get_dummies(data_df, columns=Columns.categorical)",
"def get_unique_categorical(series: pd.Series) -> list:\n\n return list(series.unique())",
"def categorical(df, remove_ov=True):\n object_features = df.loc[:, df.dtypes == 'object'].columns.tolist()\n categorical_features = df.loc[:, df.dtypes == 'category'].columns.tolist()\n features = list(set(object_features + categorical_features))\n if remove_ov:\n try:\n features.remove(\"Survived\")\n except ValueError:\n None\n\n return features",
"def get_encoded_categorical_feature_indexes(self):\n cols = []\n for col_parent in self.categorical_feature_names:\n temp = [self.encoded_feature_names.index(\n col) for col in self.encoded_feature_names if col.startswith(col_parent) and\n col not in self.continuous_feature_names]\n cols.append(temp)\n return cols",
"def _is_categorical(df, field):\n return df[field].dtype.name == 'category'",
"def grab_col_names(dataframe, cat_th=10, car_th=20):\n\n # cat_cols, cat_but_car\n cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == \"O\"]\n\n num_but_cat = [col for col in dataframe.columns if (dataframe[col].nunique() < cat_th) and (dataframe[col].dtypes != \"O\")]\n\n cat_but_car = [col for col in dataframe.columns if (dataframe[col].nunique() > car_th) and (dataframe[col].dtypes == \"O\")]\n\n cat_cols = cat_cols + num_but_cat\n cat_cols = [col for col in cat_cols if col not in cat_but_car]\n\n # num_cols\n num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != \"O\"]\n num_cols = [col for col in num_cols if col not in num_but_cat]\n\n print(f\"Observations: {dataframe.shape[0]}\")\n print(f\"Variables: {dataframe.shape[1]}\")\n print(f'cat_cols: {len(cat_cols)}')\n print(f'num_cols: {len(num_cols)}')\n print(f'cat_but_car: {len(cat_but_car)}')\n print(f'num_but_cat: {len(num_but_cat)}')\n\n return cat_cols, num_cols, cat_but_car",
"def convert_cols_numeric_to_categorical(df, col_list=None):\n if col_list is None:\n col_list = df.columns\n ret = pd.DataFrame()\n for column_name in df.columns:\n column = df[column_name]\n if column_name in col_list and column.dtype != \"object\":\n ret[column_name] = _convert_to_string(column)\n else:\n ret[column_name] = column\n return ret",
"def get_categorical_feature_names(\n pipeline: Pipeline, transformer_name: str, feature_names: List[str]\n) -> List[str]:\n full_feature_names = feature_names + [\n f\"{feature_names[idx]}_missing\"\n for idx in pipeline[\"preprocess\"]\n .named_transformers_[transformer_name][\"impute\"]\n .indicator_.features_\n ]\n feature_names_mapping = {\n f\"x{idx}\": feature_name for idx, feature_name in enumerate(full_feature_names)\n }\n encoded_feature_names = (\n pipeline[\"preprocess\"]\n .named_transformers_[\"cat_features\"][\"encode\"]\n .get_feature_names()\n )\n categorical_feature_names = []\n for feature_name in encoded_feature_names:\n prefix, name = feature_name.split(\"_\", maxsplit=1)\n categorical_feature_names.append(f\"{feature_names_mapping[prefix]}_{name}\")\n return categorical_feature_names",
"def classify_columns(df_preds, verbose=0):\r\n train = copy.deepcopy(df_preds)\r\n #### If there are 30 chars are more in a discrete_string_var, it is then considered an NLP variable\r\n max_nlp_char_size = 30\r\n max_cols_to_print = 30\r\n print('############## C L A S S I F Y I N G V A R I A B L E S ####################')\r\n print('Classifying variables in data set...')\r\n #### Cat_Limit defines the max number of categories a column can have to be called a categorical colum\r\n cat_limit = 35\r\n float_limit = 15 #### Make this limit low so that float variables below this limit become cat vars ###\r\n def add(a,b):\r\n return a+b\r\n sum_all_cols = dict()\r\n orig_cols_total = train.shape[1]\r\n #Types of columns\r\n cols_delete = [col for col in list(train) if (len(train[col].value_counts()) == 1\r\n ) | (train[col].isnull().sum()/len(train) >= 0.90)]\r\n train = train[left_subtract(list(train),cols_delete)]\r\n var_df = pd.Series(dict(train.dtypes)).reset_index(drop=False).rename(\r\n columns={0:'type_of_column'})\r\n sum_all_cols['cols_delete'] = cols_delete\r\n var_df['bool'] = var_df.apply(lambda x: 1 if x['type_of_column'] in ['bool','object']\r\n and len(train[x['index']].value_counts()) == 2 else 0, axis=1)\r\n string_bool_vars = list(var_df[(var_df['bool'] ==1)]['index'])\r\n sum_all_cols['string_bool_vars'] = string_bool_vars\r\n var_df['num_bool'] = var_df.apply(lambda x: 1 if x['type_of_column'] in [np.uint8,\r\n np.uint16, np.uint32, np.uint64,\r\n 'int8','int16','int32','int64',\r\n 'float16','float32','float64'] and len(\r\n train[x['index']].value_counts()) == 2 else 0, axis=1)\r\n num_bool_vars = list(var_df[(var_df['num_bool'] ==1)]['index'])\r\n sum_all_cols['num_bool_vars'] = num_bool_vars\r\n ###### This is where we take all Object vars and split them into diff kinds ###\r\n discrete_or_nlp = var_df.apply(lambda x: 1 if x['type_of_column'] in ['object'] and x[\r\n 'index'] not in string_bool_vars+cols_delete else 0,axis=1)\r\n ######### This is where we figure out whether a string var is nlp or discrete_string var ###\r\n var_df['nlp_strings'] = 0\r\n var_df['discrete_strings'] = 0\r\n var_df['cat'] = 0\r\n var_df['id_col'] = 0\r\n discrete_or_nlp_vars = var_df.loc[discrete_or_nlp==1]['index'].values.tolist()\r\n if len(var_df.loc[discrete_or_nlp==1]) != 0:\r\n for col in discrete_or_nlp_vars:\r\n #### first fill empty or missing vals since it will blowup ###\r\n train[col] = train[col].fillna(' ')\r\n if train[col].map(lambda x: len(x) if type(x)==str else 0).mean(\r\n ) >= max_nlp_char_size and len(train[col].value_counts()\r\n ) <= int(0.9*len(train)) and col not in string_bool_vars:\r\n var_df.loc[var_df['index']==col,'nlp_strings'] = 1\r\n elif len(train[col].value_counts()) > cat_limit and len(train[col].value_counts()\r\n ) <= int(0.9*len(train)) and col not in string_bool_vars:\r\n var_df.loc[var_df['index']==col,'discrete_strings'] = 1\r\n elif len(train[col].value_counts()) > cat_limit and len(train[col].value_counts()\r\n ) == len(train) and col not in string_bool_vars:\r\n var_df.loc[var_df['index']==col,'id_col'] = 1\r\n else:\r\n var_df.loc[var_df['index']==col,'cat'] = 1\r\n nlp_vars = list(var_df[(var_df['nlp_strings'] ==1)]['index'])\r\n sum_all_cols['nlp_vars'] = nlp_vars\r\n discrete_string_vars = list(var_df[(var_df['discrete_strings'] ==1) ]['index'])\r\n sum_all_cols['discrete_string_vars'] = discrete_string_vars\r\n ###### This happens only if a string column happens to be an ID column #######\r\n #### DO NOT Add this to ID_VARS yet. 
It will be done later.. Dont change it easily...\r\n #### Category DTYPE vars are very special = they can be left as is and not disturbed in Python. ###\r\n var_df['dcat'] = var_df.apply(lambda x: 1 if str(x['type_of_column'])=='category' else 0,\r\n axis=1)\r\n factor_vars = list(var_df[(var_df['dcat'] ==1)]['index'])\r\n sum_all_cols['factor_vars'] = factor_vars\r\n ########################################################################\r\n date_or_id = var_df.apply(lambda x: 1 if x['type_of_column'] in [np.uint8,\r\n np.uint16, np.uint32, np.uint64,\r\n 'int8','int16',\r\n 'int32','int64'] and x[\r\n 'index'] not in string_bool_vars+num_bool_vars+discrete_string_vars+nlp_vars else 0,\r\n axis=1)\r\n ######### This is where we figure out whether a numeric col is date or id variable ###\r\n var_df['int'] = 0\r\n var_df['date_time'] = 0\r\n ### if a particular column is date-time type, now set it as a date time variable ##\r\n var_df['date_time'] = var_df.apply(lambda x: 1 if x['type_of_column'] in ['<M8[ns]','datetime64[ns]'] and x[\r\n 'index'] not in string_bool_vars+num_bool_vars+discrete_string_vars+nlp_vars else 0,\r\n axis=1)\r\n ### this is where we save them as date time variables ###\r\n if len(var_df.loc[date_or_id==1]) != 0:\r\n for col in var_df.loc[date_or_id==1]['index'].values.tolist():\r\n if len(train[col].value_counts()) == len(train):\r\n if train[col].min() < 1900 or train[col].max() > 2050:\r\n var_df.loc[var_df['index']==col,'id_col'] = 1\r\n else:\r\n try:\r\n pd.to_datetime(train[col],infer_datetime_format=True)\r\n var_df.loc[var_df['index']==col,'date_time'] = 1\r\n except:\r\n var_df.loc[var_df['index']==col,'id_col'] = 1\r\n else:\r\n if train[col].min() < 1900 or train[col].max() > 2050:\r\n if col not in num_bool_vars:\r\n var_df.loc[var_df['index']==col,'int'] = 1\r\n else:\r\n try:\r\n pd.to_datetime(train[col],infer_datetime_format=True)\r\n var_df.loc[var_df['index']==col,'date_time'] = 1\r\n except:\r\n if col not in num_bool_vars:\r\n var_df.loc[var_df['index']==col,'int'] = 1\r\n else:\r\n pass\r\n int_vars = list(var_df[(var_df['int'] ==1)]['index'])\r\n date_vars = list(var_df[(var_df['date_time'] == 1)]['index'])\r\n id_vars = list(var_df[(var_df['id_col'] == 1)]['index'])\r\n sum_all_cols['int_vars'] = int_vars\r\n copy_date_vars = copy.deepcopy(date_vars)\r\n for date_var in copy_date_vars:\r\n #### This test is to make sure sure date vars are actually date vars\r\n try:\r\n pd.to_datetime(train[date_var],infer_datetime_format=True)\r\n except:\r\n ##### if not a date var, then just add it to delete it from processing\r\n cols_delete.append(date_var)\r\n date_vars.remove(date_var)\r\n sum_all_cols['date_vars'] = date_vars\r\n sum_all_cols['id_vars'] = id_vars\r\n sum_all_cols['cols_delete'] = cols_delete\r\n ## This is an EXTREMELY complicated logic for cat vars. 
Don't change it unless you test it many times!\r\n var_df['numeric'] = 0\r\n float_or_cat = var_df.apply(lambda x: 1 if x['type_of_column'] in ['float16',\r\n 'float32','float64'] else 0,\r\n axis=1)\r\n if len(var_df.loc[float_or_cat == 1]) > 0:\r\n for col in var_df.loc[float_or_cat == 1]['index'].values.tolist():\r\n if len(train[col].value_counts()) > 2 and len(train[col].value_counts()\r\n ) <= float_limit and len(train[col].value_counts()) <= len(train):\r\n var_df.loc[var_df['index']==col,'cat'] = 1\r\n else:\r\n if col not in num_bool_vars:\r\n var_df.loc[var_df['index']==col,'numeric'] = 1\r\n cat_vars = list(var_df[(var_df['cat'] ==1)]['index'])\r\n continuous_vars = list(var_df[(var_df['numeric'] ==1)]['index'])\r\n ######## V E R Y I M P O R T A N T ###################################################\r\n ##### There are a couple of extra tests you need to do to remove abberations in cat_vars ###\r\n cat_vars_copy = copy.deepcopy(cat_vars)\r\n for cat in cat_vars_copy:\r\n if df_preds[cat].dtype==float:\r\n continuous_vars.append(cat)\r\n cat_vars.remove(cat)\r\n var_df.loc[var_df['index']==cat,'cat'] = 0\r\n var_df.loc[var_df['index']==cat,'numeric'] = 1\r\n elif len(df_preds[cat].value_counts()) == df_preds.shape[0]:\r\n id_vars.append(cat)\r\n cat_vars.remove(cat)\r\n var_df.loc[var_df['index']==cat,'cat'] = 0\r\n var_df.loc[var_df['index']==cat,'id_col'] = 1\r\n sum_all_cols['cat_vars'] = cat_vars\r\n sum_all_cols['continuous_vars'] = continuous_vars\r\n sum_all_cols['id_vars'] = id_vars\r\n ###### This is where you consoldate the numbers ###########\r\n var_dict_sum = dict(zip(var_df.values[:,0], var_df.values[:,2:].sum(1)))\r\n for col, sumval in var_dict_sum.items():\r\n if sumval == 0:\r\n print('%s of type=%s is not classified' %(col,train[col].dtype))\r\n elif sumval > 1:\r\n print('%s of type=%s is classified into more then one type' %(col,train[col].dtype))\r\n else:\r\n pass\r\n ############### This is where you print all the types of variables ##############\r\n ####### Returns 8 vars in the following order: continuous_vars,int_vars,cat_vars,\r\n ### string_bool_vars,discrete_string_vars,nlp_vars,date_or_id_vars,cols_delete\r\n if verbose == 1:\r\n print(\" Number of Numeric Columns = \", len(continuous_vars))\r\n print(\" Number of Integer-Categorical Columns = \", len(int_vars))\r\n print(\" Number of String-Categorical Columns = \", len(cat_vars))\r\n print(\" Number of Factor-Categorical Columns = \", len(factor_vars))\r\n print(\" Number of String-Boolean Columns = \", len(string_bool_vars))\r\n print(\" Number of Numeric-Boolean Columns = \", len(num_bool_vars))\r\n print(\" Number of Discrete String Columns = \", len(discrete_string_vars))\r\n print(\" Number of NLP String Columns = \", len(nlp_vars))\r\n print(\" Number of Date Time Columns = \", len(date_vars))\r\n print(\" Number of ID Columns = \", len(id_vars))\r\n print(\" Number of Columns to Delete = \", len(cols_delete))\r\n if verbose == 2:\r\n marthas_columns(df_preds,verbose=1)\r\n print(\" Numeric Columns: %s\" %continuous_vars[:max_cols_to_print])\r\n print(\" Integer-Categorical Columns: %s\" %int_vars[:max_cols_to_print])\r\n print(\" String-Categorical Columns: %s\" %cat_vars[:max_cols_to_print])\r\n print(\" Factor-Categorical Columns: %s\" %factor_vars[:max_cols_to_print])\r\n print(\" String-Boolean Columns: %s\" %string_bool_vars[:max_cols_to_print])\r\n print(\" Numeric-Boolean Columns: %s\" %num_bool_vars[:max_cols_to_print])\r\n print(\" Discrete String Columns: %s\" 
%discrete_string_vars[:max_cols_to_print])\r\n print(\" NLP text Columns: %s\" %nlp_vars[:max_cols_to_print])\r\n print(\" Date Time Columns: %s\" %date_vars[:max_cols_to_print])\r\n print(\" ID Columns: %s\" %id_vars[:max_cols_to_print])\r\n print(\" Columns that will not be considered in modeling: %s\" %cols_delete[:max_cols_to_print])\r\n ##### now collect all the column types and column names into a single dictionary to return!\r\n len_sum_all_cols = reduce(add,[len(v) for v in sum_all_cols.values()])\r\n if len_sum_all_cols == orig_cols_total:\r\n print(' %d Predictors classified...' %orig_cols_total)\r\n #print(' This does not include the Target column(s)')\r\n else:\r\n print('No of columns classified %d does not match %d total cols. Continuing...' %(\r\n len_sum_all_cols, orig_cols_total))\r\n ls = sum_all_cols.values()\r\n flat_list = [item for sublist in ls for item in sublist]\r\n if len(left_subtract(list(train),flat_list)) == 0:\r\n print(' Missing columns = None')\r\n else:\r\n print(' Missing columns = %s' %left_subtract(list(train),flat_list))\r\n return sum_all_cols",
"def agg_categorical(df, parent_var, df_name):\n \n # Select the categorical columns\n categorical = pd.get_dummies(df.select_dtypes('category'))\n\n # Make sure to put the identifying id on the column\n categorical[parent_var] = df[parent_var]\n\n # Groupby the group var and calculate the sum and mean\n categorical = categorical.groupby(parent_var).agg(['sum', 'count', 'mean'])\n \n column_names = []\n \n # Iterate through the columns in level 0\n for var in categorical.columns.levels[0]:\n # Iterate through the stats in level 1\n for stat in ['sum', 'count', 'mean']:\n # Make a new column name\n column_names.append('%s_%s_%s' % (df_name, var, stat))\n \n categorical.columns = column_names\n \n # Remove duplicate columns by values\n _, idx = np.unique(categorical, axis = 1, return_index = True)\n categorical = categorical.iloc[:, idx]\n \n return categorical",
"def train_cats(df):\n for n,c in df.items():\n if is_string_dtype(c): df[n] = c.astype('category').cat.as_ordered()",
"def train_cats(df):\n for n,c in df.items():\n if is_string_dtype(c): df[n] = c.astype('category').cat.as_ordered()",
"def get_categorical_features(self):\n return self.categorical_features",
"def transform_categorical_feature(\n df: pd.DataFrame, column_name: str, column_prefix: str = \"\"\n) -> pd.DataFrame:\n\n df1 = pd.get_dummies(df[column_name].astype(str))\n if column_prefix != \"\":\n df1.columns = [\"is_type_\" + col for col in df1.columns]\n\n new_df = pd.concat([df, df1], axis=1)\n\n # we don't need transformed column anymore\n new_df = new_df.drop(columns=[column_name])\n\n return new_df",
"def dummify_all_categorical(df):\n\n df = pd.get_dummies(df)\n df = dummify(df, \"detailed industry recode\")\n df = dummify(df, \"detailed occupation recode\") ## add some variables that are encoded as int64 but that are in fact categorical\n return df"
] | [
"0.76022893",
"0.73164177",
"0.721496",
"0.721496",
"0.71881354",
"0.7059722",
"0.7012867",
"0.68130624",
"0.6789028",
"0.6767523",
"0.6670025",
"0.66088444",
"0.65843177",
"0.65426064",
"0.647856",
"0.6408604",
"0.63178056",
"0.62948096",
"0.6281347",
"0.6274706",
"0.6269459",
"0.62551236",
"0.62473965",
"0.61918515",
"0.6149883",
"0.61490107",
"0.61490107",
"0.61404955",
"0.6122265",
"0.6108969"
] | 0.8474346 | 0 |
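A quick usage sketch for the categorical() helper above on a throwaway dataframe (column names and values are made up, and the helper is assumed to be in scope):

import pandas as pd

df = pd.DataFrame({
    "city": pd.Series(["NY", "SF", "NY"], dtype="object"),  # object -> reported
    "price": [10.5, 20.0, 15.2],                            # float64 -> ignored
    "units": [1, 2, 3],                                     # int64 -> ignored
})
print(categorical(df))  # ['city']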
Extract names of numerical columns. This function accepts a dataframe and returns a numerical list containing the names of the numerical columns (numerical_var). | def numerical(df):
numerical_var=df.select_dtypes(include =['float64','int64']).columns.tolist()
return numerical_var | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_values(df):\n return df.columns.values.tolist()",
"def get_non_num_cols(df):\n numerics = ['number']\n newdf = df.select_dtypes(exclude=numerics).columns\n return newdf",
"def get_numerical_columns(\n data_frame: pd.DataFrame,\n ignore_columns: list = [],\n uniqueness_thresshold: Optional[float] = None,\n) -> list:\n categorical_columns = get_categorical_columns(data_frame, uniqueness_thresshold)\n\n def is_numeric_and_not_ignored(column):\n \"\"\" Columns not categorical are numeric. \"\"\"\n if column not in categorical_columns and column not in ignore_columns:\n return True\n return False\n\n numerical_columns = list(filter(is_numeric_and_not_ignored, data_frame.columns))\n return numerical_columns",
"def get_non_float_column_names(df):\n if not isinstance(df, pd.DataFrame):\n msg = 'df of type=\"{}\" is not a pandas DataFrame'\n raise TypeError(msg.format(str(type(df))))\n if len(set(df.columns)) != len(df.columns):\n msg = 'df contains duplicated column names which is not supported'\n raise ValueError(msg)\n return list(set(df.select_dtypes(exclude=[np.floating]).columns))",
"def get_float_column_names(df):\n if not isinstance(df, pd.DataFrame):\n msg = 'df of type=\"{}\" is not a pandas DataFrame'\n raise TypeError(msg.format(str(type(df))))\n if len(set(df.columns)) != len(df.columns):\n msg = 'df contains duplicated column names which is not supported'\n raise ValueError(msg)\n return list(set(df.select_dtypes(include=[np.floating]).columns))",
"def grab_col_names(dataframe, cat_th=10, car_th=20):\n\n # cat_cols, cat_but_car\n cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == \"O\"]\n\n num_but_cat = [col for col in dataframe.columns if (dataframe[col].nunique() < cat_th) and (dataframe[col].dtypes != \"O\")]\n\n cat_but_car = [col for col in dataframe.columns if (dataframe[col].nunique() > car_th) and (dataframe[col].dtypes == \"O\")]\n\n cat_cols = cat_cols + num_but_cat\n cat_cols = [col for col in cat_cols if col not in cat_but_car]\n\n # num_cols\n num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != \"O\"]\n num_cols = [col for col in num_cols if col not in num_but_cat]\n\n print(f\"Observations: {dataframe.shape[0]}\")\n print(f\"Variables: {dataframe.shape[1]}\")\n print(f'cat_cols: {len(cat_cols)}')\n print(f'num_cols: {len(num_cols)}')\n print(f'cat_but_car: {len(cat_but_car)}')\n print(f'num_but_cat: {len(num_but_cat)}')\n\n return cat_cols, num_cols, cat_but_car",
"def getColumnsNames(self):\r\n ColsName = []\r\n for i in range(len(self.columns)):\r\n ColsName.append(self.columns[i].getColName())\r\n return ColsName",
"def retrieve_names(self, categorical_column, num_list):\n return [self.num_to_name[categorical_column][i] for i in num_list]",
"def get_column_names(self):\n # here, creating combined column/volue column names for uniqueness\n colname_temp = list()\n for column in self.col_value:\n colname_temp.append(self.question_column + \"-\" + str(column))\n return colname_temp",
"def get_numerical_feature_names():\n names = []\n hmog_feature_names = lambda x: [x + '_mean_during', x + '_sd_during', x + '_difference_before_after',\n x + '_net_change_due_to_tap', x + '_max_change', x + '_restoration_time',\n x + '_normalized_duration', x + '_normalized_duration_max']\n for file_name in file_names:\n for y in y_columns[file_name]:\n names += hmog_feature_names(y)\n return names",
"def get_column_names(self):\n names = []\n names.append(self.question_column + \"_agree_lot\")\n names.append(self.question_column + \"_agree_little\")\n names.append(self.question_column + \"_neither\")\n names.append(self.question_column + \"_dis_little\")\n names.append(self.question_column + \"_dis_lot\")\n return names",
"def column_names(self):\n return self.data.columns.values",
"def categorical(df):\r\n categorical_var=df.select_dtypes(include ='object').columns.tolist()\r\n\r\n return categorical_var",
"def get_dialed_digits_grid_column_names_by_order(self):\n self.column_name_list = self.get_grid_column_names_by_order(self.dialed_digits_grid_div_id)\n return self.column_name_list",
"def get_headers(df):\n return df.columns.values",
"def get_columns(df, data_type=\"category\"):\n if data_type == \"numeric\":\n cols = [col_name for col_name, col_type in df.dtypes.items() if col_type.kind in [\"i\", \"f\"]]\n elif data_type == \"integer\":\n cols = [col_name for col_name, col_type in df.dtypes.items() if col_type.kind == \"i\"]\n elif data_type == \"float\":\n cols = [col_name for col_name, col_type in df.dtypes.items() if col_type.kind == \"f\"]\n elif data_type in [\"object\", \"category\"] :\n cols = df.columns[df.dtypes == data_type].values\n elif data_type == \"non_numeric\":\n cols = [col_name for col_name, col_type in df.dtypes.items() if col_type.kind == \"O\"]\n elif data_type == \"date\":\n cols = [col_name for col_name, col_type in df.dtypes.items() if col_type.kind == \"M\"]\n return cols",
"def identify_numeric_columns(dataset):\n return identify_columns_by_type(dataset, include=['int64', 'float64'])",
"def getColumnNames(self):\n return self.colnames",
"def get_nan_columns(df):\n df = nan_val_summary(df)\n return df[df['fraction_missing'] > 0]['columns'].values",
"def column_names(self):\n return self._hndl.column_names()",
"def get_returns_columns(df: pd.DataFrame) -> list:\n return [col for col in df.columns if '_period_return' in col]",
"def get_sorted_columns(data):\n if not isinstance(data, pd.DataFrame):\n raise TypeError('Invalid input type: type(data) = {}'.format(type(data)))\n col_names = pd.Series(index=data.index)\n for idx, row in data.iterrows():\n col_names[idx] = row.sort_values().index.tolist()\n return col_names",
"def create_quanti_cols(df: pd.DataFrame) -> list:\n\n # create a dictionary that contains datatype of each column\n dtypeDict = dict(df.dtypes)\n # create a list of column names that contains only quantitative data\n quanti_cols = []\n quali_cols = []\n for key, value in dtypeDict.items():\n if value == \"float64\" or value == \"int64\" or value == \"uint8\":\n quanti_cols.append(key)\n elif value == \"object\" or value == \"bool\":\n quali_cols.append(key)\n else:\n print(f\"No such dtypes values yet. Please add {value} in the function\")\n if len(quali_cols) == 1:\n return quanti_cols, quali_cols[0]\n else:\n return quanti_cols, quali_cols",
"def get_columns(self) -> List[str]:\n return self.get_dyf().toDF().columns",
"def getColumnNames(self):\n return self.columnNames",
"def get_sample_colnames(ms_df: DF) -> List[str]:\n\n sample_numbers = get_sample_numbers(ms_df)\n\n target_sample_cols = list()\n for sample in sample_numbers:\n for col in SAMPLE_COLS:\n target_sample_cols.append('{attr}_{sample}'.format(attr=col, sample=sample))\n return target_sample_cols",
"def names(self):\n if self.dtype.fields:\n return list(self.dtype.names)\n elif getattr(self, \"_coldefs\", None) is not None:\n return self._coldefs.names\n else:\n return None",
"def get_columns(self):\n columns = []\n for column in self.columns:\n columns.append(column.data.name)\n return columns",
"def num_and_cat_columns(df):\n \n \n cols = df.columns\n num_cols = df._get_numeric_data().columns\n cat_cols = list(set(cols) - set(num_cols))\n \n return num_cols, cat_cols",
"def get_column_names(self, table):\n try:\n logging.info(f'Getting column names of table `{table}`')\n return list(self.execute(f'SELECT * FROM `{table}`'))\n except:\n logging.exception('Something went wrong getting column names. Check trace.')\n return"
] | [
"0.69793",
"0.6972095",
"0.6884711",
"0.68393546",
"0.68223983",
"0.6716611",
"0.6578074",
"0.65518904",
"0.6534117",
"0.65030515",
"0.6406427",
"0.6395277",
"0.6376944",
"0.6354434",
"0.63127095",
"0.62207276",
"0.6156436",
"0.61465174",
"0.6141555",
"0.6123878",
"0.6118097",
"0.61117864",
"0.6082881",
"0.6064601",
"0.6053052",
"0.6052321",
"0.60415375",
"0.60382396",
"0.603583",
"0.603302"
] | 0.81623864 | 0 |
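And a matching sketch for numerical(); together with categorical() from the previous row the two lists partition the frame's columns (again assuming both helpers are in scope and default object/int64/float64 dtypes):

import pandas as pd

df = pd.DataFrame({"city": ["NY", "SF"], "price": [10.5, 20.0], "units": [1, 2]})
print(numerical(df))    # ['price', 'units']
print(categorical(df))  # ['city']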
Instances based on the condition. This function accepts a dataframe, 2 columns (features) and 2 values, and returns the dataframe filtered on that condition. | def instances_based_condition(df,col1,val1,col2,val2):
instance=df[(df[col1]>val1) & (df[col2]==val2)]
return instance | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_features(\r\n df:pd.DataFrame,\r\n path_data_dir:str\r\n ) -> pd.DataFrame:\r\n # Check input.\r\n # Copy dataframe to avoid in place modification.\r\n df = df.copy()\r\n # Check file path.\r\n if not os.path.exists(path_data_dir):\r\n raise IOError(textwrap.dedent(\"\"\"\\\r\n Path does not exist:\r\n path_data_dir = {path}\"\"\".format(\r\n path=path_data_dir)))\r\n ########################################\r\n # Returned_asm\r\n # Interpretation of assumptions:\r\n # If DSEligible=0, then the vehicle is not eligible for a guarantee.\r\n # * And Returned=-1 (null) since we don't know whether or not it would have been returned,\r\n # but given that it wasn't eligible, it may have been likely to have Returned=1.\r\n # If DSEligible=1, then the vehicle is eligible for a guarantee.\r\n # * And if Returned=0 then the guarantee was purchased and the vehicle was not returned.\r\n # * And if Returned=1 then the guarantee was purchased and the vehicle was returned.\r\n # * And if Returned=-1 (null) then the guarantee was not purchased.\r\n # We don't know whether or not it would have been returned,\r\n # but given that the dealer did not purchase, it may have been likely to have Returned=0.\r\n # Assume:\r\n # If Returned=-1 and DSEligible=0, then Returned_asm=1\r\n # If Returned=-1 and DSEligible=1, then Returned_asm=0\r\n logger.info(textwrap.dedent(\"\"\"\\\r\n Returned_asm: Assume returned status to fill nulls as new feature.\r\n If Returned=-1 and DSEligible=0, then Returned_asm=1 (assumes low P(resale|buyer, car))\r\n If Returned=-1 and DSEligible=1, then Returned_asm=0 (assumes high P(resale|buyer, car))\"\"\"))\r\n df['Returned_asm'] = df['Returned']\r\n df.loc[\r\n np.logical_and(df['Returned'] == -1, df['DSEligible'] == 0),\r\n 'Returned_asm'] = 1\r\n df.loc[\r\n np.logical_and(df['Returned'] == -1, df['DSEligible'] == 1),\r\n 'Returned_asm'] = 0\r\n logger.info(\"Relationship between DSEligible and Returned:\\n{pt}\".format(\r\n pt=pd.pivot_table(\r\n df[['DSEligible', 'Returned']].astype(str),\r\n index='DSEligible', columns='Returned',\r\n aggfunc=len, margins=True, dropna=False)))\r\n logger.info(\"Relationship between DSEligible and Returned_asm:\\n{pt}\".format(\r\n pt=pd.pivot_table(\r\n df[['DSEligible', 'Returned_asm']].astype(str),\r\n index='DSEligible', columns='Returned_asm',\r\n aggfunc=len, margins=True, dropna=False)))\r\n logger.info(\"Relationship between Returned and Returned_asm:\\n{pt}\".format(\r\n pt=pd.pivot_table(\r\n df[['Returned', 'Returned_asm']].astype(str),\r\n index='Returned', columns='Returned_asm',\r\n aggfunc=len, margins=True, dropna=False)))\r\n ########################################\r\n # SellingLocation_lat, SellingLocation_lon\r\n # Cell takes ~1 min to execute if shelf does not exist.\r\n # Google API limit: https://developers.google.com/maps/documentation/geocoding/usage-limits\r\n logger.info(textwrap.dedent(\"\"\"\\\r\n SellingLocation: Geocode.\r\n Scraping webpages for addresses and looking up latitude, longitude coordinates.\"\"\"))\r\n path_shelf = os.path.join(path_data_dir, 'sellloc_geoloc.shelf')\r\n seconds_per_query = 1.0/50.0 # Google API limit\r\n sellloc_geoloc = dict()\r\n with shelve.open(filename=path_shelf, flag='c') as shelf:\r\n for loc in df['SellingLocation'].unique():\r\n if loc in shelf:\r\n raw = shelf[loc]\r\n if raw is None:\r\n location = raw\r\n else:\r\n address = raw['formatted_address']\r\n latitude = raw['geometry']['location']['lat']\r\n longitude = raw['geometry']['location']['lng']\r\n location = 
geopy.location.Location(\r\n address=address, point=(latitude, longitude), raw=raw)\r\n else: \r\n url = r'https://www.manheim.com/locations/{loc}/events'.format(loc=loc)\r\n page = requests.get(url)\r\n tree = bs4.BeautifulSoup(page.text, 'lxml')\r\n address = tree.find(name='p', class_='loc_address').get_text().strip()\r\n try:\r\n components = {\r\n 'country': 'United States',\r\n 'postal_code': address.split()[-1]}\r\n location = geopy.geocoders.GoogleV3().geocode(\r\n query=address,\r\n exactly_one=True,\r\n components=components)\r\n except:\r\n logger.warning(textwrap.dedent(\"\"\"\\\r\n Exception raised. Setting {loc} geo location to `None`\r\n sys.exc_info() =\r\n {exc}\"\"\".format(loc=loc, exc=sys.exc_info())))\r\n location = None\r\n finally:\r\n time.sleep(seconds_per_query)\r\n if location is None:\r\n shelf[loc] = location\r\n else:\r\n shelf[loc] = location.raw\r\n sellloc_geoloc[loc] = location\r\n logger.info(\"Mapping SellingLocation to latitude, longitude coordinates.\")\r\n sellloc_lat = {\r\n sellloc: (geoloc.latitude if geoloc is not None else 0.0)\r\n for (sellloc, geoloc) in sellloc_geoloc.items()}\r\n sellloc_lon = {\r\n sellloc: (geoloc.longitude if geoloc is not None else 0.0)\r\n for (sellloc, geoloc) in sellloc_geoloc.items()}\r\n df['SellingLocation_lat'] = df['SellingLocation'].map(sellloc_lat)\r\n df['SellingLocation_lon'] = df['SellingLocation'].map(sellloc_lon)\r\n # # TODO: experiment with one-hot encoding (problems is that it doesn't scale)\r\n # df = pd.merge(\r\n # left=df,\r\n # right=pd.get_dummies(df['SellingLocation'], prefix='SellingLocation'),\r\n # how='inner',\r\n # left_index=True,\r\n # right_index=True)\r\n ########################################\r\n # JDPowersCat: One-hot encoding\r\n # TODO: Estimate sizes from Wikipedia, e.g. 
https://en.wikipedia.org/wiki/Vehicle_size_class.\r\n logger.info(\"JDPowersCat: One-hot encoding.\")\r\n # Cast to string, replacing 'nan' with 'UNKNOWN'.\r\n df['JDPowersCat'] = (df['JDPowersCat'].astype(str)).str.replace(' ', '').apply(\r\n lambda cat: 'UNKNOWN' if cat == 'nan' else cat)\r\n # One-hot encoding.\r\n df = pd.merge(\r\n left=df,\r\n right=pd.get_dummies(df['JDPowersCat'], prefix='JDPowersCat'),\r\n left_index=True,\r\n right_index=True)\r\n ########################################\r\n # LIGHT_N0G1Y2R3\r\n # Rank lights by warning level.\r\n logger.info(\"LIGHT_N0G1Y2R3: Rank lights by warning level (null=0, green=1, yellow=2, red=3).\")\r\n df['LIGHT_N0G1Y2R3'] = df['LIGHTG']*1 + df['LIGHTY']*2 + df['LIGHTR']*3\r\n ########################################\r\n # SaleDate_*: Extract timeseries features.\r\n logger.info(\"SaleDate: Extract timeseries features.\")\r\n df['SaleDate_dow'] = df['SaleDate'].dt.dayofweek\r\n df['SaleDate_doy'] = df['SaleDate'].dt.dayofyear\r\n df['SaleDate_day'] = df['SaleDate'].dt.day\r\n df['SaleDate_decyear'] = df['SaleDate'].dt.year + (df['SaleDate'].dt.dayofyear-1)/366\r\n ########################################\r\n # BuyerID, SellerID, VIN, SellingLocation, CarMake, JDPowersCat:\r\n # Make cumulative informative priors (*_num*, *_frac*) for string features.\r\n logger.info(textwrap.dedent(\"\"\"\\\r\n BuyerID, SellerID, VIN, SellingLocation, CarMake, JDPowersCat:\r\n Make cumulative informative priors (*_num*, *_frac*) for string features.\"\"\"))\r\n # Cumulative features require sorting by time.\r\n df.sort_values(by=['SaleDate'], inplace=True)\r\n df.reset_index(drop=True, inplace=True)\r\n for col in ['BuyerID', 'SellerID', 'VIN', 'SellingLocation', 'CarMake', 'JDPowersCat']:\r\n logger.info(\"Processing {col}\".format(col=col))\r\n ####################\r\n # Cumulative count of transactions and DSEligible:\r\n # Cumulative count of transactions (yes including current).\r\n df[col+'_numTransactions'] = df[[col]].groupby(by=col).cumcount().astype(int) + 1\r\n df[col+'_numTransactions'].fillna(value=1, inplace=True)\r\n # Cumulative count of transactions that were DealShield-eligible (yes including current).\r\n df[col+'_numDSEligible1'] = df[[col, 'DSEligible']].groupby(by=col)['DSEligible'].cumsum().astype(int)\r\n df[col+'_numDSEligible1'].fillna(value=0, inplace=True)\r\n # Cumulative ratio of transactions that were DealShield-eligible (0=bad, 1=good).\r\n df[col+'_fracDSEligible1DivTransactions'] = (df[col+'_numDSEligible1']/df[col+'_numTransactions'])\r\n df[col+'_fracDSEligible1DivTransactions'].fillna(value=1, inplace=True)\r\n ####################\r\n # DSEligible and Returned\r\n # Note:\r\n # * DealShield-purchased ==> Returned != -1 (not null)\r\n # * below requires\r\n # DSEligible == 0 ==> Returned == -1 (is null)\r\n # Returned != -1 (not null) ==> DSEligible == 1\r\n assert (df.loc[df['DSEligible']==0, 'Returned'] == -1).all()\r\n assert (df.loc[df['Returned']!=-1, 'DSEligible'] == 1).all()\r\n # Cumulative count of transactions that were DealShield-eligible and DealShield-purchased.\r\n df_tmp = df[[col, 'Returned']].copy()\r\n df_tmp['ReturnedNotNull'] = df_tmp['Returned'] != -1\r\n df[col+'_numReturnedNotNull'] = df_tmp[[col, 'ReturnedNotNull']].groupby(by=col)['ReturnedNotNull'].cumsum().astype(int)\r\n df[col+'_numReturnedNotNull'].fillna(value=0, inplace=True)\r\n del df_tmp\r\n # Cumulative ratio of DealShield-eligible transactions that were DealShield-purchased (0=mode).\r\n 
df[col+'_fracReturnedNotNullDivDSEligible1'] = df[col+'_numReturnedNotNull']/df[col+'_numDSEligible1']\r\n df[col+'_fracReturnedNotNullDivDSEligible1'].fillna(value=0, inplace=True)\r\n # Cumulative count of transactions that were DealShield-elegible and DealShield-purchased and DealShield-returned.\r\n df_tmp = df[[col, 'Returned']].copy()\r\n df_tmp['Returned1'] = df_tmp['Returned'] == 1\r\n df[col+'_numReturned1'] = df_tmp[[col, 'Returned1']].groupby(by=col)['Returned1'].cumsum().astype(int)\r\n df[col+'_numReturned1'].fillna(value=0, inplace=True)\r\n del df_tmp\r\n # Cumulative ratio of DealShield-eligible, DealShield-purchased transactions that were DealShield-returned (0=good, 1=bad).\r\n # Note: BuyerID_fracReturned1DivReturnedNotNull is the cumulative return rate for a buyer.\r\n df[col+'_fracReturned1DivReturnedNotNull'] = df[col+'_numReturned1']/df[col+'_numReturnedNotNull']\r\n df[col+'_fracReturned1DivReturnedNotNull'].fillna(value=0, inplace=True)\r\n # Check that weighted average of return rate equals overall return rate.\r\n # Note: Requires groups sorted by date, ascending.\r\n assert np.isclose(\r\n (df[[col, col+'_fracReturned1DivReturnedNotNull', col+'_numReturnedNotNull']].groupby(by=col).last().product(axis=1).sum()/\\\r\n df[[col, col+'_numReturnedNotNull']].groupby(by=col).last().sum()).values[0],\r\n sum(df['Returned']==1)/sum(df['Returned'] != -1),\r\n equal_nan=True)\r\n ####################\r\n # DSEligible and Returned_asm\r\n # NOTE:\r\n # * Below requires\r\n # DSEligible == 0 ==> Returned_asm == 1\r\n # Returned_asm == 0 ==> DSEligible == 1\r\n assert (df.loc[df['DSEligible']==0, 'Returned_asm'] == 1).all()\r\n assert (df.loc[df['Returned_asm']==0, 'DSEligible'] == 1).all()\r\n # Cumulative number of transactions that were assumed to be returned.\r\n df_tmp = df[[col, 'Returned_asm']].copy()\r\n df_tmp['Returnedasm1'] = df_tmp['Returned_asm'] == 1\r\n df[col+'_numReturnedasm1'] = df_tmp[[col, 'Returnedasm1']].groupby(by=col)['Returnedasm1'].cumsum().astype(int)\r\n df[col+'_numReturnedasm1'].fillna(value=0, inplace=True)\r\n del df_tmp\r\n # Cumulative ratio of transactions that were assumed to be returned (0=mode).\r\n df[col+'_fracReturnedasm1DivTransactions'] = df[col+'_numReturnedasm1']/df[col+'_numTransactions']\r\n df[col+'_fracReturnedasm1DivTransactions'].fillna(value=0, inplace=True)\r\n # Check that weighted average of assumed return rate equals overall assumed return rate.\r\n assert np.isclose(\r\n (df[[col, col+'_fracReturnedasm1DivTransactions', col+'_numTransactions']].groupby(by=col).last().product(axis=1).sum()/\\\r\n df[[col, col+'_numTransactions']].groupby(by=col).last().sum()).values[0],\r\n sum(df['Returned_asm']==1)/sum(df['Returned_asm'] != -1),\r\n equal_nan=True)\r\n # Note:\r\n # * Number of transactions that were DealShield-eligible and assumed to be returned ==\r\n # number of transactions that were DealShield-elegible and DealShield-purchased and DealShield-returned\r\n # (numReturned1)\r\n return df",
"def feature_filter(df,feature, high = True):\r\n assert feature in [\"speechiness\",\r\n \"acousticness\",\r\n \"instrumentalness\",\r\n \"liveness\"], \"feature must be one of the following: speechiness,acousticness,instrumentalness,liveness\"\r\n #more features may be added\r\n x = 0.9 if high == True else 0.1\r\n df = df[df[feature] > x] if high == True else df[df[feature] < x]\r\n return df",
"def featureprepare(self,df):\n try:\n df = self.featureselection(df)\n emp_len_dict= pickleload(self.dict_employ_len) # Load emp len\n df['emp_length'] = df['emp_length'].map(emp_len_dict)\n df['Long_emp_length'] = df['emp_length'].apply(lambda x: 'Yes' if x == 10 else 'No') # creating new feature\n df[\"emp_title\"].fillna('Missing', inplace=True)\n\n # Handling missing numerical value\n dict_Mean_var = pickleload(self.dict_Mean_var)\n for col, mean_val in dict_Mean_var.items():\n df[col].fillna(mean_val, inplace=True)\n\n # Handling rare values\n Freqlabels = pickleload(self.Freqlabels)\n for variable, frequent_labels in Freqlabels.items():\n df[variable] = np.where(df[variable].isin(frequent_labels), df[variable], 'Rare')\n\n # Encoding Categorical features\n x = pickleload(self.labelEncoder)\n for features, labels in x.items():\n df.loc[:, features] = labels.transform(df.loc[:, features])\n return df\n except Exception as e:\n self._Logger.error(\"Error in feature preparation: {}\".format(e))",
"def my_feature_xxx(df: pd.DataFrame):\n\n # CODE HERE\n\n return df",
"def _get_sample_df(self, df, features, r):\n grouped = df.groupby('feature')\n df_sample = pd.DataFrame()\n for feature in features:\n group = grouped.get_group(feature)\n samples = group.sample(n=r)\n df_sample = df_sample.append(samples)\n return df_sample",
"def featureselection(self, df):\n try:\n # converting blank value to NaN value.\n df = df.replace(' ', np.nan)\n df[\"Long_emp_length\"] = \"\" # adding additional feature col.\n\n # loading list of features\n features = pd.read_csv(self.feature_selected)\n self.features = [x for x in features[\"0\"]]\n df = df[self.features]\n return df\n except Exception as e:\n self._Logger.error(\"Error in Feature Selection: {}\".format(e))",
"def generate_features(df):\n df_new = pd.DataFrame()\n \n # 6 original features\n df_new['open'] = df['open']\n df_new['open_1'] = df['open'].shift(1)\n df_new['close_1'] = df['close'].shift(1)\n df_new['high_1'] = df['high'].shift(1)\n df_new['low_1'] = df['low'].shift(1)\n df_new['volume_1'] = df['volume'].shift(1)\n \n # 50 original features\n # average price\n df_new['avg_price_5'] = df['close'].rolling(window=5).mean().shift(1)\n df_new['avg_price_30'] = df['close'].rolling(window=21).mean().shift(1)\n df_new['avg_price_90'] = df['close'].rolling(window=63).mean().shift(1)\n df_new['avg_price_365'] = df['close'].rolling(window=252).mean().shift(1)\n \n # average price ratio\n df_new['ratio_avg_price_5_30'] = df_new['avg_price_5'] / df_new['avg_price_30']\n df_new['ratio_avg_price_905_'] = df_new['avg_price_5'] / df_new['avg_price_90']\n df_new['ratio_avg_price_5_365'] = df_new['avg_price_5'] / df_new['avg_price_365']\n df_new['ratio_avg_price_30_90'] = df_new['avg_price_30'] / df_new['avg_price_90']\n df_new['ratio_avg_price_30_365'] = df_new['avg_price_30'] / df_new['avg_price_365']\n df_new['ratio_avg_price_90_365'] = df_new['avg_price_90'] / df_new['avg_price_365'] \n \n \n # average volume\n df_new['avg_volume_5'] = df['volume'].rolling(window=5).mean().shift(1)\n df_new['avg_volume_30'] = df['volume'].rolling(window=21).mean().shift(1)\n df_new['avg_volume_90'] = df['volume'].rolling(window=63).mean().shift(1)\n df_new['avg_volume_365'] = df['volume'].rolling(window=252).mean().shift(1)\n \n #average volume ratio\n df_new['ratio_avg_volume_5_30'] = df_new['avg_volume_5'] / df_new['avg_volume_30']\n df_new['ratio_avg_volumee_5_90'] = df_new['avg_volume_5'] / df_new['avg_volume_90'] \n df_new['ratio_avg_volume_5_365'] = df_new['avg_volume_5'] / df_new['avg_volume_365']\n df_new['ratio_avg_volume_30_90'] = df_new['avg_volume_30'] / df_new['avg_volume_90']\n df_new['ratio_avg_volume_30_365'] = df_new['avg_volume_30'] / df_new['avg_volume_365']\n df_new['ratio_avg_volume_90_365'] = df_new['avg_volume_90'] / df_new['avg_volume_365'] \n \n \n # standard deviation of prices\n df_new['std_price_5'] = df['close'].rolling(window=5).std().shift(1)\n df_new['std_price_30'] = df['close'].rolling(window=21).std().shift(1)\n df_new['std_price_90'] = df['close'].rolling(window=63).std().shift(1) \n df_new['std_price_365'] = df['close'].rolling(window=252).std().shift(1)\n \n # standard deviation ratio of prices \n df_new['ratio_std_price_5_30'] = df_new['std_price_5'] / df_new['std_price_30']\n df_new['ratio_std_price_5_90'] = df_new['std_price_5'] / df_new['std_price_90']\n df_new['ratio_std_price_5_365'] = df_new['std_price_5'] / df_new['std_price_365']\n df_new['ratio_std_price_30_90'] = df_new['std_price_30'] / df_new['std_price_90'] \n df_new['ratio_std_price_30_365'] = df_new['std_price_30'] / df_new['std_price_365'] \n df_new['ratio_std_price_90_365'] = df_new['std_price_90'] / df_new['std_price_365'] \n \n \n # standard deviation of volumes\n df_new['std_volume_5'] = df['volume'].rolling(window=5).std().shift(1)\n df_new['std_volume_30'] = df['volume'].rolling(window=21).std().shift(1)\n df_new['std_volume_90'] = df['volume'].rolling(window=63).std().shift(1)\n df_new['std_volume_365'] = df['volume'].rolling(window=252).std().shift(1)\n \n #standard deviation ratio of volumes\n df_new['ratio_std_volume_5_30'] = df_new['std_volume_5'] / df_new['std_volume_30']\n df_new['ratio_std_volume_5_90'] = df_new['std_volume_5'] / df_new['std_volume_90']\n df_new['ratio_std_volume_5_365'] = 
df_new['std_volume_5'] / df_new['std_volume_365'] \n df_new['ratio_std_volume_30_90'] = df_new['std_volume_30'] / df_new['std_volume_90']\n df_new['ratio_std_volume_30_365'] = df_new['std_volume_30'] / df_new['std_volume_365']\n df_new['ratio_std_volume_90_365'] = df_new['std_volume_90'] / df_new['std_volume_365'] \n \n # return\n df_new['return_1'] = ((df['close'] - df['close'].shift(1)) / df['close'].shift(1)).shift(1)\n df_new['return_5'] = ((df['close'] - df['close'].shift(5)) / df['close'].shift(5)).shift(1)\n df_new['return_30'] = ((df['close'] - df['close'].shift(21)) / df['close'].shift(21)).shift(1)\n df_new['return_90'] = ((df['close'] - df['close'].shift(63)) / df['close'].shift(63)).shift(1) \n df_new['return_365'] = ((df['close'] - df['close'].shift(252)) / df['close'].shift(252)).shift(1)\n \n #average of return\n df_new['moving_avg_5'] = df_new['return_1'].rolling(window=5).mean()\n df_new['moving_avg_30'] = df_new['return_1'].rolling(window=21).mean()\n df_new['moving_avg_90'] = df_new['return_1'].rolling(window=63).mean()\n df_new['moving_avg_365'] = df_new['return_1'].rolling(window=252).mean()\n \n # the target\n df_new['close'] = df['close']\n df_new = df_new.dropna(axis=0)\n return df_new",
"def my_featurize(apartment):\n col =np.array([1, 2, 0, 0, 0, 0, 0, 0 ])\n a= pd.DataFrame(apartment[col])\n if(apartment.get('condition')== 'good'):\n col[1] =1\n else:\n if(apartment.get('condition')== 'zero condition'):\n col[1] = 0\n col[2] =apartment.get('num_rooms')\n col[3] =apartment.get('area')\n col[4] =apartment.get('num_bathrooms')\n col[5] =apartment.get('floor')\n col[6] =apartment.get('ceiling_height')\n col[7] =apartment.get('max_floor')\n\n return col, apartment['price']",
"def make_features(\n dataframe: pd.DataFrame,\n feature_params: FeatureParams,\n processing_params: ProcessingParams,\n handle_target: bool = True\n) -> Tuple[pd.DataFrame, pd.Series]:\n transformer = FeaturesTransformer(feature_params, processing_params)\n\n if handle_target:\n features = dataframe.drop([feature_params.target_col], axis=1)\n target = dataframe[feature_params.target_col]\n\n if feature_params.use_log_trick:\n target = pd.Series(np.log(target.to_numpy()))\n\n features = transformer.transform(features)\n else:\n target = None\n features = dataframe.copy()\n\n return pd.DataFrame(features), target",
"def features_past_generation(features_creation_function,\n days,\n feature_names_prefix,\n data,\n indices):\n matches_outcomes=[]\n for i,match_indice in enumerate(indices):\n match=data.iloc[match_indice,:]\n past_matches=data[(data.Date<match.Date)&(data.Date>=match.Date-datetime.timedelta(days=days))]\n match_features_outcome_1=features_creation_function(1,match,past_matches)\n match_features_outcome_2=features_creation_function(2,match,past_matches)\n matches_outcomes.append(match_features_outcome_1)\n matches_outcomes.append(match_features_outcome_2)\n if i%100==0:\n print(str(i)+\"/\"+str(len(indices))+\" matches treated. \"+ features_creation_function.__name__ + str(days))\n train=pd.DataFrame(matches_outcomes)\n train.columns=[feature_names_prefix + \"_\" + str(days) +\"_\" +str(i) for i in range(len(train.columns))]\n \n \n \n return train",
"def create_dataframe_for_training(data):\n feature_column_name = 'X'\n #data_cp = data[['label']].copy()\n for i, row in tqdm(data.iterrows(), total=len(data)):\n all_features = f'{row.claimant} {row.claim} {row.article_content}'\n data.loc[i, feature_column_name] = all_features\n\n return data[feature_column_name]",
"def classify_df(full_df: pd.DataFrame) -> pd.DataFrame:\n # Use the first 10 rows, and set classification target.\n yield utils.AddClassificationTargetToDataFrame(\n full_df.iloc[range(10), :].copy(), \"amd_tahiti_7970\"\n )",
"def data_filter(\n df, CondTempRange=[float('-inf'), float('inf')],\n EvapTempRange=[float('-inf'), float('inf')],\n RemovalPoint=[OperatingPoint()],\n AddPoint=[OperatingPoint()]\n ):\n\n # copy new dataframe\n df_new = copy.deepcopy(df)\n\n # condition list\n cond = []\n cond.append(df.CondTempInF >= CondTempRange[0])\n cond.append(df.CondTempInF <= CondTempRange[1])\n cond.append(df.EvapTempInF >= EvapTempRange[0])\n cond.append(df.EvapTempInF <= EvapTempRange[1])\n for point in RemovalPoint:\n cond.append(df.OperatingPoint != point)\n addcond = []\n for point in AddPoint:\n addcond.append(df.OperatingPoint == point)\n\n # Apply AND to all conditions\n final_condition = cond[0]\n for ii in xrange(1, len(cond)):\n final_condition = final_condition*cond[ii]\n\n # Apply OR to all conditions\n for ii in xrange(0, len(addcond)):\n final_condition = final_condition+addcond[ii]\n\n # Return the data that satisfy all conditions\n return df_new[final_condition]",
"def get_hikedetails_by_feature(feature):\n\n if (feature == \"dog\"):\n npbyfeature = Hike.query.filter(Hike.features.like('%dogs-leash%')).all()\n\n if (feature == \"kid\"):\n npbyfeature = Hike.query.filter(Hike.features.like('%kids%') | Hike.features.like('%strollers%')).all()\n \n if (feature == \"water\"):\n npbyfeature = Hike.query.filter(Hike.features.like('%river%') | Hike.features.like('%beach%')).all()\n\n \n return npbyfeature",
"def clfFeature(feature, mode):\r\n \r\n feature_path = 'C:\\\\Users\\\\Tom\\\\Documents\\\\Informatiekunde\\\\Thesis\\\\features\\\\' + feature + '.txt'\r\n classlist = ['negative', 'positive']\r\n features = pd.DataFrame()\r\n\r\n for label in classlist:\r\n path = 'C:\\\\Users\\\\Tom\\\\Documents\\\\Informatiekunde\\\\Thesis\\\\data\\\\' + mode + '\\\\' + label + '\\\\'\r\n allFiles = glob.glob(path + \"*.txt\")\r\n for review in allFiles:\r\n title = review.strip('.txt').split('\\\\')[-1]\r\n file = open(review, 'r', encoding='utf8').read().lower()\r\n wordlist = []\r\n featreader = csv.reader(open(feature_path, 'r'), delimiter= '\\n')\r\n for word in featreader:\r\n if word[0] in file:\r\n wordlist.append(word[0])\r\n df = pd.DataFrame({'File': [title], feature.capitalize(): [', '.join(wordlist)]}).set_index('File')\r\n features = features.append(df)\r\n \r\n return features",
"def create_features(df,rsi_window = 14,macd_feat = [12,26,9]):\n df.dropna(inplace=True)\n ## day and month\n df['Date'] = pd.to_datetime(df['Date'])\n df['Month'] = df['Date'].dt.month\n df['dayowk'] = df['Date'].dt.dayofweek\n df = pd.get_dummies(data = df,columns = ['Month','dayowk'])\n \n ##Previos n-day pct_changes\n df['1day_pct'] = df['Adj Close'].pct_change()\n df['2day_pct'] = df['Adj Close'].pct_change(periods = 2)\n df['3day_pct'] = df['Adj Close'].pct_change(periods = 3)\n df['4day_pct'] = df['Adj Close'].pct_change(periods = 4)\n df['5day_pct'] = df['Adj Close'].pct_change(periods = 5)\n df['7day_pct'] = df['Adj Close'].pct_change(periods = 7)\n \n ##Cumulative sum of 1day_pct\n df['1day_pct_cs'] = df['Adj Close'].pct_change().cumsum()\n \n ##EWMA of 7, 50 and 200 days\n df['ewma_7'] = df['Adj Close'].ewm(span=7).mean()/df['Adj Close']\n df['ewma_50'] = df['Adj Close'].ewm(span=50).mean()/df['Adj Close']\n df['ewma_200'] = df['Adj Close'].ewm(span=200).mean()/df['Adj Close']\n ## Golden Cross vs Death Cross etc.\n #df['7g(50&200)'] = (df['ewma_7'] > df['ewma_50']) & (df['ewma_7'] > df['ewma_200'])\n #df['7l(50&200)'] = (df['ewma_7'] < df['ewma_50']) & (df['ewma_7'] < df['ewma_200'])\n #df['7g50'] = (df['ewma_7'] > df['ewma_50']) & (df['ewma_7'] < df['ewma_200'])\n #df['7g200'] = (df['ewma_7'] < df['ewma_50']) & (df['ewma_7'] > df['ewma_200'])\n \n ##RSI and MACD\n df = RSI(df,14)\n df = MACD_mod(df,nl=macd_feat[0],nh=macd_feat[1],nsig=macd_feat[2])\n \n df['day_var'] = (df['High'] - df['Low'])/df['Close']## Days variance\n df['open_close'] = (df['Open'] - df['Close'])/df['Close'] ## Days Open-Close\n df['high_close'] = (df['High'] - df['Close'])/df['Close'] ##Days High-Close\n df['open_prev_close'] = (df['Open'] - df['Close'].shift(1))/df['Close'] ## Days open - Previos Dyas Close\n \n ##Classification target\n df['target'] = round((np.sign(df['1day_pct']).shift(-1)+1)/2) ## Target for classification\n #df['1_day_target'] = df['Adj Close'].shift(-1) - df['Adj Close'] ## Target for Regression\n #df['target2'] = round((np.sign(df['1day_pct']).shift(-1)+1)/2)## Will the price go up intra-day\n \n ## IS the stock Overbought or Oversold based on RSI?\n df['RSI_overbought'] = df['RSI']>70\n df['RSI_oversold'] = df['RSI']<30\n \n \n #df.drop(['Open','High','Low','Close'],axis=1,inplace=True)\n# df = df.dropna()\n \n #df = df.reset_index(drop=True)\n \n ## Calculating how large the previos hot and cold streaks were\n f = 0\n df['prev_hot_streak'] = np.zeros(df.shape[0])\n for i in range(df.shape[0]-1):\n if df['target'][i] ==1:\n f += 1\n if df['target'][i+1] ==0:\n df['prev_hot_streak'][i+1] = f\n f = 0\n for i in range(1,df.shape[0]):\n #print(i)\n if df['prev_hot_streak'][i]==0:\n df['prev_hot_streak'][i]=df['prev_hot_streak'][i-1]\n \n \n df['prev_cold_streak'] = np.zeros(df.shape[0])\n for i in range(df.shape[0]-1):\n if df['target'][i] ==0:\n f += 1\n if df['target'][i+1] ==1:\n df['prev_cold_streak'][i+1] = f\n f = 0\n\n for i in range(1,df.shape[0]):\n #print(i)\n if df['prev_cold_streak'][i]==0:\n df['prev_cold_streak'][i] = df['prev_cold_streak'][i-1]\n \n ## Calculating current hot and cold streaks\n df['current_hot_streak'] = np.zeros(df.shape[0])\n df['current_cold_streak'] = np.zeros(df.shape[0])\n fhot=0\n fcold=0\n for i in range(df.shape[0]):\n if df['target'][i]==1:\n fhot += 1\n fcold = 0\n df['current_hot_streak'][i] = fhot\n elif df['target'][i]==0:\n fcold += 1\n fhot = 0\n df['current_cold_streak'][i] = fcold\n \n df['prev_hot_streak'] = 
df['prev_hot_streak'].shift(1)\n df['prev_cold_streak'] = df['prev_cold_streak'].shift(1)\n df['current_hot_streak'] = df['current_hot_streak'].shift(1)\n df['current_cold_streak'] = df['current_cold_streak'].shift(1)\n \n ## Combinations of previos streaks\n df['prev_current_hot'] = df['prev_hot_streak'] - df['current_hot_streak']\n df['prev_current_cold'] = df['prev_cold_streak'] - df['current_cold_streak']\n df['current_hot_prev_cold'] = df['current_hot_streak'] - df['prev_cold_streak']\n df['current_cold_prev_hot'] = df['current_cold_streak'] - df['prev_hot_streak']\n \n ##Calculating days since max\n current_max = df['Adj Close'][0]\n df['days_from_max'] = np.zeros(df.shape[0])\n df['pct_from_max'] = np.zeros(df.shape[0])\n #print('blah')\n for i in range(1,df.shape[0]):\n if df['Adj Close'][i] > current_max:\n current_max = df['Adj Close'][i]\n # print(current_max)\n else:\n df['days_from_max'][i] = df['days_from_max'][i-1]+1\n df['pct_from_max'][i] = (df['Adj Close'][i]-current_max)/current_max\n #print(df['days_from_max'][i])\n \n \n \n df.dropna(inplace=True)\n df = df.reset_index(drop=True)\n return df",
"def features_target_split():\n transactions = load_all_transactions('creditcard.csv')\n features = transactions.drop('Class', axis=1)\n target = transactions['Class']\n print('\\nCreation of feature set dataframe and target seiries successful \\n')\n return features, target",
"def feature_selection_rf(df, threshold, cols_to_filter, label_col = 'label', pcg = 1.0):\n print(\"[Info] Feature selection by Random Forest may take a long time\")\n\n df = df.select(cols_to_filter + [label_col]).sample(withReplacement=False, fraction=pcg)\n\n df = only_numeric_columns(df, label_col = label_col)\n\n df.cache()\n\n print \"[Info] Number of rows in the DF: \" + str(df.count())\n\n input_cols = list(set(df.columns) - set([label_col]))\n\n assembler = VectorAssembler(inputCols=input_cols, outputCol='features')\n\n numTrees, maxDepth, minInstancesPerNode, maxBins, subsamplingRate, maxIter = param_selection(df)\n\n rf_model = RandomForestClassifier(numTrees=numTrees, maxDepth=maxDepth,\n minInstancesPerNode=minInstancesPerNode,\n maxBins=maxBins, featureSubsetStrategy='auto', minInfoGain=0.0,\n impurity='gini', subsamplingRate=subsamplingRate, labelCol = label_col)\\\n\n pipeline = Pipeline(stages=[assembler, rf_model])\n\n pipeline_model = pipeline.fit(df)\n\n from churn_nrt.src.projects_utils.models.modeler import getOrderedRelevantFeats\n\n feat_imp_nrt = getOrderedRelevantFeats(pipeline_model, input_cols, \"f\")\n\n n = threshold if(threshold >=1) else round(threshold*len(feat_imp_nrt))\n\n num_cols = [f[0] for f in feat_imp_nrt][0:n]\n\n return num_cols",
"def feature_selection_Logistic(df, cat_columns, cf_columns):\n \n X, X_train_rs, X_test, y_train_rs, y_test = preprocess_data(df, cat_columns, cf_columns)\n X_train_scaled, X_test_scaled = scale_X(X_train_rs, X_test)\n \n #run SelectFromModel for feature selection\n selector = SelectFromModel(LogisticRegression(fit_intercept=True))\n selector.fit(X_train_scaled, y_train_rs)\n selected_feat = list(X.columns[(selector.get_support())])\n selected = dict(zip((list(X.columns)), list(selector.get_support())))\n X_train_selected = selector.transform(X_train_scaled)\n X_test_selected = selector.transform(X_test_scaled)\n print(sum(selector.get_support()),\" features selected out of \", len(list(X.columns)))\n \n return selected, X_train_selected, X_test_selected, y_train_rs, y_test",
"def addDummyFeatures(inputDf, feature):\n\n\n ## TODO ##\n if feature not in inputDf.columns:\n return('Feature not in dataset')\n rows,columns = inputDf.shape\n feature_List = []\n OHE_Matrix = np.array([[]]) #Create a matrix to store the OHE values\n for i in range(rows):\n if pd.isna(inputDf.loc[i,feature]):\n OHE_Matrix = np.concatenate((OHE_Matrix,np.zeros((1,len(feature_List)))),axis=0) #If missing data, create a new row of zeros\n elif str(inputDf.loc[i,feature]) not in feature_List:\n feature_List.append(str(inputDf.loc[i,feature]))\n OHE_Matrix = np.concatenate((OHE_Matrix,np.zeros((i+1,1))),axis=1)#if there is a new feature, create a new column of zeros\n if str(inputDf.loc[i,feature]) in feature_List:\n OHE_Matrix = np.concatenate((OHE_Matrix,np.zeros((1,len(feature_List)))),axis=0)#if this it is alreay in feature list , create a new row of zeros and set the feature related column to 1\n OHE_Matrix[i,feature_List.index(str(inputDf.loc[i,feature]))]=1\n for i in range(len(feature_List)):\n feature_List[i] = feature + '_'+feature_List[i]#New column names for OHE\n\n OHE_Matrix = np.delete(OHE_Matrix,rows,0)#Delete the extra row created\n\n dataOut= pd.DataFrame(OHE_Matrix,columns=feature_List) #Create a dataframe with OHE as matrix and the new feature list\n outDf = pd.concat([inputDf,dataOut],axis=1)#Concate new features to original matrix\n outDf = outDf.drop(feature,axis=1)#drop the original feature\n return outDf",
"def addDummyFeatures(inputDf, feature):\n\n\n ## TODO ##\n if feature not in inputDf.columns:\n return('Feature not in dataset')\n rows,columns = inputDf.shape\n feature_List = []\n OHE_Matrix = np.array([[]]) #Create a matrix to store the OHE values\n for i in range(rows):\n if pd.isna(inputDf.loc[i,feature]):\n OHE_Matrix = np.concatenate((OHE_Matrix,np.zeros((1,len(feature_List)))),axis=0) #If missing data, create a new row of zeros\n elif str(inputDf.loc[i,feature]) not in feature_List:\n feature_List.append(str(inputDf.loc[i,feature]))\n OHE_Matrix = np.concatenate((OHE_Matrix,np.zeros((i+1,1))),axis=1)#if there is a new feature, create a new column of zeros\n if str(inputDf.loc[i,feature]) in feature_List:\n OHE_Matrix = np.concatenate((OHE_Matrix,np.zeros((1,len(feature_List)))),axis=0)#if this it is alreay in feature list , create a new row of zeros and set the feature related column to 1\n OHE_Matrix[i,feature_List.index(str(inputDf.loc[i,feature]))]=1\n for i in range(len(feature_List)):\n feature_List[i] = feature + '_'+feature_List[i]#New column names for OHE\n\n OHE_Matrix = np.delete(OHE_Matrix,rows,0)#Delete the extra row created\n\n dataOut= pd.DataFrame(OHE_Matrix,columns=feature_List) #Create a dataframe with OHE as matrix and the new feature list\n outDf = pd.concat([inputDf,dataOut],axis=1)#Concate new features to original matrix\n outDf = outDf.drop(feature,axis=1)#drop the original feature\n return outDf",
"def data_specific_processing(self, dataframe):\n return dataframe",
"def preprocess_feature(df):",
"def filldf(df,features,CrossMethod):\n for i in CrossMethod.keys():\n for j in features:\n if i in j:\n p = j[1:-1].split(i)\n df[j] = CrossMethod[i](df[p[0]],df[p[1]])\n return df",
"def update_features(\r\n df:pd.DataFrame\r\n ) -> pd.DataFrame:\r\n # Check input.\r\n # Copy dataframe to avoid in place modification.\r\n df = df.copy()\r\n ########################################\r\n # Returned_asm\r\n # Interpretation of assumptions:\r\n # If DSEligible=0, then the vehicle is not eligible for a guarantee.\r\n # * And Returned=-1 (null) since we don't know whether or not it would have been returned,\r\n # but given that it wasn't eligible, it may have been likely to have Returned=1.\r\n # If DSEligible=1, then the vehicle is eligible for a guarantee.\r\n # * And if Returned=0 then the guarantee was purchased and the vehicle was not returned.\r\n # * And if Returned=1 then the guarantee was purchased and the vehicle was returned.\r\n # * And if Returned=-1 (null) then the guarantee was not purchased.\r\n # We don't know whether or not it would have been returned,\r\n # but given that the dealer did not purchase, it may have been likely to have Returned=0.\r\n # Assume:\r\n # If Returned=-1 and DSEligible=0, then Returned_asm=1\r\n # If Returned=-1 and DSEligible=1, then Returned_asm=0\r\n logger.info(textwrap.dedent(\"\"\"\\\r\n Returned_asm: Assume returned status to fill nulls as new feature.\r\n If Returned=-1 and DSEligible=0, then Returned_asm=1 (assumes low P(resale|buyer, car))\r\n If Returned=-1 and DSEligible=1, then Returned_asm=0 (assumes high P(resale|buyer, car))\"\"\"))\r\n df['Returned_asm'] = df['Returned']\r\n df.loc[\r\n np.logical_and(df['Returned'] == -1, df['DSEligible'] == 0),\r\n 'Returned_asm'] = 1\r\n df.loc[\r\n np.logical_and(df['Returned'] == -1, df['DSEligible'] == 1),\r\n 'Returned_asm'] = 0\r\n logger.info(\"Relationship between DSEligible and Returned:\\n{pt}\".format(\r\n pt=pd.pivot_table(\r\n df[['DSEligible', 'Returned']].astype(str),\r\n index='DSEligible', columns='Returned',\r\n aggfunc=len, margins=True, dropna=False)))\r\n logger.info(\"Relationship between DSEligible and Returned_asm:\\n{pt}\".format(\r\n pt=pd.pivot_table(\r\n df[['DSEligible', 'Returned_asm']].astype(str),\r\n index='DSEligible', columns='Returned_asm',\r\n aggfunc=len, margins=True, dropna=False)))\r\n logger.info(\"Relationship between Returned and Returned_asm:\\n{pt}\".format(\r\n pt=pd.pivot_table(\r\n df[['Returned', 'Returned_asm']].astype(str),\r\n index='Returned', columns='Returned_asm',\r\n aggfunc=len, margins=True, dropna=False)))\r\n ########################################\r\n # BuyerID, SellerID, VIN, SellingLocation, CarMake, JDPowersCat:\r\n # Make cumulative informative priors (*_num*, *_frac*) for string features.\r\n logger.info(textwrap.dedent(\"\"\"\\\r\n BuyerID, SellerID, VIN, SellingLocation, CarMake, JDPowersCat:\r\n Make cumulative informative priors (*_num*, *_frac*) for string features.\"\"\"))\r\n # Cumulative features require sorting by time.\r\n assert (df['SaleDate'].diff().iloc[1:] >= np.timedelta64(0, 'D')).all()\r\n for col in ['BuyerID', 'SellerID', 'VIN', 'SellingLocation', 'CarMake', 'JDPowersCat']:\r\n logger.info(\"Processing {col}\".format(col=col))\r\n ####################\r\n # Cumulative count of transactions and DSEligible:\r\n # Cumulative count of transactions (yes including current).\r\n df[col+'_numTransactions'] = df[[col]].groupby(by=col).cumcount().astype(int) + 1\r\n df[col+'_numTransactions'].fillna(value=1, inplace=True)\r\n # Cumulative count of transations that were DealShield-eligible (yes including current).\r\n df[col+'_numDSEligible1'] = df[[col, 
'DSEligible']].groupby(by=col)['DSEligible'].cumsum().astype(int)\r\n df[col+'_numDSEligible1'].fillna(value=0, inplace=True)\r\n # Cumulative ratio of transactions that were DealShield-eligible (0=bad, 1=good).\r\n df[col+'_fracDSEligible1DivTransactions'] = (df[col+'_numDSEligible1']/df[col+'_numTransactions'])\r\n df[col+'_fracDSEligible1DivTransactions'].fillna(value=1, inplace=True)\r\n ####################\r\n # DSEligible and Returned\r\n # Note:\r\n # * DealShield-purchased ==> Returned != -1 (not null)\r\n # * below requires\r\n # DSEligible == 0 ==> Returned == -1 (is null)\r\n # Returned != -1 (not null) ==> DSEligible == 1\r\n assert (df.loc[df['DSEligible']==0, 'Returned'] == -1).all()\r\n assert (df.loc[df['Returned']!=-1, 'DSEligible'] == 1).all()\r\n # Cumulative count of transactions that were DealShield-eligible and DealShield-purchased.\r\n df_tmp = df[[col, 'Returned']].copy()\r\n df_tmp['ReturnedNotNull'] = df_tmp['Returned'] != -1\r\n df[col+'_numReturnedNotNull'] = df_tmp[[col, 'ReturnedNotNull']].groupby(by=col)['ReturnedNotNull'].cumsum().astype(int)\r\n df[col+'_numReturnedNotNull'].fillna(value=0, inplace=True)\r\n del df_tmp\r\n # Cumulative ratio of DealShield-eligible transactions that were DealShield-purchased (0=mode).\r\n df[col+'_fracReturnedNotNullDivDSEligible1'] = df[col+'_numReturnedNotNull']/df[col+'_numDSEligible1']\r\n df[col+'_fracReturnedNotNullDivDSEligible1'].fillna(value=0, inplace=True)\r\n # Cumulative count of transactions that were DealShield-elegible and DealShield-purchased and DealShield-returned.\r\n df_tmp = df[[col, 'Returned']].copy()\r\n df_tmp['Returned1'] = df_tmp['Returned'] == 1\r\n df[col+'_numReturned1'] = df_tmp[[col, 'Returned1']].groupby(by=col)['Returned1'].cumsum().astype(int)\r\n df[col+'_numReturned1'].fillna(value=0, inplace=True)\r\n del df_tmp\r\n # Cumulative ratio of DealShield-eligible, DealShield-purchased transactions that were DealShield-returned (0=good, 1=bad).\r\n # Note: BuyerID_fracReturned1DivReturnedNotNull is the cumulative return rate for a buyer.\r\n df[col+'_fracReturned1DivReturnedNotNull'] = df[col+'_numReturned1']/df[col+'_numReturnedNotNull']\r\n df[col+'_fracReturned1DivReturnedNotNull'].fillna(value=0, inplace=True)\r\n # Check that weighted average of return rate equals overall return rate.\r\n # Note: Requires groups sorted by date, ascending.\r\n assert np.isclose(\r\n (df[[col, col+'_fracReturned1DivReturnedNotNull', col+'_numReturnedNotNull']].groupby(by=col).last().product(axis=1).sum()/\\\r\n df[[col, col+'_numReturnedNotNull']].groupby(by=col).last().sum()).values[0],\r\n sum(df['Returned']==1)/sum(df['Returned'] != -1),\r\n equal_nan=True)\r\n ####################\r\n # DSEligible and Returned_asm\r\n # NOTE:\r\n # * Below requires\r\n # DSEligible == 0 ==> Returned_asm == 1\r\n # Returned_asm == 0 ==> DSEligible == 1\r\n assert (df.loc[df['DSEligible']==0, 'Returned_asm'] == 1).all()\r\n assert (df.loc[df['Returned_asm']==0, 'DSEligible'] == 1).all()\r\n # Cumulative number of transactions that were assumed to be returned.\r\n df_tmp = df[[col, 'Returned_asm']].copy()\r\n df_tmp['Returnedasm1'] = df_tmp['Returned_asm'] == 1\r\n df[col+'_numReturnedasm1'] = df_tmp[[col, 'Returnedasm1']].groupby(by=col)['Returnedasm1'].cumsum().astype(int)\r\n df[col+'_numReturnedasm1'].fillna(value=0, inplace=True)\r\n del df_tmp\r\n # Cumulative ratio of transactions that were assumed to be returned (0=mode).\r\n df[col+'_fracReturnedasm1DivTransactions'] = 
df[col+'_numReturnedasm1']/df[col+'_numTransactions']\r\n df[col+'_fracReturnedasm1DivTransactions'].fillna(value=0, inplace=True)\r\n # Check that weighted average of assumed return rate equals overall assumed return rate.\r\n assert np.isclose(\r\n (df[[col, col+'_fracReturnedasm1DivTransactions', col+'_numTransactions']].groupby(by=col).last().product(axis=1).sum()/\\\r\n df[[col, col+'_numTransactions']].groupby(by=col).last().sum()).values[0],\r\n sum(df['Returned_asm']==1)/sum(df['Returned_asm'] != -1),\r\n equal_nan=True)\r\n # Note:\r\n # * Number of transactions that were DealShield-eligible and assumed to be returned ==\r\n # number of transactions that were DealShield-elegible and DealShield-purchased and DealShield-returned\r\n # (numReturned1)\r\n return df",
"def predict(self,\n df: pd.DataFrame,\n return_instance_score: bool = True,\n return_forecast: bool = True\n ) -> Dict[Dict[str, str], Dict[pd.DataFrame, pd.DataFrame]]:\n # compute outlier scores\n forecast = self.score(df)\n iscore = pd.DataFrame(data={\n 'ds': df['ds'].values,\n 'instance_score': forecast['score']\n })\n\n # values above threshold are outliers\n outlier_pred = pd.DataFrame(data={\n 'ds': df['ds'].values,\n 'is_outlier': (forecast['score'] > 0.).astype(int)\n })\n\n # populate output dict\n od = outlier_prediction_dict()\n od['meta'] = self.meta\n od['data']['is_outlier'] = outlier_pred\n if return_instance_score:\n od['data']['instance_score'] = iscore\n if return_forecast:\n od['data']['forecast'] = forecast\n return od",
"def eval_input_fn(df):\n fts = df.drop(columns=['class'])\n labs = df.filter(items=['class']).values.astype(int)\n\n features = {k:list(v.values) for k,v in fts.items()}\n features = dict(features)\n x = fts.values\n x = np.array([[x]]).reshape((np.shape(x)[0], np.shape(x)[1], 1, 1))\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices({\"x_ph\":x,\"y_ph\":convert_to_one_hot(labs)})\n \n # Shuffle, repeat, and batch the examples.\n dataset = dataset.shuffle(1000).batch(np.shape(x)[0]).repeat()\n # Return the read end of the pipeline.\n return dataset.make_one_shot_iterator().get_next()",
"def generate_features(self, df):\n df = df.reset_index()\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return extract_features(df, column_id=\"id\", impute_function=impute,\n default_fc_parameters=self.extraction_settings)",
"def getFeature(df, start, end):\n\n return [df[start:end].mean(),\n df[start:end].std(),\n df[start:end].skew(),\n df[start:end].kurt(),\n df[start:end].quantile(0.25),\n df[start:end].quantile(0.75),\n df[start:end].quantile(0.90),\n df[start:end].quantile(0.15),\n df[start:end].median(),\n df[start:end].mad(),\n df[start:end].sem(),\n df[start:end].var(),\n df[start:end].autocorr(1),\n df[start:end].autocorr(2),\n df[start:end].autocorr(3),\n df[start:end].autocorr(4),\n df[start:end].autocorr(5),\n np.append(df[start:end].mode(), -1)[0]\n ]",
"def new_features(df):\n print(\"Add new features ...\")\n # distinguish Spring, Fall and pregnant females (don't care about juvenilles/unknown)\n df[\"gender_plus\"] = df[\"Gender\"]\n df.loc[df.Gravid, \"gender_plus\"] = \"f_gra\"\n\n df[\"gender_seasons\"] = df[\"Gender\"]\n df.loc[df.Gravid, \"gender_seasons\"] = \"f_gra\"\n\n # add features\n df[\"Age_To_Weight\"] = df[\"Annuli\"] / df[\"Weight\"]\n\n # Calcuate Number of recaptures\n df_captures = df[[\"ID\", \"Date\"]].groupby(\"ID\").count()\n df_captures.columns = [\"recapture_count\"]\n df_captures.reset_index(inplace=True)\n df = pd.merge(df, df_captures, how=\"outer\", on=\"ID\")\n\n # recalculate annuli\n df_min = pd.pivot_table(\n df[df.Annuli > 0],\n values=[\"Date\", \"Annuli\"],\n index=[\"ID\"],\n aggfunc={\"Date\": min, \"Annuli\": min},\n )\n df_min.columns = [\"annuli_min\", \"date_min\"]\n df_min.reset_index(inplace=True)\n\n df = pd.merge(df, df_min, how=\"outer\", on=\"ID\")\n df[\"year\"] = df.Date.map(lambda x: x.year)\n df[\"year_min\"] = df.date_min.map(lambda x: x.year)\n df[\"Annuli_orig\"] = df.Annuli\n df.Annuli = df.year - df.year_min + df.annuli_min\n df.Annuli = np.nan_to_num(df.Annuli)\n df[\"Annuli\"] = pd.to_numeric(df[\"Annuli\"], downcast=\"integer\")\n\n # Annuli Buckets\n buckets = 5\n interval = int(df[\"Annuli\"].max() / buckets)\n buckets = [i for i in range(0, df[\"Annuli\"].max() + interval, interval)]\n labels = [\"'{0} - {1}'\".format(i, i + interval) for i in buckets]\n df[\"Annuli_Group\"] = pd.cut(\n df.Annuli, buckets, labels=labels[:-1], include_lowest=True\n )\n\n return df"
] | [
"0.6037674",
"0.6036051",
"0.60075384",
"0.5784221",
"0.5742433",
"0.572674",
"0.56658465",
"0.5660512",
"0.5627629",
"0.55905753",
"0.5539647",
"0.55276555",
"0.55015665",
"0.5474494",
"0.5460671",
"0.54350704",
"0.54293996",
"0.54264325",
"0.5407314",
"0.53973925",
"0.53973925",
"0.5382971",
"0.5368673",
"0.536704",
"0.5364628",
"0.53627765",
"0.5337693",
"0.5329646",
"0.5324765",
"0.5305711"
] | 0.6541016 | 0 |
Aggregate values according to month. This function accepts a dataframe, two columns (a date column and a feature column) and an aggregation function name (agg), and returns a pivot table with the aggregated values of the feature, indexed by month. | def agg_values_ina_month(df, date_col, agg_col, agg):
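    # Parse the date column as datetimes so the month can be extracted below.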
df[date_col] = pd.to_datetime(df[date_col])
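    # Map the aggregation name to the corresponding function ('len' yields a count).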
aggregate = {'mean':np.mean,'max':np.max,'min':np.min,'sum':np.sum,'len':len}
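    # Build the pivot table of the feature column, indexed by the month of the date column.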
    aggregated_value = df.pivot_table(values=[agg_col], index=df[date_col].dt.month, aggfunc={agg_col: aggregate[agg]})
return aggregated_value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def month_summary(phenology_df, out_csv=None):\n month_list = ['Jul','Aug','Sep','Oct','Nov','Dec','Jan','Feb','Mar','Apr',\n 'May','Jun']\n species_list = phenology_df['species'].unique().tolist()\n\n count_list = []\n for species in species_list:\n count_dict = {'species':species}\n for month in month_list:\n month_check = phenology_df['monthStart'] == month\n species_check = phenology_df['species'] == species\n occurrence_count = phenology_df[month_check & species_check]['numSpecimen'].sum()\n count_dict[month] = occurrence_count\n count_list.append(count_dict)\n\n month_df = pd.DataFrame(count_list)\n month_df = month_df.set_index('species')\n month_df = month_df[month_list]\n\n return month_df",
"def getAggregateStatistics(df: pd.core.frame.DataFrame, feature: str,\n kind: str, year: str) -> pd.core.frame.DataFrame:\n if year == 'all':\n df = df.loc[df['Trade Flow'] == kind, [feature,\n 'Year', 'Reporter']].groupby(['Year', 'Reporter']).agg(['sum']).reset_index()\n else:\n df = df.loc[(df['Trade Flow'] == kind) &\n (df['Period'] > f'{year}-01-01') & (df['Period'] <= f'{year}-12-31'), \n [feature,'Reporter']].groupby(['Reporter']).agg(['sum']).reset_index()\n \n df['Year'] = int(year)\n\n df_sorted = df.sort_values(by=(feature,'sum'), ascending=False)\n \n return df_sorted",
"def breakdown_by_month(\n df,\n start_column,\n end_column,\n key_column,\n value_column,\n output_columns=None,\n aggfunc=\"count\",\n):\n\n def build_df(t):\n start_date = getattr(t, start_column)\n end_date = getattr(t, end_column)\n key = getattr(t, key_column)\n value = getattr(t, value_column)\n\n if end_date is pd.NaT:\n end_date = pd.Timestamp.today()\n\n first_month = (\n start_date.normalize().to_period(\"M\").to_timestamp(\"D\", \"S\")\n )\n last_month = end_date.normalize().to_period(\"M\").to_timestamp(\"D\", \"S\")\n\n index = pd.date_range(first_month, last_month, freq=\"MS\")\n\n return pd.DataFrame(index=index, data=[[key]], columns=[value])\n\n breakdown = (\n pd.concat([build_df(t) for t in df.itertuples()], sort=True)\n .resample(\"MS\")\n .agg(aggfunc)\n )\n\n if output_columns:\n breakdown = breakdown[\n [s for s in output_columns if s in breakdown.columns]\n ]\n\n return breakdown",
"def get_magic_feature(df, outname):\n df['fea_unum'] = df[['1_total_fee','2_total_fee','3_total_fee', '4_total_fee']].nunique(axis=1)\n df.drop_duplicates(subset =['1_total_fee','2_total_fee','3_total_fee', '4_total_fee'],inplace=True)\n df = df[df.fea_unum>2]\n for month1_month2 in [\n [1,2],\n [1,3],\n [1,4],\n [2,1],\n [2,3],\n [2,4],\n [3,1],\n [3,2],\n [3,4],\n [4,1],\n [4,2],\n [4,3],\n ]:\n month1, month2 = str(month1_month2[0]), str(month1_month2[1])\n mstr = '_total_fee'\n tmp = df.groupby([month1 + mstr, month2 + mstr]).size().reset_index()\n tmp.columns =['first','second','{}_total_fee_{}_total_fee'.format(month1,month2)]\n if month1_month2 == [1,2]:\n result_df = tmp\n else:\n result_df = result_df.merge(tmp, on = ['first','second'], how = 'outer')\n\n tmpall = result_df\n tmpall = tmpall[tmpall.second!=0]\n tmpall['count'] = tmpall.iloc[:,2:].sum(axis=1)\n tmpall = tmpall.merge(tmpall.groupby('second',as_index=False)['count'].agg({'sum':'sum'}),on='second',how='left')\n tmpall['rate'] = tmpall['count'] / tmpall['sum']\n tmpall = tmpall.sort_values(['first','rate'],ascending=False)\n tmpall = tmpall [tmpall['count']>10]\n tmpall = tmpall.sort_values(['first','count'],ascending=False)\n tmp_res = tmpall.drop_duplicates('first',keep='first')\n tmp_res[tmp_res['count']>10].to_csv(output_path + outname, columns = ['first','second'],index = False)",
"def _mean_of_monthly_means_basic_method(df: pd.DataFrame) -> pd.DataFrame:\n monthly_df: pd.DataFrame = df.groupby(df.index.month).mean().mean().to_frame()\n monthly_df.columns = ['MOMM']\n return monthly_df",
"def agg_albedo(time_index, albedo):\n monthly_albedo = np.zeros(12).tolist()\n albedo = np.array(albedo)\n for month in range(1, 13):\n m = np.where(time_index.month == month)[0]\n monthly_albedo[int(month - 1)] = albedo[m].mean()\n\n return monthly_albedo",
"def datetime_columns(df, feature):\r\n df['day'] = pd.to_datetime(df[feature]).dt.day\r\n df['month'] = pd.to_datetime(df[feature]).dt.month\r\n df['year'] = pd.to_datetime(df[feature]).dt.year\r\n return df",
"def dataset_extract_features_from_date(dataset,date_feature): \n dataset['dayofmonth'] = dataset[date_feature].dt.day\n dataset['dayofyear'] = dataset[date_feature].dt.dayofyear \n dataset['dayofweek'] = dataset[date_feature].dt.dayofweek\n dataset['month'] = dataset[date_feature].dt.month\n dataset['year'] = dataset[date_feature].dt.year\n dataset['weekofyear'] = dataset[date_feature].dt.weekofyear\n dataset['is_month_start'] = (dataset[date_feature].dt.is_month_start).astype(int)\n dataset['is_month_end'] = (dataset[date_feature].dt.is_month_end).astype(int)\n return dataset",
"def feature_engineer_ts(self, month=12):\n st_data_dt = self.get_st_data_dt()\n end_data_dt = self.get_end_data_dt()\n date_list = pd.date_range(*(pd.to_datetime([st_data_dt, end_data_dt]) + pd.offsets.MonthEnd()), freq='M').to_list()\n population = self.get_population()\n is_raw_partition = self.get_is_raw_partition()\n# Lag 2 months\n all_data = []\n# join past is_raw columns\n for d in date_list:\n \n population_partition = population[population['ft_data_dt'] == d] \n old_date = d - relativedelta(months=month)\n y = old_date.year\n m = old_date.month\n day = calendar.monthrange(y, m)[1]\n old_date = date(y, m, day)\n old_date = max(old_date, st_data_dt)\n date_list_join = pd.date_range(*(pd.to_datetime([old_date, d]) + pd.offsets.MonthEnd()), freq='M').to_list()\n date_list_join.reverse()\n for index, date_join in enumerate(date_list_join):\n if date_join.strftime(\"%Y-%m-%d\") not in is_raw_partition.keys():\n continue\n \n tmp_is_raw_partition = is_raw_partition[date_join.strftime(\"%Y-%m-%d\")]\n \n rename_col = [c for c in list(tmp_is_raw_partition.columns) if c not in ['idd', 'ft_data_dt']]\n new_col = [c+'_'+str(index+1) for c in rename_col]\n name_dict = dict(list(zip(rename_col, new_col)))\n tmp_is_raw_partition = tmp_is_raw_partition.rename(columns = name_dict)\n population_partition = population_partition.merge(tmp_is_raw_partition.drop(columns=['ft_data_dt']), on=['idd'], how='left')\n all_data.append(population_partition)\n ts_df = pd.concat(all_data)\n threshold_null = len(ts_df.columns) - 4\n ts_df = ts_df[ts_df.isnull().sum(axis=1) < threshold_null]\n \n def sum_ts(self, df, col_list, feature, duration):\n ft_name = feature+ '_sum_'+str(duration)+'mth'\n tmp_df = df[col_list].sum(axis = 1).to_frame(name=ft_name)\n return tmp_df\n \n def mean_ts(self, df, col_list, feature, duration):\n ft_name = feature+ '_avg_'+str(duration)+'mth'\n tmp_df = df[col_list].mean(axis = 1).to_frame(name=ft_name)\n return tmp_df\n \n def std_ts(self, df, col_list, feature, duration):\n ft_name = feature+ '_std_'+str(duration)+'mth'\n tmp_df = df[col_list].std(axis = 1).to_frame(name=ft_name)\n return tmp_df\n \n def med_ts(self, df, col_list, feature, duration):\n ft_name = feature+ '_med_'+str(duration)+'mth'\n tmp_df = df[col_list].std(axis = 1).to_frame(name=ft_name)\n return tmp_df\n \n def min_ts(self, df, col_list, feature, duration):\n ft_name = feature+ '_min_'+str(duration)+'mth'\n tmp_df = df[col_list].min(axis = 1).to_frame(name=ft_name)\n return tmp_df\n \n def max_ts(self, df, col_list, feature, duration):\n ft_name = feature+ '_max_'+str(duration)+'mth'\n tmp_df = df[col_list].max(axis = 1).to_frame(name=ft_name)\n return tmp_df\n \n def q1_ts(self, df, col_list, feature, duration):\n ft_name = feature+ '_q1_'+str(duration)+'mth'\n tmp_df = df[col_list].quantile(q=0.25, axis = 1).to_frame(name=ft_name)\n return tmp_df\n \n def q3_ts(self, df, col_list, feature, duration):\n ft_name = feature+ '_q3_'+str(duration)+'mth'\n tmp_df = df[col_list].quantile(q=0.75, axis = 1).to_frame(name=ft_name)\n return tmp_df\n \n def last_ts(self, df, feature):\n ft_name = feature+ '_last'\n tmp_df = df[feature+'_'+str(1)].to_frame(name=ft_name)\n return tmp_df\n \n ts_duration = [1, 3, 6, 9, 12]\n feature_list = self.get_is_raw_col()\n df = ts_df[['idd', 'ft_data_dt']]\n# Time Series Features\n for duration in ts_duration:\n for col in feature_list:\n col_list = [col+'_'+str(i) for i in range(1, duration+1)]\n df = pd.concat([df\\\n , sum_ts(self, ts_df, col_list, col, duration)\\\n , 
mean_ts(self, ts_df, col_list, col, duration)\\\n , med_ts(self, ts_df, col_list, col, duration)\\\n , q1_ts(self, ts_df, col_list, col, duration)\\\n , q3_ts(self, ts_df, col_list, col, duration)\\\n , min_ts(self, ts_df, col_list, col, duration)\\\n , max_ts(self, ts_df, col_list, col, duration)]\n , axis=1)\n self.set_all_data(df)",
"def month_lag_distribution(source_df, field=\"month_lag\", path=path.path, nrows=None):\n _log.info(\"Creating features from {}\".format(field))\n prefix = source_df.split(\"_\")[0]\n source_df = \"{}/{}\".format(path, source_df)\n\n _log.info(\"Reading from {}\".format(source_df))\n try:\n df = pd.read_csv(source_df, usecols=[\"card_id\", field], nrows=nrows)\n _log.info(\"Successfully read from {}\".format(source_df))\n except Exception as e:\n _log.exception(e)\n\n _log.info(\"Computing distribution of month lag\")\n func_to_be_applied = [min, max, pd.Series.nunique]\n func_to_be_applied_dummy = [max, np.mean]\n rename_dict = create_rename_dict(prefix, field, func_to_be_applied)\n rename_dict_dummy = create_rename_dict(prefix, \"dummy\", func_to_be_applied_dummy)\n\n df[\"dummy\"] = 1\n df_features = df.groupby(\"card_id\").agg({field:func_to_be_applied}).reset_index()\n df_features = pd.concat([pd.DataFrame(df_features[\"card_id\"]), df_features[field]], axis=1, sort=False)\n\n _log.info(\"Renaming columns: {}\".format(rename_dict))\n df_features.rename(columns=rename_dict, inplace=True)\n\n _log.info(\"Computing time in month between transactions\")\n df_freq = (df.groupby([\"card_id\", field]).agg({\"dummy\": np.sum}).reset_index().groupby(\"card_id\")\n .agg({\"dummy\": func_to_be_applied_dummy}).reset_index())\n df_freq = pd.concat([pd.DataFrame(df_freq[\"card_id\"]), df_freq[\"dummy\"]], axis=1, sort=False)\n df_freq.rename(columns=rename_dict_dummy, inplace=True)\n\n _log.info(\"Creating final df\")\n df_features = df_features.merge(df_freq, how=\"inner\", on=\"card_id\")\n return df_features",
"def aggregator():\n return Aggregator(\n agg_col=\"col_a\", values_col=\"col_b\", aggregates=[\"min\", \"max\", \"avg\", \"sum\"]\n )",
"def getAggregate(df, step):\n #df = mig.getAggregate(df, 2)\n #df = df.resample('2t').mean() :alternate resampling method?\n\n idx, res, flag = [], [], []\n\n for (start, end) in getWindows(df.value, step, step):\n idx.append(df.index[end])\n res.append(df.value.iloc[start:end].mean())\n flag.append(df.flag.iloc[start] & df.flag.iloc[end])\n\n return pd.DataFrame.from_records({'value':res, 'flag':flag}, index=idx,\n columns=['value', 'flag'])",
"def depart_arrive_stats_by_month(flights):\n\n return ...",
"def __month(self):\n return _VirtualColumn(\n df_name=self.thisptr[\"df_name_\"],\n operator=\"month\",\n operand1=self,\n operand2=None\n )",
"def breakdown_by_month_sum_days(\n df,\n start_column,\n end_column,\n value_column,\n output_columns=None,\n aggfunc=\"sum\",\n):\n\n def build_df(t):\n start_date = getattr(t, start_column)\n end_date = getattr(t, end_column)\n value = getattr(t, value_column)\n\n if end_date is pd.NaT:\n end_date = pd.Timestamp.today()\n\n days_range = pd.date_range(start_date, end_date, freq=\"D\")\n first_month = (\n start_date.normalize().to_period(\"M\").to_timestamp(\"D\", \"S\")\n )\n last_month = end_date.normalize().to_period(\"M\").to_timestamp(\"D\", \"S\")\n\n index = pd.date_range(first_month, last_month, freq=\"MS\")\n\n return pd.DataFrame(\n index=index,\n data=[\n [\n len(\n pd.date_range(\n month_start,\n month_start + pd.tseries.offsets.MonthEnd(1),\n freq=\"D\",\n ).intersection(days_range)\n )\n ]\n for month_start in index\n ],\n columns=[value],\n )\n\n breakdown = (\n pd.concat([build_df(t) for t in df.itertuples()], sort=True)\n .resample(\"MS\")\n .agg(aggfunc)\n )\n\n if output_columns:\n breakdown = breakdown[\n [s for s in output_columns if s in breakdown.columns]\n ]\n\n return breakdown",
"def monthly_overview():\n df = (\n monzo\n [~monzo.category.isin(['general', 'transfer'])]\n .pivot_table('amount', 'month', 'category',\n aggfunc='sum', fill_value=0)\n .reset_index()\n .melt(id_vars=['month'], value_name='amount')\n )\n inc = df[df.category.eq('income')]\n g = df.groupby('month')\n fig = (\n px.bar(\n df[~df.category.eq('income')],\n x='month',\n y='amount',\n color='category',\n template='simple_white',\n hover_name='category',\n )\n .add_scatter(\n x=inc.month,\n y=inc.amount.mul(-1),\n showlegend=False,\n mode='markers',\n marker=dict(\n color='#EF9A9A',\n line_width=2,\n line_color='white',\n size=10\n )\n )\n .update_xaxes(\n rangeslider_visible=False,\n rangeselector=dict(\n buttons=list(\n [\n dict(\n count=1,\n label=\"1m\",\n step=\"month\",\n stepmode=\"backward\"\n ),\n dict(\n count=6,\n label=\"6m\",\n step=\"month\",\n stepmode=\"backward\"\n ),\n dict(\n count=1,\n label=\"1y\",\n step=\"year\",\n stepmode=\"backward\"\n ),\n dict(\n step=\"all\"\n ),\n ]\n )\n )\n )\n .update_layout(\n xaxis_title='Month',\n yaxis_title='Income / Spending',\n xaxis_tickformat='%b %Y',\n xaxis_tickangle=30,\n showlegend=False,\n )\n )\n return fig",
"def _calculate_month_roam_distances(df):\n\n month_df = (\n df\n .groupby('month')\n .sum()\n )\n\n return month_df",
"def keyword_based_date_range_selection(self, keyword,keyword_value, aggfunc={},date_column=None, date_column_format=\"%Y-%m-%d %H:%M:%S\", custom=[],grouping_colums=[],where=None):\n expected_interval_for_aggregation_in_seconds = 0\n # working code with converion of date limits commenting the below section for the testing of pivot tables and grouper below this section\n # need to use reg exp but there is problem with separating kewa_value ex:10min should be separated as 10 min\n # if keyword == 'custom':\n # print(\"Currently not supported\")\n # exit()\n #\n # elif 'min' in keyword:\n # expected_seconds = 60\n # expected_interval_for_aggregation_in_seconds = expected_seconds*keyword_value\n # elif 'hour' in keyword:\n # expected_seconds = 60*60\n # expected_interval_for_aggregation_in_seconds = expected_seconds*keyword_value\n # elif 'day' in keyword:\n # expected_seconds = 60*60*24\n # expected_interval_for_aggregation_in_seconds = expected_seconds*keyword_value\n # elif 'week' in keyword:\n # expected_seconds = 60*60*24*7\n # expected_interval_for_aggregation_in_seconds = expected_seconds*keyword_value\n # elif 'month' in keyword:\n # expected_seconds = 60*60*24*30\n # expected_interval_for_aggregation_in_seconds = expected_seconds*keyword_value\n\n\n\n #uniquify the date column from the dataframe\n\n\n\n # #now get the min_interval_in_seconds of the user\n # min_seconds = self.get_min_interval_in_seconds(date_column=date_column,format_of_date=date_column_format)\n #\n # print(\"the minimum interval seconds is\", min_seconds)\n # print(\"expected_interval_for_aggregation_in_seconds\", expected_interval_for_aggregation_in_seconds)\n # #compare the min_seconds and expected_interval_for_aggregation_in_seconds if min_seconds is greated than expected_inteval then as for now its error result_df.\n #\n # if expected_interval_for_aggregation_in_seconds > min_seconds:\n # #calculating the range to split the dataframe\n # range = int(expected_interval_for_aggregation_in_seconds/min_seconds)\n # #split the dataframr into multipldf based on range\n # splited_dfs = self.split_df_to_many(range)\n #\n # date_value = []\n # aggregation_value = []\n # #here we get splited df according to range\n # for df in splited_dfs:\n # print(\"splited dfs \",df)\n # value_df = df.iloc[:,value_column]\n # # print(\"the value list is \",value_df)\n # aggregation = Aggregator()\n # #apply aggregation on each chucnk of divrded dataframe\n # aggregation_result = aggregation.many_to_one(func,value_df)\n # d = self.df.iloc[:,date_column]\n # date_name = d.name\n # print(\"the date name\",date_name)\n # #append the first vale o date field into date_value list\n # date_value.append(df[date_name].iloc[0])\n # #append the result of aggregation class into aggregation_value list\n # aggregation_value.append(aggregation_result)\n # d = self.df.iloc[:,date_column]\n # date_name = d.name\n # v = self.df.iloc[:,value_column]\n # value_name = v.name\n #\n # #generate the dict from both date_value list and aggregation_value list\n # frame = {date_name:date_value,value_name:aggregation_value}\n # #create a result dataframe\n # result_df = pd.DataFrame(frame)\n # print(\"the results dataframe is \", result_df)\n #\n # print(\"the expected range is\",range)\n #\n # else:\n # print(\"-F- the interval range supporting is not found\")\n # exit()\n\n # todo\n # use self.df\n #print(self.df.iloc[0:range,1])\n # resulted_array = []\n # for v in self.df.iloc[0:range,value_column]:\n # resulted_array.append(v)\n #\n #\n # agg = Aggregator()\n 
# return agg.many_to_one(func, resulted_array)\n\n\n # craeting the below section for the testing of pivot table and grouper methods.\n df = self.df\n if aggfunc:\n if len(aggfunc)>0:\n\n for column, value in aggfunc.items():\n # print(\"the converting column name is\", column)\n try:\n df[column] = df[column].astype(float)\n except:\n result_df=\"Error\"\n\n\n # print(\"the converted column name is\",df.dtypes)\n #Todo should convert the numerical columns to numbered datatype]\n #for testing purpose e manually converted it\n\n\n # print(\"the keyword is \",keyword)\n # print(\"the date column is \",date_column)\n # print(\"the grouping_colums is \",grouping_colums)\n # print(\"the value column is \",value_column)\n # print(\"the aggrigation function is \",aggfunc)\n # print(\"in project query frequency\",keyword)\n if keyword:\n\n if keyword == 'custom':\n # print(\"Currently not supported\")\n exit()\n\n elif 'min' in keyword:\n expected_freq = 'M'\n # print(\"the date column is \",date_column)\n if where and aggfunc and grouping_colums :\n \n try:\n result_df = df.pivot_table(index= grouping_colums,columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n result_df = result_df.stack().reset_index()\n except:\n result_df=\"Error\"\n elif not where and aggfunc and grouping_colums :\n \n try:\n result_df = df.pivot_table(index= grouping_colums,columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n result_df = result_df.stack().reset_index()\n except:\n result_df=\"Error\"\n elif where and aggfunc and not grouping_colums:\n try:\n result_df = df.pivot_table(columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n # print(\"new type of query\",result_df)\n pv_df = result_df.transpose()\n result_df = pv_df.reset_index()\n except:\n result_df=\"Error\"\n elif not where and aggfunc and not grouping_colums:\n try:\n result_df = df.pivot_table(columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n # print(\"new type of query\",result_df)\n pv_df = result_df.transpose()\n result_df = pv_df.reset_index()\n except:\n result_df=\"Error\"\n\n elif where and grouping_colums and not aggfunc:\n try:\n # print(\"year just grouping\")\n grouping_colums.append(date_column)\n grouped_df =df.groupby(grouping_colums)\n result_df = pd.DataFrame(grouped_df.size().reset_index(name = \"Count\"))\n except:\n result_df=\"Error\"\n elif not where and grouping_colums and not aggfunc:\n try:\n # print(\"year just grouping\")\n grouping_colums.append(date_column)\n grouped_df =df.groupby(grouping_colums)\n result_df = pd.DataFrame(grouped_df.size().reset_index(name = \"Count\"))\n except:\n result_df=\"Error\"\n\n elif where and expected_freq:\n try:\n # print(\"only frequency\")\n s_df = df.groupby(pd.Grouper(freq=expected_freq,key=date_column))\n result_df = pd.DataFrame(s_df.size().reset_index(name = \"Count\"))\n \n except:\n result_df=\"Error\"\n elif not where and expected_freq:\n try:\n # print(\"only frequency\")\n s_df = df.groupby(pd.Grouper(freq=expected_freq,key=date_column))\n result_df = pd.DataFrame(s_df.size().reset_index(name = \"Count\"))\n \n except:\n result_df=\"Error\"\n\n\n elif 'hour' in keyword:\n expected_freq = 'H'\n # print(\"the date column is \",date_column)\n if where and aggfunc and grouping_colums :\n \n try:\n result_df = df.pivot_table(index= grouping_colums,columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n result_df = 
result_df.stack().reset_index()\n except:\n result_df=\"Error\"\n elif not where and aggfunc and grouping_colums :\n \n try:\n result_df = df.pivot_table(index= grouping_colums,columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n result_df = result_df.stack().reset_index()\n except:\n result_df=\"Error\"\n elif where and aggfunc and not grouping_colums:\n try:\n result_df = df.pivot_table(columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n # print(\"new type of query\",result_df)\n pv_df = result_df.transpose()\n result_df = pv_df.reset_index()\n except:\n result_df=\"Error\"\n elif not where and aggfunc and not grouping_colums:\n try:\n result_df = df.pivot_table(columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n # print(\"new type of query\",result_df)\n pv_df = result_df.transpose()\n result_df = pv_df.reset_index()\n except:\n result_df=\"Error\"\n\n elif where and grouping_colums and not aggfunc:\n try:\n # print(\"year just grouping\")\n grouping_colums.append(date_column)\n grouped_df =df.groupby(grouping_colums)\n result_df = pd.DataFrame(grouped_df.size().reset_index(name = \"Count\"))\n except:\n result_df=\"Error\"\n elif not where and grouping_colums and not aggfunc:\n try:\n # print(\"year just grouping\")\n grouping_colums.append(date_column)\n grouped_df =df.groupby(grouping_colums)\n result_df = pd.DataFrame(grouped_df.size().reset_index(name = \"Count\"))\n except:\n result_df=\"Error\"\n\n elif where and expected_freq:\n try:\n # print(\"only frequency\")\n s_df = df.groupby(pd.Grouper(freq=expected_freq,key=date_column))\n result_df = pd.DataFrame(s_df.size().reset_index(name = \"Count\"))\n \n except:\n result_df=\"Error\"\n elif not where and expected_freq:\n try:\n # print(\"only frequency\")\n s_df = df.groupby(pd.Grouper(freq=expected_freq,key=date_column))\n result_df = pd.DataFrame(s_df.size().reset_index(name = \"Count\"))\n \n except:\n result_df=\"Error\"\n elif 'week' in keyword:\n expected_freq = 'W'\n # print(\"the date column is \",date_column)\n if where and aggfunc and grouping_colums :\n \n try:\n result_df = df.pivot_table(index= grouping_colums,columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n result_df = result_df.stack().reset_index()\n except:\n result_df=\"Error\"\n elif not where and aggfunc and grouping_colums :\n \n try:\n result_df = df.pivot_table(index= grouping_colums,columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n result_df = result_df.stack().reset_index()\n except:\n result_df=\"Error\"\n elif where and aggfunc and not grouping_colums:\n try:\n result_df = df.pivot_table(columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n # print(\"new type of query\",result_df)\n pv_df = result_df.transpose()\n result_df = pv_df.reset_index()\n except:\n result_df=\"Error\"\n elif not where and aggfunc and not grouping_colums:\n try:\n result_df = df.pivot_table(columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n # print(\"new type of query\",result_df)\n pv_df = result_df.transpose()\n result_df = pv_df.reset_index()\n except:\n result_df=\"Error\"\n\n elif where and grouping_colums and not aggfunc:\n try:\n # print(\"year just grouping\")\n grouping_colums.append(date_column)\n grouped_df =df.groupby(grouping_colums)\n result_df = pd.DataFrame(grouped_df.size().reset_index(name = 
\"Count\"))\n except:\n result_df=\"Error\"\n elif not where and grouping_colums and not aggfunc:\n try:\n # print(\"year just grouping\")\n grouping_colums.append(date_column)\n grouped_df =df.groupby(grouping_colums)\n result_df = pd.DataFrame(grouped_df.size().reset_index(name = \"Count\"))\n except:\n result_df=\"Error\"\n\n elif where and expected_freq:\n try:\n # print(\"only frequency\")\n s_df = df.groupby(pd.Grouper(freq=expected_freq,key=date_column))\n result_df = pd.DataFrame(s_df.size().reset_index(name = \"Count\"))\n \n except:\n result_df=\"Error\"\n elif not where and expected_freq:\n try:\n # print(\"only frequency\")\n s_df = df.groupby(pd.Grouper(freq=expected_freq,key=date_column))\n result_df = pd.DataFrame(s_df.size().reset_index(name = \"Count\"))\n \n except:\n result_df=\"Error\"\n\n elif 'day' in keyword:\n expected_freq = 'D'\n # print(\"the date column is \",date_column)\n if where and aggfunc and grouping_colums :\n \n try:\n result_df = df.pivot_table(index= grouping_colums,columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n result_df = result_df.stack().reset_index()\n except:\n result_df=\"Error\"\n elif not where and aggfunc and grouping_colums :\n \n try:\n result_df = df.pivot_table(index= grouping_colums,columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n result_df = result_df.stack().reset_index()\n except:\n result_df=\"Error\"\n elif where and aggfunc and not grouping_colums:\n try:\n result_df = df.pivot_table(columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n # print(\"new type of query\",result_df)\n pv_df = result_df.transpose()\n result_df = pv_df.reset_index()\n except:\n result_df=\"Error\"\n elif not where and aggfunc and not grouping_colums:\n try:\n result_df = df.pivot_table(columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n # print(\"new type of query\",result_df)\n pv_df = result_df.transpose()\n result_df = pv_df.reset_index()\n except:\n result_df=\"Error\"\n\n elif where and grouping_colums and not aggfunc:\n try:\n # print(\"year just grouping\")\n grouping_colums.append(date_column)\n grouped_df =df.groupby(grouping_colums)\n result_df = pd.DataFrame(grouped_df.size().reset_index(name = \"Count\"))\n except:\n result_df=\"Error\"\n elif not where and grouping_colums and not aggfunc:\n try:\n # print(\"year just grouping\")\n grouping_colums.append(date_column)\n grouped_df =df.groupby(grouping_colums)\n result_df = pd.DataFrame(grouped_df.size().reset_index(name = \"Count\"))\n except:\n result_df=\"Error\"\n\n elif where and expected_freq:\n try:\n # print(\"only frequency\")\n s_df = df.groupby(pd.Grouper(freq=expected_freq,key=date_column))\n result_df = pd.DataFrame(s_df.size().reset_index(name = \"Count\"))\n \n except:\n result_df=\"Error\"\n elif not where and expected_freq:\n try:\n # print(\"only frequency\")\n s_df = df.groupby(pd.Grouper(freq=expected_freq,key=date_column))\n result_df = pd.DataFrame(s_df.size().reset_index(name = \"Count\"))\n \n except:\n result_df=\"Error\"\n elif 'month' in keyword:\n expected_freq = 'M'\n # print(\"the date column is \",date_column)\n if where and aggfunc and grouping_colums :\n \n try:\n result_df = df.pivot_table(index= grouping_colums,columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n result_df = result_df.stack().reset_index()\n except:\n result_df=\"Error\"\n elif not where and aggfunc and 
grouping_colums :\n \n try:\n result_df = df.pivot_table(index= grouping_colums,columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n result_df = result_df.stack().reset_index()\n except:\n result_df=\"Error\"\n elif where and aggfunc and not grouping_colums:\n try:\n result_df = df.pivot_table(columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n # print(\"new type of query\",result_df)\n pv_df = result_df.transpose()\n result_df = pv_df.reset_index()\n except:\n result_df=\"Error\"\n elif not where and aggfunc and not grouping_colums:\n try:\n result_df = df.pivot_table(columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n # print(\"new type of query\",result_df)\n pv_df = result_df.transpose()\n result_df = pv_df.reset_index()\n except:\n result_df=\"Error\"\n\n elif where and grouping_colums and not aggfunc:\n try:\n # print(\"year just grouping\")\n grouping_colums.append(date_column)\n grouped_df =df.groupby(grouping_colums)\n result_df = pd.DataFrame(grouped_df.size().reset_index(name = \"Count\"))\n except:\n result_df=\"Error\"\n elif not where and grouping_colums and not aggfunc:\n try:\n # print(\"year just grouping\")\n grouping_colums.append(date_column)\n grouped_df =df.groupby(grouping_colums)\n result_df = pd.DataFrame(grouped_df.size().reset_index(name = \"Count\"))\n except:\n result_df=\"Error\"\n\n elif where and expected_freq:\n try:\n # print(\"only frequency\")\n s_df = df.groupby(pd.Grouper(freq=expected_freq,key=date_column))\n result_df = pd.DataFrame(s_df.size().reset_index(name = \"Count\"))\n \n except:\n result_df=\"Error\"\n elif not where and expected_freq:\n try:\n # print(\"only frequency\")\n s_df = df.groupby(pd.Grouper(freq=expected_freq,key=date_column))\n result_df = pd.DataFrame(s_df.size().reset_index(name = \"Count\"))\n \n except:\n result_df=\"Error\"\n elif 'year' in keyword:\n expected_freq = 'Y'\n # print(\"year just grouping\",grouping_colums)\n # print(\"the date column is \",date_column)\n if where and aggfunc and grouping_colums :\n \n try:\n result_df = df.pivot_table(index= grouping_colums,columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n result_df = result_df.stack().reset_index()\n except:\n result_df=\"Error\"\n elif not where and aggfunc and grouping_colums :\n \n try:\n result_df = df.pivot_table(index= grouping_colums,columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n result_df = result_df.stack().reset_index()\n except:\n result_df=\"Error\"\n elif where and aggfunc and not grouping_colums:\n try:\n result_df = df.pivot_table(columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n # print(\"new type of query\",result_df)\n pv_df = result_df.transpose()\n result_df = pv_df.reset_index()\n except:\n result_df=\"Error\"\n elif not where and aggfunc and not grouping_colums:\n try:\n result_df = df.pivot_table(columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n # print(\"new type of query\",result_df)\n pv_df = result_df.transpose()\n result_df = pv_df.reset_index()\n except:\n result_df=\"Error\"\n\n elif where and grouping_colums and not aggfunc:\n try:\n # print(\"year just grouping\")\n grouping_colums.append(date_column)\n grouped_df =df.groupby(grouping_colums)\n result_df = pd.DataFrame(grouped_df.size().reset_index(name = \"Count\"))\n except:\n result_df=\"Error\"\n elif not 
where and grouping_colums and not aggfunc:\n try:\n # print(\"year just grouping\")\n grouping_colums.append(date_column)\n grouped_df =df.groupby(grouping_colums)\n result_df = pd.DataFrame(grouped_df.size().reset_index(name = \"Count\"))\n except:\n result_df=\"Error\"\n\n elif where and expected_freq:\n try:\n # print(\"only frequency\")\n s_df = df.groupby(pd.Grouper(freq=expected_freq,key=date_column))\n result_df = pd.DataFrame(s_df.size().reset_index(name = \"Count\"))\n \n except:\n result_df=\"Error\"\n elif not where and expected_freq:\n try:\n # print(\"only frequency\")\n s_df = df.groupby(pd.Grouper(freq=expected_freq,key=date_column))\n result_df = pd.DataFrame(s_df.size().reset_index(name = \"Count\"))\n \n except:\n result_df=\"Error\"\n elif 'quarterly' in keyword:\n expected_freq = 'Q'\n # print(\"the date column is \",date_column)\n if where and aggfunc and grouping_colums :\n \n try:\n result_df = df.pivot_table(index= grouping_colums,columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n result_df = result_df.stack().reset_index()\n except:\n result_df=\"Error\"\n elif not where and aggfunc and grouping_colums :\n \n try:\n result_df = df.pivot_table(index= grouping_colums,columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n result_df = result_df.stack().reset_index()\n except:\n result_df=\"Error\"\n elif where and aggfunc and not grouping_colums:\n try:\n result_df = df.pivot_table(columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n # print(\"new type of query\",result_df)\n pv_df = result_df.transpose()\n result_df = pv_df.reset_index()\n except:\n result_df=\"Error\"\n elif not where and aggfunc and not grouping_colums:\n try:\n result_df = df.pivot_table(columns =pd.Grouper(freq=expected_freq,key=date_column),fill_value=0,aggfunc=aggfunc,)\n # print(\"new type of query\",result_df)\n pv_df = result_df.transpose()\n result_df = pv_df.reset_index()\n except:\n result_df=\"Error\"\n\n elif where and grouping_colums and not aggfunc:\n try:\n # print(\"year just grouping\")\n grouping_colums.append(date_column)\n grouped_df =df.groupby(grouping_colums)\n result_df = pd.DataFrame(grouped_df.size().reset_index(name = \"Count\"))\n except:\n result_df=\"Error\"\n elif not where and grouping_colums and not aggfunc:\n try:\n # print(\"year just grouping\")\n grouping_colums.append(date_column)\n grouped_df =df.groupby(grouping_colums)\n result_df = pd.DataFrame(grouped_df.size().reset_index(name = \"Count\"))\n except:\n result_df=\"Error\"\n\n elif where and expected_freq:\n try:\n # print(\"only frequency\")\n s_df = df.groupby(pd.Grouper(freq=expected_freq,key=date_column))\n result_df = pd.DataFrame(s_df.size().reset_index(name = \"Count\"))\n \n except:\n result_df=\"Error\"\n elif not where and expected_freq:\n try:\n # print(\"only frequency\")\n s_df = df.groupby(pd.Grouper(freq=expected_freq,key=date_column))\n result_df = pd.DataFrame(s_df.size().reset_index(name = \"Count\"))\n \n except:\n result_df=\"Error\"\n else:\n print(\"else in project query\")\n if where and aggfunc and grouping_colums :\n result_df = df.pivot_table(index= grouping_colums ,aggfunc=aggfunc)\n # print(\"the df without time grouper frequency and arregation\",result_df)\n result_df = result_df.reset_index()\n \n try:\n result_df = df.pivot_table(index= grouping_colums ,aggfunc=aggfunc)\n # print(\"the df without time grouper frequency and arregation\",result_df)\n result_df = 
result_df.reset_index()\n except:\n result_df=\"Error\"\n elif not where and aggfunc and grouping_colums :\n result_df = df.pivot_table(index= grouping_colums ,aggfunc=aggfunc)\n print(\"the df without time grouper frequency and arregation\",result_df)\n result_df = result_df.reset_index()\n print(\"after reset index\",result_df)\n \n try:\n result_df = df.pivot_table(index= grouping_colums ,aggfunc=aggfunc)\n print(\"the df without time grouper frequency and arregation\",result_df)\n result_df = result_df.reset_index()\n print(\"after reset index\",result_df)\n except:\n result_df=\"Error\"\n elif where and grouping_colums and not aggfunc:\n grouped_df = df.groupby(grouping_colums)\n result_df = pd.DataFrame(grouped_df.size().reset_index(name = \"Count\"))\n try:\n grouped_df = df.groupby(grouping_colums)\n result_df = pd.DataFrame(grouped_df.size().reset_index(name = \"Count\"))\n except:\n result_df=\"Error\"\n elif not where and grouping_colums and not aggfunc:\n grouped_df = df.groupby(grouping_colums)\n result_df = pd.DataFrame(grouped_df.size().reset_index(name = \"Count\"))\n try:\n grouped_df = df.groupby(grouping_colums)\n result_df = pd.DataFrame(grouped_df.size().reset_index(name = \"Count\"))\n except:\n result_df=\"Error\"\n \n elif aggfunc and not grouping_colums:\n print(\"its agrigation with no grouping\")\n try:\n result_df=\"Error\"\n except:\n result_df=\"Error\"\n \n \n \n \n else:\n if where and aggfunc and grouping_colums :\n \n \n try:\n result_df = df.pivot_table(index= grouping_colums ,aggfunc=aggfunc)\n # print(\"the df without time grouper frequency and arregation\",result_df)\n result_df = result_df.reset_index()\n except:\n result_df=\"Error\"\n elif not where and aggfunc and grouping_colums :\n \n \n try:\n result_df = df.pivot_table(index= grouping_colums ,aggfunc=aggfunc)\n print(\"the df without time grouper frequency and arregation\",result_df)\n result_df = result_df.reset_index()\n print(\"after reset index\",result_df)\n except:\n result_df=\"Error\"\n elif where and grouping_colums and not aggfunc:\n \n try:\n grouped_df = df.groupby(grouping_colums)\n result_df = pd.DataFrame(grouped_df.size().reset_index(name = \"Count\"))\n except:\n result_df=\"Error\"\n elif not where and grouping_colums and not aggfunc:\n \n try:\n grouped_df = df.groupby(grouping_colums)\n result_df = pd.DataFrame(grouped_df.size().reset_index(name = \"Count\"))\n except:\n result_df=\"Error\"\n \n elif where and aggfunc and not grouping_colums:\n \n try:\n result_df=\"Error\"\n except:\n result_df=\"Error\"\n elif not where and aggfunc and not grouping_colums:\n \n try:\n result_df=\"Error\"\n except:\n result_df=\"Error\"\n # print(\"the result data head\", result_df)\n # print(\"the grouper column is \",grouping_colums)\n # print(\"the resulted dataframe is from the pivot table\",result_df)\n return result_df",
"def add_financial_aggregate(data_dict, features_list):\n fields = ['total_stock_value', 'exercised_stock_options', 'total_payments']\n for name in data_dict:\n person = data_dict[name]\n is_valid = True\n for field in fields:\n if person[field] == 'NaN':\n is_valid = False\n if is_valid:\n person['financial_aggregate'] = sum([person[field] for field in fields])\n else:\n person['financial_aggregate'] = 'NaN'\n features_list += ['financial_aggregate']",
"def monthly_viewed(df):\n df = convert_to_datetime(df)\n today = datetime.date.today()\n this_month_start = today - timedelta(days=30)\n last_month_start = today - timedelta(days=60)\n month_per_min = []\n lastmonth_per_min = []\n thismonth_viewed = []\n lastmonth_viewed = []\n for index, row in df.iterrows():\n if row['session_start'].date() >= this_month_start:\n per_min = get_cards_per_min(row)\n month_per_min.append(per_min)\n thismonth_viewed.append(row['total_looked_at'])\n if last_month_start <= row['session_start'].date() < this_month_start:\n per_min = get_cards_per_min(row)\n lastmonth_per_min.append(per_min)\n lastmonth_viewed.append(row['total_looked_at'])\n month_viewed_result = total_viewed(thismonth_viewed, lastmonth_viewed)\n month_viewed_result['total_viewed_monthly'] = month_viewed_result.pop('total_viewed')\n return month_viewed_result",
"def _get_month_order_per_sku(self):\n order_sku_month = self._order.copy()\n order_sku_month['order_month'] = order_sku_month.order_date.astype(str).apply(lambda x: x[:7])\n order_sku_month = order_sku_month.groupby(['item_code', 'order_month'])[['ord_qty']].sum()\n order_sku_month['ord_qty'] = order_sku_month.ord_qty.apply(lambda x: 0 if x < 0 else x)\n order_sku_month = order_sku_month.unstack(level=-1).fillna(0.0)\n order_sku_month.columns = pd.date_range(start='2015-09-30', periods=len(order_sku_month.columns), freq='M')\n return order_sku_month",
"def add_features(df):\n \n assert df.columns.str.contains(\"query|value|keyword|ranking|timestamp|geo\").all(), \"Add features failed. \\\n Missing one of [query, value, keyword, ranking, timestamp, geo]\"\n \n # feature engineering: totals and normalize\n grouped = df.groupby(['ranking']).value # group values by ranking\n df['value_total'] = grouped.transform('sum') # total sum \n df['value_normalized'] = (df.value-grouped.transform('min'))/(grouped.transform('max')-grouped.transform('min')) # normalize \n df['value_normalized_total'] = df.groupby(['ranking']).value_normalized.transform('sum') # total sum of normalized values \n df['date'] = pd.to_datetime(df.query_timestamp).dtd\n \n return df",
"def _aggregate(group_df, sampling_percentage=5 * 2.5):\n out = {}\n dist = []\n total_count = 0\n for i, col in enumerate(columns):\n\n n = group_df[col].sum()\n total_count += n\n dist.append(dict(min=bins[i][0], max=bins[i][1], n=n))\n\n # only aggregate if we have data!\n if total_count:\n aggval, moe = cda.approximate_median(\n dist, sampling_percentage=sampling_percentage\n )\n else:\n aggval = np.nan\n moe = np.nan\n\n result = {}\n result[\"median\"] = aggval\n result[\"median_moe\"] = moe\n result[\"geometry\"] = group_df.geometry.unary_union\n\n return pd.Series(result)",
"def generate_time_series_df(eviction_df):\n evictions_by_month = get_counts_by_month(eviction_df, \"month\", \"total-eviction-filings\")\n timeseries_df = evictions_by_month\n return timeseries_df",
"def pivot_table(\n self,\n index,\n values,\n columns,\n aggfunc,\n fill_value,\n margins,\n dropna,\n margins_name,\n observed,\n sort,\n ):\n return DataFrameDefault.register(pandas.DataFrame.pivot_table)(\n self,\n index=index,\n values=values,\n columns=columns,\n aggfunc=aggfunc,\n fill_value=fill_value,\n margins=margins,\n dropna=dropna,\n margins_name=margins_name,\n observed=observed,\n sort=sort,\n )",
"def _agg(self, df, period):\n\n df = df.resample(period)['author'].agg(['count'])\n return df",
"def create_features(df,rsi_window = 14,macd_feat = [12,26,9]):\n df.dropna(inplace=True)\n ## day and month\n df['Date'] = pd.to_datetime(df['Date'])\n df['Month'] = df['Date'].dt.month\n df['dayowk'] = df['Date'].dt.dayofweek\n df = pd.get_dummies(data = df,columns = ['Month','dayowk'])\n \n ##Previos n-day pct_changes\n df['1day_pct'] = df['Adj Close'].pct_change()\n df['2day_pct'] = df['Adj Close'].pct_change(periods = 2)\n df['3day_pct'] = df['Adj Close'].pct_change(periods = 3)\n df['4day_pct'] = df['Adj Close'].pct_change(periods = 4)\n df['5day_pct'] = df['Adj Close'].pct_change(periods = 5)\n df['7day_pct'] = df['Adj Close'].pct_change(periods = 7)\n \n ##Cumulative sum of 1day_pct\n df['1day_pct_cs'] = df['Adj Close'].pct_change().cumsum()\n \n ##EWMA of 7, 50 and 200 days\n df['ewma_7'] = df['Adj Close'].ewm(span=7).mean()/df['Adj Close']\n df['ewma_50'] = df['Adj Close'].ewm(span=50).mean()/df['Adj Close']\n df['ewma_200'] = df['Adj Close'].ewm(span=200).mean()/df['Adj Close']\n ## Golden Cross vs Death Cross etc.\n #df['7g(50&200)'] = (df['ewma_7'] > df['ewma_50']) & (df['ewma_7'] > df['ewma_200'])\n #df['7l(50&200)'] = (df['ewma_7'] < df['ewma_50']) & (df['ewma_7'] < df['ewma_200'])\n #df['7g50'] = (df['ewma_7'] > df['ewma_50']) & (df['ewma_7'] < df['ewma_200'])\n #df['7g200'] = (df['ewma_7'] < df['ewma_50']) & (df['ewma_7'] > df['ewma_200'])\n \n ##RSI and MACD\n df = RSI(df,14)\n df = MACD_mod(df,nl=macd_feat[0],nh=macd_feat[1],nsig=macd_feat[2])\n \n df['day_var'] = (df['High'] - df['Low'])/df['Close']## Days variance\n df['open_close'] = (df['Open'] - df['Close'])/df['Close'] ## Days Open-Close\n df['high_close'] = (df['High'] - df['Close'])/df['Close'] ##Days High-Close\n df['open_prev_close'] = (df['Open'] - df['Close'].shift(1))/df['Close'] ## Days open - Previos Dyas Close\n \n ##Classification target\n df['target'] = round((np.sign(df['1day_pct']).shift(-1)+1)/2) ## Target for classification\n #df['1_day_target'] = df['Adj Close'].shift(-1) - df['Adj Close'] ## Target for Regression\n #df['target2'] = round((np.sign(df['1day_pct']).shift(-1)+1)/2)## Will the price go up intra-day\n \n ## IS the stock Overbought or Oversold based on RSI?\n df['RSI_overbought'] = df['RSI']>70\n df['RSI_oversold'] = df['RSI']<30\n \n \n #df.drop(['Open','High','Low','Close'],axis=1,inplace=True)\n# df = df.dropna()\n \n #df = df.reset_index(drop=True)\n \n ## Calculating how large the previos hot and cold streaks were\n f = 0\n df['prev_hot_streak'] = np.zeros(df.shape[0])\n for i in range(df.shape[0]-1):\n if df['target'][i] ==1:\n f += 1\n if df['target'][i+1] ==0:\n df['prev_hot_streak'][i+1] = f\n f = 0\n for i in range(1,df.shape[0]):\n #print(i)\n if df['prev_hot_streak'][i]==0:\n df['prev_hot_streak'][i]=df['prev_hot_streak'][i-1]\n \n \n df['prev_cold_streak'] = np.zeros(df.shape[0])\n for i in range(df.shape[0]-1):\n if df['target'][i] ==0:\n f += 1\n if df['target'][i+1] ==1:\n df['prev_cold_streak'][i+1] = f\n f = 0\n\n for i in range(1,df.shape[0]):\n #print(i)\n if df['prev_cold_streak'][i]==0:\n df['prev_cold_streak'][i] = df['prev_cold_streak'][i-1]\n \n ## Calculating current hot and cold streaks\n df['current_hot_streak'] = np.zeros(df.shape[0])\n df['current_cold_streak'] = np.zeros(df.shape[0])\n fhot=0\n fcold=0\n for i in range(df.shape[0]):\n if df['target'][i]==1:\n fhot += 1\n fcold = 0\n df['current_hot_streak'][i] = fhot\n elif df['target'][i]==0:\n fcold += 1\n fhot = 0\n df['current_cold_streak'][i] = fcold\n \n df['prev_hot_streak'] = 
df['prev_hot_streak'].shift(1)\n df['prev_cold_streak'] = df['prev_cold_streak'].shift(1)\n df['current_hot_streak'] = df['current_hot_streak'].shift(1)\n df['current_cold_streak'] = df['current_cold_streak'].shift(1)\n \n ## Combinations of previos streaks\n df['prev_current_hot'] = df['prev_hot_streak'] - df['current_hot_streak']\n df['prev_current_cold'] = df['prev_cold_streak'] - df['current_cold_streak']\n df['current_hot_prev_cold'] = df['current_hot_streak'] - df['prev_cold_streak']\n df['current_cold_prev_hot'] = df['current_cold_streak'] - df['prev_hot_streak']\n \n ##Calculating days since max\n current_max = df['Adj Close'][0]\n df['days_from_max'] = np.zeros(df.shape[0])\n df['pct_from_max'] = np.zeros(df.shape[0])\n #print('blah')\n for i in range(1,df.shape[0]):\n if df['Adj Close'][i] > current_max:\n current_max = df['Adj Close'][i]\n # print(current_max)\n else:\n df['days_from_max'][i] = df['days_from_max'][i-1]+1\n df['pct_from_max'][i] = (df['Adj Close'][i]-current_max)/current_max\n #print(df['days_from_max'][i])\n \n \n \n df.dropna(inplace=True)\n df = df.reset_index(drop=True)\n return df",
"def compute_store_month(df):\n return df[\"store\"].astype(str) + \"_\" + df[\"month\"].astype(str)",
"def merge_additional_features(df):\n col = [\"hour\",\"day\" ,\"dayofweek\", \"month\" , \"interval\" , \"season\", \"time_of_day\"]\n additional_featues = pd.DataFrame(data = [features_from_timestamp(i) for i in df.index ],columns=col).set_index(df.index)\n data = df.merge(additional_featues,on=\"dt\")\n data.sort_index(inplace=True) #make sure data is sorted by date\n\n return data",
"def obtain_monthly_mean(data=pd.DataFrame()):\n return data.resample(\"MS\").mean()"
] | [
"0.5653905",
"0.56239855",
"0.5513427",
"0.5429735",
"0.5405292",
"0.540336",
"0.5373406",
"0.53479385",
"0.5302161",
"0.5241061",
"0.5224448",
"0.51862144",
"0.51552224",
"0.51262534",
"0.5092144",
"0.50751626",
"0.50463194",
"0.5034887",
"0.5023972",
"0.5020303",
"0.5016048",
"0.50034595",
"0.49687397",
"0.49110192",
"0.4904617",
"0.48855606",
"0.48693526",
"0.48692676",
"0.48662856",
"0.48608744"
] | 0.75534564 | 0 |
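The `keyword_based_date_range_selection` negative above repeats one pattern per frequency keyword: group (or pivot) on a `pd.Grouper` over the date column and either count rows or apply the supplied `aggfunc`. Below is a minimal, self-contained sketch of that pattern, not part of the dataset; pandas is assumed and the DataFrame, column names ("ts", "store", "amount"), and frequencies are hypothetical.

```python
import pandas as pd

df = pd.DataFrame({
    "ts": pd.date_range("2021-01-01", periods=6, freq="D"),
    "store": ["A", "A", "B", "B", "A", "B"],
    "amount": [10.0, 20.0, 5.0, 15.0, 25.0, 30.0],
})

# Row counts per week; swapping freq for "D", "M", "Q", or "Y" mirrors the
# other keyword branches of the negative above.
counts = df.groupby(pd.Grouper(freq="W", key="ts")).size().reset_index(name="Count")

# Per-group, per-period aggregation, flattened back into a long frame,
# matching the pivot_table(...).stack().reset_index() branches.
pivot = df.pivot_table(index=["store"],
                       columns=pd.Grouper(freq="W", key="ts"),
                       fill_value=0,
                       aggfunc={"amount": "sum"})
result = pivot.stack().reset_index()

print(counts)
print(result)
```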
Aggregate values by grouping. This function accepts a dataframe, 1 column(feature) and an aggregation function(agg1); it groups the dataframe by that column and applies the aggregation. | def group_values(df,col1,agg1):
grouping=df.groupby(col1).agg(agg1)
return grouping | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def agg(self, arg):\n # DataFrame{'a': [1, 1, 2], 'b': [1, 2, 3], 'c': [2, 2, 1]})\n # a.groupby('a').agg('sum') -- applied on rest\n # a.groupby('a').agg(['sum', 'min']) -- both applied on rest\n # a.groupby('a').agg({'b': ['min', 'mean']}) -- applied on\n # TODO\n # a.groupby('a').aggregate( a= me['a'].mean(), b_min =me['b'].min(), b_mean=me['c'].mean()))\n # f1 = lambda x: x.quantile(0.5); f1.__name__ = \"q0.5\"\n # f2 = lambda x: x.quantile(0.75); f2.__name__ = \"q0.75\"\n # a.groupby('a').agg([f1, f2])\n\n res = {}\n for f, c in zip(self._key_fields, self._unzip_group_keys()):\n res[f.name] = c\n for agg_name, field, op in self._normalize_agg_arg(arg):\n res[agg_name] = self._apply1(field, op)\n return self._parent._fromdata(res, None)",
"def groupby_agg(\n self,\n by,\n agg_func,\n axis,\n groupby_kwargs,\n agg_args,\n agg_kwargs,\n how=\"axis_wise\",\n drop=False,\n series_groupby=False,\n ):\n if isinstance(by, type(self)) and len(by.columns) == 1:\n by = by.columns[0] if drop else by.to_pandas().squeeze()\n # converting QC 'by' to a list of column labels only if this 'by' comes from the self (if drop is True)\n elif drop and isinstance(by, type(self)):\n by = list(by.columns)\n\n defaulter = SeriesGroupByDefault if series_groupby else GroupByDefault\n return defaulter.register(defaulter.get_aggregation_method(how))(\n self,\n by=by,\n agg_func=agg_func,\n axis=axis,\n groupby_kwargs=groupby_kwargs,\n agg_args=agg_args,\n agg_kwargs=agg_kwargs,\n drop=drop,\n )",
"def getAggregate(df, step):\n #df = mig.getAggregate(df, 2)\n #df = df.resample('2t').mean() :alternate resampling method?\n\n idx, res, flag = [], [], []\n\n for (start, end) in getWindows(df.value, step, step):\n idx.append(df.index[end])\n res.append(df.value.iloc[start:end].mean())\n flag.append(df.flag.iloc[start] & df.flag.iloc[end])\n\n return pd.DataFrame.from_records({'value':res, 'flag':flag}, index=idx,\n columns=['value', 'flag'])",
"def aggregator():\n return Aggregator(\n agg_col=\"col_a\", values_col=\"col_b\", aggregates=[\"min\", \"max\", \"avg\", \"sum\"]\n )",
"def aggregate(self, arg):\n return self.agg(arg)",
"def aggregate_count_data(df, groupby, id_vars=[]):\n # Make sure we have the column we are grouping by\n if groupby not in df.columns:\n raise ValueError(\n f\"the specified column to group by '{by}' is not in the input data\"\n )\n\n # data columns\n data_columns = [\n col\n for col in df.columns\n if not col.startswith(\"geo\") and not col.endswith(\"moe\")\n ]\n\n def _aggregate(group_df):\n \"\"\"\n The function that aggregates each group\n \"\"\"\n out = {}\n for col in data_columns:\n # The name of the error column (if it exists)\n error_col = f\"{col}_moe\"\n\n # remove any NaN rows\n subset = group_df.dropna(subset=[col], how=\"any\")\n\n # aggregat if we had any rows left\n if len(subset):\n\n # column values, margin of error (if it exists)\n args = np.column_stack(\n [subset[col], subset.get(error_col, np.zeros(len(subset)))]\n )\n\n # do the aggregation\n aggval, moe = cda.approximate_sum(*args)\n else:\n aggval = moe = np.nan\n\n # store\n out[col] = aggval\n if error_col in subset.columns:\n out[f\"{col}_moe\"] = moe\n\n out[\"geometry\"] = group_df.geometry.unary_union\n return pd.Series(out)\n\n # this is the aggregated data, with index of \"by\", e.g., group label\n agg_df = df.groupby(groupby).apply(_aggregate)\n\n # Return a GeoDataFrame\n out = gpd.GeoDataFrame(agg_df, geometry=\"geometry\", crs=df.crs).reset_index()\n\n # Add in any id variables from\n if len(id_vars):\n if groupby not in id_vars:\n id_vars.append(groupby)\n out = out.merge(df[id_vars], on=groupby).drop_duplicates(subset=[groupby])\n\n return out",
"def FE_add_groupby_features_aggregated_to_dataframe(train,\r\n agg_types,groupby_columns,ignore_variables, test=\"\"):\r\n train_copy = copy.deepcopy(train)\r\n test_copy = copy.deepcopy(test)\r\n if isinstance(groupby_columns, str):\r\n groupby_columns = [groupby_columns]\r\n \r\n for groupby_column in groupby_columns:\r\n train_copy_index = train_copy.index\r\n MGB = My_Groupby_Encoder(groupby_column, agg_types, ignore_variables)\r\n train1 = MGB.fit_transform(train)\r\n addl_cols = left_subtract(train1.columns,train.columns)\r\n train1.index = train_copy_index\r\n train_copy = pd.concat([train_copy,train1[addl_cols]], axis=1)\r\n if isinstance(test, str) or test is None:\r\n pass\r\n else:\r\n test_copy_index = test_copy.index\r\n test1 = MGB.transform(test)\r\n addl_cols = left_subtract(test1.columns,test.columns)\r\n test1.index = test_copy_index\r\n test_copy = pd.concat([test_copy,test1[addl_cols]],axis=1)\r\n ### return the dataframes ###########\r\n return train_copy, test_copy",
"def compute_average_value(self, set_label, feature, group_by_features, low_dt=None, high_dt=None):\n assert ((low_dt is None) and (high_dt is None)) or ((low_dt is not None) and (high_dt is not None))\n tt = set_label + '_transaction'\n it = set_label + '_identity'\n feature = \"foo.\" + feature\n group_by_features = [\"foo.\" + item for item in group_by_features]\n group_by_features_str = \", \".join(group_by_features)\n view_table_sub = \"(SELECT * FROM {0} JOIN {1} USING (transactionid))\".format(tt, it)\n sql = \"SELECT \" + group_by_features_str + \", AVG(\"+ feature + \") FROM \"\n sql += view_table_sub + \" AS foo\"\n if low_dt is not None:\n assert low_dt <= high_dt\n sql += \" WHERE foo.transactiondt>={0} AND foo.transactiondt<{1}\".format(low_dt, high_dt)\n sql += \" GROUP BY \" + group_by_features_str\n sql +=\";\"\n cur = self.dbinstance.execute_sql(sql)\n return cur",
"def add_aggregators(self, stat_agg):\n stat_agg.add_aggregator(self.key_precision, '{:05.4f}') \n stat_agg.add_aggregator(self.key_precision+'_std', '{:05.4f}')\n stat_agg.add_aggregator(self.key_recall, '{:05.4f}') \n stat_agg.add_aggregator(self.key_recall+'_std', '{:05.4f}')\n stat_agg.add_aggregator(self.key_f1score, '{:05.4f}') \n stat_agg.add_aggregator(self.key_f1score+'_std', '{:05.4f}')",
"def getAggregateStatistics(df: pd.core.frame.DataFrame, feature: str,\n kind: str, year: str) -> pd.core.frame.DataFrame:\n if year == 'all':\n df = df.loc[df['Trade Flow'] == kind, [feature,\n 'Year', 'Reporter']].groupby(['Year', 'Reporter']).agg(['sum']).reset_index()\n else:\n df = df.loc[(df['Trade Flow'] == kind) &\n (df['Period'] > f'{year}-01-01') & (df['Period'] <= f'{year}-12-31'), \n [feature,'Reporter']].groupby(['Reporter']).agg(['sum']).reset_index()\n \n df['Year'] = int(year)\n\n df_sorted = df.sort_values(by=(feature,'sum'), ascending=False)\n \n return df_sorted",
"def _aggregate(group_df):\n out = {}\n for col in data_columns:\n # The name of the error column (if it exists)\n error_col = f\"{col}_moe\"\n\n # remove any NaN rows\n subset = group_df.dropna(subset=[col], how=\"any\")\n\n # aggregat if we had any rows left\n if len(subset):\n\n # column values, margin of error (if it exists)\n args = np.column_stack(\n [subset[col], subset.get(error_col, np.zeros(len(subset)))]\n )\n\n # do the aggregation\n aggval, moe = cda.approximate_sum(*args)\n else:\n aggval = moe = np.nan\n\n # store\n out[col] = aggval\n if error_col in subset.columns:\n out[f\"{col}_moe\"] = moe\n\n out[\"geometry\"] = group_df.geometry.unary_union\n return pd.Series(out)",
"def agg(X: np.ndarray):\n # _check_agg_params(X)\n\n agg = AgglomerativeClustering(n_clusters=6,\n affinity='euclidean',\n linkage='ward')\n y_agg = agg.fit_predict(X)\n return agg",
"def _aggregate(group_df, sampling_percentage=5 * 2.5):\n out = {}\n dist = []\n total_count = 0\n for i, col in enumerate(columns):\n\n n = group_df[col].sum()\n total_count += n\n dist.append(dict(min=bins[i][0], max=bins[i][1], n=n))\n\n # only aggregate if we have data!\n if total_count:\n aggval, moe = cda.approximate_median(\n dist, sampling_percentage=sampling_percentage\n )\n else:\n aggval = np.nan\n moe = np.nan\n\n result = {}\n result[\"median\"] = aggval\n result[\"median_moe\"] = moe\n result[\"geometry\"] = group_df.geometry.unary_union\n\n return pd.Series(result)",
"def agg_statistics(df, uid, value, agg_func, suffix=''):\n suffix = '_' + suffix if suffix else suffix\n tmp = df[uid + value].groupby(uid).agg(agg_func)\n tmp.columns = ['_'.join(col) for col in tmp.columns]\n tmp.columns = [col + suffix for col in tmp.columns]\n return tmp.reset_index(drop=False)",
"def fill_with_group_average(df, group, column):\r\n #df=None\r\n df[column].fillna(df.groupby(group)[column].transform('mean'), inplace=True)\r\n return df",
"def group_to_others(\n df: pd.DataFrame, to_be_grouped: dict, replace_value: str = \"Other\"\n) -> pd.DataFrame:\n\n for feature, values in to_be_grouped.items():\n df[feature] = [row if row in values else replace_value for row in df[feature]]\n return df",
"def groupby(df, group_cols=None, aggregations=None):\n if group_cols is None:\n raise ParamsValueError(\"You have to provide the 'group_cols' parameter with a list of at \"\n \"least one column on which to group data ('group_cols')\")\n if aggregations is None:\n raise ParamsValueError(\"You have to provide the 'aggregations' parameter with a dictionnary\"\n \"of at least a value column as key and an aggregation function as \"\n \"value (among sum, mean, median, prod, std, var)\")\n df = df.groupby(group_cols, as_index=False).agg(aggregations)\n return df",
"def aggregate(predictions, aggfunc):\n return [aggfunc(sublist) for sublist in np.transpose(predictions)]",
"def data_agg_by_mean_value(self):\n return self._data_agg_by_mean_value",
"def aggregate(df, grouping_vars=None):\n if grouping_vars is None:\n grouping_vars = [x for x in df.columns if x not in ['FlowAmount', 'DataReliability']]\n df_agg = df.groupby(grouping_vars).agg({'FlowAmount': ['sum']})\n df_agg['DataReliability'] = get_weighted_average(\n df, 'DataReliability', 'FlowAmount', grouping_vars)\n df_agg = df_agg.reset_index()\n df_agg.columns = df_agg.columns.droplevel(level=1)\n # drop those rows where flow amount is negative, zero, or NaN\n df_agg = df_agg[df_agg['FlowAmount'] > 0]\n df_agg = df_agg[df_agg['FlowAmount'].notna()]\n return df_agg",
"def get_aggr(self, attribute, aggregator=None, smooth=0., **kwargs):\n if aggregator is None:\n aggregator = np.mean\n data = self.get_all()\n itrs = {row['Iteration'] for row in data}\n itrs = sorted(list(itrs))\n vals = []\n running_avg = 0\n for itr in itrs:\n itr_data = DatasetBuilder(data).filter_itr(itr).get_all()\n val = aggregator([row[attribute] for row in itr_data], **kwargs)\n if len(vals) == 0:\n running_avg = val\n else:\n running_avg = smooth * running_avg + (1 - smooth) * val\n vals.append(running_avg)\n return np.array(itrs), np.array(vals)",
"def aggregate_perf(df,measure,thresh=0.05):\n df_agg = pd.DataFrame(columns=['roi','rank'])\n df['significance'] = df[measure] < thresh\n roi_list = df['roi'].unique()\n coef_list = []\n rank_list = []\n for roi in roi_list:\n rank_list.append(np.sum(df[df['roi']==roi]['significance'].values))\n coef_list.append(np.mean(df[df['roi']==roi]['coef'].values))\n df_agg['roi'] = roi_list\n df_agg['rank'] = rank_list\n df_agg['coef'] = coef_list\n \n return df_agg",
"def aggreg(iterable, aggregfuncs, geomfunc=None):\n def lookup_geomfunc(agg):\n # handle aliases\n if agg == \"dissolve\":\n agg = \"union\"\n elif agg == \"unique\":\n agg = \"difference\"\n\n # detect\n if agg == \"intersection\":\n def _func(fs):\n gs = (f.get_shapely() for f in fs if f.geometry)\n cur = next(gs)\n for g in gs:\n if not g.is_empty:\n cur = cur.intersection(g)\n return cur.__geo_interface__\n \n elif agg == \"difference\":\n def _func(fs):\n gs = (f.get_shapely() for f in fs if f.geometry)\n cur = next(gs)\n for g in gs:\n if not g.is_empty:\n cur = cur.difference(g)\n return cur.__geo_interface__\n\n elif agg == \"union\":\n def _func(fs):\n gs = [f.get_shapely() for f in fs if f.geometry]\n if len(gs) > 1:\n print(gs)\n from shapely.ops import cascaded_union\n return cascaded_union(gs).__geo_interface__\n elif len(gs) == 1:\n return gs[0].__geo_interface__\n\n elif hasattr(agg, \"__call__\"):\n # agg is not a string but a custom function\n return agg\n\n else:\n raise Exception(\"geomfunc must be a callable function or a valid set geometry string name\")\n\n return _func\n \n def lookup_aggfunc(agg):\n # handle aliases\n if agg in (\"average\",\"avg\"):\n agg = \"mean\"\n\n # detect\n if agg == \"count\": return len\n elif agg == \"sum\": return sum\n elif agg == \"max\": return max\n elif agg == \"min\": return min\n elif agg == \"first\": return lambda seq: seq.__getitem__(0)\n elif agg == \"last\": return lambda seq: seq.__getitem__(-1)\n elif agg == \"majority\": return lambda seq: max(itertools.groupby(sorted(seq)), key=lambda gidgroup: len(list(gidgroup[1])))[0]\n elif agg == \"minority\": return lambda seq: min(itertools.groupby(sorted(seq)), key=lambda gidgroup: len(list(gidgroup[1])))[0]\n elif agg == \"mean\": return lambda seq: sum(seq)/float(len(seq))\n elif isinstance(agg, basestring) and agg.endswith(\"concat\"):\n delim = agg[:-6]\n return lambda seq: delim.join((str(v) for v in seq))\n elif hasattr(agg, \"__call__\"):\n # agg is not a string but a function\n return agg\n else:\n raise Exception(\"aggfunc must be a callable function or a valid statistics string name\")\n\n def check_valfunc(valfunc):\n if hasattr(valfunc,\"__call__\"):\n pass\n elif isinstance(valfunc,basestring):\n hashindex = valfunc\n valfunc = lambda f: f[hashindex]\n else:\n raise Exception(\"valfunc for field '%s' must be a callable function or a string of the hash index for retrieving the value\"%name)\n return valfunc\n \n aggregfuncs = [(name,check_valfunc(valfunc),aggname,lookup_aggfunc(aggname)) for name,valfunc,aggname in aggregfuncs]\n\n def make_number(value):\n try: return float(value)\n except: return None\n\n def is_missing(val):\n return val is None or (isinstance(val, float) and math.isnan(val))\n\n iterable = list(iterable)\n row = []\n for _,valfunc,aggname,aggfunc in aggregfuncs:\n values = (valfunc(item) for item in iterable)\n\n # missing values are not considered when calculating stats\n values = [val for val in values if not is_missing(val)] \n \n if aggname in (\"sum\",\"max\",\"min\",\"mean\"):\n # only consider number values if numeric stats\n values = [make_number(value) for value in values if make_number(value) != None]\n\n if values:\n aggval = aggfunc(values)\n else:\n aggval = \"\" # or best with None\n \n row.append(aggval)\n\n if geomfunc:\n geomfunc = lookup_geomfunc(geomfunc)\n geom = geomfunc(iterable)\n return row,geom\n\n else:\n return row",
"def groupBy(featureCollection, propertiesOrFunc):\n if not \"features\" in featureCollection.keys():\n raise ValueError('features key needs to be defined {}')\n features = featureCollection[\"features\"]\n groups = defaultdict(list)\n if isinstance(propertiesOrFunc, str):\n propertiesOrFunc = [propertiesOrFunc]\n for row in features:\n if isinstance(propertiesOrFunc, Callable):\n groupByValue = str(propertiesOrFunc(row[\"properties\"]))\n else:\n groupByValue = []\n [groupByValue.append(str(row[\"properties\"].get(prop,\"\"))) for prop in propertiesOrFunc]\n groupByValue = \"|\".join(groupByValue)\n groups[groupByValue].append(row)\n return {key: geojson.FeatureCollection(group) for key, group in groups.items()}",
"def aggregate_statistics(self, stat_col, stat_agg):\n precision_sums = stat_col[self.key_precision]\n recall_sums = stat_col[self.key_recall]\n f1score_sums = stat_col[self.key_f1score]\n supports = stat_col[self.key_f1score+'_support']\n\n # Special case - no samples!\n if sum(supports) == 0:\n stat_agg[self.key_precision] = 0\n stat_agg[self.key_precision+'_std'] = 0\n stat_agg[self.key_recall] = 0\n stat_agg[self.key_recall+'_std'] = 0\n stat_agg[self.key_f1score] = 0\n stat_agg[self.key_f1score+'_std'] = 0\n\n else: \n # Else: calculate weighted precision.\n precisions_avg = np.average(precision_sums, weights=supports)\n precisions_var = np.average((precision_sums-precisions_avg)**2, weights=supports)\n \n stat_agg[self.key_precision] = precisions_avg\n stat_agg[self.key_precision+'_std'] = math.sqrt(precisions_var)\n\n # Calculate weighted recall.\n recalls_avg = np.average(recall_sums, weights=supports)\n recalls_var = np.average((recall_sums-recalls_avg)**2, weights=supports)\n\n stat_agg[self.key_recall] = recalls_avg\n stat_agg[self.key_recall+'_std'] = math.sqrt(recalls_var)\n\n # Calculate weighted f1 score.\n f1scores_avg = np.average(f1score_sums, weights=supports)\n f1scores_var = np.average((f1score_sums-f1scores_avg)**2, weights=supports)\n\n stat_agg[self.key_f1score] = f1scores_avg\n stat_agg[self.key_f1score+'_std'] = math.sqrt(f1scores_var)",
"def aggregate(self, agpath):\n return data.Aggregate(self, agpath)",
"def update_aggregated_data(aggregated_data, datum):\n if 'last_date' not in aggregated_data:\n aggregated_data['last_date'] = datum['date']\n\n if aggregated_data['last_date'] != datum['date']:\n \"\"\"\n We are calculating daily min, max values so only update when hit new date.\n \"\"\"\n\n if aggregated_data['sum'] < aggregated_data['min']:\n aggregated_data['min'] = aggregated_data['sum']\n\n if aggregated_data['sum'] > aggregated_data['max']:\n aggregated_data['max'] = aggregated_data['sum']\n\n aggregated_data['last_date'] = datum['date']\n \n\n sign = 1\n if datum['type'] == 'debit':\n sign = -1\n\n aggregated_data['n'] += 1\n aggregated_data['sum'] += sign * Decimal(datum['amount'])\n\n return aggregated_data",
"def _aggregate(y, aggfunc, weights):\n if weights is None:\n aggfunc = _check_aggfunc(aggfunc, weighted=False)\n y_agg = aggfunc(y, axis=1)\n else:\n aggfunc = _check_aggfunc(aggfunc, weighted=True)\n y_agg = aggfunc(y, axis=1, weights=np.array(weights))\n\n return pd.Series(y_agg, index=y.index)",
"def add_accumulation_features(df, config):\n if config.has_option(\"clustering\", \"bin\"):\n bin_size = float(config.get(\"clustering\", \"bin\"))\n logger.info(\n \"Computation of the accumulation features with bin_size=%s\",\n bin_size\n )\n df = accumulation_2d_neighborhood(df, bin_size)\n df.fillna(0, inplace=True)\n for c in (\"bin_z_range\", \"bin_z_std\", \"bin_density\"):\n df[c] = max_normalize(df[c])\n return df",
"def _aggregate(self, method_name, *args, as_index=None, **kwargs):\n res = self._groupby_obj._wrap_aggregation(\n qc_method=type(self._query_compiler).groupby_rolling,\n numeric_only=False,\n agg_args=args,\n agg_kwargs=kwargs,\n agg_func=method_name,\n rolling_kwargs=self.rolling_kwargs,\n )\n\n if as_index is None:\n as_index = self._as_index\n\n if not as_index:\n res = res.reset_index(\n level=[i for i in range(len(self._groupby_obj._internal_by))],\n drop=False,\n )\n\n return res"
] | [
"0.62384295",
"0.62365556",
"0.6115062",
"0.57873046",
"0.5756204",
"0.5695222",
"0.56700325",
"0.56446403",
"0.5633193",
"0.56284815",
"0.5598864",
"0.55967784",
"0.55186844",
"0.5475055",
"0.54412466",
"0.53967935",
"0.5389561",
"0.5328148",
"0.53219986",
"0.5310613",
"0.53030914",
"0.5264406",
"0.5217204",
"0.52107185",
"0.51994413",
"0.5195169",
"0.5191772",
"0.51684296",
"0.5107142",
"0.5087795"
] | 0.74227715 | 0 |
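The entry above documents `group_values`, a thin wrapper around `DataFrame.groupby(...).agg(...)`. A minimal usage sketch follows, not part of the dataset; the example DataFrame and the "city"/"sales" column names are hypothetical.

```python
import pandas as pd

def group_values(df, col1, agg1):
    grouping = df.groupby(col1).agg(agg1)
    return grouping

df = pd.DataFrame({
    "city": ["A", "A", "B", "B", "B"],
    "sales": [10, 20, 5, 15, 25],
})

# agg1 can be a single function name, a list of names, or a dict mapping
# column names to functions, exactly as DataFrame.agg accepts.
print(group_values(df, "city", "mean"))
print(group_values(df, "city", {"sales": ["sum", "max"]}))
```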
Convert temperatures from celsius to fahrenheit. This function accepts a dataframe and 1 column(feature) and returns that column's values converted from celsius to fahrenheit. | def convert(df,celsius):
converted_temp=(df[celsius]*(9/5))+32
return converted_temp | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_f_to_c(temp_in_farenheit): ## ##\n celsiustemp = round((temp_in_farenheit - 32) * 5/9, 1) ##\n return celsiustemp ##",
"def convert_celsius_to_fahrenheit(celsius):\n return celsius * 9.0 / 5 + 32",
"def convertTemp(t, convertTo=\"C\"):\n # check if target temperature is celcius (metric)\n if convertTo == \"C\":\n # returns celcius (metric) temperature\n return round(((5 / 9) * (t - 32)), 1)\n else:\n # returns fahrenheit but rounded\n return round(t, 1)",
"def tempConvert(temp, unit):\n if unit == 'F':\n celsius = (temp - 32) * 5 / 9\n return celsius\n else:\n return temp",
"def convertCelsiusToFahrenhe(C):\n if isinstance(C, str) == True:\n raise ValueError(\"Celsius cannot be a string value\")\n if isinstance(C,complex) == True:\n raise ValueError(\"Celsius cannot be a complex value\")\n if isinstance(C,int) == True:\n raise ValueError(\"Celsius should be a float value, example: 90.00\")\n \n F = (9.0/5.0 * C + 32.0)\n return F",
"def celsius_to_fahrenheit(celsius):\n fahrenheit = (celsius * (9.0/5.0)) + 32.0\n return fahrenheit",
"def celsius_to_fahr(temp):\n return temp * (9/5) + 32",
"def convert_f_to_c(temp_in_farenheit):\n celcius_temp = round(float((temp_in_farenheit) - 32)*(5/9),1)\n return(celcius_temp)",
"def kelvin_to_fahr(temp):\n temp_c = kelvin_to_celsius(temp)\n result = celsius_to_fahr(temp_c)\n return result",
"def _celsius_to_fahrenheit(self) -> None:\n if self.units == \"celsius\":\n self.value = (((self.value / 5) * 9) + 32).__round__(2)\n self.units = \"fahrenheit\"\n else:\n msg = (\n \"Not a valid unit conversion, expected units to be in 'celsius' but instead \"\n + f\"units were in {self.units}.\"\n )\n raise ValueError(msg)",
"def fahrenheit_to_celsius():\n fahrenheit = ent_temperature.get()\n celsius = (5 / 9) * (float(fahrenheit) - 32)\n lbl_result[\"text\"] = f\"{round(celsius, 2)} \\N{DEGREE CELSIUS}\"",
"def fahrenheit_to_celsius(temp):\n return (temp - 32) * 5/9",
"def cel_to_fahren(temp_list):\n fahren_list = [round(temp*9/5+32) for temp in temp_list]\n return fahren_list",
"def fahrenheit_to_celsius(fahrenheit):\n offset = 32\n multiplier = 5 / 9\n celsius = (fahrenheit - offset) * multiplier\n print(\"inside function:\", fahrenheit, offset, multiplier, celsius)\n return celsius",
"def fahrenheit_to_celsius(fahrenheit):\n offset = 32\n multiplier = 5 / 9\n celsius = (fahrenheit - offset) * multiplier\n print(\"inside function:\", fahrenheit, offset, multiplier, celsius)\n return celsius",
"def kelvin_to_fahrenheit(kelvin_temp):\n\n\treturn math.floor(9/5 * (kelvin_temp - 273) + 32)",
"def fahrenheit(T_in_celsius):\n return (T_in_celsius * 9 / 5) + 32",
"def testCtoF(self):\r\n for integer, numeral in self.ctofvalues:\r\n result = conversions_refactored.convert('Celsius', 'Fahrenheit', integer) \r\n self.assertEqual(numeral, result, msg='Incorrect result, calculation error')",
"def convert_f_to_c(temp_in_farenheit):\n \n temp=round((float(temp_in_farenheit)-32)*5/9,1)\n \n return (temp)",
"def fahr_to_celsius(temp):\n tempInCel = (temp - 32) * 5/9\n return tempInCel",
"def celcius_to_fahrenheit(celcius_float):\n return celcius_float * 1.8 + 32",
"def to_fahrenheit(celsius):\n\n return (1.8*celsius) + 32",
"def correct_weather_data(df):\n\n columns = {'Date UTC': 'date',\n 'T° (C)': 'temperature',\n 'P (hPa)': 'pression',\n 'HR (%)': 'HR',\n 'P.rosée (°C)': 'rosee',\n 'Visi (km)': 'visibilite',\n 'Vt. moy. (km/h)': 'v_moy',\n 'Vt. raf. (km/h)': 'v_raf',\n 'Vt. dir (°)': 'v_dir',\n 'RR 3h (mm)': 'RR3h',\n 'Neige (cm)': 'neige',\n 'Nebul. (octats)': 'nebul'}\n\n df = df.rename(columns=columns)\n df['date'] = df['date'].str.replace('h', ':')\n df['date'] = pd.to_datetime(df['date'], dayfirst=True)\n\n return df",
"def testFtoC(self):\r\n for integer, numeral in self.ftocvalues:\r\n result = conversions_refactored.convert('Fahrenheit', 'Celsius', integer) \r\n self.assertEqual(numeral, result, msg='Incorrect result, calculation error')",
"def convert_f_to_c(temp_in_farenheit):\n temp_in_celcius = ((temp_in_farenheit - 32) * 5) / 9\n temp_in_celcius = round(temp_in_celcius, 1)\n return temp_in_celcius",
"def convert_to_fahrenheit(self):\n try:\n self.root.ids.celsius_input.hint_text = 'Enter amount in Celsius'\n self.root.ids.fahrenheit_input.text = '{:.2f}'.format(float(self.root.ids.celsius_input.text)\n * 9.0 / 5 + 32)\n except ValueError:\n self.root.ids.celsius_input.text = ''\n self.root.ids.celsius_input.hint_text = 'Invalid number'",
"def convert_to_celsius(fahrenheit):\n return (fahrenheit - 32) * 5 / 9",
"def temperature(self):\n self.convert_window(\"Temperature\", \"Celsius\", [\"Celsius\", \"Fahrenheit\", \"Kelvin\", \"Rankine\", \"Reaumur\", \"Newton\", \"Romer\", \"Delisle\"])",
"def convert_df_to_features(df, volume=False):\n if volume:\n return df.high.astype(float), df.low.astype(float), df.close.astype(float), df.volume.astype(float)\n else:\n return df.high.astype(float), df.low.astype(float), df.close.astype(float)",
"def CC_WEI_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','CC_WEI']]\n Feature_DF.loc[:,'CC_WEI_TRS'] = Feature_DF.loc[:,'CC_WEI'].pow(4/3)\n Feature_DF = Feature_DF.loc[:,['HNAME','CC_WEI_TRS']]\n\n return Feature_DF"
] | [
"0.6405321",
"0.63570726",
"0.63089514",
"0.6282194",
"0.624191",
"0.6216791",
"0.6167246",
"0.6161009",
"0.6136985",
"0.6065306",
"0.60189754",
"0.59730244",
"0.59621197",
"0.5952359",
"0.5952359",
"0.5928877",
"0.5866788",
"0.5863079",
"0.5858934",
"0.58513784",
"0.584079",
"0.58316916",
"0.58305216",
"0.582773",
"0.58229756",
"0.5810385",
"0.5799572",
"0.57920855",
"0.579163",
"0.5711385"
] | 0.6762713 | 0 |
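The entry above documents `convert`, which applies the Celsius-to-Fahrenheit formula F = C * 9/5 + 32 to one column. A minimal usage sketch follows, not part of the dataset; the "temp_c" column name and its values are hypothetical.

```python
import pandas as pd

def convert(df, celsius):
    converted_temp = (df[celsius] * (9 / 5)) + 32
    return converted_temp

df = pd.DataFrame({"temp_c": [0.0, 25.0, 100.0]})

# 0 °C -> 32 °F, 25 °C -> 77 °F, 100 °C -> 212 °F
df["temp_f"] = convert(df, "temp_c")
print(df)
```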
Returns True if strA divides strB, i.e. strB is strA repeated a whole number of times. | def divs(strA,strB):
for i in range(0,1001):
if strB == strA*i:
return(True)
return(False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_divisibility(a, b):\n \n float_version = float(a)/b\n int_version = a/b\n if float_version == int_version:\n answer = \"divisible\"\n else:\n answer = \"not divisible\"\n return answer",
"def call(str_a, str_b):\n if not sys.getsizeof(str_a) == sys.getsizeof(str_b):\n return False\n\n comp_a = [int(str_a_char) for str_a_char in bytes(str_a.encode('utf-8'))]\n\n res = 0\n for str_b_char in bytes(str_b.encode('utf-8')):\n res |= str_b_char ^ comp_a.pop(0)\n\n return res == 0",
"def permutations_equal(str_a, str_b):\n if not isinstance(str_a, str) or not isinstance(str_b, str):\n raise AttributeError(\"inputs must be valid str\")\n if len(str_a) != len(str_b):\n return False\n\n return Counter(str_a) == Counter(str_b)",
"def is_divisible(a, b):\n \n remainder = a % b\n # if there's no remainder, then a is divisible by b\n if not remainder:\n return True\n else:\n return False",
"def _is_duplicate(a: str, b: str) -> bool:\n la = len(a)\n lb = len(b)\n diff = abs(la - lb)\n if diff > 50:\n return False\n denom = min(la, lb) + diff / 2\n ratio = levenshtein(a.casefold(), b.casefold()) / denom\n return ratio < 0.1",
"def hamming_distance(StringA,StringB):\n if len(StringA) != String(B):\n raise ValueError(\"The length of sequences are not equal!\")\n return sum(x !=y for (x,y) in zip(StringA,StringB))",
"def div(a, b):\n a = float(a)\n b = float(b)\n return a / b",
"def div(self, a, b):\n raise NotImplementedError",
"def divisable(num1, num2):\n\n # Return the calculated boolean\n return bool(num1 % num2 == 0)",
"def my_strcomp(str1, str2):\n str2 = ''.join(ch for ch in str2 if ch.isalnum() or ch == \",\")\n str1 = ''.join(ch for ch in str1 if ch.isalnum())\n if len(str2) > len(str1):\n return False\n if str1.upper() == str2.upper():\n return True\n same_chars = 0\n for char1, char2 in zip(str1, str2):\n if char1.upper() == char2.upper():\n same_chars += 1\n # if same_chars == len(str2): return True\n return (same_chars / len(str1)) > 0.7 # If more than 80% of chars are equals, return true",
"def str_equals(a, b):\n al = len(a)\n bl = len(b)\n match = True\n for i in range(0, min(al, bl)):\n match &= a[i] == b[i]\n return match",
"def division_algo(a, b):\n return a / b, a % b",
"def string_similarity(a, b):\n return SequenceMatcher(a=a, b=b).ratio()",
"def Stringchecker(s1, s2):\r\n\r\n if len(s1) != len(s2) or len(set(s1)) < len(set(s2)):\r\n return False\r\n d = dict()\r\n for idx,c in enumerate(s1):\r\n if not d.get(c):\r\n d[c] = s2[idx]\r\n elif d[c] != s2[idx]:\r\n return False\r\n return True",
"def compare_str(seq1, seq2):\n if seq1 == seq2:\n return 1\n ld = Levenshtein.distance(seq1, seq2)\n longest = len(seq1 if len(seq1) > len(seq2) else seq2)\n return (longest - ld) / longest",
"def compare_strings(string1: str, string2: str) -> float:\n return SequenceMatcher(None, string1, string2).ratio()",
"def div(a, b):\n if b == 0:\n raise ValueError('zero division error')\n return a / b",
"def div(self, a, b):\n return divmod(a, b)",
"def true_div(a, b):\r\n # see decorator for function body\r",
"def similar_string(first_string, second_string):\n score = score_match(first_string, second_string)\n\n if score >= SCORE_THRESHOLD_NORMAL:\n return True\n\n return False",
"def check_string(str_one, str_two):\n str_one = str_one.lower()\n str_two = str_two.lower()\n # print(str_one,str_two)\n if len(str_two) < len(str_one):\n return bool(re.search(str_two+'$',str_one))\n else:\n return bool(re.search(str_one+'$',str_two))",
"def div(a, b):\n if not type(a) is Blob and not type(b) is Blob:\n raise ValueError('At least one of `a` and `b` should be neoml.Blob.')\n \n return a / b",
"def similar_string_fast(first_string, second_string):\n partial_score = fuzz.ratio(first_string, second_string)\n token_score = fuzz.token_set_ratio(first_string, second_string)\n\n if max(partial_score, token_score) >= SCORE_THRESHOLD_FAST:\n return True\n\n return False",
"def hamdist(str1, str2):\n\n diffs = 0\n for ch1, ch2 in zip(str1, str2):\n if ch1 != ch2:\n diffs += 1\n return diffs",
"def divisible(a, b):\n return not a % b",
"def true_div_inplace(a, b):",
"def division(self, a, b):\n if not check_arguments(a, b): # check if arguments are numbers\n self.last_result = a / b",
"def string_match_ratio(str1, str2):\n sm = edit_distance.SequenceMatcher(a=str1, b=str2)\n return sm.ratio()",
"def exquo(self, a, b):\n return a / b",
"def div(a,b):\r\n return a/b"
] | [
"0.65836626",
"0.6551966",
"0.6452647",
"0.6091636",
"0.6034888",
"0.6031436",
"0.5918683",
"0.5906723",
"0.5901623",
"0.589791",
"0.5895614",
"0.58787555",
"0.5836668",
"0.5830642",
"0.58115834",
"0.5795731",
"0.5777818",
"0.5768081",
"0.5738433",
"0.5724008",
"0.57214195",
"0.5704834",
"0.56893367",
"0.5688269",
"0.56841254",
"0.5677455",
"0.56749076",
"0.5670773",
"0.566804",
"0.56029356"
] | 0.80954874 | 0 |
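The divs document in the row above treats "divides" in the string sense (strB is strA concatenated a whole number of times) and only tests repetition counts from 0 to 1000. A minimal sketch of the same check without that cap, deriving the count from the lengths instead, is given below; the name divides and the sample inputs are illustrative, not taken from the dataset.

def divides(str_a, str_b):
    # True if str_b is str_a repeated a whole number of times.
    if str_a == "":
        return str_b == ""            # the empty string only "divides" itself
    if len(str_b) % len(str_a) != 0:
        return False                  # lengths must divide evenly
    return str_a * (len(str_b) // len(str_a)) == str_b

# Illustrative checks:
assert divides("ab", "ababab") is True
assert divides("ab", "ababa") is False
assert divides("ab", "") is True      # zero repetitions, matching the i == 0 case in divs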
Follow-up: group isomorphic strings | def group_isomorphic(strs):
def encode(s):
r, d = [], {}
for c in s:
if c not in d:
d[c] = len(d)
r.append(d[c])
return str(r)
m = defaultdict(list)
for s in strs:
m[encode(s)].append(s)
return list(m.values()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def group_anagrams(strs):\n anagram_grouping = {}\n \n for anagram in strs:\n curr_ana = str(sorted(anagram))\n anagram_grouping.setdefault(curr_ana, [])\n \n anagram_grouping[curr_ana].append(anagram)\n \n return [ anagram_grouping[grouping] for grouping in anagram_grouping ]",
"def group(seq):\n pass # replace with your solution",
"def gen_eq_suf_lists(string):\n\n # Reverse the string\n string = string[::-1]\n\n # Split the string into list of sensible words and sort them\n words = re.split('\\W', string)\n words = list(filter(lambda word : word != '', words))\n words.sort()\n\n # Initialise output list with an empty group\n suffix_groups = [ [] ]\n\n # Walk through words...\n cur_suffix = words[0][:3]\n for word in words:\n # Add word to last group if it has the same suffix\n if word[:3] == cur_suffix:\n suffix_groups[-1].append(word[::-1])\n\n # Make a new group on the encounter of a new suffix\n else:\n suffix_groups.append( [ word[::-1] ] )\n\n # Update the suffix that is compare with\n cur_suffix = word[:3]\n\n return suffix_groups",
"def test_unicodeCombining(self):\n input = raw_unicode(\n r\"Ik ben gei\\u0308nteresseerd in de co\\u00F6rdinatie van mijn knie\\u00EBn, maar kan niet e\\u0301e\\u0301n \\u00E0 twee enqu\\u00EAtes vinden die recht doet aan mijn carri\\u00E8re op Cura\\u00E7ao\")\n output = input.split(\" \")\n output[8] = output[8][0:-1]\n for (itmO, itmV) in zip(output, tokenize_en(input)):\n self.assertEqual(itmO, itmV[0])\n self.assertTrue(input[itmV[1]:].startswith(itmO))",
"def look_and_say(s):\n pass\n lst = list(s)\n groups = []\n for char in lst:\n if groups and groups[-1] and groups[-1][0] == char:\n groups[-1].append(char)\n else:\n groups.append([char])\n ret = []\n for group in groups:\n ret.append(str(len(group)))\n ret.append(group[0])\n\n return ''.join(ret)",
"def _miler_grouper(iterable):\r\n length = len(iterable) + 1\r\n if length == 3:\r\n yield [each.text for each in iterable]\r\n for i in range(3, length, 3):\r\n previous = i - 3\r\n group = iterable[previous: i]\r\n yield [each.text for each in group]",
"def isIsomorphic(s: str, t: str) -> bool:\n\n def encode(s):\n m = {}\n r = []\n for char in s:\n if char not in m:\n m[char] = len(m) # increasing index\n r.append(m[char])\n return str(r)\n\n return encode(s) == encode(t)",
"def grouped_anagrams(strings):\r\n anagram_dict = {}\r\n for string in strings:\r\n # this will take O(n logn) time with n being the number of chars in a word\r\n sorted_chars = \"\".join(sorted(list(string))) \r\n anagram_dict[sorted_chars] = anagram_dict.get(sorted_chars, []) + [string]\r\n\r\n return list(anagram_dict.values())",
"def checkio(data: List[str]) -> str:\n before, after = {}, {}\n to_order = set()\n for string in data:\n string = remove_duplicates(string)\n for i, char in enumerate(string):\n if i > 0:\n before[char] = before.get(char, list())\n if string[i - 1] not in before[char]:\n before[char].append(string[i - 1])\n if i < len(string) - 1:\n after[char] = after.get(char, list())\n if string[i + 1] not in after[char]:\n after[char].append(string[i + 1])\n to_order.add(char)\n order = []\n chars_without_predeccessors = sorted([char for char in to_order if before.get(char, None) is None])\n for head in chars_without_predeccessors:\n for after_head in after.get(head, list()):\n if after_head in order:\n insert_index = order.index(after_head)\n order.insert(insert_index, head)\n break\n if head in order:\n break\n else:\n order.append(head)\n while next_item := get_next(order[-1], after):\n order.append(next_item)\n return ''.join(order)",
"def InterpolateGrouping(self, pattern):\n components = []\n offset = 0\n for match in GROUPING_PATTERN.finditer(pattern):\n components.append([pattern[offset:match.start()]])\n\n # Expand the attribute into the set of possibilities:\n alternatives = match.group(1).split(\",\")\n components.append(_unique(alternatives))\n offset = match.end()\n\n components.append([pattern[offset:]])\n # Now calculate the cartesian products of all these sets to form all\n # strings.\n for vector in itertools.product(*components):\n yield u\"\".join(vector)",
"def solution(s):\n\n ls = []\n i = 0\n if len(s) % 2 == 0:\n while i < len(s)-1:\n ls.append(s[i]+s[i+1])\n i += 2\n else:\n while i < len(s)-2:\n ls.append(s[i]+s[i+1])\n i += 2\n ls.append(s[len(s)-1]+\"_\")\n return ls",
"def grp(iterable):\n if iterable is None:\n return \"\"\n xs = [x for x in iterable if x]\n return \", \".join(list(sorted(set(xs))))",
"def test_reaction_splits_stereogroup(self):\n products = _reactAndSummarize('[C:1]OO[C:2]>>[C:2]O.O[C:1]',\n 'F[C@H](Cl)OO[C@@H](Cl)Br |o1:1,5|')\n # Two product sets, each with two mols:\n self.assertEqual(products.count('|o1:1|'), 4)",
"def solution(s):",
"def rsplit(self, string):\n rhs = string()\n lhs = string()\n pattern_match=string()\n return lhs, pattern_match, rhs",
"def part2(data: str = None) -> str:\n idlist: List[IDProfiler] = getidlist(data)\n for i in range(len(idlist)):\n for j in range(i + 1, len(idlist)):\n shared: str = idlist[i].sharedletters(idlist[j])\n if len(shared) is len(idlist[i].rawstr) - 1:\n return shared",
"def task2(string):\n \"\"\"if len(string) < 2:\n return string\n elif string[0] == string[1]:\n return string[0] + \"-\" + task2(string[1:])\n return string[0] + task2(string[1:])\"\"\"\n if len(string) < 2:\n return string\n a = \"\"\n for i in range(len(string)):\n if i + 1 < len(string) and string[i + 1] == string[i]:\n a += string[i] + \"-\"\n else:\n a += string[i]\n return a",
"def is_isomorphic(self, s1, s2):\n # encode strings\n enc1, enc2 = [], []\n count1, count2 = 0, 0\n dict1, dict2 = dict(), dict()\n for i in range(len(s1)):\n char1, char2 = s1[i], s2[i]\n if char1 in dict1:\n enc1.append(dict1[char1])\n else:\n count1 += 1\n dict1[char1] = count1\n enc1.append(dict1[char1])\n if char2 in dict2:\n enc2.append(dict2[char2])\n else:\n count2 += 1\n dict2[char2] = count2\n enc2.append(dict2[char2])\n return enc1 == enc2 # compare encodings",
"def _decode_multiple_subject(self, decoded: str) -> Set[str]:\n\n result = set()\n\n rematch = self._regex_helper.set_regex(r\"((?:[^~\\*,]+))\").match(\n decoded, rematch=True, return_match=True\n )\n\n if rematch:\n result.update({self.extract_base(x) for x in rematch})\n\n return result",
"def internal_id_to_group(i_id: str) -> str:\n return chr(ord('a') + (int(i_id) % 5))",
"def strSeq_uniquify(strSeq,connector='_'):\n\n fm=\"{}\"+connector+\"{}\"\n\n new_strSeq = []\n for item in strSeq:\n counter = 0\n newitem = item\n while newitem in new_strSeq:\n counter += 1\n newitem = fm.format(item, counter-1)\n new_strSeq.append(newitem)\n\n return new_strSeq",
"def sort_string(raw_str):",
"def lsplit(self, string):\n rhs = string()\n lhs = string()\n pattern_match=string()\n return lhs, pattern_match, rhs",
"def group_handling(existing_uuids: Set[str]) -> None:",
"def _postprocess(\n self,\n result: List[str],\n eojeols: List[str],\n poses: List[str],\n ):\n token_indices = []\n temp_group = []\n for i, res in enumerate(result):\n if (\"<\" in res) or (\">\" in res):\n continue\n if not temp_group:\n temp_group.append(i)\n else:\n if i == (temp_group[-1] + 1):\n temp_group.append(i)\n else:\n token_indices.append(temp_group)\n temp_group = [i]\n token_indices.append(temp_group)\n\n lucrative = 0\n for i, li_index in enumerate(token_indices):\n if poses:\n eojeol = eojeols[i].split(\"+\")\n pos = poses[i].split(\"+\")\n tagged = []\n for e, p in zip(eojeol, pos):\n tagged.append(f\"{e}/{p}\")\n result[li_index[0] - lucrative:li_index[-1] + 1 -\n lucrative] = [\"+\".join(tagged)]\n else:\n result[li_index[0] - lucrative:li_index[-1] + 1 -\n lucrative] = [eojeols[i]]\n lucrative += len(li_index) - 1\n\n return result",
"def group_anagrams_hashmap(self, arr):\n hashmap = defaultdict(list)\n for s in arr:\n hashmap[\"\".join(sorted(s))].append(s)\n\n result = []\n for _, strings in hashmap.items():\n for s in strings:\n result.append(s)\n return result",
"def mystery2(s):\r\n if len(s) <= 1:\r\n return s\r\n else:\r\n result_rest = mystery2(s[1:])\r\n if s[0] == s[-1]:\r\n return result_rest\r\n else:\r\n return result_rest + s[0]",
"def filter_composite_from_subgroups(s):\n dims = []\n for letter, sg in zip('ABCDEFGHIJKLMNOPQRSTUVWZ', s[2:]):\n dims.append('dim{0}'.format(letter))\n if dims:\n return ' '.join(dims)",
"def calculate(self):\n\n chars = list(self.s)\n slen = len(chars)\n result = set([])\n vis = set([])\n q = deque([(0, \"\")])\n while q:\n pos, prev = q.popleft()\n if pos in vis:\n continue\n pos2 = pos + 2\n if slen - pos2 > 4:\n new = str(chars[slen-1-pos-1]) + str(chars[slen-1-pos])\n if new != prev:\n result.add(new)\n q.append((pos2, new))\n pos3 = pos + 3\n if slen - pos3 > 4:\n new = (str(chars[slen-1-pos-2]) +\n str(chars[slen-1-pos-1]) + str(chars[slen-1-pos]))\n if new != prev:\n result.add(new)\n q.append((pos3, new))\n\n vis.add(pos)\n\n return (str(len(result)) + \"\\n\" + \"\\n\".join(sorted(result))\n if result else \"0\")",
"def compress(string):"
] | [
"0.60339314",
"0.58468187",
"0.5554626",
"0.5472503",
"0.54396987",
"0.5400801",
"0.53897846",
"0.537732",
"0.5353079",
"0.53035206",
"0.53021216",
"0.52842623",
"0.5275526",
"0.5262117",
"0.5258697",
"0.52123475",
"0.5208093",
"0.5203539",
"0.5198474",
"0.51681864",
"0.5143105",
"0.51423436",
"0.5140562",
"0.5129122",
"0.51284087",
"0.512083",
"0.5099157",
"0.5090934",
"0.50902176",
"0.5080745"
] | 0.7192948 | 0 |
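The group_isomorphic document in the row above encodes each string by the index at which every character first appears, so isomorphic strings share the same key; it relies on defaultdict, whose import is not shown in the row. A self-contained usage sketch with the same logic follows; the sample strings are illustrative, not from the dataset.

from collections import defaultdict

def group_isomorphic(strs):
    # Encode each character by the position at which it first appears,
    # so strings with the same character pattern share an encoding key.
    def encode(s):
        r, d = [], {}
        for c in s:
            if c not in d:
                d[c] = len(d)
            r.append(d[c])
        return str(r)
    m = defaultdict(list)
    for s in strs:
        m[encode(s)].append(s)
    return list(m.values())

print(group_isomorphic(["egg", "add", "foo", "bar", "abc", "aba"]))
# -> [['egg', 'add', 'foo'], ['bar', 'abc'], ['aba']]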
Return processed audio data: the mel curve and x/y plot data. This method is called every time there is a microphone update. | def update(self, audio_samples):
min_frequency = self._config["general_settings"]["min_frequency"]
max_frequency = self._config["general_settings"]["max_frequency"]
audio_data = {}
# Normalize samples between 0 and 1.
y = audio_samples / 2.0**15
# Construct a rolling window of audio samples.
self.y_roll[:-1] = self.y_roll[1:]
self.y_roll[-1, :] = np.copy(y)
y_data = np.concatenate(self.y_roll, axis=0).astype(np.float32)
vol = np.max(np.abs(y_data))
# Transform audio input into the frequency domain.
N = len(y_data)
N_zeros = 2**int(np.ceil(np.log2(N))) - N
# Pad with zeros until the next power of two.
y_data *= self.fft_window
y_padded = np.pad(y_data, (0, N_zeros), mode='constant')
YS = np.abs(np.fft.rfft(y_padded)[:N // 2])
# Construct a Mel filterbank from the FFT data.
mel = np.atleast_2d(YS).T * self.mel_y.T
# Scale data to values more suitable for visualization.
mel = np.sum(mel, axis=0)
mel = mel**2.0
# Gain normalization.
self.mel_gain.update(np.max(gaussian_filter1d(mel, sigma=1.0)))
mel /= self.mel_gain.value
mel = self.mel_smoothing.update(mel)
x = np.linspace(min_frequency, max_frequency, len(mel))
y = self.fft_plot_filter.update(mel)
audio_data["mel"] = mel
audio_data["vol"] = vol
audio_data["x"] = x
audio_data["y"] = y
return audio_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def readWaveform(self):\n # prepare data holder\n y = [ 0 for j in range(4) ]\n # in case of previous errors\n self.flushInput()\n for ch in self.chs:\n # mostly for TDS\n self.setCh(ch)\n # calibration factor we will need soon\n (vmult, voff) = self.calibV()\n # read and calibrate data\n data = (numpy.array(self.readData()) - voff) * vmult\n # This is from the formula in TDS manual, without the\n # \"vzero\" in it---I couldn't figure out when that wouldn't\n # be exactly zero.\n y[ch-1]=data[:]\n\n (hstep, hoff) = self.calibH()\n # initialize time array\n t = numpy.array(range(len(y[0])))\n t = (t * hstep) + hoff\n\n # update the sequence number (... for isUpdated())\n self.seq = self.readSeq()\n\n return (t, y)",
"def process(self, data, reset=False):\n data = np.asarray(data)\n self.check_dims(data)\n data = self.highpass_filter(data, reset=reset)\n data = self.lowpass_filter(data, reset=reset)\n data = self.resample(data)\n data = self.reref_data(data)\n data = self.select_channels(data)\n data = self.normalize_data(data)\n data = self.add_context(data)\n return data",
"def get_recorded_audio(self):\n return self.frames",
"def on_update(self):\n\n t_start = time.time()\n\n # get input audio if desired\n if self.input_stream:\n try:\n num_frames = self.input_stream.get_read_available() # number of frames to ask for\n if num_frames:\n data_str = self.input_stream.read(num_frames, False)\n data_np = np.fromstring(data_str, dtype=np.float32)\n self.input_func(data_np, self.num_input_channels)\n except IOError as e:\n print('got error', e)\n\n # Ask the generator to generate some audio samples.\n num_frames = self.stream.get_write_available() # number of frames to supply\n if self.generator and num_frames != 0:\n (data, continue_flag) = self.generator.generate(num_frames, self.num_channels)\n\n # make sure we got the correct number of frames that we requested\n assert len(data) == num_frames * self.num_channels, \\\n \"asked for (%d * %d) frames but got %d\" % (num_frames, self.num_channels, len(data))\n\n # convert type if needed and write to stream\n if data.dtype != np.float32:\n data = data.astype(np.float32)\n self.stream.write(data.tostring())\n\n # send data to listener as well\n if self.listen_func:\n self.listen_func(data, self.num_channels)\n\n # continue flag\n if not continue_flag:\n self.generator = None\n\n # how long this all took\n dt = time.time() - t_start\n a = 0.9\n self.cpu_time = a * self.cpu_time + (1-a) * dt",
"def readAudioData(self, shouldProcess):\n if shouldProcess:\n return gatherData(self.playlists) \n else:\n return pd.read_pickle(\"data/audioDF.pkl\")",
"def callback(self, in_data, *kwargs):\n self.receive_time = time.time()\n audio_data = np.fromstring(in_data, dtype=np.float32)\n self.audio.extend(audio_data)\n # if the queue is full then run SVM\n if len(self.audio) == self.count:\n self.algo_time = time.time()\n self.algo()\n self.final_time = time.time()\n self.comparableAudio = np.append(self.comparableAudio, audio_data)\n return audio_data, pyaudio.paContinue",
"def mic_audio(dur):\n\n audio,b = microphone.record_audio(dur)\n audio = np.hstack([np.frombuffer(i,np.int16) for i in audio])\n return audio",
"def _process(self, data: np.ndarray) -> np.ndarray:\n return data[..., 0] * self.scale",
"def _process(self, data: np.ndarray) -> np.ndarray:\n return data[..., 1] * self.scale",
"def forward(self, audio):\n feature_extractor = self.feature_extractor\n wave_gan = self.wave_gan\n pqmf = self.pqmf\n use_noise_input = self.use_noise_input\n config = self.config\n pad_fn = self.pad_fn\n\n # Added for processing single audio file as in deepspeech armory [Sonal 29Oct20]\n if audio.ndim == 1:\n num_samples = audio.shape[0]\n mel_spectrogram = feature_extractor.transform(audio)\n # Setup inputs\n inputs = ()\n if use_noise_input:\n noise = torch.randn(\n 1,\n 1,\n len(mel_spectrogram) * config[\"hop_size\"],\n device=mel_spectrogram.device,\n )\n inputs += (noise,)\n\n mel_spectrogram = pad_fn(mel_spectrogram.unsqueeze(0).transpose(2, 1))\n inputs += (mel_spectrogram,)\n # Generate\n if config[\"generator_params\"][\"out_channels\"] == 1:\n reconstructed_audio = wave_gan(*inputs).view(-1)\n reconstructed_audio = reconstructed_audio[:num_samples]\n else:\n reconstructed_audio = pqmf.synthesis(wave_gan(*inputs)).view(-1)\n reconstructed_audio = reconstructed_audio[:num_samples]\n return reconstructed_audio\n\n else:\n reconstructions = []\n num_samples = audio.shape[1]\n for idx in range(audio.shape[0]):\n recording = audio[idx, :]\n mel_spectrogram = feature_extractor.transform(recording)\n # Setup inputs\n inputs = ()\n if use_noise_input:\n noise = torch.randn(\n 1,\n 1,\n len(mel_spectrogram) * config[\"hop_size\"],\n device=recording.device,\n )\n inputs += (noise,)\n mel_spectrogram = pad_fn(mel_spectrogram.unsqueeze(0).transpose(2, 1))\n inputs += (mel_spectrogram,)\n # Generate\n if config[\"generator_params\"][\"out_channels\"] == 1:\n reconstructed_audio = wave_gan(*inputs).view(-1)\n reconstructed_audio = reconstructed_audio[:num_samples]\n else:\n reconstructed_audio = pqmf.synthesis(wave_gan(*inputs)).view(-1)\n reconstructed_audio = reconstructed_audio[:, :num_samples]\n reconstructions.append(reconstructed_audio)\n return torch.stack(reconstructions)",
"def _process(self, X):\n # 周波数毎に実施する\n ones = np.ones(self.L.shape[1])\n\n # 初期のポジションベクトル\n n_channels = np.shape(X)[0]\n n_freq_bins = np.shape(X)[1]\n n_frames = np.shape(X)[2]\n\n d = None\n n_mic_pair = 0\n # for m1 in range(1):\n\n step = 2\n\n mic_pairs = self.mic_pairs\n # mic_pairs=[[m1,m2] for m1 in range(n_channels-1) for m2 in range(m1+1,np.minimum(m1+step+1,n_channels)) ]\n mic_pairs = np.array(mic_pairs)\n\n n_mic_pair = np.shape(mic_pairs)[0]\n d = np.array(self.mic_positions[mic_pairs[:, 1]]) - np.array(\n self.mic_positions[mic_pairs[:, 0]]\n )\n # d: n_mic_pair,dim\n\n # for the linear surrogate function, we need the smallest eigenvalue\n # of the covariance matrix of the microphone pairs\n if self.mm_type == SurrogateType.Linear:\n mic_diff_cov = d.T @ d\n mic_diff_cov_ev_max = np.linalg.eigvalsh(mic_diff_cov)[-1]\n else:\n mic_diff_cov_ev_max = None\n\n # 時間周波数毎の初期のポジションベクトル\n position_vector = np.zeros(shape=(n_freq_bins, n_frames, self.dim))\n\n X_temp = X[:, self.freq_bins, :]\n\n sigma = np.angle(X_temp[mic_pairs[:, 1], ...] / X_temp[mic_pairs[:, 0], ...])\n sigma = np.transpose(sigma, (1, 2, 0))\n\n sigma = np.where(np.abs(sigma) < 1.0e-18, np.zeros_like(sigma) + 1.0e-18, sigma)\n z = np.zeros(shape=(n_freq_bins, n_frames, n_mic_pair), dtype=np.int)\n x = np.random.normal(size=n_freq_bins * n_frames * n_mic_pair)\n x = np.reshape(x, newshape=(n_freq_bins, n_frames, n_mic_pair))\n # 初期化\n mode_vec = self.rough_mode_vec[self.freq_bins, :, :]\n mode_vec = np.conjugate(mode_vec)\n\n # Evaluation of the cost function on rough grid\n XX = X[:, self.freq_bins, :].transpose([1, 2, 0]) # (freq, time, chan)\n mv = mode_vec.transpose([0, 2, 1]) # (freq, grid, chan)\n prod = (mv[:, None, :, :] @ XX[:, :, :, None])[..., 0]\n\n amp = np.abs(prod)\n # ft\n index = np.argmax(amp, axis=-1)\n org_shape = np.shape(index)\n index = np.reshape(index, [-1])\n\n # indexに相当する方向を取る\n if self.dim == 2:\n rough_azimuth_recon = self.rough_grid.azimuth[index]\n # ダミー\n rough_colatitude_recon = np.zeros_like(rough_azimuth_recon) + np.pi\n elif self.dim == 3:\n rough_azimuth_recon = self.rough_grid.azimuth[index]\n rough_colatitude_recon = self.rough_grid.colatitude[index]\n\n doas = np.concatenate(\n (\n rough_colatitude_recon[:, None], # colatitude [0, pi]\n rough_azimuth_recon[:, None], # azimuth [0, 2 pi]\n ),\n axis=-1,\n )\n distance = 3.0\n\n # source_locations: 3, n_frames\n source_locations = geom.spherical_to_cartesian(doa=doas, distance=distance)\n source_locations = np.reshape(source_locations, (3, org_shape[0], org_shape[1]))\n\n position_vector[self.freq_bins, :, :] = np.transpose(\n source_locations[: self.dim, :, :], (1, 2, 0)\n )\n\n size = np.einsum(\"fti,fti->ft\", np.conjugate(position_vector), position_vector)\n size = np.sqrt(size)[..., np.newaxis]\n position_vector = position_vector / np.maximum(size, 1.0e-18)\n\n use_clustering = False\n cluster_index = np.random.randint(0, self.num_src, size=n_freq_bins * n_frames)\n cluster_index = np.reshape(cluster_index, (n_freq_bins, n_frames))\n cluster_center = np.random.normal(size=self.num_src * self.dim)\n cluster_center = np.reshape(cluster_center, newshape=(self.num_src, self.dim))\n size = np.einsum(\"ci,ci->c\", np.conjugate(cluster_center), cluster_center)\n size = np.sqrt(size)[..., np.newaxis]\n cluster_center = cluster_center / np.maximum(size, 1.0e-18)\n if use_clustering == True:\n # pを作る\n for k in self.freq_bins:\n for l in range(n_frames):\n position_vector[k, l, :] = cluster_center[cluster_index[k, l], :]\n\n 
est_p = position_vector[self.freq_bins, ...]\n z = z[self.freq_bins, ...]\n x = x[self.freq_bins, ...]\n freqs = self.freq_hz\n cluster_index = cluster_index[self.freq_bins, ...]\n\n silent_mode = True\n freqs_d = np.einsum(\"f,pi->fpi\", freqs, d)\n for i in range(self.n_mm_itertaions):\n #\n (\n org_cost_0,\n org_cost_1,\n org_cost_2,\n org_cost_3,\n cost_0,\n cost_1,\n cost_2,\n cost_3,\n est_p,\n z,\n x,\n ) = doa_estimation_one_iteration(\n freqs_d,\n est_p,\n sigma,\n z,\n x,\n cluster_index=cluster_index,\n cluster_center=cluster_center,\n iter_num2=self.rooting_n_iter,\n silent_mode=silent_mode,\n surrogate=self.mm_type,\n mic_diff_cov_ev_max=mic_diff_cov_ev_max,\n freqs=freqs,\n mic_diff=d,\n )\n if silent_mode == False:\n print(\"Cost function:\", org_cost_0)\n # est_pから\n # fti\n position_vector[self.freq_bins, ...] = est_p\n\n size = np.einsum(\"fti,fti->ft\", np.conjugate(position_vector), position_vector)\n size = np.sqrt(size)[..., np.newaxis]\n position_vector = position_vector / np.maximum(size, 1.0e-18)\n\n # gridを探す\n\n # position_vectorに相当する方向を取る\n if self.dim == 2:\n azimuth_recon = self.grid.azimuth\n # ダミー\n colatitude_recon = np.zeros_like(azimuth_recon) + np.pi\n elif self.dim == 3:\n azimuth_recon = self.grid.azimuth\n colatitude_recon = self.grid.colatitude\n\n doas = np.concatenate(\n (\n colatitude_recon[:, None], # colatitude [0, pi]\n azimuth_recon[:, None], # azimuth [0, 2 pi]\n ),\n axis=-1,\n )\n distance = 3.0\n # source_locations: 3, n_grid_num\n grid_locations = geom.spherical_to_cartesian(doa=doas, distance=distance)\n size = np.einsum(\"in,in->n\", np.conjugate(grid_locations), grid_locations)\n size = np.sqrt(size)[np.newaxis, ...]\n grid_locations = grid_locations / np.maximum(size, 1.0e-18)\n\n if not self.use_kd_tree:\n grid_index_buf = []\n for k in self.freq_bins:\n prod = np.einsum(\"in,ti->tn\", grid_locations, position_vector[k, ...])\n grid_index = np.argmax(prod, axis=-1)\n grid_index_buf.append(grid_index)\n grid_index_buf = np.array(grid_index_buf)\n\n spire_cost = np.zeros(self.grid.n_points)\n for n in range(self.grid.n_points):\n spire_cost[n] = spire_cost[n] + np.count_nonzero(grid_index_buf == n)\n\n else:\n\n # Same code, but with a kd-tree (Robin version)\n dim = position_vector.shape[-1]\n pv = position_vector[self.freq_bins, ...].reshape((-1, dim))\n _, nn = self.tree.query(pv)\n bin_indices, bin_count = np.unique(nn, return_counts=True)\n\n spire_cost = np.zeros(self.grid.n_points, dtype=np.float)\n spire_cost[bin_indices] = bin_count\n\n self.grid.set_values(spire_cost)",
"def preprocess_sound(data, sample_rate):\n # Convert to mono.\n\n if len(data.shape) > 1:\n data = np.mean(data, axis=1)\n # Resample to the rate assumed by VGGish.\n if sample_rate != params.SAMPLE_RATE:\n data = resampy.resample(data, sample_rate, params.SAMPLE_RATE)\n\n # Compute log mel spectrogram features.\n log_mel = mel_features.log_mel_spectrogram(\n data,\n audio_sample_rate=params.SAMPLE_RATE,\n log_offset=params.LOG_OFFSET,\n window_length_secs=params.STFT_WINDOW_LENGTH_SECONDS,\n hop_length_secs=params.STFT_HOP_LENGTH_SECONDS,\n num_mel_bins=params.NUM_MEL_BINS,\n lower_edge_hertz=params.MEL_MIN_HZ,\n upper_edge_hertz=params.MEL_MAX_HZ)\n\n # Frame features into examples.\n features_sample_rate = 1.0 / params.STFT_HOP_LENGTH_SECONDS\n example_window_length = int(round(\n params.EXAMPLE_WINDOW_SECONDS * features_sample_rate))\n example_hop_length = int(round(\n params.EXAMPLE_HOP_SECONDS * features_sample_rate))\n log_mel_examples = mel_features.frame(\n log_mel,\n window_length=example_window_length,\n hop_length=example_hop_length)\n return log_mel_examples",
"def recive_data(self, data_waveformreceived):\r\n self.adcollector.save_as_binary(self.savedirectory)\r\n self.channel_number = len(data_waveformreceived)\r\n if self.channel_number == 1: \r\n if 'Vp' in self.readinchan:\r\n self.data_collected_0 = data_waveformreceived[0]\r\n \r\n self.PlotDataItem_patch_voltage = PlotDataItem(self.xlabelhere_all, self.data_collected_0)\r\n #use the same color as before, taking advantages of employing same keys in dictionary\r\n self.PlotDataItem_patch_voltage.setPen('w')\r\n self.pw_data.addItem(self.PlotDataItem_patch_voltage)\r\n \r\n self.textitem_patch_voltage = pg.TextItem(('Vp'), color=('w'), anchor=(1, 1))\r\n self.textitem_patch_voltage.setPos(0, 1)\r\n self.pw_data.addItem(self.textitem_patch_voltage)\r\n elif 'Ip' in self.readinchan:\r\n self.data_collected_0 = data_waveformreceived[0]\r\n \r\n self.PlotDataItem_patch_current = PlotDataItem(self.xlabelhere_all, self.data_collected_0)\r\n #use the same color as before, taking advantages of employing same keys in dictionary\r\n self.PlotDataItem_patch_current.setPen('c')\r\n self.pw_data.addItem(self.PlotDataItem_patch_current)\r\n \r\n self.textitem_patch_current = pg.TextItem(('Ip'), color=('w'), anchor=(1, 1))\r\n self.textitem_patch_current.setPos(0, 1)\r\n self.pw_data.addItem(self.textitem_patch_current) \r\n elif 'PMT' in self.readinchan: # repeatnum, PMT_data_index_array, averagenum, ScanArrayXnum\r\n self.data_collected_0 = data_waveformreceived[0]*-1\r\n self.data_collected_0 = self.data_collected_0[0:len(self.data_collected_0)-1]\r\n \r\n # pmt data could come from raster scanning mode or from contour scanning mode.\r\n try:\r\n for i in range(self.repeatnum):\r\n self.PMT_image_reconstructed_array = self.data_collected_0[np.where(self.PMT_data_index_array_repeated == i+1)]\r\n Dataholder_average = np.mean(self.PMT_image_reconstructed_array.reshape(self.averagenum, -1), axis=0)\r\n Value_yPixels = int(len(self.samples_1)/self.ScanArrayXnum)\r\n self.PMT_image_reconstructed = np.reshape(Dataholder_average, (Value_yPixels, self.ScanArrayXnum))\r\n \r\n # Stack the arrays into a 3d array\r\n if i == 0:\r\n self.PMT_image_reconstructed_stack = self.PMT_image_reconstructed\r\n else:\r\n self.PMT_image_reconstructed_stack = np.concatenate((self.PMT_image_reconstructed_stack, self.PMT_image_reconstructed), axis=0)\r\n \r\n Localimg = Image.fromarray(self.PMT_image_reconstructed) #generate an image object\r\n Localimg.save(os.path.join(self.savedirectory, datetime.now().strftime('%Y-%m-%d_%H-%M-%S')+'_PMT_'+self.saving_prefix+'_'+str(i)+'.tif')) #save as tif\r\n \r\n plt.figure()\r\n plt.imshow(self.PMT_image_reconstructed, cmap = plt.cm.gray)\r\n plt.show()\r\n except:\r\n np.save(os.path.join(self.savedirectory, datetime.now().strftime('%Y-%m-%d_%H-%M-%S')+'_PMT_'+self.saving_prefix+'_'+'flatten'), self.data_collected_0)\r\n \r\n elif self.channel_number == 2: \r\n if 'PMT' not in self.readinchan:\r\n self.data_collected_0 = data_waveformreceived[0]\r\n \r\n self.PlotDataItem_patch_voltage = PlotDataItem(self.xlabelhere_all, self.data_collected_0)\r\n #use the same color as before, taking advantages of employing same keys in dictionary\r\n self.PlotDataItem_patch_voltage.setPen('w')\r\n self.pw_data.addItem(self.PlotDataItem_patch_voltage)\r\n \r\n self.textitem_patch_voltage = pg.TextItem(('Vp'), color=('w'), anchor=(1, 1))\r\n self.textitem_patch_voltage.setPos(0, 1)\r\n self.pw_data.addItem(self.textitem_patch_voltage) \r\n \r\n self.data_collected_1 = data_waveformreceived[1]\r\n \r\n 
self.PlotDataItem_patch_current = PlotDataItem(self.xlabelhere_all, self.data_collected_1)\r\n #use the same color as before, taking advantages of employing same keys in dictionary\r\n self.PlotDataItem_patch_current.setPen('c')\r\n self.pw_data.addItem(self.PlotDataItem_patch_current)\r\n \r\n self.textitem_patch_current = pg.TextItem(('Ip'), color=('w'), anchor=(1, 1))\r\n self.textitem_patch_current.setPos(0, 1)\r\n self.pw_data.addItem(self.textitem_patch_current) \r\n elif 'PMT' in self.readinchan:\r\n self.data_collected_0 = data_waveformreceived[0]*-1\r\n self.data_collected_0 = self.data_collected_0[0:len(self.data_collected_0)-1]\r\n \r\n try:\r\n for i in range(self.repeatnum):\r\n self.PMT_image_reconstructed_array = self.data_collected_0[np.where(self.PMT_data_index_array_repeated == i+1)]\r\n Dataholder_average = np.mean(self.PMT_image_reconstructed_array.reshape(self.averagenum, -1), axis=0)\r\n Value_yPixels = int(len(self.samples_1)/self.ScanArrayXnum)\r\n self.PMT_image_reconstructed = np.reshape(Dataholder_average, (Value_yPixels, self.ScanArrayXnum))\r\n \r\n # Stack the arrays into a 3d array\r\n if i == 0:\r\n self.PMT_image_reconstructed_stack = self.PMT_image_reconstructed\r\n else:\r\n self.PMT_image_reconstructed_stack = np.concatenate((self.PMT_image_reconstructed_stack, self.PMT_image_reconstructed), axis=0)\r\n \r\n Localimg = Image.fromarray(self.PMT_image_reconstructed) #generate an image object\r\n Localimg.save(os.path.join(self.savedirectory, datetime.now().strftime('%Y-%m-%d_%H-%M-%S')+'_PMT_'+self.saving_prefix+'_'+str(i)+'.tif')) #save as tif\r\n \r\n plt.figure()\r\n plt.imshow(self.PMT_image_reconstructed, cmap = plt.cm.gray)\r\n plt.show()\r\n except:\r\n np.save(os.path.join(self.savedirectory, datetime.now().strftime('%Y-%m-%d_%H-%M-%S')+'_PMT_'+self.saving_prefix+'_'+'contourscanning'), self.data_collected_0)\r\n \r\n if 'Vp' in self.readinchan:\r\n self.data_collected_1 = data_waveformreceived[1]\r\n \r\n self.PlotDataItem_patch_voltage = PlotDataItem(self.xlabelhere_all, self.data_collected_1)\r\n #use the same color as before, taking advantages of employing same keys in dictionary\r\n self.PlotDataItem_patch_voltage.setPen('w')\r\n self.pw_data.addItem(self.PlotDataItem_patch_voltage)\r\n \r\n self.textitem_patch_voltage = pg.TextItem(('Vp'), color=('w'), anchor=(1, 1))\r\n self.textitem_patch_voltage.setPos(0, 1)\r\n self.pw_data.addItem(self.textitem_patch_voltage)\r\n elif 'Ip' in self.readinchan:\r\n self.data_collected_1 = data_waveformreceived[1]\r\n \r\n self.PlotDataItem_patch_current = PlotDataItem(self.xlabelhere_all, self.data_collected_1)\r\n #use the same color as before, taking advantages of employing same keys in dictionary\r\n self.PlotDataItem_patch_current.setPen('c')\r\n self.pw_data.addItem(self.PlotDataItem_patch_current)\r\n \r\n self.textitem_patch_current = pg.TextItem(('Ip'), color=('w'), anchor=(1, 1))\r\n self.textitem_patch_current.setPos(0, 1)\r\n self.pw_data.addItem(self.textitem_patch_current)",
"def get_array(self):\r\n samples = getattr(self.instrument, self.devchan+'_samples')()\r\n sample_rate = getattr(self.instrument, self.devchan+'_sample_rate')()\r\n timeout = getattr(self.instrument, self.devchan+'_timeout')()\r\n return self.instrument.read_analog(self.devchan,\r\n samples,\r\n sample_rate,\r\n timeout,\r\n self.chan_config,\r\n self.minv,\r\n self.maxv,\r\n self.triggered,\r\n False\r\n )",
"def decode(self, data):\r\n packet_pointer = Ensemble.GetBaseDataSize(self.name_len)\r\n\r\n for beam in range(self.element_multiplier):\r\n for bin_num in range(self.num_elements):\r\n self.Amplitude[bin_num][beam] = Ensemble.GetFloat(packet_pointer, Ensemble().BytesInFloat, data)\r\n packet_pointer += Ensemble().BytesInFloat\r\n\r\n logging.debug(self.Amplitude)",
"def decide(self, data):\n\n data = struct.unpack('%dh' % (len(data) / 2, ), data)\n self.audio_recorded_in.extend(data)\n\n while len(self.audio_recorded_in) > self.cfg['VAD']['gmm']['framesize']:\n frame = self.audio_recorded_in[:self.cfg['VAD']['gmm']['framesize']]\n self.audio_recorded_in = self.audio_recorded_in[self.cfg['VAD']['gmm']['frameshift']:]\n\n mfcc = self.front_end.param(frame)\n\n log_prob_speech = self.gmm_speech.score(mfcc)\n log_prob_sil = self.gmm_sil.score(mfcc)\n\n self.log_probs_speech.append(log_prob_speech)\n self.log_probs_sil.append(log_prob_sil)\n\n log_prob_speech_avg = 0.0\n for log_prob_speech, log_prob_sil in zip(self.log_probs_speech, self.log_probs_sil):\n log_prob_speech_avg += log_prob_speech - logsumexp([log_prob_speech, log_prob_sil])\n log_prob_speech_avg /= len(self.log_probs_speech)\n\n prob_speech_avg = np.exp(log_prob_speech_avg)\n\n# print 'prob_speech_avg: %5.3f' % prob_speech_avg\n\n self.last_decision = prob_speech_avg\n\n # returns a speech / non-speech decisions\n return self.last_decision",
"def getCalibration(self):\n self.a0 = float(self.getParameter(index=1))\n self.a1 = float(self.getParameter(index=2))\n self.a2 = float(self.getParameter(index=3))\n self.a3 = float(self.getParameter(index=4))\n status = self.getStatus()\n self.wavelength = [ self.a0 + self.a1*x + self.a2*x*x + self.a3*x*x*x \n for x in range(status.pixels)]\n if self.discardTrailingSamples > 0:\n self.wavelength = self.wavelength[:-self.discardTrailingSamples]\n if self.discardLeadingSamples > 0:\n self.wavelength = self.wavelength[self.discardLeadingSamples:]",
"def getWaveform(self, ch=\"CH1\", samples=2500):\n\t\tself.isReady()\n\t\tcounter = 1\n\t\twhile True:\n\t\t\ttry:\t\t\n\t\t\t\twaveform = self.osc.get_waveform(source = ch, start = 1, stop = samples)\n\t\t\t\tbreak\n\t\t\texcept:\n\t\t\t\tprint(\"Retry: \" + str(counter))\n\t\t\t\tcounter += 1\n\t\ty_array = []\n\t\tfor x,y in waveform:\n\t\t\ty_array.append(y)\n\t\treturn y_array",
"def format_raw_audio_cnn(self):\n result_x, doa_from_file = self.load_audio()\n x = np.array([result_x])\n x_data = cnn.reshape_x_for_cnn(cnn.normalize_x_data(cnn.flatten_stereo(x)))\n\n return x_data, doa_from_file",
"def getPhysicalSamples(self, **kwargs):\n # initialise chans, startSample and endSample with the whole dataset\n options = self.parseGetDataKeywords(kwargs)\n # get data\n timeData = self.getUnscaledSamples(\n chans=options[\"chans\"],\n startSample=options[\"startSample\"],\n endSample=options[\"endSample\"],\n )\n # Scalars are applied in getUnscaledSamples to convert to mV - this is for ease of calculation and because each data file in the run might have a separate scaling\n # all that is left is to divide by the dipole length in km and remove the average\n for chan in options[\"chans\"]:\n if chan == \"Ex\":\n # multiply by 1000/self.getChanDx same as dividing by dist in km\n timeData[chan] = 1000 * timeData[chan] / self.getChanDx(chan)\n timeData.addComment(\n \"Dividing channel {} by electrode distance {} km to give mV/km\".format(\n chan, self.getChanDx(chan) / 1000.0\n )\n )\n if chan == \"Ey\":\n # multiply by 1000/self.getChanDy same as dividing by dist in km\n timeData[chan] = 1000 * timeData[chan] / self.getChanDy(chan)\n timeData.addComment(\n \"Dividing channel {} by electrode distance {} km to give mV/km\".format(\n chan, self.getChanDy(chan) / 1000.0\n )\n )\n\n # if remove zeros - False by default\n if options[\"remzeros\"]:\n timeData[chan] = removeZerosChan(timeData[chan])\n # if remove nans - False by default\n if options[\"remnans\"]:\n timeData[chan] = removeNansChan(timeData[chan])\n # remove the average from the data - True by default\n if options[\"remaverage\"]:\n timeData[chan] = timeData[chan] - np.average(\n timeData[chan]\n )\n\n # add comments\n timeData.addComment(\n \"Remove zeros: {}, remove nans: {}, remove average: {}\".format(\n options[\"remzeros\"], options[\"remnans\"], options[\"remaverage\"]\n )\n )\n return timeData",
"def audio_callback(self, indata, frames, time, status):\n if status.input_overflow:\n # NB: This increment operation is not atomic, but this doesn't\n # matter since no other thread is writing to the attribute.\n self.input_overflows += 1\n # NB: self.recording is accessed from different threads.\n # This is safe because here we are only accessing it once (with a\n # single bytecode instruction).\n if self.recording:\n self.audio_q.put(indata.copy())\n self.previously_recording = True\n else:\n if self.previously_recording:\n self.audio_q.put(None)\n self.previously_recording = False\n\n self.peak = max(self.peak, np.max(np.abs(indata)))\n try:\n self.metering_q.put_nowait(self.peak)\n except queue.Full:\n pass\n else:\n self.peak = 0",
"def AcquiredData (self, arguments=None) :\n\t\tself.OODriver.Wrapper_getSpectrum(self.wrapperHandle,self.spectrometerIndex,self.bufferHandle)\n\t\t\n\t\tif self.OODriver.Wrapper_isSaturated(self.wrapperHandle,self.spectrometerIndex) :\n\t\t\tprint \"Warning: OcenOptics spectrometer is saturated!\"\n\t\t\t\n\t\ttry : return self.buffer[self.spectral_interval]\n\t\texcept AttributeError : return self.buffer",
"def get_spectrum(self):\n\n self.sock.send('Q')\n self.sock.send(str(100 * self.center_wl))\n\n response = self.sock.recv(7)\n if not response:\n raise InstrumentError(\n 'No response from Labview client, try reconnecting')\n\n datalen = int(response)\n data = ''\n\n while datalen > 0:\n # read data in chunks\n dt = self.sock.recv(datalen)\n data += dt\n datalen -= len(dt)\n\n data = data.split(\"\\n\")[:-1]\n for i in range(len(data)):\n data[i] = data[i].split(\"\\t\")\n\n data = n.array(data,dtype=float)\n\n wl = data[0]\n ccd = data[1:]\n\n return wl,ccd\n\n #self.sock.close()",
"def read_data(self):\n self.data = reduce_spectrum(self.filename)",
"def _get_input_data_for_model(self, extra_data=None):\n extra_data = {} if extra_data is None else extra_data\n if self.metadata['sample_rate'] is not None:\n if self.audio_signal.sample_rate != self.metadata['sample_rate']:\n self.audio_signal.resample(self.metadata['sample_rate'])\n\n self.audio_signal.stft_params = self.metadata['stft_params']\n self.audio_signal.stft()\n\n data = {'mix': self.audio_signal}\n data.update(extra_data)\n data = self.transform(data)\n\n for key in data:\n if torch.is_tensor(data[key]):\n data[key] = data[key].unsqueeze(0).to(self.device).float()\n if self.metadata['num_channels'] == 1:\n # then each channel is processed indep\n data[key] = data[key].transpose(0, self.channel_dim)\n self.input_data = data\n return self.input_data",
"def __call__(self, gradient):\n audio_out = self.modem.convert_data_to_audio(gradient.flatten())\n decoded_gradients = self.modem.convert_audio_to_floats(audio_out)\n\n # if you want to regret being alive,\n # self.stream.write(audio_out.tobytes())\n\n return decoded_gradients.reshape(gradient.shape)",
"def get_raw(self):\r\n samples = getattr(self.instrument, self.devchan+'_samples')()\r\n if (not self.averaging) and (samples>1):\r\n raise ValueError('For multiple samples averaging must be on.')\r\n sample_rate = getattr(self.instrument, self.devchan+'_sample_rate')()\r\n timeout = getattr(self.instrument, self.devchan+'_timeout')()\r\n return self.instrument.read_analog(self.devchan,\r\n samples,\r\n sample_rate,\r\n timeout,\r\n self.chan_config,\r\n self.minv,\r\n self.maxv,\r\n self.triggered,\r\n self.averaging,\r\n )",
"def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, input_device_index=0, channels=1, rate=RATE, input=True, output=True, frames_per_buffer=CHUNK_SIZE)\n num_silent = 0\n snd_started = False\n\n r = array('h')\n while 1:\n snd_data = array('h', stream.read(CHUNK_SIZE, exception_on_overflow = False))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n print(\"Sound started.\")\n snd_started = True\n\n if snd_started and num_silent> 10:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n #r = trim(r)\n #r = add_silence(r, 0.5)\n return sample_width, r",
"def record_chunk(self):\n data = self.stream.read(nFFT)\n data_array = bytearray(data)\n self.cur_input = []\n for i in range(nFFT):\n amp = struct.unpack('H', data_array[:2])\n for _ in range(2):\n data_array.pop(0)\n self.cur_input.append(amp)",
"def update(self) -> np.ndarray:\r\n data: np.ndarray = self._read_arduino()\r\n processed_data: np.ndarray = SpikerStream._process_data(data)\r\n return processed_data"
] | [
"0.6034653",
"0.59338874",
"0.5892915",
"0.5839982",
"0.5827902",
"0.57516843",
"0.574115",
"0.5740674",
"0.5702725",
"0.5690479",
"0.568213",
"0.56605065",
"0.5635295",
"0.56270736",
"0.5599267",
"0.5588864",
"0.5587347",
"0.5577383",
"0.5565157",
"0.55617267",
"0.55504036",
"0.5549361",
"0.55434984",
"0.5530988",
"0.5527446",
"0.5519829",
"0.5518551",
"0.5512879",
"0.5503725",
"0.5493217"
] | 0.62964714 | 0 |
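The update document in the row above leans on helper objects that are not shown in the row (mel_gain, mel_smoothing and fft_plot_filter, each exposing an update method), as well as self.mel_y, self.fft_window and self.y_roll prepared elsewhere. Below is a minimal sketch of one plausible smoothing helper, under the assumption that these are asymmetric exponential filters; the class name and decay constants are illustrative, not taken from the dataset.

import numpy as np

class ExpFilter:
    # Exponentially smooths a value, reacting faster on rises than on decays.
    def __init__(self, value, alpha_decay=0.2, alpha_rise=0.9):
        self.value = np.asarray(value, dtype=float)
        self.alpha_decay = alpha_decay   # weight of new samples when the signal falls
        self.alpha_rise = alpha_rise     # weight of new samples when the signal rises

    def update(self, new_value):
        new_value = np.asarray(new_value, dtype=float)
        alpha = np.where(new_value > self.value, self.alpha_rise, self.alpha_decay)
        self.value = alpha * new_value + (1.0 - alpha) * self.value
        return self.value

# Illustrative call, mirroring how the row's method smooths the mel bins:
smoother = ExpFilter(np.zeros(4))
print(smoother.update([1.0, 0.5, 0.0, 2.0]))   # -> approximately [0.9, 0.45, 0.0, 1.8]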
Returns center frequencies and band edges for a mel filter bank | def melfrequencies_mel_filterbank(self, num_bands, freq_min, freq_max, num_fft_bands):
mel_max = self.hertz_to_mel(freq_max)
mel_min = self.hertz_to_mel(freq_min)
delta_mel = abs(mel_max - mel_min) / (num_bands + 1.0)
frequencies_mel = mel_min + delta_mel * arange(0, num_bands + 2)
lower_edges_mel = frequencies_mel[:-2]
upper_edges_mel = frequencies_mel[2:]
center_frequencies_mel = frequencies_mel[1:-1]
return center_frequencies_mel, lower_edges_mel, upper_edges_mel | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_filterbanks(nfilt=26,nfft=512,samplerate=16000,lowfreq=0,highfreq=None):\n highfreq= highfreq or samplerate/2\n assert highfreq <= samplerate/2, \"highfreq is greater than samplerate/2\"\n\n # compute points evenly spaced in mels\n lowmel = hz2mel(lowfreq)\n highmel = hz2mel(highfreq)\n\n # check kaldi/src/feat/Mel-computations.h \n fbank = numpy.zeros([nfilt,nfft//2+1])\n mel_freq_delta = (highmel-lowmel)/(nfilt+1)\n for j in range(0,nfilt):\n leftmel = lowmel+j*mel_freq_delta\n centermel = lowmel+(j+1)*mel_freq_delta\n rightmel = lowmel+(j+2)*mel_freq_delta\n for i in range(0,nfft//2):\n mel=hz2mel(i*samplerate/nfft)\n if mel>leftmel and mel<rightmel:\n if mel<centermel:\n fbank[j,i]=(mel-leftmel)/(centermel-leftmel)\n else:\n fbank[j,i]=(rightmel-mel)/(rightmel-centermel)\n return fbank",
"def mfccInitFilterBanks(fs, nfft):\n\n # filter bank params:\n lowfreq = 133.33\n linsc = 200/3.\n logsc = 1.0711703\n numLinFiltTotal = 13\n numLogFilt = 27\n\n if fs < 8000:\n nlogfil = 5\n\n # Total number of filters\n nFiltTotal = numLinFiltTotal + numLogFilt\n\n # Compute frequency points of the triangle:\n freqs = numpy.zeros(nFiltTotal+2)\n freqs[:numLinFiltTotal] = lowfreq + numpy.arange(numLinFiltTotal) * linsc\n freqs[numLinFiltTotal:] = freqs[numLinFiltTotal-1] * logsc ** numpy.arange(1, numLogFilt + 3)\n heights = 2./(freqs[2:] - freqs[0:-2])\n\n # Compute filterbank coeff (in fft domain, in bins)\n fbank = numpy.zeros((nFiltTotal, nfft))\n nfreqs = numpy.arange(nfft) / (1. * nfft) * fs\n\n for i in range(nFiltTotal):\n lowTrFreq = freqs[i]\n cenTrFreq = freqs[i+1]\n highTrFreq = freqs[i+2]\n\n lid = numpy.arange(numpy.floor(lowTrFreq * nfft / fs) + 1, numpy.floor(cenTrFreq * nfft / fs) + 1, dtype=numpy.int)\n lslope = heights[i] / (cenTrFreq - lowTrFreq)\n rid = numpy.arange(numpy.floor(cenTrFreq * nfft / fs) + 1, numpy.floor(highTrFreq * nfft / fs) + 1, dtype=numpy.int)\n rslope = heights[i] / (highTrFreq - cenTrFreq)\n fbank[i][lid] = lslope * (nfreqs[lid] - lowTrFreq)\n fbank[i][rid] = rslope * (highTrFreq - nfreqs[rid])\n\n return fbank, freqs",
"def mel_filterbank(n_filters, n_fft, sr):\n freq2mel = lambda f: 2595. * np.log10(1 + f / 700.)\n mel2freq = lambda m: 700. * (10 ** (m / 2595.) - 1)\n\n lowfreq = 0\n highfreq = sr // 2\n\n lowmel = freq2mel(lowfreq)\n highmel = freq2mel(highfreq)\n\n melpoints = np.linspace(lowmel, highmel, 1 + n_filters + 1)\n\n # must convert from freq to fft bin number\n fft_bins = ((n_fft + 1) * mel2freq(melpoints) // sr).astype(np.int32)\n\n filterbank = np.zeros((n_filters, n_fft // 2))\n for j in range(n_filters):\n for i in range(fft_bins[j], fft_bins[j + 1]):\n filterbank[j, i] = (i - fft_bins[j]) / (fft_bins[j + 1] - fft_bins[j])\n for i in range(fft_bins[j + 1], fft_bins[j + 2]):\n filterbank[j, i] = (fft_bins[j + 2] - i) / (fft_bins[j + 2] - fft_bins[j + 1])\n\n mel_filter = filterbank.T / filterbank.sum(axis=1).clip(1e-16)\n mel_inv_filter = filterbank\n\n return mel_filter, mel_inv_filter, melpoints",
"def mfccInitFilterBanks(fs, nfft):\n # filter bank params:\n lowfreq = 133.33\n linsc = 200/3.\n logsc = 1.0711703\n numLinFiltTotal = 13\n numLogFilt = 27\n\n if fs < 8000:\n nlogfil = 5\n\n # Total number of filters\n nFiltTotal = numLinFiltTotal + numLogFilt\n\n # Compute frequency points of the triangle:\n freqs = np.zeros(nFiltTotal+2)\n freqs[:numLinFiltTotal] = lowfreq + np.arange(numLinFiltTotal) * linsc\n freqs[numLinFiltTotal:] = freqs[numLinFiltTotal-1] * logsc ** np.arange(1, numLogFilt + 3)\n heights = 2./(freqs[2:] - freqs[0:-2])\n\n # Compute filterbank coeff (in fft domain, in bins)\n fbank = np.zeros((nFiltTotal, nfft))\n nfreqs = np.arange(nfft) / (1. * nfft) * fs\n\n for i in range(nFiltTotal):\n lowTrFreq = freqs[i]\n cenTrFreq = freqs[i+1]\n highTrFreq = freqs[i+2]\n\n lid = np.arange(np.floor(lowTrFreq * nfft / fs) + 1,\n np.floor(cenTrFreq * nfft / fs) + 1,\n dtype=np.int)\n lslope = heights[i] / (cenTrFreq - lowTrFreq)\n rid = np.arange(np.floor(cenTrFreq * nfft / fs) + 1,\n np.floor(highTrFreq * nfft / fs) + 1,\n dtype=np.int)\n rslope = heights[i] / (highTrFreq - cenTrFreq)\n fbank[i][lid] = lslope * (nfreqs[lid] - lowTrFreq)\n fbank[i][rid] = rslope * (highTrFreq - nfreqs[rid])\n\n return fbank, freqs",
"def center_frequencies(self):\n return (band.center_frequency for band in self)",
"def get_filterbanks(nfilt=20,nfft=512,samplerate=16000,lowfreq=0,highfreq=None):\n highfreq= highfreq or samplerate/2\n assert highfreq <= samplerate/2, \"highfreq is greater than samplerate/2\"\n \n # compute points evenly spaced in mels\n lowmel = hz2mel(lowfreq)\n highmel = hz2mel(highfreq)\n melpoints = pylab.linspace(lowmel,highmel,nfilt+2)\n # our points are in Hz, but we use fft bins, so we have to convert\n # from Hz to fft bin number\n bin = pylab.floor((nfft+1)*mel2hz(melpoints)/samplerate)\n\n fbank = pylab.zeros([nfilt,nfft/2+1])\n for j in xrange(0,nfilt):\n for i in xrange(int(bin[j]),int(bin[j+1])):\n fbank[j,i] = (i - bin[j])/(bin[j+1]-bin[j])\n for i in xrange(int(bin[j+1]),int(bin[j+2])):\n fbank[j,i] = (bin[j+2]-i)/(bin[j+2]-bin[j+1])\n return fbank",
"def get_filterbanks(nfilt=20,nfft=512,samplerate=16000,lowfreq=0,highfreq=None):\r\n highfreq= highfreq or samplerate/2\r\n assert highfreq <= samplerate/2, \"highfreq is greater than samplerate/2\"\r\n \r\n # compute points evenly spaced in mels\r\n lowmel = hz2mel(lowfreq)\r\n highmel = hz2mel(highfreq)\r\n melpoints = np.linspace(lowmel,highmel,nfilt+2)\r\n # our points are in Hz, but we use fft bins, so we have to convert\r\n # from Hz to fft bin number\r\n bin = np.floor((nfft+1)*mel2hz(melpoints)/samplerate)\r\n\r\n fbank = np.zeros([nfilt,nfft/2+1])\r\n for j in xrange(0,nfilt):\r\n for i in xrange(int(bin[j]),int(bin[j+1])):\r\n fbank[j,i] = (i - bin[j])/(bin[j+1]-bin[j])\r\n for i in xrange(int(bin[j+1]),int(bin[j+2])):\r\n fbank[j,i] = (bin[j+2]-i)/(bin[j+2]-bin[j+1])\r\n return fbank",
"def make_filter_banks(power_frames, sampling_rate, NFFT, num_filt = 40):\n low_freq_mel = 0\n high_freq_mel = Hz_to_Mel(sampling_rate/2) # Convert Hz to Mel\n #mel_points = np.arange(low_freq_mel, high_freq_mel, (high_freq_mel - low_freq_mel)/(num_filt + 2)) # Equally spaced in Mel scale\n mel_points = np.linspace(low_freq_mel, high_freq_mel, num_filt + 2) # Equally spaced in Mel scale\n #hz_points = Mel_to_Hz(mel_points) # Convert Mel to Hz\n bins = np.floor((NFFT + 1) * Mel_to_Hz(mel_points) / sampling_rate)\n \n #bank = np.empty((num_filt, int(np.floor(NFFT / 2 + 1))))\n bank = np.zeros((num_filt, int(np.floor(NFFT / 2 + 1))))\n for m in range(1, num_filt + 1):\n f_s = bins[m - 1 : m + 2]\n f_prev = int(f_s[0]) # left\n f = int(f_s[1]) # center\n f_next = int(f_s[2]) # right\n\n np.put(bank[m - 1], list(range(f_prev)), 0) # k < f_prev\n\n for k in range(f_prev, f):\n np.put(bank, ((m - 1)*int(np.floor(NFFT / 2 + 1))) + k, (k - f_prev) / (f - f_prev)) \n \n for k in range(f, f_next):\n np.put(bank, ((m - 1)*int(np.floor(NFFT / 2 + 1))) + k, (f_next - k) / (f_next - f))\n\n np.put(bank[m - 1], list(range(f_next, len(bank))), 0) # k > f_next\n\n filter_banks = np.where(np.dot(power_frames, bank.T) == 0, np.finfo(float).eps, np.dot(power_frames, bank.T))\n #filter_banks = np.where(filter_banks == 0, np.finfo(float).eps, filter_banks) # Numerical Stability\n filter_banks = 20 * np.log10(filter_banks) # dB\n\n return filter_banks",
"def bands(self) -> int:\n ...",
"def _make_filters(self):\n\n \"\"\"\n filter_bank = bandpass_filterbank(\n self.bands, fs=self.fs, order=order, output=output\n )\n\n return [lambda sig: sosfiltfilt(bpf, sig) for bpf in filter_bank]\n \"\"\"\n\n # This seems to work only for Octave bands out of the box\n centers = self.centers\n n = len(self.centers)\n\n new_bands = [[centers[0] / 2, centers[1]]]\n for i in range(1, n - 1):\n new_bands.append([centers[i - 1], centers[i + 1]])\n new_bands.append([centers[-2], self.fs / 2])\n\n n_freq = self.n_fft // 2 + 1\n freq_resp = np.zeros((n_freq, n))\n freq = np.arange(n_freq) / self.n_fft * self.fs\n\n for b, (band, center) in enumerate(zip(new_bands, centers)):\n lo = np.logical_and(band[0] <= freq, freq < center)\n freq_resp[lo, b] = 0.5 * (1 + np.cos(2 * np.pi * freq[lo] / center))\n\n if b != n - 1:\n hi = np.logical_and(center <= freq, freq < band[1])\n freq_resp[hi, b] = 0.5 * (1 - np.cos(2 * np.pi * freq[hi] / band[1]))\n else:\n hi = center <= freq\n freq_resp[hi, b] = 1.0\n\n filters = np.fft.fftshift(\n np.fft.irfft(freq_resp, n=self.n_fft, axis=0),\n axes=[0],\n )\n\n # remove the first sample to make them odd-length symmetric filters\n self.filters = filters[1:, :]",
"def create_filter_bank():\r\n kernels = []\r\n for theta in range(0, 2):\r\n theta = theta / 2. * np.pi\r\n for sigma in (3, 5):\r\n for frequency in (0.10, 0.25):\r\n kernel = np.real(gabor_kernel(frequency, theta=theta,\r\n sigma_x=sigma, sigma_y=sigma))\r\n kernels.append(kernel)\r\n print(len(kernels))\r\n return kernels",
"def melfilterbank(M, N, fs=1, fl=0.0, fh=0.5):\n\n # all center frequencies of the filters\n f = (N / fs) * invmelscale(\n melscale(fl * fs)\n + (np.arange(M + 2) * (melscale(fh * fs) - melscale(fl * fs)) / (M + 1))\n )\n\n # Construct the triangular filter bank\n H = np.zeros((M, N // 2 + 1))\n k = np.arange(N // 2 + 1)\n for m in range(1, M + 1):\n I = np.where(np.logical_and(f[m - 1] < k, k < f[m]))\n H[m - 1, I] = (\n 2 * (k[I] - f[m - 1]) / ((f[m + 1] - f[m - 1]) * (f[m] - f[m - 1]))\n )\n I = np.where(np.logical_and(f[m] <= k, k < f[m + 1]))\n H[m - 1, I] = (\n 2 * (f[m + 1] - k[I]) / ((f[m + 1] - f[m - 1]) * (f[m + 1] - f[m]))\n )\n\n return H",
"def filterbank(min_freq, max_freq, number, srate, N):\n points = numpy.linspace(M(min_freq), M(max_freq), number + 2)\n freqs = Mi(points)\n bins = freq2bin(freqs, srate, N)\n\n filters = numpy.zeros((number, N/2 +1))\n\n for i in xrange(0, number):\n bot = int(math.floor(bins[i]))\n mid = int(round(bins[i+1]))\n top = int(math.ceil(bins[i+2]))\n\n filters[i][bot:mid] = numpy.linspace(0, 1, mid - bot +1)[:-1]\n filters[i][mid:top+1] = numpy.linspace(1, 0, top - mid +1)\n\n return filters",
"def octave_bands(fc=1000, third=False, start=0.0, n=8):\n\n div = 1\n if third:\n div = 3\n\n # Octave Bands\n fcentre = fc * (\n 2.0 ** (np.arange(start * div, (start + n) * div - (div - 1)) / div)\n )\n fd = 2 ** (0.5 / div)\n bands = np.array([[f / fd, f * fd] for f in fcentre])\n\n return bands, fcentre",
"def n_band(self):\n pass",
"def fbank(signal,samplerate=16000,winlen=0.025,winstep=0.01,\n nfilt=26,nfft=512,lowfreq=0,highfreq=None,preemph=0.97): \n highfreq= highfreq or samplerate/2\n print \"preemph %s\"%(preemph)\n signal = sigproc.preemphasis(signal,preemph)\n frames = sigproc.framesig(signal, winlen*samplerate, winstep*samplerate)\n matchframes(frames[0], frames[1])\n pspec = sigproc.powspec(frames,nfft)\n energy = pylab.sum(pspec,1) # this stores the total energy in each frame\n energy = pylab.where(energy == 0, pylab.finfo(float).eps, energy) # if energy is zero, we get problems with log\n fb = get_filterbanks(nfilt, nfft, samplerate, lowfreq, highfreq)\n print \"len(fb) %s\"%(len(fb))\n colour = \"k-\"\n for i in range(len(fb)):\n if colour == \"k-\":\n colour = \"r-\"\n else:\n colour = \"k-\"\n startedplot = False\n midpoint = 0\n for j in range(len(fb[i])):\n if fb[i][j] > 0:\n if startedplot == False:\n startedplot = j\n if j > 0:\n pylab.plot([j-1, j], [fb[i][j-1], fb[i][j]], colour)\n if fb[i][j] == 1.0:\n midpoint = j\n else:\n if not startedplot == False:\n pylab.plot([j-1, j], [fb[i][j-1], 0], colour)\n try:\n print \"slope to midpoint %.3f, slope from midpoint %.3f\"%(1.0/float(midpoint-startedplot), 1.0/float(midpoint-j+1))\n except:\n pass\n break\n pylab.show()\n feat = pylab.dot(pspec, fb.T) # compute the filterbank energies\n feat = pylab.where(feat == 0, pylab.finfo(float).eps, feat) # if feat is zero, we get problems with log\n return feat, energy",
"def bands(self):\n\t\treturn self._bands",
"def filterBankEdges(img):\n imgE = Views.extendBorder(img)\n opTop = as2DKernel(imgE, [-1]*3 + [0]*3 + [1]*3)\n opBottom = as2DKernel(imgE, [1]*3 + [0]*3 + [-1]*3)\n opLeft = as2DKernel(imgE, [-1, 0, 1] * 3)\n opRight = as2DKernel(imgE, [1, 0, -1] * 3)\n return [opTop, opBottom, opLeft, opRight]",
"def createMaks(self):\n mask = np.zeros((self.height, self.width)) # (H, W)\n center = self.width // 2\n\n for lat in range(self.height):\n count = int(self.counts[lat])\n # print(lat, count)\n # print(center - count, center, center + count)\n mask[lat][center: center + count] = 1\n mask[lat][center - count: center] = 1\n\n return mask # (H, W)",
"def narrowIncandPeakInfo(self):\r\n\t\tself.narrowIncandBaseline = (np.mean(self.narrowBandIncandData[0:10]))\r\n\t\t\t\t\r\n\t\traw_narrowIncand_max = np.amax(self.narrowBandIncandData)\r\n\t\tnarrowIncand_max = raw_narrowIncand_max - self.narrowIncandBaseline\t\t\r\n\t\tnarrowIncand_max_index = np.argmax(self.narrowBandIncandData)\r\n\t\t\r\n\t\tself.narrowIncandMax =narrowIncand_max\r\n\t\tself.narrowIncandMaxPos = narrowIncand_max_index",
"def map(self, mapunit):\n\n #The number of bands to measure the LF for\n if len(mapunit['luminosity'].shape)>1:\n self.nbands = mapunit['luminosity'].shape[1]\n else:\n mapunit['luminosity'] = np.atleast_2d(mapunit['luminosity']).T\n self.nbands = 1\n\n #If only measuring for centrals, get the appropriate\n #rows of the mapunit\n\n mu = {}\n if self.central_only:\n delete_after_map = True\n for k in mapunit.keys():\n mu[k] = mapunit[k][mapunit['central']==1]\n else:\n delete_after_map = False\n mu = mapunit\n\n #Want to count galaxies in bins of luminosity for\n #self.nbands different bands in self.nzbins\n #redshift bins\n if self.lumcounts is None:\n self.lumcounts = np.zeros((self.njack, len(self.magbins)-1,\n self.nbands, self.nzbins))\n\n #Assume redshifts are provided, and that the\n #mapunit is sorted in terms of them\n \n if self.lightcone:\n for i, z in enumerate(self.zbins[:-1]):\n zlidx = mu['redshift'].searchsorted(self.zbins[i])\n zhidx = mu['redshift'].searchsorted(self.zbins[i+1])\n\n #Count galaxies in bins of luminosity\n for j in range(self.nbands):\n if not self.CMASS:\n c, e = np.histogram(mu['luminosity'][zlidx:zhidx,j],\n bins=self.magbins)\n else:\n cidx = self.selectCMASS(mu['appmag'][zlidx:zhidx])\n c, e = np.histogram(mu['luminosity'][zlidx:zhidx,j][cidx],\n bins=self.magbins)\n \n self.lumcounts[self.jcount,:,j,i] += c\n else:\n for j in range(self.nbands):\n if not self.CMASS:\n c, e = np.histogram(mu['luminosity'][:,j],\n bins=self.magbins)\n else:\n cidx = self.selectCMASS(mu['appmag'][:])\n c, e = np.histogram(mu['luminosity'][:,j][cidx],\n bins=self.magbins)\n \n self.lumcounts[self.jcount,:,j,0] += c\n\n if delete_after_map:\n True",
"def calbands( band = 0, tmo = 30 ) :\n optimizeThresholds(band,tmo)\n flattenPhases(band,tmo)\n calibrateSpectra(band=band,tmo=tmo)",
"def compute_melmat(self, num_mel_bands=12, freq_min=64, freq_max=8000,\n num_fft_bands=513, sample_rate=16000):\n center_frequencies_mel, lower_edges_mel, upper_edges_mel = self.melfrequencies_mel_filterbank(\n num_mel_bands,\n freq_min,\n freq_max,\n num_fft_bands\n )\n\n center_frequencies_hz = self.mel_to_hertz(center_frequencies_mel)\n lower_edges_hz = self.mel_to_hertz(lower_edges_mel)\n upper_edges_hz = self.mel_to_hertz(upper_edges_mel)\n freqs = linspace(0.0, sample_rate / 2.0, num_fft_bands)\n melmat = zeros((num_mel_bands, num_fft_bands))\n\n for imelband, (center, lower, upper) in enumerate(zip(\n center_frequencies_hz, lower_edges_hz, upper_edges_hz)):\n\n left_slope = (freqs >= lower) == (freqs <= center)\n melmat[imelband, left_slope] = (\n (freqs[left_slope] - lower) / (center - lower)\n )\n\n right_slope = (freqs >= center) == (freqs <= upper)\n melmat[imelband, right_slope] = (\n (upper - freqs[right_slope]) / (upper - center)\n )\n return melmat, (center_frequencies_mel, freqs)",
"def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - self.fr[fc_ix + n - 1:fc_ix - 1:-1]))",
"def power_in_bands(self, bands=None, avgaxis=None):\n if bands is None:\n bands = _third_octave_bands\n\n # center frequencies\n fcs = np.asarray([b[0] for b in bands])\n Npow2 = 2 ** (self.nt - 1).bit_length()\n f = np.fft.fftfreq(Npow2, d=1 / self.fs)\n\n shape = list(self.in_freq.shape)\n shape[-1] = len(bands)\n P = np.zeros(shape)\n for i, (fc, fl, fu) in enumerate(bands):\n if fu < self.fs / 2: # include only bands in frequency range\n iband = np.logical_and(fl <= f, f < fu)\n P[..., i] = np.sum(\n np.abs(np.fft.fft(self.in_time, n=Npow2, axis=-1)[..., iband]) ** 2\n * 2 # energy from negative and positive frequencies\n * self.dt\n / self.nt\n / self.time_length,\n axis=-1,\n )\n else:\n P[..., i] = 0\n\n if avgaxis is not None:\n P = P.mean(axis=avgaxis)\n\n return P, fcs",
"def analysis(self, x, band=None):\n\n if band is None:\n bands = range(self.filters.shape[1])\n else:\n bands = [band]\n\n output = np.zeros((x.shape[0], len(bands)), dtype=x.dtype)\n\n for i, b in enumerate(bands):\n output[:, i] = fftconvolve(x, self.filters[:, b], mode=\"same\")\n\n if output.shape[1] == 1:\n return output[:, 0]\n else:\n return output",
"def mw_boundaries(self):\n phi = np.arange(0., 2.0*np.pi, 0.1)\n theta_l = np.ones_like(phi)* 110 * np.pi / 180.\n theta_h = np.ones_like(phi)* 70 * np.pi / 180.\n ra_l, dec_l = self.gc2radec(phi, theta_l)\n ra_h, dec_h = self.gc2radec(phi, theta_h)\n return (ra_h, dec_h), (ra_l, dec_l)",
"def get_freq_grid():\n (bins_per_octave, n_octaves, _, _, f_min, _) = get_hcqt_params()\n freq_grid = librosa.cqt_frequencies(\n bins_per_octave*n_octaves, f_min, bins_per_octave=bins_per_octave\n )\n return freq_grid",
"def spectral_centroid(sign, fs): #center portion of the signal\n f, ff = plotfft(sign, fs)\n if not np.sum(ff):\n return 0\n else:\n return np.dot(f,ff/np.sum(ff))",
"def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - (self.gain - self.fr[fc_ix + n - 1:fc_ix - 1:-1])))"
] | [
"0.62791246",
"0.62104696",
"0.6200014",
"0.61953336",
"0.6114515",
"0.6007833",
"0.5868187",
"0.58487403",
"0.5750124",
"0.57495075",
"0.5738013",
"0.5718509",
"0.56999505",
"0.5677948",
"0.56582195",
"0.5656211",
"0.55078876",
"0.54467714",
"0.5432982",
"0.53907806",
"0.53900474",
"0.538837",
"0.5378068",
"0.53498715",
"0.5318314",
"0.5280279",
"0.5273371",
"0.52707916",
"0.52454585",
"0.5234653"
] | 0.698832 | 0 |
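Several of the snippets in the list above build triangular (mel-scale) filter banks. The sketch below is a compact, self-contained version of that construction, with parameter values chosen purely for illustration; it is an assumption-laden sketch, not the implementation any of the snippets came from.

import numpy as np

def hz_to_mel(f):
    return 2595.0 * np.log10(1.0 + f / 700.0)

def mel_to_hz(m):
    return 700.0 * (10.0 ** (m / 2595.0) - 1.0)

def mel_filterbank(n_filters=4, n_fft=64, fs=8000.0, f_lo=0.0, f_hi=None):
    f_hi = f_hi or fs / 2.0
    # Equally spaced centre frequencies on the mel scale, mapped back to Hz,
    # then converted to FFT bin indices.
    mels = np.linspace(hz_to_mel(f_lo), hz_to_mel(f_hi), n_filters + 2)
    bins = np.floor((n_fft + 1) * mel_to_hz(mels) / fs).astype(int)
    fb = np.zeros((n_filters, n_fft // 2 + 1))
    for m in range(1, n_filters + 1):
        lo, mid, hi = bins[m - 1], bins[m], bins[m + 1]
        # Rising edge up to the centre bin, falling edge after it.
        fb[m - 1, lo:mid] = np.linspace(0.0, 1.0, mid - lo, endpoint=False)
        fb[m - 1, mid:hi + 1] = np.linspace(1.0, 0.0, hi - mid + 1)
    return fb

print(mel_filterbank().shape)   # (4, 33)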
This function renames the columns of a pandas dataframe. It converts column names to snake case if rename_dict is not passed. | def cleanup_column_names(df, rename_dict={}, do_inplace=True):
if not rename_dict:
return df.rename(columns={col: col.lower().replace(' ', '_')
for col in df.columns.values.tolist()},
inplace=do_inplace)
else:
return df.rename(columns=rename_dict, inplace=do_inplace) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def col_rename(col_dict, df_con_rename):\n\n for ex, nex in col_dict.items():\n df_con_rename = df_con_rename.withColumnRenamed(ex, nex)\n return df_con_rename",
"def lowercase_all_column_names(df:DataFrame)->DataFrame:\n for col in df.columns:\n df = df.withColumnRenamed(col, col.lower())\n return df",
"def rename_colnames(colnames):\n\n # Keys need to be lower case!\n lookup = {\"date\" : \"datumsec\",\n \"ddd\" : \"dd\",\n \"ffkmh\" : \"ff\",\n \"gustkmh\" : \"ffx\",\n \"p0hpa\" : \"psta\",\n \"pseahpa\" : \"pmsl\",\n \"ptnd\" : \"ptend\",\n \"nh\" : \"nh\",\n \"nt\" : \"nt\",\n \"n\" : \"n\",\n \"inso\" : \"sunday\",\n \"vis\" : \"vv\"}\n\n # Rename\n colnames = [x.lower() for x in colnames]\n import re\n for i in range(0, len(colnames)):\n for k in lookup.keys():\n if re.match(\"^{:s}$\".format(colnames[i].lower()), k):\n colnames[i] = lookup[k]\n\n return colnames",
"def standardize_columns(df):\n rename_pairs = [(from_col, to_col) for (from_col, to_col) in RENAME\n if from_col in df.columns]\n return df.rename(columns=dict(rename_pairs))",
"def _clean_up_table_column_names(loop_dict):\n \n # Make the column names all lowercase\n # and remove any underscores from the beginning\n for key in loop_dict.keys():\n rename_dict = { x:re.sub(r\"\"\"^_\"\"\", '', x.lower()) for x in loop_dict[key].columns }\n loop_dict[key].rename(columns=rename_dict, inplace=True)\n \n return loop_dict",
"def lowercase_columns(df):\n cols = list(df.columns)\n lower_cols = [col.lower() for col in cols]\n df.columns = lower_cols\n return df",
"def uppercase_all_column_names(df:DataFrame)->DataFrame:\n for col in df.columns:\n df = df.withColumnRenamed(col, col.upper())\n return df",
"def _rename_cols(df, prefix):\n df.columns = [\n ColNameFormatter.fmt(col_name)\n if col_name in NON_DUPLICATE_COLS\n else '{}{}'.format(prefix, col_name)\n for col_name in df.columns.values\n ]",
"def wrap_columns_name(self, format_string):\n self._data_frame = self._data_frame.rename(\n columns=lambda column: format_string.format(column)\n )",
"def _manage_cols(df, drop_list=[], name_dict={}):\n\n for colname in drop_list:\n if colname not in df:\n raise ValueError(f\"Can't drop column '{colname}' - '{colname}' does not exist in dataframe\")\n for colname in list(name_dict.keys()):\n if colname not in df:\n raise ValueError(f\"Can't rename '{colname}' to '{name_dict[colname]}' - '{colname}' does not exist in dataframe\")\n if colname in drop_list:\n raise ValueError(f\"Can't rename '{colname}' to '{name_dict[colname]}' - '{colname}' in drop_list\")\n\n column_names = np.setdiff1d(list(df.columns), list(name_dict.keys()))\n lower_columns = [name.lower().replace(' ','').replace('_','') for name in column_names]\n for i in range(len(column_names)):\n name_dict[column_names[i]] = lower_columns[i]\n \n df = df.drop(drop_list, axis=1)\n df = df.rename(columns=name_dict)\n \n return df",
"def _get_col_rename(df, dftype):\n \n # Build a dictionary of column renamings for use in pandas rename function\n renamed_columns = {}\n column_names = list(df.columns)\n lower_columns = [name.lower().replace(' ','').replace('_','') for name in column_names]\n for i in range(len(column_names)):\n renamed_columns[column_names[i]] = lower_columns[i]\n\n if dftype == 'csv':\n # build csv rename dictionary\n renamed_columns['museumcatno'] = 'museumcatnumber'\n renamed_columns['huc8number'] = 'huc8'\n elif dftype == 'api':\n # build api rename dictionary\n renamed_columns['key'] = 'specimennumber'\n renamed_columns['decimallatitude'] = 'latitude'\n renamed_columns['decimallongitude'] = 'longitude'\n renamed_columns['latlongsource'] = 'source'\n renamed_columns['latlongaccuracy'] = 'accuracy'\n else:\n raise ValueError(f\"Dataframe type '{dftype}' invalid - Accepted inputs are 'csv' or 'api'\")\n\n return renamed_columns",
"def rename_columns(df, prefix='x'):\n df = df.copy()\n df.columns = [prefix + str(i) for i in df.columns]\n return df",
"def clean_headers(df):\n filtered_headers = [header.replace(\"'\",'').replace(' ', '').replace('(', '').replace(')', '').replace('.', '').replace('[', '').replace(']', '') for header in df.columns]\n map_to_new_headers = {}\n for i in range(len(df.columns)):\n map_to_new_headers[df.columns[i]] = filtered_headers[i]\n\n return df.rename(columns = map_to_new_headers)",
"def substitute_names(df):\n\n masking_tag = '_sql'\n duplicated_names = ['SwitchName', 'Fabric_Name', 'SwitchMode', 'Memory_Usage', 'Flash_Usage', 'Speed']\n replace_dct = {orig_name + masking_tag: orig_name for orig_name in duplicated_names}\n df.rename(columns=replace_dct, inplace=True)",
"def rename_columns(dataframe,new_prefix='pca_',old_colomn_starting_index=2,new_column_starting_index=1):\n old_column_index = old_colomn_starting_index\n new_column_index = new_column_starting_index\n for i in range(0,n_comp):\n if column_name:\n dataframe = dataframe.withColumnRenamed('_'+str(old_colomn_starting_index),column_name+'_'+new_prefix+str(new_column_starting_index))\n else:\n dataframe = dataframe.withColumnRenamed('_'+str(old_colomn_starting_index),new_prefix+str(new_column_starting_index))\n old_colomn_starting_index+=1\n new_column_starting_index+=1\n return dataframe",
"def rename_id_col(df: pd.DataFrame):\r\n for col in df.columns:\r\n if \"id\" in col:\r\n df.rename(columns={col: col.replace(\"-\", \"_\")}, inplace=True)\r\n return df",
"def change_col_prefix(df, old_prefix, new_prefix ):\n op_regex = old_prefix + '.+'\n op_cols = list(df.filter(regex=op_regex).columns)\n np_cols = [col.replace(old_prefix,new_prefix) for col in op_cols]\n rename_map = {x[0]:x[1] for x in zip(op_cols, np_cols)}\n return df.rename(columns=rename_map)",
"def rename_columns(columns, mapper, keep_original):\n for name, rename in mapper.items():\n if name in columns:\n columns[rename] = org_copy.deepcopy(columns[name])\n if 'parent' in columns[name]:\n parents = columns[name]['parent']\n else:\n parents = {}\n if not keep_original: del columns[name]\n columns[rename]['name'] = rename\n for parent_name, parent_spec in list(parents.items()):\n new_parent_map = {}\n if parent_name in mapper:\n new_name = mapper[parent_name]\n new_parent_map[new_name] = parent_spec\n columns[rename]['parent'] = new_parent_map\n if columns[rename].get('values'):\n values = columns[rename]['values']\n if isinstance(values, str):\n if values in mapper:\n columns[rename]['values'] = mapper[values]",
"def parse_column_names(df):\n cols = set(df.columns.tolist())\n if \"StreamID\" in cols:\n df.rename(columns={\"StreamID\": \"stream_id\"}, inplace=True)\n if \"TimesViewed\" in cols:\n df.rename(columns={\"TimesViewed\": \"times_viewed\"}, inplace=True)\n if \"total_price\" in cols:\n df.rename(columns={\"total_price\": \"price\"}, inplace=True)\n\n return df",
"def rename_bar_cols(df: pd.DataFrame) -> pd.DataFrame:\n if set(df.columns) == REQUIRED_COLS:\n return df\n\n return df.rename(columns={\n 'Date': DATE_COL,\n 'Open': OPEN_COL,\n 'High': HIGH_COL,\n 'Low': LOW_COL,\n 'Close': CLOSE_COL,\n 'Adj Close': ADJ_CLOSE_COL,\n 'Volume': VOL_COL\n })",
"def city_rename(df, target=None):\n if not target:\n target = ['city']\n for col in target:\n df[col] = df[col].apply(\n lambda text: col + '-' + str(text).replace(' ', '_'))\n return None",
"def test_rename_columns(dupcols):\n # Rename the first column\n d1 = rename(dupcols, columns='Name', names='Person')\n assert d1.columns[0] == 'Person'\n assert dupcols.columns[0] == 'Name'\n assert d1.columns[1] == 'A'\n assert d1.columns[2] == 'A'\n for col in d1.columns:\n assert isinstance(col, Column)\n assert d1.shape == (7, 3)\n # Rename the first column and the second column\n d1 = rename(dupcols, columns=['Name', 'A'], names=['Person', 'Col2'])\n assert d1.columns[0] == 'Person'\n assert d1.columns[1] == 'Col2'\n assert d1.columns[2] == 'A'\n for col in d1.columns:\n assert isinstance(col, Column)\n assert d1.shape == (7, 3)\n # Rename the first column and the last column\n d1 = rename(dupcols, columns=['Name', 2], names=['Person', 'Col2'])\n assert d1.columns[0] == 'Person'\n assert d1.columns[1] == 'A'\n assert d1.columns[2] == 'Col2'\n for col in d1.columns:\n assert isinstance(col, Column)\n assert d1.shape == (7, 3)",
"def regulate_column_names(df, test_type):\n # No regulation needed for covid_ag test data\n if test_type == \"covid_ag\":\n return df\n\n if \"AnalyteResult1\" in df.keys():\n df = df.rename({\"AnalyteResult1\": \"FluA\",\n \"AnalyteResult2\": \"FluB\"}, axis=1)\n elif \"Result1\" in df.keys():\n df = df.rename({\"Result1\": \"FluA\", \"Result2\": \"FluB\"}, axis=1)\n if \"Zip\" not in df.keys():\n df = df.rename({\"ZipCode\": \"Zip\"}, axis=1)\n return df",
"def colset(df, cols_dic):\n return df[list(cols_dic)].rename(columns=cols_dic)",
"def rename_cyano_columns(df): \n cols = list(df.columns)\n for i, col in enumerate(df.columns):\n if col.lower().find(\"pro\") != -1 and col.lower().find(\"abun\") != -1: # prochlorococcus abundance\n cols[i] = PROC\n elif col.lower().find(\"syn\") != -1 and col.lower().find(\"abun\") != -1: # synechococcus abundance\n cols[i] = SYNC\n elif col.lower().find(\"pico\") != -1 and col.lower().find(\"abun\") != -1: # picoeukaryote abundance\n cols[i] = PICO\n df.columns = cols \n return df.columns",
"def rename_columns(self, rename_map):\n\n def rename(event):\n \"\"\"renaming mapper function.\"\"\"\n\n def renamed_dict(event):\n \"\"\"Handle renaming the columns in the data regardless\n of event type.\"\"\"\n\n new_dict = thaw(event.data())\n\n for old, new in list(rename_map.items()):\n new_dict[new] = new_dict.pop(old)\n\n return new_dict\n\n renamed_data = renamed_dict(event)\n\n # reassemble as per apropos for the event type\n # with the newly renamed data payload\n\n if isinstance(event, Event):\n return Event(event.timestamp(), renamed_data)\n elif isinstance(event, TimeRangeEvent):\n return TimeRangeEvent(\n (event.begin(), event.end()),\n renamed_data\n )\n elif isinstance(event, IndexedEvent):\n return IndexedEvent(event.index(), renamed_data)\n\n # an else isn't possible since Collection sanitizes\n # the input.\n\n return self.map(rename)",
"def _rearrange_columns(self, df):\n if self.all_columns is None:\n content_columns = [c for c in df.columns if not c.startswith(\"_\")]\n indicator_columns = [\"__in_{}\".format(t) for t in self.table_names\n ] if self.add_full_join_indicators else []\n fanout_columns = _get_fanout_columns(\n self.table_info) if self.add_full_join_fanouts else []\n self.all_columns = content_columns + indicator_columns + fanout_columns\n df = df[self.all_columns]\n if not self.disambiguate_column_names:\n df.columns = [\n c if c.startswith(\"_\") else c.split(\":\")[1] for c in df.columns\n ]\n return df",
"def normalize_columns(df, colnames):\r\n for col in colnames:\r\n s = df[col]\r\n df[col] = s.sub(s.min()).div((s.max() - s.min()))\r\n print(f'''Normalized Columns: {colnames}''')\r\n\r\n return df",
"def _remap_column_names(self, frame):\n\n frame[TransactionColumns.BANK.name] = self.INSTITUTION\n frame[TransactionColumns.ACCOUNT.name] = self.account\n frame.rename(columns=self._FIELD_2_TRANSACTION, inplace=True)\n frame[TransactionColumns.CHECK_NO.name] = None\n return frame",
"def rename_table_columns(\n self, table: Table, names: List[Union[str, None]], strict: bool = False\n ):\n self._requires_table(table)\n before = table.columns\n\n if strict and len(before) != len(names):\n raise ValueError(\"Column lengths do not match\")\n\n after = []\n for old, new in zip_longest(before, names):\n if old is None:\n break\n elif new is None:\n after.append(old)\n else:\n after.append(new)\n\n table.columns = after"
] | [
"0.73472214",
"0.71769214",
"0.712079",
"0.7030749",
"0.6953978",
"0.6815323",
"0.6753903",
"0.6625544",
"0.6609549",
"0.6517022",
"0.6508362",
"0.64940655",
"0.6339",
"0.62789905",
"0.6255128",
"0.6241007",
"0.6222985",
"0.6205978",
"0.6147987",
"0.61346877",
"0.6118897",
"0.609892",
"0.60508287",
"0.60417074",
"0.5990375",
"0.59108675",
"0.59100896",
"0.5882789",
"0.58454275",
"0.582576"
] | 0.82928956 | 0 |
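A minimal usage sketch of the column-renaming helper above. The function is restated so the example runs on its own; the sample DataFrame and its column names are invented for illustration.

import pandas as pd

def cleanup_column_names(df, rename_dict={}, do_inplace=True):
    # Without an explicit mapping, lower-case the names and replace
    # spaces with underscores (snake case); otherwise apply the mapping.
    if not rename_dict:
        return df.rename(columns={col: col.lower().replace(' ', '_')
                                  for col in df.columns.values.tolist()},
                         inplace=do_inplace)
    else:
        return df.rename(columns=rename_dict, inplace=do_inplace)

df = pd.DataFrame({'First Name': ['Ada'], 'Last Name': ['Lovelace']})
cleanup_column_names(df)                       # snake-cases in place
print(list(df.columns))                        # ['first_name', 'last_name']
cleanup_column_names(df, {'first_name': 'given_name'})
print(list(df.columns))                        # ['given_name', 'last_name']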
This function should be overridden in the derived classes and return a more-or-less successful guess about the calling convention | def guess_calling_convention(self):
return calldef_types.CALLING_CONVENTION_TYPES.UNKNOWN | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def guess_caller(vr):\n if \"source\" in vr.metadata and len(vr.metadata[\"source\"]) == 1:\n # Callers that follow the VCF spec: FreeBayes, pindel\n caller = vr.metadata[\"source\"][0].split(None, 1)[0]\n elif \"GATKCommandLine.MuTect\" in vr.metadata:\n # GATK/SATK 3.4+\n caller = \"MuTect\"\n elif \"GATKCommandLine.HaplotypeCaller\" in vr.metadata:\n caller = \"HaplotypeCaller\"\n elif \"GATKCommandLine.UnifiedGenotyper\" in vr.metadata:\n caller = \"UnifiedGenotyper\"\n elif \"GATKCommandLine\" not in vr.metadata:\n raise ValueError(\"Bad VCF header missing caller info:\\n%s\"\n % vr.metadata)\n else:\n if len(vr.metadata[\"GATKCommandLine\"]) == 2:\n # It's \"private\" to UG vs. HC, via vcf_comp\n caller = \"UnifiedGenotyper\"\n else:\n # GATK tools don't follow the spec\n gatk_info = vr.metadata[\"GATKCommandLine\"]\n assert len(gatk_info) == 1\n ##GATKCommandLine=<ID=UnifiedGenotyper,CommandLineOptions=\"...\n caller = gatk_info[0][\"ID\"]\n return caller",
"def who_is_calling():\n return sys._getframe(2).f_code.co_name",
"def _lookup_method(self, call):\n raise Exception(\"_lookup_method must be implemented by subclasses.\")",
"def test_010(self):\n caller = self.get_caller([SingleMethod])\n self.assertEqual(\"I have very little to say.\", caller())",
"def guess_version(self):\n\t\ttry:\n\t\t\tself.hdf5file[\"/Analyses/Basecall_2D_%03d/BaseCalled_template\" % (self.group)]\n\t\t\treturn 'classic'\n\t\texcept KeyError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tself.hdf5file[\"/Analyses/Basecall_1D_%03d/BaseCalled_template\" % (self.group)]\n\t\t\treturn 'metrichor1.16'\n\t\texcept KeyError:\n\t\t\tpass\n\n\t\t# less likely\n try:\n self.hdf5file[\"/Analyses/Basecall_RNN_1D_%03d/BaseCalled_template\" % (self.group)]\n return 'r9rnn'\n except KeyError:\n pass\n\n\t\treturn 'prebasecalled'",
"def __call__(fun_name):",
"def identify_method(self, func):",
"def find_actual_caller(self):\n\n # Gleaned from code in the logging module itself...\n try:\n f = sys._getframe(1)\n ##f = inspect.currentframe(1)\n except Exception:\n f = None\n # On some versions of IronPython, currentframe() returns None if\n # IronPython isn't run with -X:Frames.\n if f is not None:\n f = f.f_back\n rv = \"(unknown module)\", \"(unknown file)\", 0, \"(unknown function)\"\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n mod = inspect.getmodule(f)\n\n if mod is None:\n modname = '__main__'\n else:\n modname = mod.__name__\n\n if modname == __name__:\n # Crawl back until the first frame outside of this module\n f = f.f_back\n continue\n\n rv = (modname, filename, f.f_lineno, co.co_name)\n break\n return rv",
"def __call__(self, *args, **kwargs): # real signature unknown\n pass",
"def findCaller(cls):\n f = currentframe()\n # On some versions of IronPython, currentframe() returns None if\n # IronPython isn't run with -X:Frames.\n if f is not None:\n f = f.f_back\n rv = \"(unknown file)\", 0, \"(unknown function)\"\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n if filename == _srcfile:\n f = f.f_back\n continue\n rv = (co.co_filename, f.f_lineno, co.co_name)\n break\n return rv",
"def findCallerPatch():\n f = currentframe()\n #On some versions of IronPython, currentframe() returns None if\n #IronPython isn't run with -X:Frames.\n if f is not None:\n f = f.f_back\n rv = \"(unknown file)\", 0, \"(unknown function)\"\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n if filename == _srcfile:\n f = f.f_back\n continue\n rv = (filename, f.f_lineno, co.co_name)\n break\n return rv",
"def test_no_matching_method(self):\n self.cook_obj.prepare_chapati(4)\n self.assertEquals(sys.stdout.getvalue().strip(), \"4 chapatis ready\")",
"def cmdfile_paradigm(self) -> str:\n raise NotImplementedError",
"def gettrace(): # real signature unknown; restored from __doc__\n pass",
"def isCall(self) -> bool:\n ...",
"def __call__():",
"def __call__():",
"def __call__():",
"def __call__():",
"def __call__():",
"def exc_info(): # real signature unknown; restored from __doc__\n pass",
"def _call_method(self, call, method):\n raise Exception(\"_call_method must be implemented by subclasses.\")",
"def get_type(args_str, entry_type):\r\n # The C-method-implementations accept self as the first argument,\r\n # so a one-argument method will be invoked with zero arguments in Python.\r\n no_args = 1 if entry_type == \"method\" else 0\r\n return (\"METH_NOARGS\" if len(args_str.split(\",\")) == no_args\r\n else \"METH_VARARGS\")",
"def can_set_native_method_prefix(self):\r\n return self._can_set_native_method_prefix",
"def excepthook(exctype, value, traceback): # real signature unknown; restored from __doc__\n pass",
"def _is_call(self, words):\n if words[0] == 'call':\n if len(words) != 3:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_CALL command.\".format(self._file_line))\n return True\n else:\n return False",
"def callersName():\r\n import sys\r\n return sys._getframe(2).f_code.co_name",
"def get_code(cls, *args, **kwargs): # real signature unknown\n pass",
"def get_code(cls, *args, **kwargs): # real signature unknown\n pass",
"def get_code(cls, *args, **kwargs): # real signature unknown\n pass"
] | [
"0.6016491",
"0.5614761",
"0.5589595",
"0.53914446",
"0.5228716",
"0.52274674",
"0.5044258",
"0.50382453",
"0.5025339",
"0.50055414",
"0.50054145",
"0.49566883",
"0.49546763",
"0.49457833",
"0.49422875",
"0.49309742",
"0.49309742",
"0.49309742",
"0.49309742",
"0.49309742",
"0.49254405",
"0.4915336",
"0.4881322",
"0.4880926",
"0.48780486",
"0.48761594",
"0.4852739",
"0.48449135",
"0.48449135",
"0.48449135"
] | 0.7013889 | 0 |
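A bare-bones sketch of the override pattern the docstring above describes: the base class returns an "unknown" default, and a derived class uses extra knowledge to refine the guess. The class names and calling conventions below are illustrative stand-ins, not taken from any particular binding library.

class calldef_t:
    def guess_calling_convention(self):
        # Base-class fallback: no better information is available here.
        return 'unknown'

class member_function_t(calldef_t):
    def __init__(self, has_ellipsis=False):
        self.has_ellipsis = has_ellipsis

    def guess_calling_convention(self):
        # A derived class can make a better guess; for example, variadic
        # member functions cannot use __thiscall on MSVC.
        return '__cdecl' if self.has_ellipsis else '__thiscall'

print(member_function_t().guess_calling_convention())       # __thiscall
print(member_function_t(True).guess_calling_convention())   # __cdecl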
list of class/class declaration types, extracted from the operator arguments | def class_types(self):
if None is self.__class_types:
self.__class_types = []
for type_ in self.argument_types:
decl = None
type_ = type_traits.remove_reference(type_)
if type_traits_classes.is_class(type_):
decl = type_traits_classes.class_traits.get_declaration(
type_)
elif type_traits_classes.is_class_declaration(type_):
tt = type_traits_classes.class_declaration_traits
decl = tt.get_declaration(type_)
else:
pass
if decl:
self.__class_types.append(decl)
return self.__class_types | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_types(*args, **kwargs) -> list:\n arg_types = []\n for arg in args:\n arg_types.append(type(arg))\n for values in kwargs.values():\n arg_types.append(type(values))\n return arg_types",
"def signature(cls) -> List[Term]:\n el = []\n for term in cls.__dict__.values():\n if not isinstance(term, (Constant, Function)):\n continue\n el.append(deepcopy(term))\n return el",
"def get_init_arguments_and_types(cls) -> List[Tuple[str, Tuple, Any]]:\n return get_init_arguments_and_types(cls)",
"def types(self) -> list:\n if self._types is None:\n fdist = self.fdist # ranked order\n types_ = list(fdist.type.values)\n self._types = types_\n return self._types",
"def GetParsedTypes(cls):\n return cls._parser_clases.keys()",
"def get_types(func):\n return _get_types(func, util.is_classmethod(func), util.is_method(func))",
"def get_check_types():",
"def get_metacls(self):\n return type",
"def TypeSpecs(self) -> Dict[str, tf.TypeSpec]:\n return self._type_specs",
"def ntypes(self): # -> list[str]:\n ...",
"def getClassification(*args, satisfies: AnyStr=\"\", **kwargs)->List[AnyStr]:\n pass",
"def etypes(self): # -> list[None]:\n ...",
"def ntypes(self): # -> list[None]:\n ...",
"def etypes(self): # -> list[str]:\n ...",
"def types(self) -> List[str]:\n return self._types",
"def get_init_arguments_and_types(cls) -> List[Tuple[str, Tuple, Any]]:\n trainer_default_params = inspect.signature(cls).parameters\n name_type_default = []\n for arg in trainer_default_params:\n arg_type = trainer_default_params[arg].annotation\n arg_default = trainer_default_params[arg].default\n try:\n arg_types = tuple(arg_type.__args__)\n except AttributeError:\n arg_types = (arg_type,)\n\n name_type_default.append((arg, arg_types, arg_default))\n\n return name_type_default",
"def classes(self) -> Tuple[Type, ...]:\n self._deprecation()\n return tuple(self.values())",
"def terminal_types(self):\n return (self,)",
"def _f_in_parameters(self) -> List[Tuple[str, str]]:\n result = list() # type: List[Tuple[str, str]]\n for param in self.params:\n type_list = param.f_type()\n for type_name, postfix in type_list:\n result.append((type_name, param.name + postfix))\n return result",
"def get_op_types(self):\n return self.cur_config['ops']",
"def parameterTypes(self, p_int): # real signature unknown; restored from __doc__\n return []",
"def gettypes(self):\n return [str(self.sd.xlate(t[0])) for t in self.sd.types]",
"def input_types(self) -> List[Union[DataType, np.dtype]]:\n return [x.type for x in self.inputs]",
"def get_type_list(cls):\n\n from pygments.lexers import get_all_lexers\n return [(name, aliases[0]) for name, aliases, filetypes, mimetypes in get_all_lexers()]",
"def type(cls):",
"def get_list(self):\n return self._FF_TYPES",
"def get_list(self):\n return self._FF_TYPES",
"def classes(self):\n if self.classname:\n return [self.classname]\n return []",
"def argument_types(self):\r\n class ArgumentsIterator(collections.Sequence):\r\n def __init__(self, parent):\r\n self.parent = parent\r\n self.length = None\r\n\r\n def __len__(self):\r\n if self.length is None:\r\n self.length = conf.lib.clang_getNumArgTypes(self.parent)\r\n\r\n return self.length\r\n\r\n def __getitem__(self, key):\r\n # FIXME Support slice objects.\r\n if not isinstance(key, int):\r\n raise TypeError(\"Must supply a non-negative int.\")\r\n\r\n if key < 0:\r\n raise IndexError(\"Only non-negative indexes are accepted.\")\r\n\r\n if key >= len(self):\r\n raise IndexError(\"Index greater than container length: \"\r\n \"%d > %d\" % ( key, len(self) ))\r\n\r\n result = conf.lib.clang_getArgType(self.parent, key)\r\n if result.kind == TypeKind.INVALID:\r\n raise IndexError(\"Argument could not be retrieved.\")\r\n\r\n return result\r\n\r\n assert self.kind == TypeKind.FUNCTIONPROTO\r\n return ArgumentsIterator(self)",
"def get_types(self) :\n\n return list(self.types)[1:]"
] | [
"0.6474247",
"0.6150577",
"0.61504894",
"0.61134005",
"0.60474235",
"0.604117",
"0.6033915",
"0.59675",
"0.5876862",
"0.5862628",
"0.581305",
"0.5805958",
"0.58015627",
"0.57808363",
"0.5776354",
"0.5773225",
"0.5770651",
"0.577052",
"0.57593215",
"0.5735517",
"0.5732222",
"0.57174015",
"0.5684121",
"0.5658731",
"0.5619323",
"0.5598936",
"0.5598936",
"0.55985975",
"0.5579677",
"0.55758935"
] | 0.69868934 | 0 |
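The property above follows a compute-once-and-cache pattern: a private attribute starts out as None, is filled on first access, and is returned as-is afterwards. A stripped-down, library-free illustration of that same pattern is sketched here; the string-based "types" and the reference-stripping are invented for the example and stand in for the real type-traits calls.

class Operator:
    def __init__(self, argument_types):
        self.argument_types = argument_types
        self.__class_types = None

    @property
    def class_types(self):
        if self.__class_types is None:
            # Populate the cache exactly once.
            self.__class_types = []
            for type_ in self.argument_types:
                # Strip a trailing '&' the way a remove_reference() would.
                type_ = type_.rstrip('&').strip()
                if type_.startswith('class '):
                    self.__class_types.append(type_[len('class '):])
        return self.__class_types

op = Operator(['class Foo &', 'int', 'class Bar'])
print(op.class_types)   # ['Foo', 'Bar']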
Iterate through the condensed FAQ entries to expand all of the keywords and answers | def parse_faq_entries(entries):
parsed_entries = {}
for entry in entries:
for keyword in entry["keywords"]:
if keyword not in parsed_entries:
parsed_entries[keyword] = entry["answer"]
else:
print("Error: Found duplicate keyword '{}' in pre-configured FAQ entries.".format(keyword))
exit(1)
return parsed_entries | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process(data_item, article_id):\n questions = []\n answers = []\n paragraph = [article_id, data_item['context']]\n\n for item in data_item['qas']:\n question = [item[\"id\"], item[\"question\"], item['is_impossible']]\n questions.append(question)\n if item['is_impossible']:\n continue\n answer_options = item[\"answers\"]\n answer_set = set()\n for option in answer_options:\n answer_tuple = (option['text'], option['answer_start'])\n answer_set.add(answer_tuple)\n for index, answer_tuple in enumerate(answer_set):\n answer = [\"{}_{}\".format(item[\"id\"], index+1), item[\"id\"], answer_tuple[0], answer_tuple[1]]\n answers.append(answer)\n return paragraph, questions, answers",
"async def faq(self, ctx):\n embed = discord.Embed(title='FAQ',\n color=self.bot.color)\n entries = {'How do I add this bot to my server?':\n 'Use `invite` or click the link in `help` (you must have Manage Server permissions).',\n 'Hey, can you add (some feature)?':\n 'Use `suggest`.',\n 'None of the commands are working!':\n 'The bot may be missing permissions or you may have been automatically blacklisted for spam. '\n 'If the problem persists, report it.',\n 'What character is that in the profile picture?':\n '[Shiro from Sewayaki Kitsune no Senko-san!](https://myanimelist.net/character/167062/Shiro)'}\n for name, value in entries.items():\n embed.add_field(name=name, value=value, inline=False)\n embed.set_footer(text='Have other questions? Join the support discord or PM me @Trackpad#1234.')\n\n await ctx.send(embed=embed)",
"def build_corpus_questions(criteria_incl_question=True, criteria_incl_snip=False, criteria_incl_long=False, level=0):\r\n\r\n\tprint('\\nbuilding questions and answers')\r\n\r\n\tif load_corpus_questions():\r\n\t\treturn\r\n\r\n\timport xml.etree.ElementTree as ET\r\n\r\n\tquestion_count = 0\r\n\tno_abstract_tag = 0\r\n\tno_abstract_file = 0\r\n\tlong_count = 0\r\n\t\r\n\tglobal search_criteria_dict, solution_dict, linked_abstracts_dict\r\n\t\r\n\tsearch_criteria_dict = collections.defaultdict(list)\r\n\tsolution_dict = collections.defaultdict(list)\r\n\tlinked_abstracts_dict = collections.defaultdict(list)\r\n\tcommon_map_dict = collections.defaultdict(list)\r\n\t\r\n\ttree = ET.parse(paths.path_data_questions)\r\n\troot = tree.getroot()\r\n\tfor record in root.findall('record'):\r\n\t\trecord_id = record.get('id')\r\n\t\tquestion_text = preprocess_document(record.find('question').text,True)\r\n\r\n\t\tif level == 0:\r\n\t\t\tkey = record_id # key\r\n\t\t\r\n\t\tanswer = record.find('answer')\r\n\t\tif answer is not None:\r\n\t\t\tfor s in answer.findall('snip'):\r\n\t\t\t\tif s is not None:\r\n\t\t\t\t\tsnip_id = s.get('id')\r\n\t\t\t\t\tsnip_text = preprocess_document(s.find('sniptext').text,True)\r\n\t\t\t\t\t\r\n\t\t\t\t\tif level == 1:\r\n\t\t\t\t\t\tkey = record_id + '_' + snip_id # key\r\n\t\t\t\t\t\r\n\t\t\t\t\tfor i,l in enumerate(s.findall('long')):\r\n\t\t\t\t\t\tif l is not None:\r\n\t\t\t\t\t\t\tlong_id = l.get('id')\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tif level == 2:\r\n\t\t\t\t\t\t\t\tkey = record_id + '_' + snip_id + '_' + long_id # key\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tif criteria_incl_question:\r\n\t\t\t\t\t\t\t\tfor x in question_text:\r\n\t\t\t\t\t\t\t\t\tsearch_criteria_dict[key].append(x) # question\r\n\t\t\t\t\t\t\tif criteria_incl_snip:\r\n\t\t\t\t\t\t\t\tfor x in snip_text:\r\n\t\t\t\t\t\t\t\t\tsearch_criteria_dict[key].append(x) # snip\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tlong_text = l.find('longtext')\r\n\t\t\t\t\t\t\tif long_text is not None:\r\n\t\t\t\t\t\t\t\tlong_text = preprocess_document(long_text.text,True)\r\n\t\t\t\t\t\t\t\tfor x in long_text:\r\n\t\t\t\t\t\t\t\t\tsolution_dict[key].append(x) # long - answer\r\n\t\t\t\t\t\t\t\tif criteria_incl_long:\r\n\t\t\t\t\t\t\t\t\tfor x in long_text:\r\n\t\t\t\t\t\t\t\t\t\tsearch_criteria_dict[key].append(x) # long - search\r\n\r\n\t\t\t\t\t\t\tif key not in search_criteria_dict.keys():\r\n\t\t\t\t\t\t\t\tsearch_criteria_dict[key].append('')\r\n\r\n\t\t\t\t\t\t\tlong_refs = l.findall('ref')\r\n\t\t\t\t\t\t\tfor long_ref in long_refs:\r\n\t\t\t\t\t\t\t\tabstract = long_ref.get('abstract')[10:]\r\n\t\t\t\t\t\t\t\tabstract_path = paths.path_data_abstracts + '/' + abstract\r\n\t\t\t\t\t\t\t\tabstract_sentences = abstracts_dict[abstract]\r\n\t\t\t\t\t\t\t\tlinked_abstracts_dict[key].append(abstract) # linked abstracts\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\tlong_count += 1\r\n\t\t\t\t\t\t\t\t\r\n\t\tquestion_count += 1\r\n\t\t# print(str(question_count) + ' : ' + str(question_text) + ' : ' + str(no_abstract_file) + ' : ' + str(no_abstract_tag) + ' : ' + str(long_count))\r\n\r\n\tpickle.dump(search_criteria_dict,open(paths.path_data_questions_pickle,\"wb\"))\r\n\tpickle.dump(solution_dict,open(paths.path_data_answers_pickle,\"wb\"))\r\n\tpickle.dump(linked_abstracts_dict,open(paths.path_data_linkedabstracts_pickle,\"wb\"))\r\n\t\r\n\tprint(len(search_criteria_dict))\r\n\tprint(len(solution_dict))\r\n\tprint(len(linked_abstracts_dict))\r\n\t\r\n\tprint('\\ncorpus build complete')",
"def google_qa_quick(questions, **kwargs):\n nlp = StanfordCoreNLP('stanford-corenlp-full-2018-10-05', lang='zh')\n all_summary = []\n print('start qa_quick')\n try:\n for index, question in enumerate(questions):\n print('Start dealing with question {}.'.format(index))\n all_summary.append(get_summaries(question, **kwargs))\n except:\n pass\n\n result = []\n answer_types = [('PERSON',), ('STATE_OR_PROVINCE', 'CITY'), ('DATE', 'TIME')]\n for question, summaries in zip(questions, all_summary):\n answer_scores = defaultdict(int)\n if question.startswith('谁') or question.endswith('谁'):\n answer_type = answer_types[0]\n max_ngram = 1\n elif '哪里' in question:\n answer_type = answer_types[1]\n max_ngram = 2\n else:\n answer_type = answer_types[2]\n max_ngram = 3\n for summary in summaries:\n for sentence in sentences(summary, nlp):\n for ngram in candidate_answers(sentence, question, answer_type, max_ngram):\n answer_scores[ngram] += ngram_score(\n ngram, 1)\n ngrams_with_scores = sorted(answer_scores.items(),\n key=lambda x: x[1],\n reverse=True)\n result.append([(\"\".join(ngram), score)\n for (ngram, score) in ngrams_with_scores])\n return result",
"def find_answers(soup, answer_counter, question_counter, url, columns):\n dictionaries = []\n divs = soup.find_all(\"div\", class_=\"Answer AnswerBase\") # finds all the div tags with the answer class\n q_div = soup.find_all(\"h1\")\n for q in q_div:\n question_text = q.find(\"span\", class_=\"ui_qtext_rendered_qtext\")\n question_counter += 1\n for d in divs:\n answers = d.find_all(\"p\") # within the div tags finds all the paragraph tags so answers can be kept together\n answer_counter += 1\n all_mispelled = set()\n length = 0\n with open(str(answer_counter) + '_Experiences in life_' + str(question_counter) + \".txt\", \"w+\") as f:\n for a in answers:\n if filter_url_or_answer(a.text):\n break\n f.write(a.text) # writes each answer in a separate text file\n f.write(\"\\n\")\n mispelled, line_length = check_spelling(a.text)\n length += line_length\n all_mispelled.update(set(mispelled))\n dictionary = make_dictionary(answer_counter, question_counter, url, columns, list(all_mispelled), question_text.text, length)\n dictionaries.append(dictionary)\n return answer_counter, question_counter, dictionaries",
"def faq():\n return render_template('faq.html',\n title='FAQ and stuff about Data and maps')",
"def entity_link_nq(nq_data):\n fp = tf.gfile.Open(ARGS.fb2wiki, \"r\")\n fb2wiki = json.load(fp)\n for i in nq_data.keys():\n tokens = nq_data[i][\"document_tokens\"]\n if ARGS.annotate_candidates:\n for idx, la_cand in enumerate(nq_data[i][\"long_answer_candidates\"]):\n answer, answer_map, entities, entity_map = extract_and_link_text(la_cand, tokens, fb2wiki)\n if answer:\n # nq_data[i][\"long_answer_candidates\"][idx][\"text_answer\"] = answer\n # nq_data[i][\"long_answer_candidates\"][idx][\"answer_map\"] = answer_map\n nq_data[i][\"long_answer_candidates\"][idx][\"google_entity_map\"] = entity_map\n if ARGS.annotate_short_answers:\n for idx, ann in enumerate(nq_data[i][\"annotations\"]):\n short_ans = ann[\"short_answers\"]\n if not short_ans:\n continue\n for sid in range(len(short_ans)):\n ans = short_ans[sid]\n answer, answer_map, entities, entity_map = extract_and_link_text(ans, tokens, fb2wiki)\n if answer:\n # nq_data[i][\"annotations\"][idx][\"short_answers\"][sid][\n # \"text_answer\"] = answer\n # nq_data[i][\"annotations\"][idx][\"short_answers\"][sid][\n # \"answer_map\"] = answer_map\n nq_data[i][\"annotations\"][idx][\"short_answers\"][sid][\n \"google_entity_map\"] = entity_map\n if ARGS.annotate_long_answers:\n for idx, ann in enumerate(nq_data[i][\"annotations\"]):\n long_ans = ann[\"long_answer\"]\n answer, answer_map, entities, entity_map = extract_and_link_text(long_ans, tokens, fb2wiki)\n if answer:\n # nq_data[i][\"annotations\"][idx][\"long_answer\"][\"text_answer\"] = answer\n # nq_data[i][\"annotations\"][idx][\"long_answer\"][\n # \"google_answer_map\"] = answer_map\n nq_data[i][\"annotations\"][idx][\"long_answer\"][\n \"google_entity_map\"] = entity_map\n if ARGS.annotate_question:\n print(i, nq_data[i][\"question_text\"])\n question_text = str(nq_data[i][\"question_text\"].encode('utf-8'))\n entities, entity_map = sample_analyze_entities(question_text, fb2wiki)\n nq_data[i]['google_question_entity_map'] = entity_map\n time.sleep(3)\n return nq_data",
"def read_squad_examples(input_file, return_answers, context_only=False, question_only=False,\n draft=False, draft_num_examples=12, append_title=False):\n with open(input_file, \"r\") as reader:\n input_data = json.load(reader)[\"data\"]\n\n examples = []\n ans_cnt = 0\n no_ans_cnt = 0\n\n # Only word-based tokenization is peformed (whitespace based)\n for doc_idx, entry in enumerate(input_data):\n title = entry['title'][0] if type(entry['title']) == list else entry['title']\n assert type(title) == str\n\n for par_idx, paragraph in enumerate(entry[\"paragraphs\"]):\n # Do not load context for question only\n if not question_only:\n paragraph_text = paragraph[\"context\"]\n title_offset = 0\n if append_title:\n title_str = '[ ' + ' '.join(title.split('_')) + ' ] '\n title_offset += len(title_str)\n paragraph_text = title_str + paragraph_text\n # Note that we use the term 'word' for whitespace based words, and 'token' for subtokens (for BERT input)\n doc_words, char_to_word_offset = context_to_words_and_offset(paragraph_text)\n\n # 1) Context only ends here\n if context_only:\n metadata = {}\n if \"pubmed_id\" in entry:\n entry_keys = [\n \"pubmed_id\", \"sha\", \"title_original\", \"title_entities\",\n \"journal\", \"authors\", \"article_idx\"\n ]\n para_keys = [\"context_entities\"]\n for entry_key in entry_keys:\n if entry_key in entry:\n metadata[entry_key] = entry[entry_key]\n for para_key in para_keys:\n if para_key in paragraph:\n metadata[para_key] = paragraph[para_key]\n # metadata[\"pubmed_id\"] = (metadata[\"pubmed_id\"] if not pd.isnull(metadata[\"pubmed_id\"])\n # else 'NaN')\n example = SquadExample(\n doc_words=doc_words,\n title=title,\n doc_idx=doc_idx,\n par_idx=par_idx,\n metadata=metadata)\n examples.append(example)\n\n if draft and len(examples) == draft_num_examples:\n return examples\n continue\n\n # 2) Question only or 3) context/question pair\n else:\n for qa in paragraph[\"qas\"]:\n qas_id = str(qa[\"id\"])\n question_text = qa[\"question\"]\n\n # Noisy question skipping\n if len(question_text.split(' ')) == 1:\n logger.info('Skipping a single word question: {}'.format(question_text))\n continue\n if \"I couldn't could up with another question.\" in question_text:\n logger.info('Skipping a strange question: {}'.format(question_text))\n continue\n\n start_position = None\n end_position = None\n orig_answer_text = None\n\n # For pre-processing that should return answers together\n if return_answers:\n assert type(qa[\"answers\"]) == dict or type(qa[\"answers\"]) == list, type(qa[\"answers\"])\n if type(qa[\"answers\"]) == dict:\n qa[\"answers\"] = [qa[\"answers\"]]\n\n # No answers\n if len(qa[\"answers\"]) == 0:\n orig_answer_text = \"\"\n start_position = -1 # Word-level no-answer => -1\n end_position = -1\n no_ans_cnt += 1\n # Answer exists\n else:\n answer = qa[\"answers\"][0]\n ans_cnt += 1\n\n orig_answer_text = answer[\"text\"]\n answer_offset = answer[\"answer_start\"] + title_offset\n answer_length = len(orig_answer_text)\n start_position = char_to_word_offset[answer_offset]\n end_position = char_to_word_offset[answer_offset + answer_length - 1]\n\n # Only add answers where the text can be exactly recovered from the context\n actual_text = \" \".join(doc_words[start_position:(end_position + 1)])\n cleaned_answer_text = \" \".join(\n tokenization.whitespace_tokenize(orig_answer_text)) # word based tokenization\n if actual_text.find(cleaned_answer_text) == -1:\n logger.warning(\"Could not find answer: '%s' vs. 
'%s'\",\n actual_text, cleaned_answer_text)\n continue\n\n # Question only ends here\n if question_only:\n example = SquadExample(\n qas_id=qas_id,\n question_text=question_text)\n\n # Context/question pair ends here\n else:\n example = SquadExample(\n qas_id=qas_id,\n question_text=question_text,\n paragraph_text=paragraph_text,\n doc_words=doc_words,\n orig_answer_text=orig_answer_text,\n start_position=start_position,\n end_position=end_position,\n title=title,\n doc_idx=doc_idx,\n par_idx=par_idx)\n examples.append(example)\n\n if draft and len(examples) == draft_num_examples:\n return examples\n\n # Testing for shuffled draft (should comment out above 'draft' if-else statements)\n if draft:\n random.shuffle(examples)\n logger.info(str(len(examples)) + ' were collected before draft for shuffling')\n return examples[:draft_num_examples]\n\n logger.info('Answer/no-answer stat: %d vs %d'%(ans_cnt, no_ans_cnt))\n return examples",
"def gen_questions(self, number_of_questions):",
"def extract_questions_from_text(self, text):\n questions = []\n\n for match in self.QUESTION_RE.finditer(text):\n match_dict = match.groupdict()\n\n answer_type = match_dict['answer_type']\n number1 = match_dict.pop('number1')\n\n if answer_type == 'O':\n if re.search('(?i)to ask the Deputy President', match_dict['intro']):\n match_dict['dp_number'] = number1\n elif re.search('(?i)to ask the President', match_dict['intro']):\n match_dict['president_number'] = number1\n else:\n match_dict['oral_number'] = number1\n elif answer_type == 'W':\n match_dict['written_number'] = number1\n\n match_dict['translated'] = bool(match_dict['translated'])\n match_dict['questionto'] = match_dict['questionto'].replace(':', '')\n match_dict['questionto'] = self.correct_minister_title(match_dict['questionto'])\n\n questions.append(match_dict)\n\n return questions",
"def quick_quiz(character_set):",
"def description_ques(analysis):\n if analysis.sv[0].vrb_tense.startswith('present'):\n analysis.sv[0].vrb_tense = 'present progressive'\n if analysis.sv[0].vrb_tense.startswith('past'):\n analysis.sv[0].vrb_tense = 'present progressive'\n sentence = y_o_question(analysis)\n for i in sentence:\n if i == 'liking':\n sentence[sentence.index(i)] = 'like'\n return ['what'] + sentence",
"def get_answers(self):\r\n anshtml = '<span class=\"openended-answer\"><pre><code>{0}</code></pre></span>'.format(self.answer)\r\n return {self.answer_id: anshtml}",
"def fill_question(self, response, question_answer):\n question_answer['source_url'] = response.url\n\n question_answer['question_title'] = response.xpath('//*[@id=\"question-header\"]/h1/a/text()').extract_first()\n question_answer['question_body'] = BeautifulSoup(\n response.xpath(self.gt.css_to_xpath('.postcell .post-text')).extract_first()).text\n question_answer['question_tags'] = list(set(\n response.xpath('//*[contains(concat(\" \", normalize-space(@class), \" \"), \" post-tag \")]/text()').extract()))\n # would like to specify the hierarchy of the css tags\n question_answer['question_upvotes'] = int(response.xpath(\n '//*[contains(concat(\" \", normalize-space(@class), \" \"), \" vote-count-post \")]/text()').extract_first())\n question_answer['question_view_count'] = int(\n response.xpath(self.gt.css_to_xpath('#qinfo .label-key') + '/b/text()').extract()[1].split(' ')[0])\n\n author_name = response.xpath(\n self.gt.css_to_xpath('.owner .user-details') + '/a/text()').extract_first()\n question_answer['question_author'] = {'author_id': '{}_{}'.format(self.allowed_domains[0], author_name),\n 'author_name': author_name}\n\n se_date_format = '%b %d \\'%y at %H:%M' # if date not current year\n se_date_format_curr_year = '%b %d at %H:%M' # if date current year\n try:\n try:\n question_answer['question_date'] = date_to_solr_format(datetime.strptime(response.xpath(\n self.gt.css_to_xpath('.owner .user-action-time .relativetime') + '/text()').extract_first(),\n se_date_format))\n except ValueError:\n question_answer['question_date'] = date_to_solr_format(datetime.strptime(response.xpath(\n self.gt.css_to_xpath('.owner .user-action-time .relativetime') + '/text()').extract_first(),\n se_date_format_curr_year))\n except (ValueError, TypeError):\n pass\n # Look for duplicates\n duplicate_url = response.xpath(self.gt.css_to_xpath('.question-originals-of-duplicate')+'/ul/li/a/@href').extract_first()\n if duplicate_url:\n print('duplicate question')\n self.duplicate_count += 1\n print('duplicate question count: {}'.format(self.duplicate_count))\n duplicate_url = \"https://superuser.com\" + duplicate_url\n print(duplicate_url)\n self.logger.info('duplicate url: {}'.format(duplicate_url))\n question_answer['question_original_url'] = duplicate_url\n self.duplicate_url = duplicate_url\n\n return question_answer",
"def update_questionnaire_answers(dataframe):\n # update collection with questionnaire answers\n for index, row in dataframe.iterrows():\n collection.update_one(\n {\n 'Codename': row.name\n },\n {\n '$set': {\n 'likeable': row['συμπαθές'],\n 'creative': row['δημιουργικό'],\n 'calm': row['ήρεμο/συναισθηματικά ισορροπημένο'],\n 'outgoing': row['εξωστρεφές'],\n 'post_cohesion': row['συνοχή'],\n 'self_centered': row['εγωκεντρικό / νάρκισσος'],\n 'short_tempered': row['ευέξαπτο'],\n 'professional': row['αξιόπιστο / σωστός επαγγελματίας'],\n 'fitness_advice': row['εμπιστευόμουν για θέματα fitness'],\n 'general_advice': row['εμπιστευόμουν για τομέα διαφορετικό'],\n 'follow_probability': row['follow'],\n }\n }\n )",
"def question(dico):\n l = []\n for i in range(len(dico)):\n l.append(dico[i][0])\n affichage_question(dico,l)",
"def _make_suggestions(self):\n\n #build concordance based on current approved\n concordance = dict()\n for term in self.tree.get_children('approved'):\n words = [word.strip(',.:;*').lower() \\\n for word in str(self.tree.item(term)['values'][0]).split(' ')]\n for word in words:\n# if word == 'ad':\n# messagebox.showwarning(\"word == 'ad'\",\"concordance={}\".format(concordance))\n# pass\n if word not in ['and', 'the', 'a', 'to', 'of'] \\\n and not word.isdigit():\n if word not in concordance:\n concordance[word] = set([term, ])\n else:\n concordance[word].add(term)\n# if word == 'ad':\n# messagebox.showwarning(\"word 'ad' added?\",\"concordance={}\".format(concordance))\n# pass\n \n \n #so concordance now holds a list of words in approved terms along with\\\n #list of index of terms() they occur in\n \n for term in self.tree.get_children('suggestions'):\n self._look_in_concordance(term, concordance)\n\n for term in self.tree.get_children('unknown'):\n self._look_in_concordance(term, concordance)\n\n self._collapse_all()",
"def substitutions(exam, email, show_all):\n original_exam = get_exam(exam=exam)\n exam = get_exam(exam=exam)\n exam = scramble(email, exam, keep_data=True)\n question_substitutions = get_all_substitutions(original_exam, exam)\n questions = extract_questions(exam)\n for question in questions:\n substitutions = question_substitutions[question[\"id\"]]\n if substitutions or show_all:\n print(get_name(question), substitutions)",
"def evaluate_questions(self):\n for question in self.question_list:\n question.evaluate_question()",
"def get_answers():\n count = 1\n for i in range(200): # TODO : Fetch number of all items first\n r = requests.get('http://api.stackexchange.com/2.2/answers?site=eosio&filter=!b1MMEb*6iF.PM5&pagesize=100&page={}'.format(count))\n data = json.loads(r.text)\n for item in data['items']:\n own = item['owner']['user_id']\n dsp = item['owner']['display_name']\n qn_id = item['question_id']\n try:\n owner = User.objects.get(username=own, se_display_name=dsp)\n question = Question.objects.get(se_question_id=qn_id)\n except Exception:\n owner = None\n question = None\n if owner and question:\n Answer.objects.create(owner=owner, question=question, body=item['body'],\n se_question_id=qn_id, is_accepted=item['is_accepted'],\n se_answer_id=item['answer_id'], score=item['score'])\n\n count += 1\n print(count)",
"def question_new_search():",
"def w_question(analysis):\n if analysis.sv:\n #Opinion is a what question so we have to make some changes\n if analysis.sv[0].vrb_main[0].endswith('like'):\n verb = analysis.sv[0].vrb_main[0]\n analysis.sv[0].vrb_main[0] = verb[:len(verb) - 4] + 'think+of'\n\n #processing as yes or no question\n phrase = y_o_question(analysis)\n\n #Specific processing for invitation\n if analysis.aim == 'invitation':\n return ['how', 'about'] + phrase[1:]\n\n #Specific processing for classification\n if analysis.aim.startswith('classification'):\n aim_question = other_functions.list_rebuilding(analysis.aim)\n return ['what', 'kind', 'of'] + aim_question[1:] + phrase\n\n #It is an how question\n if other_functions.is_an_adj(analysis.aim) == 1:\n return ['how'] + [analysis.aim] + phrase\n elif analysis.aim == 'manner':\n return ['how'] + phrase\n\n if analysis.aim == 'thing' or analysis.aim == 'situation' or analysis.aim == 'explication' or analysis.aim == 'opinion':\n return ['what'] + phrase\n return ['what'] + [analysis.aim] + phrase",
"def expand (keys):\r\n finalkeys = set()\r\n\r\n for key in keys:\r\n returnkeyset = set()\r\n\r\n if SLASH in key:\r\n has_tags = True\r\n tag_tail = key.split(SLASH)[1]\r\n key = key.split(SLASH)[0]\r\n else:\r\n has_tags = False\r\n tag_tail = EMPTYCHAR\r\n if ATSIGN in key or PERIOD not in key or PERIOD+BLANK in key or key[0].isnumeric():\r\n all_keys = [key]\r\n else:\r\n key_parts = key.split(PERIOD)\r\n if len(key_parts)==2:\r\n all_keys = [key_parts[1],\r\n key_parts[0]+BLANK+key_parts[1],\r\n key_parts[0][0]+BLANK+key_parts[1]]\r\n else:\r\n abbreviated = EMPTYCHAR\r\n for x in key_parts[0:-1]:\r\n abbreviated += x[0].upper()\r\n\r\n\r\n all_keys = [key_parts[-1],\r\n key_parts[0]+BLANK+key_parts[-1],\r\n BLANK.join(key_parts),\r\n abbreviated+BLANK+key_parts[-1]]\r\n for k in all_keys:\r\n returnkeyset.add(k+SLASH*has_tags+tag_tail)\r\n\r\n if len(returnkeyset) > 1:\r\n if input('ADD '+', '.join(returnkeyset)+' AS EQUIVALENCES?') in YESTERMS:\r\n\r\n display.noteprint(('ADDING EQUIVALENTS',', '.join(returnkeyset)))\r\n self.default_dict['equivalences'].new_class(list(returnkeyset))\r\n finalkeys.add(key.replace('.',' '))\r\n else:\r\n finalkeys.update(returnkeyset)\r\n else:\r\n finalkeys.update(returnkeyset)\r\n\r\n return finalkeys",
"def process_question(qu):\n\n ## global ranking\n rank_info = {}\n rank_info_k = [\"viewcount\",\"score\",\"favoritecount\"]\n for k in rank_info_k:\n rank_info[k] = int(qu[k])\n qu.pop(k,None)\n\n rank_info[\"creationdate\"] = qu[\"creationdate\"]\n\n if qu[\"acceptedanswer\"]:\n qu[\"acceptedanswer\"] = list(qu[\"acceptedanswer\"])\n else:\n qu[\"acceptedanswer\"] = []\n\n qu.pop('comments',None) # discard comments, maybe add back later\n qu[\"rank_info\"] = rank_info\n\n return qu",
"def convert_questions_to_features(examples, tokenizer, max_query_length=None):\n\n unique_id = 1000000000\n question_features = []\n\n for (example_index, example) in enumerate(tqdm(examples, desc='Converting questions')):\n\n query_tokens = tokenizer.tokenize(example.question_text)\n if max_query_length is None:\n max_query_length = len(query_tokens)\n if len(query_tokens) > max_query_length:\n query_tokens = query_tokens[0:max_query_length]\n\n for _ in enumerate(range(1)):\n tokens_ = []\n tokens_.append(\"[CLS]\")\n for token in query_tokens:\n tokens_.append(token)\n tokens_.append(\"[SEP]\")\n\n input_ids_ = tokenizer.convert_tokens_to_ids(tokens_)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask_ = [1] * len(input_ids_)\n\n # Zero-pad up to the sequence length.\n while len(input_ids_) < max_query_length + 2:\n input_ids_.append(0)\n input_mask_.append(0)\n\n assert len(input_ids_) == max_query_length + 2\n assert len(input_mask_) == max_query_length + 2\n\n if example_index < 1:\n # logger.info(\"*** Example ***\")\n # logger.info(\"unique_id: %s\" % (unique_id))\n # logger.info(\"example_index: %s\" % (example_index))\n logger.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in query_tokens]))\n # logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids_]))\n # logger.info(\n # \"input_mask: %s\" % \" \".join([str(x) for x in input_mask_]))\n\n question_features.append(\n QuestionFeatures(\n unique_id=unique_id,\n example_index=example_index,\n tokens_=tokens_,\n input_ids=input_ids_,\n input_mask=input_mask_))\n unique_id += 1\n\n return question_features",
"def test_tags_faq(self):\n\n po = self.catalog.load_pageobject('TagsPage')\n po.goto_page()\n start_url = po.current_url()\n\n # the link brings up a popup, so we need to get the\n # handle of that window and check the switch to it\n parent_h = self.browser._browser.current_window_handle\n\n # press the FAQ link\n self.browser.proxy_client.new_har(\"page\")\n po.goto_faq()\n\n # click on the link that opens a new window\n handles = self.browser._browser.window_handles\n handles.remove(parent_h)\n self.browser._browser.switch_to_window(handles.pop())\n\n # get the page load details of the window\n har_entry = self.browser.page_load_details()\n end_url = po.current_url()\n\n # switch back to the main window\n self.browser._browser.switch_to_window(parent_h)\n\n # check for errors\n assert har_entry is not None, \\\n \"failed to load the uri: %s. http archive unavailable.\" \\\n % (end_url)\n assert self.browser.error_loading_page(har_entry) is False, \\\n \"while on the tags page %s,\" % (start_url) \\\n + \" pressing the Tags FAQ link returned error\" \\\n + \" response code on page %s.\" % (end_url) \\\n + \" http archive follows:\\n%s\" % (pprint.pformat(har_entry))",
"def _process_row(self, question):\n\n # this returns a list, e.g. [('today', 'DATE'), ('Patrick', 'PERSON')]\n ent_lst = self.text_preprocessor.compute_ner(question.context)\n\n ent_dct = {}\n for ent_str, ner_category in ent_lst:\n # perform uncasing\n ent_dct[ent_str.lower()] = ner_category\n\n new_answers = []\n for ans_struct in question.answers:\n ans_text = ans_struct['text'].lower() # perform uncasing\n if ans_text not in ent_dct:\n continue\n\n ans_struct['ner_category'] = ent_dct[ans_text]\n new_answers.append(ans_struct)\n\n question.answers = new_answers\n return question",
"def __debug_print_questions__(self):\n for k in sorted(self.questions.keys()):\n print(\"Question: %s\" %k)\n for a in self.questions[k].answers:\n print(\"\\t%s\" % a)",
"def parse_question(question):\n\tcontext = question['support']\n\tanswer = question['correct_answer']\n\ttarget = question['question']\n\n\tcontext_words = context.split(\" \")[0: 510]\n\ttarget_words = target.split(\" \")\n\n\tpunc_filter = str.maketrans('', '', string.punctuation)\n\n\tcontext_words = [word.translate(punc_filter) for word in context_words]\n\ttarget_words = [word.translate(punc_filter) for word in target_words]\n\tanswer_words = [word.translate(punc_filter) for word in answer.split(\" \")]\n\n\tbio_embeddings = [EMBEDER['O']]\n\tinside_answer = False\n\tanswer_index = 0\n\tcan_be_inside_answer = True\n\n\t# The following loop and above code does:\n\t# -Find where the answer is and place a B tag\n\t# -While still in the answer (the answer is more than one word) put an I tag\n\t# -Outside of the answer place a O tag\n\t# -Start and end with an O tag for BERT's automatic\n\t# -start token and end token representing the start and end of a sentence.\n\tfor word in context_words:\n\t\tif word.lower() == answer_words[0].lower() and can_be_inside_answer:\n\t\t\tbio_embeddings.append(EMBEDER[\"B\"])\n\t\t\tanswer_index += 1\n\t\t\tinside_answer = True\n\t\t\tcan_be_inside_answer = False\n\t\telif inside_answer:\n\t\t\tif len(answer_words) > 1:\n\t\t\t\tif word.lower() != answer_words[answer_index]:\n\t\t\t\t\tinside_answer = False\n\t\t\t\t\tbio_embeddings.append(EMBEDER[\"O\"])\n\t\t\t\telse:\n\t\t\t\t\tbio_embeddings.append(EMBEDER[\"I\"])\n\t\t\telse:\n\t\t\t\tinside_answer = False\n\t\t\t\tbio_embeddings.append(EMBEDER[\"O\"])\n\t\telse:\n\t\t\tbio_embeddings.append(EMBEDER[\"O\"])\n\tbio_embeddings.append(EMBEDER[\"O\"])\n\n\tground_truth = torch.tensor([BERT_TOKENIZER.encode(target_words)])\n\tcontext_words = torch.tensor([BERT_TOKENIZER.encode(context_words)])\n\n\tassert len(bio_embeddings) == len(context_words[0]), f'The BIO tags are not equal in length to the embeddings! ' \\\n\t f'{None} & {len(bio_embeddings)} & {len(context_words[0])}'\n\treturn context_words, bio_embeddings, ground_truth",
"def write_all_correct_answers_for_wrong_qs_in_csv(self):\n # from nose.tools import set_trace; set_trace()\n my_csv_file = open(\"output_csv\", \"w\")\n for form_card_ind in range(2, 5):\n for div_ind in range(2, 4):\n question_title = self.score_page.find_all_grading_questions_title(form_card_ind, div_ind)\n print question_title\n answers_list = self.score_page.find_all_correct_answers_from_each_section(form_card_ind, div_ind)\n print answers_list\n if len(answers_list):\n my_csv_file.write('Q: ' + str(question_title[0]) + \"\\n\")\n for each in answers_list:\n print each\n my_csv_file.write(str(each) + \"\\n\")"
] | [
"0.6092856",
"0.604264",
"0.5708487",
"0.57038796",
"0.5662298",
"0.5381249",
"0.5374494",
"0.53232485",
"0.5256146",
"0.52354884",
"0.52330977",
"0.52246875",
"0.5207684",
"0.5201199",
"0.51908934",
"0.5187937",
"0.5181439",
"0.5181076",
"0.51760966",
"0.5160842",
"0.5153228",
"0.5149229",
"0.5128725",
"0.5121706",
"0.51028144",
"0.5087667",
"0.50860965",
"0.5043617",
"0.5040963",
"0.5037024"
] | 0.67152536 | 0 |
Loads the FAQ from disk into memory | def read_faq_from_disk():
return json.load(open("./faq.json")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_knowledge(self):\n MemoryManager.load_memory(self.knowledge_file)",
"def load_corpus_questions():\r\n\tglobal search_criteria_dict, solution_dict, linked_abstracts_dict\r\n\tif os.path.exists(paths.path_data_questions_pickle):\r\n\t\tprint('\\nloading questions and answers')\r\n\t\tsearch_criteria_dict = pickle.load(open(paths.path_data_questions_pickle,\"rb\"))\r\n\t\tsolution_dict = pickle.load(open(paths.path_data_answers_pickle,\"rb\"))\r\n\t\tlinked_abstracts_dict = pickle.load(open(paths.path_data_linkedabstracts_pickle,\"rb\"))\r\n\t\t\r\n\t\tprint(len(search_criteria_dict))\r\n\t\tprint(len(solution_dict))\r\n\t\tprint(len(linked_abstracts_dict))\r\n\t\t\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False",
"def test_reader(qn_filepath, answers_dirpath):\n qns = get_questions(qn_filepath)\n for qn in qns:\n if qn.qid == 100:\n q = qn\n break\n assert q\n docs = get_documents(answers_dirpath, q.qid)\n print docs\n print docs[0].content",
"def load_knowledge(net, filepath):\n\treloaded = loadz(filepath)\n\tknowledge = [(name, reloaded[name]) for name in sorted(reloaded.keys())]\n\tset_knowledge(net, knowledge)",
"def load_corpus_abstracts():\r\n\t\r\n\tglobal abstracts_dict\r\n\tif os.path.exists(paths.path_data_abstracts_pickle):\r\n\t\tprint('\\nloading abstracts')\r\n\t\tabstracts_dict = pickle.load(open(paths.path_data_abstracts_pickle,\"rb\"))\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False",
"def load_questions(self, verbose=True):\n for question in self.question_list:\n question.load_question(self.data)",
"def load_data(self):",
"def load(self):\r\n self.read(self.filename)",
"def load(self):",
"def loadArtworks(catalog):\n booksfile = cf.data_dir + 'MoMA/Artworks-utf8-small.csv'\n input_file = csv.DictReader(open(booksfile, encoding='utf-8'))\n for artwork in input_file:\n model.addArtwork(catalog, artwork)",
"def __init__(self, file, question_list, encoding, read_file=True, load_questions=True, verbose=True):\n self.file = file\n self.question_list = question_list\n self.encoding = encoding\n\n self.data = pd.DataFrame\n\n if read_file:\n self.read_file()\n\n if load_questions:\n self.load_questions(verbose)",
"def load_data(self) -> None:",
"def loadArtworks(catalog):\n artfile = cf.data_dir + 'Artworks-utf8-large.csv'\n input_file = csv.DictReader(open(artfile, encoding='utf-8'))\n for artwork in input_file:\n model.addArtwork(catalog, artwork)",
"def loadArtworks(catalog):\n artworksfile = cf.data_dir + 'MoMA/Artworks-utf8-small.csv'\n input_file = csv.DictReader(open(artworksfile, encoding='utf-8'))\n for artwork in input_file:\n model.addArtwork(catalog, artwork)",
"def loadArtworks(catalog):\n artworksfile = cf.data_dir + 'MoMA/Artworks-utf8-10pct.csv'\n input_file = csv.DictReader(open(artworksfile, encoding='utf-8'))\n for artwork in input_file:\n model.addArtwork(catalog, artwork)",
"def deserialize(path):\n with open(path, 'rb') as f:\n temp = pickle.load(f)\n for q in temp.questions:\n q.on_deserialize()\n return temp",
"def load(self):\n\n raise NotImplementedError",
"def load(journal: Journal, file: Path) -> None:",
"def _get_vocab_files(self):\n question_vocab, answer_vocab = {}, {}\n qdict_path = os.path.join(self.cache_dir, self.exp_type + '_qdict.json')\n adict_prefix = '_adict.json'\n if self.use_ocr:\n adict_prefix = '_ocr' + adict_prefix\n adict_path = os.path.join(self.cache_dir, self.exp_type + adict_prefix)\n if os.path.exists(qdict_path) and os.path.exists(adict_path):\n self.logger.info('restoring vocab')\n with open(qdict_path,'r') as f:\n q_dict = json.load(f)\n with open(adict_path,'r') as f:\n a_dict = json.load(f)\n else:\n q_dict, a_dict = self._make_vocab_files()\n with open(qdict_path,'w') as f:\n json.dump(q_dict, f)\n with open(adict_path,'w') as f:\n json.dump(a_dict, f)\n self.logger.info('question vocab size: {}'.format(len(q_dict)))\n self.logger.info('answer vocab size: {}'.format(len(a_dict)))\n self.qdict = q_dict\n self.adict = a_dict",
"def load_explainer(self):\n explainer_path = os.path.join(self.model_path, \"explainer.dill\")\n csv_path = os.path.join(self.model_path, self.__csv_path)\n if os.path.isfile(explainer_path):\n with open(explainer_path, \"rb\") as f:\n self.__explainer = dill.load(f)\n elif os.path.isfile(csv_path):\n print(\"[WARN] Making new explainer!\")\n self.__explainer = make_explainer(\n pd.read_csv(csv_path),\n self.FEATURES\n )\n with open(explainer_path, \"wb\") as f:\n dill.dump(self.__explainer, f)\n else:\n print(\"[WARN] Explainer not found!\")",
"def load(self):\n pass",
"def load(self):\n pass",
"def load(self):\n pass",
"def load(self):\n pass",
"def load_exam(options):\n try:\n csvFile = open(options.get('o'), 'rb')\n except IOError as (errno,strerror):\n print \"I/O error({0}): {1}\".format(errno, strerror)\n \n csvReader = reader(csvFile, delimiter=\":\")\n \n for data in csvReader:\n new_exam = Exam.objects.create()\n new_exam.exam_code = data[0]\n new_exam.exam_name = data[1]\n new_exam.save()\n print \"Added ({0} : {1})\".format(data[0], data[1])",
"def load(self):\n raise NotImplementedError",
"def load(self):\n raise NotImplementedError",
"def _load_disk(self):",
"def _load_disk(self):",
"def load_templates(self):\n\n self.templates = []\n\n if os.path.exists(\"question_templates.txt\"):\n for line in open(\"question_templates.txt\", \"r\"):\n self.templates.append(line.replace(\"\\n\", \"\"))"
] | [
"0.6981235",
"0.62057495",
"0.61173797",
"0.59236383",
"0.5770463",
"0.57657015",
"0.5753063",
"0.5708415",
"0.56874204",
"0.5681949",
"0.56623375",
"0.5654585",
"0.5630125",
"0.56003106",
"0.55774397",
"0.55369693",
"0.5526067",
"0.5508884",
"0.5482488",
"0.5453618",
"0.54515654",
"0.54515654",
"0.54515654",
"0.54515654",
"0.54479635",
"0.5433589",
"0.5433589",
"0.5418888",
"0.5418888",
"0.5367675"
] | 0.72727674 | 0 |
Checks whether or not a message that was sent belongs to an active conversation that the bot is in | def is_active_conv(timestamp):
debug_print("Checking to see if {} is an active conversation.".format(timestamp))
debug_print(ACTIVE_CONVS)
return timestamp in ACTIVE_CONVS | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter(self, message):\n conversations = Conversations()\n return conversations.get_conversation(message.from_user.id) is not None",
"def filter(self, message):\n conversations = Conversations()\n conversation = conversations.get_conversation(message.from_user.id)\n if conversation is None:\n return False\n\n return conversation.type == self.conversation_type",
"def has_message(self, character):\n messages = get_messages(character)\n messages = [ message[MESSAGE].id for message in messages ]\n if self.message.id in messages:\n return True\n else:\n return False",
"def joined(self):\n return str(self) in holder.bot.conn.channels.keys()",
"def has_talk(self):\n if self.applicant.talks.filter(Q(status=SUBMITTED) |\n Q(status=UNDER_CONSIDERATION) |\n Q(status=PROVISIONAL) |\n Q(status=ACCEPTED)):\n return True\n return False",
"def is_chat(message):\n labels = message.get('X-Gmail-Labels', \"\").split(',')\n return 'Chat' in labels",
"def is_bot(self) -> bool:",
"def handle_message(self, msg, status):\n\n body = ensure_unicode(msg.Body)\n chat_id = get_chat_id(msg.Chat)\n\n if len(body) == 0:\n return False\n\n for name, cmd in self.commands.items():\n if body == name:\n cmd(msg, chat_id)\n return True\n\n\n if self.troller_is_running.get(chat_id):\n response = self.alice.respond(body)\n if response:\n msg.Chat.SendMessage(response)\n return True\n else:\n return False\n else:\n return False",
"def contains_message(response, message):\n if len(response.context['messages']) != 1:\n return False\n\n full_message = str(list(response.context['messages'])[0])\n\n return message in full_message",
"def has_messages(self) -> bool:\n return self._has_messages",
"def check_if_bot(self, user_id):\n return str(self.get_int_index(bot_id, 9)) in str(user_id)",
"def mentioned_in(self, message: Message) -> bool:\n if message.guild is None or message.guild.id != self.guild.id:\n return False\n\n if self._user.mentioned_in(message):\n return True\n\n return any(self._roles.has(role.id) for role in message.role_mentions)",
"def is_bot(self):\n return self._is_bot",
"def check_status(bot, chat_id, query):\n\n remain = remain_time(chat_id)\n query_id = query.id\n\n message = bot_collection[chat_id].get_remained_message(remain)\n\n bot.answer_callback_query(callback_query_id=query_id, text=message)\n\n pass",
"def test_user_not_in_conversation(self):\n self.client.post('/conversation/{}/message'.format(self.conversation.id), {\n \"sender_id\": self.user_a.id,\n \"receiver_id\": self.user_b.id,\n \"text\": \"test message\"\n }, format='json')\n response = self.client.post('/conversation/{}/message'.format(self.conversation.id), {\n \"sender_id\": self.user_a.id,\n \"receiver_id\": self.user_c.id,\n \"text\": \"test message\"\n }, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(Message.objects.count(), 1)",
"def is_active(self):\n if not self._relaypid:\n return False\n\n self._lock.acquire()\n relaypid = None\n portoffset = None\n try:\n relaypid, portoffset = self._check_tcprelay()\n except AttributeError:\n logger.debug(\n \"No active TCPRELAY tunnel on locationid - {0}\"\n \"\".format(self.locationid_param))\n finally:\n self._lock.release()\n\n return (\n self._relaypid == relaypid and\n self._portoffset == portoffset\n )",
"def send_opener(self, match, message):\n # TODO check if there is a relationship between self and this tinder_id\n if self.api.is_conversation_is_empty(match.id):\n logging.debug(LOG_TAG + 'Messaged {0} ({1}) with the message: \"{2}\"'.format(match.name,\n match.id,\n message))\n return True\n else:\n return False",
"def is_component_to_component_message(self) -> bool:\n return self.is_to_public_id and self.is_sender_public_id",
"def can_message(guild, channel):\n\treturn authorized(guild, channel) and not muted(guild, channel)",
"def is_character_alive(self):\n return self.get_model.get_character.alive",
"def ccheck(self, msg):\r\n if msg.channel == self.channel or (msg.channel.is_private and self.ispm):\r\n return True\r\n return False",
"def is_waiting_for_message(self):\r\n return self.waiting_for_message",
"def has_sender(self):\n return self.balance > 0",
"async def interaction_check(self, interaction: Interaction) -> bool:\n if interaction.user != self.interaction_owner:\n await interaction.response.send_message(\n \":x: This is not your command to react to!\",\n ephemeral=True\n )\n return False\n return True",
"def is_active(self):\n return not self.pending",
"def has_message_available(self):\n return not self.feedback_log.empty()",
"def check_message(msg):\n words_of_message = msg.split()\n find = False\n for key in gc_words:\n if words_of_message in gc_words[key]['groups']:\n getattr(neuron.general_conversations, key)()\n find = True\n break\n for key in fc_words:\n if words_of_message in fc_words[key]['groups']:\n getattr(neuron.forecast, key)()\n find = True\n break\n for key in twitter_words:\n if words_of_message in twitter_words[key]['groups']:\n getattr(neuron.twitter, key)()\n find = True\n break\n for key in pipo_words:\n if words_of_message in pipo_words[key]['groups']:\n getattr(neuron.pipotron, key)()\n find = True\n break\n if not find:\n neuron.general_conversations.undefined()",
"def is_user_message(message):\n return (message.get('message') and\n message['message'].get('text') and\n not message['message'].get(\"is_echo\"))",
"def is_participant(self, message: discord.Message):\n if message.author in self.participants:\n self.participants.remove(message.author)\n return True\n\n return False",
"def talk(self):\r\n if self.conversation is not None:\r\n print(\"[\" + self.name + \" says]: \" + self.conversation)\r\n else:\r\n print(self.name + \" doesn't want to talk to you\")"
] | [
"0.68307483",
"0.6709261",
"0.62747544",
"0.609343",
"0.59765625",
"0.59107685",
"0.58387476",
"0.57660055",
"0.5739228",
"0.57104665",
"0.56541896",
"0.5643942",
"0.56159776",
"0.5584048",
"0.55786204",
"0.55633813",
"0.55561924",
"0.5555065",
"0.55538917",
"0.55532825",
"0.5548147",
"0.5544811",
"0.55435395",
"0.5531817",
"0.551193",
"0.5508308",
"0.55058765",
"0.5491386",
"0.5478681",
"0.54779136"
] | 0.7009033 | 0 |
PrettyPrint to stdout if in debug mode | def debug_print(debug_data):
if DEBUG_MODE == "true":
pp.pprint(debug_data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def debug_print(text):\r\n if settings.debug:\r\n print (text)",
"def debug():",
"def debugPrint(text: str):\r\n if DEBUG:\r\n print(text)",
"def debugprint(debugobject, debugstring):\n if CMDLINEARGS.debug:\n print \"===== \" + debugstring + \" =====\"\n pprint.pprint(debugobject)\n print \"===== \" + debugstring + \" =====\"\n print \"\"",
"def debugprint(debugobject, debugstring):\n if CMDLINEARGS.debug:\n print \"===== \" + debugstring + \" =====\"\n pprint.pprint(debugobject)\n print \"===== \" + debugstring + \" =====\"\n print \"\"",
"def output_debug(text):\n if conf.debug:\n output_message('[DEBUG] ' + text)",
"def print_debug(msg):\n if IS_DEBUG:\n print(msg)",
"def pprint(self, parameter_s=''):\n ptformatter = self.shell.display_formatter.formatters['text/plain']\n ptformatter.pprint = bool(1 - ptformatter.pprint)\n print('Pretty printing has been turned',\n ['OFF','ON'][ptformatter.pprint])",
"def cli(debug):\n print(f\"Debug mode is {'on' if debug else 'off'}\")",
"def debug_print(self, *content):\n if self.debug:\n print(*content)",
"def dprint(msg, debug):\n if debug:\n six.print_(msg)",
"def debug(string):\n if verbose:\n print string\n return",
"def output_debug_info(self):",
"def debug(string):\n if conf.DEBUG:\n outputs.print_debug(string)",
"def debug(msg):\n if settings.DEBUG:\n print \"DEBUG: cli.%(msg)s\" % locals()",
"def debug_print(self, s, linefeed=True):\n if(DEBUG):\n sys.stderr.write(s)\n if(linefeed):\n sys.stderr.write('\\n')",
"def _debug_print(message):\n\n if _debug == True:\n print(message)",
"def debug() -> bool:",
"def main(debug):\n click.echo('Debug mode is {{}}'.format(debug))",
"def DEBUG(*args, **kwargs):\n if __name__ != \"__main__\":\n print(*args, **kwargs)",
"def print_debug(obj):\n\n if not DEBUG:\n return False\n\n if hasattr(obj, 'lower'):\n # string/unicode... just print it.\n print('Debug: {}'.format(obj))\n elif isinstance(obj, (list, tuple)):\n # list/tuple, format it...\n header = 'Debug: '\n spaces = (' ' * len(header))\n if obj:\n print('{}{}'.format(header, obj[0]))\n if len(obj) > 1:\n otherlines = '\\n{}'.format(spaces).join(obj[1:])\n print('\\n{}{}'.format(spaces, otherlines))\n else:\n # different type of object\n print('Debug: {!r}'.format(obj))\n\n return True",
"def debug(s):\n if app.config['DEBUG']:\n print(s)",
"def debug(s):\n if app.config['DEBUG']:\n print(s)",
"def dprint(msg):\n if defaults.debug:\n print('debug: %s' % msg)",
"def debug_print(*a):\n if enable_debug_output:\n print(' '.join(map(str, a)))",
"def _PRINT_DEBUG(*args):\n print(sys.stderr, args, file=sys.stderr)",
"def printdebug(self, msg):\n if self.debug > 0:\n print(msg)",
"def __debugInfo(self, msg):\n\t\tif self.verbosity:\n\t\t\tprint(stylize(\"[*] DEBUG: {}\".format(msg), colored.fg(\"wheat_1\")))",
"def debug_print(self, *args, **kwargs):\n print(\"APP_DEBUG_PRINT\", args, kwargs)",
"def print_debug(context: str = \"\") -> None:\r\n print(context)\r\n print(\"This is the current board\")\r\n print(example)\r\n print(\"This is the conflict space\")\r\n print(conflict_space)\r\n print(\"This is the safeboard\")\r\n print(safeboard)"
] | [
"0.72024435",
"0.7062981",
"0.6972337",
"0.6967862",
"0.6967862",
"0.6959475",
"0.6913338",
"0.6794894",
"0.6765527",
"0.67562",
"0.67441654",
"0.6719388",
"0.6710147",
"0.6696881",
"0.6689538",
"0.66720015",
"0.6669009",
"0.6667993",
"0.6667386",
"0.6667287",
"0.66486543",
"0.6634663",
"0.6634663",
"0.6634612",
"0.66206264",
"0.66005343",
"0.65751296",
"0.65561223",
"0.64853823",
"0.64729387"
] | 0.7648205 | 0 |
Remove polygons from this cell. The function or callable `test` is called for each polygon in the cell. If its return value evaluates to True, the corresponding polygon is removed from the cell. | def remove_polygons(self, test):
filtered_polys = []
for element in self.polygons:
pld = [(poly, l, dt) for poly, l, dt in zip(element.polygons, element.layers, element.datatypes)
if not test(poly, l, dt)]
if len(pld) == 0:
pass # we don't need no empty polygons!
else:
polys, layers, dts = zip(*pld)
element.polygons = polys
element.layers = layers
element.datatypes = dts
filtered_polys.append(element)
self.polygons = filtered_polys
return self | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_polygons(self, test):\n empty = []\n for element in self.elements:\n if isinstance(element, PolygonSet):\n ii = 0\n while ii < len(element.polygons):\n if test(element.polygons[ii], element.layers[ii],\n element.datatypes[ii]):\n element.polygons.pop(ii)\n element.layers.pop(ii)\n element.datatypes.pop(ii)\n else:\n ii += 1\n if len(element.polygons) == 0:\n empty.append(element)\n for element in empty:\n self.elements.remove(element)\n return self",
"def remove_polygon(api_key, hexagon_id, hexagon_shape,\n api_endpoint=(\"https://engine.tygron.com/api/session/event/\"\n \"editorbuilding/remove_polygons/?\")):\n multi = geometry.MultiPolygon([hexagon_shape])\n remove = geometry.mapping(multi)\n r = requests.post(url=api_endpoint+api_key, json=[hexagon_id, 1, remove])\n return",
"def test_delete_polygon_successfully(self, api_client):\n poly = baker.make(Polygon)\n url = self.base_url + f\"/polygons/{poly.id}/\"\n response = api_client().delete(url)\n assert response.status_code == 204",
"def prune_and_polygon(cls, ground_truths, detections):\n\t\tif not hasattr(ground_truths[0], 'intersection'):\n\t\t\tground_truths = [cls.Polygon(value) for value in ground_truths]\n\t\tif not hasattr(detections[0], 'intersection'):\n\t\t\tdetections = [cls.Polygon(value) for value in detections]\n\t\tground_truths = [value for value in ground_truths if value.length > 0.]\n\t\treturn (ground_truths, detections)",
"def delete_polygon(self, poly: QGraphicsPolygonItem, delete_from_coord_list=False):\n\n self.poly_list.remove(poly)\n\n if poly in self.hole_list:\n self.hole_list.remove(poly)\n\n for item in poly.childItems():\n if isinstance(item, PyQt5.QtWidgets.QGraphicsLineItem):\n self.edge_list.remove(item)\n if item in self.potential_edge_splitters:\n self.potential_edge_splitters.remove(item)\n\n if delete_from_coord_list:\n for point in self.poly_to_list(poly, \"Global\"):\n self.point_coord_list = np.delete(self.point_coord_list, np.where(\n np.all(self.point_coord_list == [[point.x(), point.y()]], axis=1))[0][0], axis=0)\n\n poly.hide()",
"def test_clip_points_by_polygons(self):\n\n # Name input files\n point_name = join(TESTDATA, 'population_5x5_jakarta_points.shp')\n point_layer = read_layer(point_name)\n points = numpy.array(point_layer.get_geometry())\n attrs = point_layer.get_data()\n\n # Loop through polygons\n for filename in ['polygon_0.shp', 'polygon_1.shp', 'polygon_2.shp',\n 'polygon_3.shp', 'polygon_4.shp',\n 'polygon_5.shp', 'polygon_6.shp']:\n\n polygon_layer = read_layer(join(TESTDATA, filename))\n polygon = polygon_layer.get_geometry()[0]\n\n # Clip\n indices = inside_polygon(points, polygon)\n\n # Sanity\n for point in points[indices, :]:\n assert is_inside_polygon(point, polygon)\n\n # Explicit tests\n if filename == 'polygon_0.shp':\n assert len(indices) == 6\n elif filename == 'polygon_1.shp':\n assert len(indices) == 2\n assert numpy.allclose(points[indices[0], :],\n [106.8125, -6.1875])\n assert numpy.allclose(points[indices[1], :],\n [106.8541667, -6.1875])\n assert numpy.allclose(attrs[indices[0]]['value'],\n 331941.6875)\n assert numpy.allclose(attrs[indices[1]]['value'],\n 496445.8125)\n elif filename == 'polygon_2.shp':\n assert len(indices) == 7\n elif filename == 'polygon_3.shp':\n assert len(indices) == 0 # Degenerate\n elif filename == 'polygon_4.shp':\n assert len(indices) == 0 # Degenerate\n elif filename == 'polygon_5.shp':\n assert len(indices) == 8\n elif filename == 'polygon_6.shp':\n assert len(indices) == 6",
"def remove_excess_polygon(polygons_dict, region):\n start_len = len(polygons_dict)\n poly_region_default_area = area(\n geojson.Feature(geometry=region, properties={}).geometry)\n idx = 0\n iteration_range = start_len\n while idx < iteration_range:\n intersection_polygon_area = 0\n poly_list = []\n poly_copy = copy.deepcopy(polygons_dict)\n del poly_copy[idx]\n for el in poly_copy:\n el_poly = shapely.geometry.asShape(el['geometry'])\n poly_list.append(el_poly)\n union_poly = cascaded_union(poly_list)\n intersection_polygon = union_poly.intersection(region)\n if not (intersection_polygon.is_empty and union_poly.is_empty):\n intersection_polygon_area = area(geojson.Feature(geometry=intersection_polygon, properties={}).geometry)\n else:\n break\n if float(\"{0:.2f}\".format(poly_region_default_area)) == float(\"{0:.2f}\".format(intersection_polygon_area)):\n del polygons_dict[idx]\n iteration_range -= 1\n else:\n idx += 1\n if len(polygons_dict) > 0 and (len(polygons_dict) != start_len):\n return polygons_dict\n else:\n return None",
"def test_clip_raster_by_polygons(self):\n\n # Name input files\n poly = join(TESTDATA, 'kabupaten_jakarta_singlepart.shp')\n grid = join(TESTDATA, 'population_5x5_jakarta.asc')\n\n # Get layers using API\n P = read_layer(poly)\n R = read_layer(grid)\n\n M = len(P)\n N = len(R)\n assert N == 56\n\n # Clip\n C = clip_raster_by_polygons(R, P)\n assert len(C) == M\n\n # Check points inside polygon\n tot = 0\n for c in C:\n tot += len(c)\n assert tot == 14\n\n # Check that points are inside the right polygon\n for i, polygon in enumerate(P.get_geometry()):\n\n points = C[i][0]\n values = C[i][1]\n\n # Sanity first\n for point in points:\n assert is_inside_polygon(point, polygon)\n\n # Specific tests against raster pixel values inside polygons\n # The values are read from qgis\n if i == 0:\n assert len(points) == 6\n assert numpy.allclose(values[0], 200951)\n assert numpy.allclose(values[1], 283237)\n assert numpy.allclose(values[2], 278385)\n assert numpy.allclose(values[3], 516061)\n assert numpy.allclose(values[4], 207414)\n assert numpy.allclose(values[5], 344466)\n\n elif i == 1:\n assert len(points) == 2\n msg = ('Got wrong coordinates %s, expected %s'\n % (str(points[0, :]), str([106.8125, -6.1875])))\n assert numpy.allclose(points[0, :], [106.8125, -6.1875]), msg\n assert numpy.allclose(points[1, :], [106.8541667, -6.1875])\n assert numpy.allclose(values[0], 331942)\n assert numpy.allclose(values[1], 496446)\n elif i == 2:\n assert len(points) == 7\n assert numpy.allclose(values[0], 268579)\n assert numpy.allclose(values[1], 155795)\n assert numpy.allclose(values[2], 403674)\n assert numpy.allclose(values[3], 259280)\n assert numpy.allclose(values[4], 284526)\n assert numpy.allclose(values[5], 334370)\n assert numpy.allclose(values[6], 143325)\n elif i == 3:\n assert len(points) == 0 # Degenerate\n elif i == 4:\n assert len(points) == 0 # Degenerate\n elif i == 5:\n assert len(points) == 8\n assert numpy.allclose(values[0], 279103)\n assert numpy.allclose(values[1], 205762)\n assert numpy.allclose(values[2], 428705)\n assert numpy.allclose(values[3], 331093)\n assert numpy.allclose(values[4], 227514)\n assert numpy.allclose(values[5], 249308)\n assert numpy.allclose(values[6], 215739)\n assert numpy.allclose(values[7], 147447)\n elif i == 6:\n assert len(points) == 6\n assert numpy.allclose(values[0], 61836.4)\n assert numpy.allclose(values[1], 165723)\n assert numpy.allclose(values[2], 151307)\n assert numpy.allclose(values[3], 343787)\n assert numpy.allclose(values[4], 303627)\n assert numpy.allclose(values[5], 225232)\n\n # Generate layer objects\n values = [{'value': x} for x in C[i][1]]\n point_layer = Vector(data=values, geometry=points,\n projection=P.get_projection())\n\n if len(point_layer) > 0:\n # Geometry is only defined for layers that are not degenerate\n assert point_layer.is_point_data\n\n polygon_layer = Vector(geometry=[polygon],\n projection=P.get_projection())\n assert polygon_layer.is_polygon_data\n\n # Generate spatial data for visualisation with e.g. QGIS\n if False:\n point_layer.write_to_file('points_%i.shp' % i)\n polygon_layer.write_to_file('polygon_%i.shp' % i)",
"def _crop_region(polygons, left, bottom, right, top, precision):\n cropped_polygons = []\n for p in polygons:\n clipped_polys = clipper._chop(p, [top, bottom], 1, 1 / precision)\n # polygon, [cuts], axis, scale\n for cp in clipped_polys[1]:\n result = clipper._chop(cp, [left, right], 0, 1 / precision)\n cropped_polygons += list(result[1])\n return cropped_polygons",
"def test_clip_points_by_polygons_with_holes0(self):\n\n # Define an outer ring\n outer_ring = numpy.array([[106.79, -6.233],\n [106.80, -6.24],\n [106.78, -6.23],\n [106.77, -6.21],\n [106.79, -6.233]])\n\n # Define inner rings\n inner_rings = [numpy.array([[106.77827, -6.2252],\n [106.77775, -6.22378],\n [106.78, -6.22311],\n [106.78017, -6.22530],\n [106.77827, -6.2252]])[::-1],\n numpy.array([[106.78652, -6.23215],\n [106.78642, -6.23075],\n [106.78746, -6.23143],\n [106.78831, -6.23307],\n [106.78652, -6.23215]])[::-1]]\n\n v = Vector(geometry=[Polygon(outer_ring=outer_ring,\n inner_rings=inner_rings)])\n assert v.is_polygon_data\n\n # Write it to file\n tmp_filename = unique_filename(suffix='.shp')\n v.write_to_file(tmp_filename)\n\n # Read polygon it back\n L = read_layer(tmp_filename)\n P = L.get_geometry(as_geometry_objects=True)[0]\n\n outer_ring = P.outer_ring\n inner_ring0 = P.inner_rings[0]\n inner_ring1 = P.inner_rings[1]\n\n # Make some test points\n points = generate_random_points_in_bbox(outer_ring, 1000, seed=13)\n\n # Clip to outer ring, excluding holes\n indices = inside_polygon(points, P.outer_ring, holes=P.inner_rings)\n\n # Sanity\n for point in points[indices, :]:\n # Must be inside outer ring\n assert is_inside_polygon(point, outer_ring)\n\n # But not in any of the inner rings\n assert not is_inside_polygon(point, inner_ring0)\n assert not is_inside_polygon(point, inner_ring1)\n\n if False:\n # Store for visual check\n pol = Vector(geometry=[P])\n tmp_filename = unique_filename(suffix='.shp')\n pol.write_to_file(tmp_filename)\n print 'Polygon with holes written to %s' % tmp_filename\n\n pts = Vector(geometry=points[indices, :])\n tmp_filename = unique_filename(suffix='.shp')\n pts.write_to_file(tmp_filename)\n print 'Clipped points written to %s' % tmp_filename",
"def _crop_edge_polygons(all_polygons, bboxes,\n left, bottom, right, top,\n precision):\n polygons_in_rect_i = _find_bboxes_in_rect(bboxes, left, bottom, right, top)\n polygons_edge_i = _find_bboxes_on_rect_edge(bboxes, left, bottom, right,\n top)\n polygons_in_rect_no_edge_i = polygons_in_rect_i & (~polygons_edge_i)\n\n # Crop polygons along the edge and recombine them with polygons inside the\n # rectangle\n polygons_edge = all_polygons[polygons_edge_i]\n polygons_in_rect_no_edge = all_polygons[polygons_in_rect_no_edge_i]\\\n .tolist()\n polygons_edge_cropped = _crop_region(polygons_edge, left, bottom, right,\n top, precision = precision)\n polygons_to_process = polygons_in_rect_no_edge + polygons_edge_cropped\n\n return polygons_to_process",
"def test_polygons_with_inner_rings(self):\n\n # Define two (closed) outer rings - clock wise direction\n outer_rings = [numpy.array([[106.79, -6.233],\n [106.80, -6.24],\n [106.78, -6.23],\n [106.77, -6.21],\n [106.79, -6.233]]),\n numpy.array([[106.76, -6.23],\n [106.72, -6.23],\n [106.72, -6.22],\n [106.72, -6.21],\n [106.76, -6.23]])]\n\n tmp_filename = unique_filename(suffix='.shp')\n\n # Do outer rings first (use default geometry type polygon)\n v_ref = Vector(geometry=outer_rings)\n assert v_ref.is_polygon_data\n\n v_ref.write_to_file(tmp_filename)\n v_file = read_layer(tmp_filename)\n assert v_file == v_ref\n assert v_file.is_polygon_data\n\n # Do it again but with (closed) inner rings as well\n\n # Define inner rings (counter clock wise)\n inner_rings = [\n # 2 rings for feature 0\n [numpy.array([[106.77827, -6.2252],\n [106.77775, -6.22378],\n [106.78, -6.22311],\n [106.78017, -6.22530],\n [106.77827, -6.2252]])[::-1],\n numpy.array([[106.78652, -6.23215],\n [106.78642, -6.23075],\n [106.78746, -6.23143],\n [106.78831, -6.23307],\n [106.78652, -6.23215]])[::-1]],\n # 1 ring for feature 1\n [numpy.array([[106.73709, -6.22752],\n [106.73911, -6.22585],\n [106.74265, -6.22814],\n [106.73971, -6.22926],\n [106.73709, -6.22752]])[::-1]]]\n\n polygons = []\n for i, outer_ring in enumerate(outer_rings):\n p = Polygon(outer_ring=outer_ring, inner_rings=inner_rings[i])\n polygons.append(p)\n\n v_ref = Vector(geometry=polygons)\n assert v_ref.is_polygon_data\n data_bbox = v_ref.get_bounding_box()\n\n # Check data from Vector object\n geometry = v_ref.get_geometry(as_geometry_objects=True)\n for i, g in enumerate(geometry):\n assert numpy.allclose(g.outer_ring, outer_rings[i])\n if i == 0:\n assert len(g.inner_rings) == 2\n else:\n assert len(g.inner_rings) == 1\n\n for j, ring in enumerate(inner_rings[i]):\n assert numpy.allclose(ring, g.inner_rings[j])\n\n # Write to file and read again\n v_ref.write_to_file(tmp_filename)\n #print 'With inner rings, written to ', tmp_filename\n v_file = read_layer(tmp_filename)\n assert v_file == v_ref\n assert v_file.is_polygon_data\n assert numpy.allclose(v_file.get_bounding_box(), data_bbox,\n rtol=1.0e-12, atol=1.0e-12)\n\n # Check data from file\n geometry = v_file.get_geometry(as_geometry_objects=True)\n for i, g in enumerate(geometry):\n assert numpy.allclose(g.outer_ring, outer_rings[i])\n if i == 0:\n assert len(g.inner_rings) == 2\n else:\n assert len(g.inner_rings) == 1\n\n for j, ring in enumerate(inner_rings[i]):\n assert numpy.allclose(ring, g.inner_rings[j])",
"def test_list_polygons(self, api_client):\n baker.make(Polygon, _quantity=2)\n url = self.base_url + \"/polygons/\"\n response = api_client().get(url)\n assert response.status_code == 200\n assert len(json.loads(response.content)) == 2",
"def check_cross_polygon(polygons_dict, region):\n result_poly_name = ''\n start_len = len(polygons_dict)\n poly_names = []\n poly_region_default_area = area(geojson.Feature(geometry=region, properties={}).geometry)\n for main_el in polygons_dict:\n for child_el in polygons_dict:\n intersection_region_area = 0\n main_poly = shapely.geometry.asShape(main_el['geometry'])\n child_poly = shapely.geometry.asShape(child_el['geometry'])\n intersection_polygon = main_poly.intersection(child_poly)\n control_area = area(\n geojson.Feature(geometry=child_poly, properties={}).geometry)\n if not intersection_polygon.is_empty and area(\n geojson.Feature(geometry=intersection_polygon, properties={}).geometry) < control_area:\n intersection_region = region.intersection(intersection_polygon)\n if not intersection_region.is_empty:\n intersection_region_area = area(\n geojson.Feature(geometry=intersection_region, properties={}).geometry)\n if float(\"{0:.2f}\".format(intersection_region_area)) == float(\n \"{0:.2f}\".format(poly_region_default_area)):\n poly_names.append(main_el[\"properties\"][\"Name\"])\n poly_names.append(child_el[\"properties\"][\"Name\"])\n if poly_names:\n result_poly_name = sorted(set(poly_names))[0]\n idx = 0\n iteration_range = len(polygons_dict)\n while idx < iteration_range:\n if polygons_dict[idx][\"properties\"][\"Name\"] != result_poly_name:\n del polygons_dict[idx]\n iteration_range -= 1\n else:\n idx += 1\n if len(polygons_dict) != start_len:\n return polygons_dict\n else:\n return None",
"def exclude_small_shapes(x,regionalized=False):\n # if its a single polygon, just return the polygon geometry\n if x.geometry.geom_type == 'Polygon':\n return x.geometry\n\n # if its a multipolygon, we start trying to simplify and\n # remove shapes if its too big.\n elif x.geometry.geom_type == 'MultiPolygon':\n\n if regionalized == False:\n area1 = 0.1\n area2 = 250\n\n elif regionalized == True:\n area1 = 0.01\n area2 = 50\n\n # dont remove shapes if total area is already very small\n if x.geometry.area < area1:\n return x.geometry\n # remove bigger shapes if country is really big\n\n if x['GID_0'] in ['CHL','IDN']:\n threshold = 0.01\n elif x['GID_0'] in ['RUS','GRL','CAN','USA']:\n if regionalized == True:\n threshold = 0.01\n else:\n threshold = 0.01\n\n elif x.geometry.area > area2:\n threshold = 0.1\n else:\n threshold = 0.001\n\n # save remaining polygons as new multipolygon for the\n # specific country\n new_geom = []\n for y in x.geometry:\n if y.area > threshold:\n new_geom.append(y)\n\n return MultiPolygon(new_geom)",
"def contains_polygons(self, polygons):\n assert isinstance(polygons, collections.Iterable), \\\n 'Invalid list of polygons'\n merged_poly = None\n geo = self.get_geometry()\n for poly in polygons:\n if merged_poly is None:\n merged_poly = geo.union(poly)\n else:\n merged_poly = merged_poly.union(poly)\n return merged_poly.area == geo.area",
"def clip_polygon(subject, clipper, operation = 'difference'):\n Subject = Polygon()\n Clipper = Polygon()\n\n for s in subject:\n Subject.add(Vertex(s))\n\n for c in clipper:\n Clipper.add(Vertex(c))\n\n clipped = Clipper.difference(Subject)\\\n if operation == 'reversed-diff'\\\n else Subject.__getattribute__(operation)(Clipper)\n\n clipped = [(ext.points,[hole.points for hole in holes]) for ext,holes in clipped]\n return clipped",
"def proc_unfilled_polygon(self, tokens):\n\n return self._proc_polygon(tokens, filled=False)",
"def removeIslands(self):\n if isinstance(self.substrates, Polygon):\n return\n mainland = []\n for i, substrate in enumerate(self.substrates.geoms):\n ismainland = True\n for j, otherSubstrate in enumerate(self.substrates.geoms):\n if j == i:\n continue\n if Polygon(otherSubstrate.exterior.coords).contains(substrate):\n ismainland = False\n break\n if ismainland:\n mainland.append(substrate)\n self.substrates = shapely.geometry.collection.GeometryCollection(mainland)\n self.oriented = False",
"def test_clip_points_by_polygons_with_holes_real(self):\n\n # Read real polygon with holes\n filename = '%s/%s' % (TESTDATA, 'donut.shp')\n L = read_layer(filename)\n\n # --------------------------------------------\n # Pick one polygon that has 2 inner rings\n P = L.get_geometry(as_geometry_objects=True)[1]\n\n outer_ring = P.outer_ring\n inner_ring0 = P.inner_rings[0]\n inner_ring1 = P.inner_rings[1]\n\n # Make some test points\n points_in_bbox = generate_random_points_in_bbox(outer_ring, 1000)\n points_in_inner_ring0 = populate_polygon(inner_ring0, 2, seed=13)\n points_in_inner_ring1 = populate_polygon(inner_ring1, 2, seed=17)\n points = numpy.concatenate((points_in_bbox,\n points_in_inner_ring0,\n points_in_inner_ring1))\n\n # Clip\n indices = inside_polygon(points, P.outer_ring, holes=P.inner_rings)\n\n # Sanity\n for point in points[indices, :]:\n # Must be inside outer ring\n assert is_inside_polygon(point, outer_ring)\n\n # But not in any of the inner rings\n assert not is_inside_polygon(point, inner_ring0)\n assert not is_inside_polygon(point, inner_ring1)\n\n # ---------------------------------------------------------\n # Pick a polygon that has 1 inner ring (nice visualisation)\n P = L.get_geometry(as_geometry_objects=True)[9]\n\n outer_ring = P.outer_ring\n inner_ring = P.inner_rings[0]\n\n # Make some test points\n points = generate_random_points_in_bbox(outer_ring, 500)\n\n # Clip\n indices = inside_polygon(points, P.outer_ring, holes=P.inner_rings)\n\n # Sanity\n for point in points[indices, :]:\n # Must be inside outer ring\n assert is_inside_polygon(point, outer_ring)\n\n # But not in the inner ring\n assert not is_inside_polygon(point, inner_ring)\n\n # Store for visual check (nice one!)\n # Uncomment os.remove if you want see the layers\n pol = Vector(geometry=[P])\n tmp_filename = unique_filename(suffix='.shp')\n pol.write_to_file(tmp_filename)\n #print 'Polygon with holes written to %s' % tmp_filename\n os.remove(tmp_filename)\n\n pts = Vector(geometry=points[indices, :])\n tmp_filename = unique_filename(suffix='.shp')\n pts.write_to_file(tmp_filename)\n #print 'Clipped points written to %s' % tmp_filename\n os.remove(tmp_filename)",
"def test_random_polygon(self):\n p = g.trimesh.path.polygons.random_polygon()\n assert p.area > 0.0\n assert p.is_valid",
"def test_donut_polygons(self):\n\n # Read real polygon with holes\n filename = '%s/%s' % (TESTDATA, 'donut.shp')\n L = read_layer(filename)\n\n # Write this new object, read it again and check\n tmp_filename = unique_filename(suffix='.shp')\n L.write_to_file(tmp_filename)\n\n # Read back\n R = read_layer(tmp_filename)\n msg = ('Geometry of polygon was not preserved after reading '\n 'and re-writing')\n\n # Check\n assert R == L, msg",
"def generatePolygons():",
"def clear_geometries(self):",
"def geojson_to_polygons_groundtruth(js_):\n\n burnt_polys = []\n building_polys = []\n for i, feat in enumerate(js_['features']):\n o = {\n \"coordinates\": feat['geometry']['coordinates'],\n \"type\": feat['geometry']['type']\n }\n s = json.dumps(o)\n\n # convert to geojson.geometry.Polygon\n g1 = geojson.loads(s)\n\n # covert to shapely.geometry.polygon.Polygon\n g2 = shape(g1)\n\n if feat['properties']['color'] == 'red': # red for the burnt region\n burnt_polys.append(g2)\n else: # for the building poly\n if feat['properties']['Burnt_Label']:\n building_polys.append([g2, [feat['properties']['BuildingID'], 'blue',\n True]]) # mark building polygons as 'blue' for burnt for now\n else:\n building_polys.append([g2, [feat['properties']['BuildingID'], 'yellow',\n False]]) # mark building polygons as 'yellow' for non-burnt for now\n return burnt_polys, building_polys",
"def Erase(inputgeodf, erasegeodf):\n return gpd.overlay(inputgeodf, gpd.GeoDataFrame({'geometry': erasegeodf.unary_union}), how='difference')",
"def remove_paths(self, test):\n ii = 0\n while ii < len(self.paths):\n if test(self.paths[ii]):\n self.paths.pop(ii)\n else:\n ii += 1\n return self",
"def del_rectangles(image, rects):\n for r in rects:\n cv2.rectangle(image,\n (r.x, r.y),\n (r.x + r.w - 1, r.y + r.h - 1),\n color=0,\n thickness=-1)",
"def test_polygonize():\n # A collection with one non-zero-area Polygon is returned as a Polygon.\n geom1 = GeometryCollection([POLY, ZERO_POLY])\n result1 = polygonize(geom1)\n assert result1.geom_type == \"Polygon\"\n assert result1.area == 1.0\n\n # A collection with multiple non-zero-area polygons is returned as a MultiPolygon.\n geom2 = GeometryCollection([POLY, POLY])\n result2 = polygonize(geom2)\n assert result2.geom_type == \"MultiPolygon\"\n assert result2.area == 2.0\n\n # Zero-area geometries are not permitted.\n with pytest.raises(ValueError) as err:\n _ = polygonize(ZERO_POLY)\n assert err.match(\"Geometry has zero area\")",
"def checkPolygonsSet(self, workflow: Workflow):\n pluginmaskclass = pluginmanager.get_plugin_by_name('Polygon Mask', 'ProcessingPlugin')\n for process in workflow.processes:\n if isinstance(process, pluginmaskclass):\n if process.polygon.value is None:\n self.startPolygonMasking(process)\n return True\n return False"
] | [
"0.80205756",
"0.6350466",
"0.6192517",
"0.61731756",
"0.60202783",
"0.600424",
"0.587869",
"0.5788355",
"0.5787628",
"0.560524",
"0.549923",
"0.5498585",
"0.54652745",
"0.54613364",
"0.5453875",
"0.5414428",
"0.5398999",
"0.5386263",
"0.535309",
"0.5336463",
"0.53328997",
"0.5295127",
"0.5267529",
"0.5264528",
"0.52280134",
"0.5200638",
"0.5172011",
"0.5169474",
"0.5166584",
"0.5166377"
] | 0.7925255 | 1 |
Remove paths from this cell. The function or callable `test` is called for each `FlexPath` or `RobustPath` in the cell. If its return value evaluates to True, the corresponding path is removed from the cell. | def remove_paths(self, test):
ii = 0
while ii < len(self.paths):
if test(self.paths[ii]):
self.paths.pop(ii)
else:
ii += 1
return self | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_labels(self, test):\n ii = 0\n while ii < len(self.labels):\n if test(self.labels[ii]):\n self.labels.pop(ii)\n else:\n ii += 1\n return self",
"def remove_labels(self, test):\n ii = 0\n while ii < len(self.labels):\n if test(self.labels[ii]):\n self.labels.pop(ii)\n else:\n ii += 1\n return self",
"def remove(self, path):\r\n return self.paths.remove(path)",
"def remove(path):",
"def remove(self,path):\n path = os.path.join(self.testpath,path)\n if os.path.isfile(path):\n os.remove(path)\n if os.path.isdir(path):\n shutil.rmtree(path)",
"def unwatch(self, path):\n path_obj = Path(path)\n if not path_obj.exists():\n raise FileObserverException(\"Can not unwatch non exist path\")\n parent_path = str(path_obj.parent)\n child_paths = self._watch_dog_observed_paths.get(parent_path, [])\n if path in child_paths:\n child_paths.remove(path)\n self._observed_paths.pop(path, None)\n if not child_paths:\n self._watch_dog_observed_paths.pop(parent_path, None)\n if self._observed_watches[parent_path]:\n self._observer.unschedule(self._observed_watches[parent_path])\n self._observed_watches.pop(parent_path, None)",
"def del_path(self):\n\n try:\n self.path_listbox.delete(self.path_listbox.curselection())\n self.set_check_paths()\n except Exception as e:\n print(e)\n pass",
"def clear(self):\n for pathItem in self.pathItem_list:\n self.scene.removeItem(pathItem)",
"def remove_cat(self, path: Path):\n if not self.active:\n return\n if path is None:\n return\n for i, coord in enumerate(path.path):\n self.cat[coord[1]][coord[0]].remove((path.identifier, i))",
"def delete_by_path(data: Dict[str, T], path: Sequence[str]):\n del get_by_path(data, path[:-1])[path[-1]]",
"def remove_by_path(self, path):\n if path.startswith(collection.Collection.CONTENT_PATH):\n if path.endswith(\n '/{}'.format(collection.Collection.BLUEPRINT_PATH)):\n # If this is a blueprint then remove the entire collection.\n col_path = path[len(collection.Collection.CONTENT_PATH):]\n # Get just the directory.\n col_path = os.path.split(col_path)[0]\n collection_path = col_path[1:] # Remove /\n with self._lock:\n if collection_path in self._cache:\n del self._cache[collection_path]\n else:\n # Search for an existing collection path.\n col_path = path[len(collection.Collection.CONTENT_PATH):]\n col_path = os.path.split(col_path)[0]\n while col_path != os.sep:\n collection_path = col_path[1:]\n with self._lock:\n if collection_path in self._cache:\n # Do a 'wildcard' match on the path to remove all\n # locales.\n generic_key = CollectionCache.generate_cache_key(\n path, '')\n for key in self._cache[collection_path]['docs'].keys():\n if key.startswith(generic_key):\n del self._cache[\n collection_path]['docs'][key]\n return\n col_path = os.path.split(col_path)[0]",
"def remove_indiv_files(path):\n if isinstance(path, FSMap):\n path.fs.delete(path.root, recursive=True)\n else:\n fname, ext = os.path.splitext(path)\n if ext == '.zarr':\n shutil.rmtree(path)\n else:\n os.remove(path)",
"def remove_images_without_label(path_folder):\n\n\n #labels = os.listdir(path_folder + \"labels/val/\")\n labels = os.listdir(path_folder + \"labels/val/\")\n images = os.listdir(path_folder + \"images/val/\")\n for i in images:\n name_i = i.split(\".\")\n if name_i[0] + '.xml' not in labels:\n os.remove(path_folder + \"images/val/\" + i)",
"def delete_path():\n #TODO delete path from database\n pass",
"def remove_labels_without_images(path_folder):\n\n labels = os.listdir(path_folder + \"LABELS_polar\")\n images = os.listdir(path_folder + \"POLAR\")\n for l in labels:\n name_l = l.split(\".\")\n if name_l[0] + '.tiff' not in images:\n os.remove(path_folder + \"LABELS_polar/\" + l)",
"def rm_path():\n shutil.rmtree(options.input_path)",
"def removeSpeciesGlyph(self, *args):\n return _libsbml.Layout_removeSpeciesGlyph(self, *args)",
"def remove_samples(img, path):\n bool_list = tf.equal(path, remove)\n in_list = tf.math.count_nonzero(bool_list) > 0\n return not in_list",
"def remove(self):\n path = os.path.abspath(path)\n if path in self.files:\n del self.files[path]\n return True\n return False",
"def CleanPaths(pathlist):\n for path1 in pathlist:\n for path2 in pathlist[::-1]:\n if path2[::-1] == path1:\n pathlist.remove(path2)\n break",
"def removePath(self, path):\n self.pushMode(CLI_MODES.shell)\n output = self.sendCmd(\"rm -rf %s\" % path)\n self.popMode()\n return output",
"def test_check_uncheck_path(pathmanager):\n # Assert that all paths are checked.\n for row in range(pathmanager.listwidget.count()):\n assert pathmanager.listwidget.item(row).checkState() == Qt.Checked",
"def _delete_data (self, path):\n head, tail = os.path.split(path)\n for subdir, dirs, files in os.walk(head):\n for file in files:\n if tail in file:\n os.remove(os.path.join(subdir, file))",
"def removeSpeciesReferenceGlyph(self, *args):\n return _libsbml.Layout_removeSpeciesReferenceGlyph(self, *args)",
"def RemovePath(*path):\n file_path = os.path.join(*path)\n if os.path.exists(file_path):\n if os.path.isdir(file_path):\n RemoveDirectory(file_path)\n else:\n RemoveFile(file_path)",
"def rm(self, paths):\n \n self.db_connect()\n\n # Expand and get the absolute paths\n expanded_paths = self.expand_paths(paths)\n \n query = \"DELETE FROM %s WHERE path=? AND doi IS NULL\" % PUBLICATIONS_TABLE\n with self.connection:\n c = self.connection.cursor()\n for f in expanded_paths:\n c.execute(query, [f])\n\n self.db_disconnect()\n \n return",
"def drop_path(input, p=0.2, training=True, inplace=False):\n if not training or p <= 0:\n return input\n return FunctionLib.apply(\n 'DropPath', input.device, [input],\n outputs=[input if inplace else None], ratio=p)",
"def unlink(self, path: PathLike):",
"def remove(path: str):\n _fs().remove(path)",
"def remove(self, *args):\n return _libsbml.ListOfSpeciesReferenceGlyphs_remove(self, *args)"
] | [
"0.6118493",
"0.6118493",
"0.5951783",
"0.591622",
"0.57983315",
"0.5620718",
"0.5586181",
"0.55428076",
"0.5444423",
"0.5415851",
"0.53787106",
"0.534371",
"0.5319035",
"0.53083396",
"0.52550906",
"0.5251843",
"0.5233213",
"0.5228396",
"0.51878136",
"0.5161153",
"0.5153678",
"0.5137448",
"0.5136106",
"0.51203024",
"0.50898904",
"0.50634426",
"0.5051716",
"0.5027983",
"0.5026341",
"0.50196373"
] | 0.7509545 | 0 |
Remove labels from this cell. The function or callable `test` is called for each label in the cell. If its return value evaluates to True, the corresponding label is removed from the cell. | def remove_labels(self, test):
ii = 0
while ii < len(self.labels):
if test(self.labels[ii]):
self.labels.pop(ii)
else:
ii += 1
return self | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def RemoveLabel(self, label):\n if self.labels is None:\n self.labels = set()\n else:\n try:\n self.labels.remove(label)\n except KeyError:\n pass",
"def remove_label(self, ):\n if self.AttributeNames.LABEL in self.attrs:\n del self.attrs[self.AttributeNames.LABEL]\n return self",
"async def removed_label(event, gh, *args, **kwargs):\n if event.data[\"label\"][\"name\"] == TRIVIAL_LABEL:\n await set_status(event, gh)",
"def deletemessageslabels(self, uidlist, labels):\n\n labels = labels - self.ignorelabels\n result = self._messagelabels_aux('-X-GM-LABELS', uidlist, labels)\n if result:\n for uid in uidlist:\n self.messagelist[uid]['labels'] = self.messagelist[uid]['labels'] - labels",
"def remove_label(self, key: str):\n del self.labels[key]",
"def remove(self, label):\n\n\t\t\tself[label].remove()",
"def _remove_function_from_labels_toggles(self, fun):\n for s_group in self.labels_toggles:\n for w in s_group:\n w.on_trait_change(fun, 'value', remove=True)",
"def remove_label(self, label):\n for category in self.get_categories(LABELS_SCHEME):\n if category.label == label:\n self.category.remove(category)",
"def unsetLabel(self):\n return _libsbml.GeneProduct_unsetLabel(self)",
"def test_issue_clear_labels(self):\n pass",
"def forget_labels(labels_to_forget=\"none\"):\n\t\t\tassert labels_to_forget in {\"none\",\"originally unlabelled\",\"all\"}\n\t\t\tif labels_to_forget != \"none\":\n\t\t\t\tif labels_to_forget == \"originally unlabelled\":\n\t\t\t\t\tself.train_labels___0_unlab__neg1_exclud=self.train_orig_labels.copy()\n\t\t\t\telif labels_to_forget == \"all\":\n\t\t\t\t\tself.train_labels___0_unlab__neg1_exclud=np.zeros(self.num_train)\n\t\t\t\telse:\n\t\t\t\t\tassert False\n\t\t\t\tself.bool_train_labelled=(self.train_labels___0_unlab__neg1_exclud>0)\n\t\t\t\tself.bool_train_unlabelled=(self.train_labels___0_unlab__neg1_exclud==0)\n\t\t\t\tself.bool_train_excluded=(self.train_labels___0_unlab__neg1_exclud<0)\n\t\t\t\tself.num_train_labelled=sum(self.bool_train_labelled)\n\t\t\t\tself.num_train_unlabelled=sum(self.bool_train_unlabelled)\n\t\t\t\tself.num_train_excluded=sum(self.bool_train_excluded)",
"def test_issue_remove_label(self):\n pass",
"def label_drop(self, labnames=None, drop_all=False):\n vallabs = self._vallabs\n if labnames is None:\n if drop_all:\n # Create copy of keys. Otherwise, set of keys changes.\n labnames = set(vallabs.keys()) \n else:\n msg = \"must specify label name(s) or drop_all==True\"\n raise ValueError(msg)\n else:\n if isinstance(labnames, str):\n labnames = (labnames,)\n elif (not isinstance(labnames, collections.Iterable)\n or not all(isinstance(value, str) for value in labnames)):\n raise TypeError(\"labnames should be str or iterable of str\") \n labnames = set(name for value in labnames\n for name in value.split())\n if not labnames.issubset(vallabs.keys()):\n bad_names = \", \".join(str(lbl) for lbl in \n labnames.difference(vallabs.keys()))\n raise KeyError(bad_names + \" are not defined labels\")\n for name in labnames:\n del vallabs[name]\n self._changed = True",
"def user_labels_erase(*args):\n return _ida_hexrays.user_labels_erase(*args)",
"def remove_recog_label(self, event):\n\t\tc=self.seqframe\n\t\tc.delete('recogseqlabel')\n\t\treturn",
"def remove_labels(number, labels):\n\n cmds = [github_cli, 'pr', 'edit', str(number)]\n for lab in labels:\n cmds += ['--remove-label', lab]\n\n with subprocess.Popen(cmds) as p:\n _, err = p.communicate()\n print(err)",
"def clear_all(cls):\n del cls.text_labels[:]",
"def unlabel_messages(self, org, messages, label):\n pass",
"def remove_labels(self, phrases: Union[List[Phrase], List[str]]) -> None:\n for phrase in phrases:\n phrase_string = phrase if isinstance(phrase, str) else phrase.phrase_string\n if phrase_string not in self.phrase_index:\n raise TypeError(f'unknown phrase {phrase_string}')\n else:\n for label in self.has_labels[phrase_string]:\n self.is_label_of[label].remove(phrase_string)\n if len(self.is_label_of[label]) == 0:\n del self.is_label_of[label]\n del self.has_labels[phrase_string]",
"def remove_labels(record_dictionary, labels_to_remove, inplace=True):\n\n if not inplace:\n purged_dictionary = {}\n\n labels_to_remove_set = set(labels_to_remove)\n\n for key in record_dictionary:\n\n data, labels = record_dictionary[key]\n\n remove_ids = [\n idx for (idx, val) in enumerate(labels)\n if val in labels_to_remove_set\n ]\n\n if inplace:\n record_dictionary[key] = (np.delete(data, remove_ids, 1),\n np.delete(labels, remove_ids, 0))\n else:\n purged_dictionary[key] = (np.delete(data, remove_ids, 1),\n np.delete(labels, remove_ids, 0))\n\n if not inplace:\n return purged_dictionary",
"def remove(self: TokenMatcher, label: str) -> None:\n try:\n del self._patterns[label]\n del self._callbacks[label]\n except KeyError:\n raise ValueError(\n f\"The label: {label} does not exist within the matcher rules.\"\n )",
"def remove_labels(gce, module, instance_name, labels):\n zone = module.params.get('zone')\n\n if not instance_name:\n module.fail_json(msg='Must supply instance_name', changed=False)\n\n if not labels:\n module.fail_json(msg='Must supply labels', changed=False)\n\n modified_labels = {}\n for key in labels:\n modified_labels[gce_convert_str(key)] = gce_convert_str(labels[key])\n\n try:\n node = gce.ex_get_node(instance_name, zone=zone)\n except ResourceNotFoundError:\n module.fail_json(msg='Instance %s not found in zone %s' % (instance_name, zone), changed=False)\n except GoogleBaseError, e:\n module.fail_json(msg=str(e), changed=False)\n\n node_labels = node.extra['labels']\n changed = False\n labels_changed = []\n\n for l in modified_labels:\n if l in node_labels:\n node_labels.pop(l, None)\n changed = True\n labels_changed.append(l)\n\n if not changed:\n return False, None\n\n try:\n gce.ex_set_node_labels(node, node_labels)\n return True, labels_changed\n except (GoogleBaseError, InvalidRequestError) as e:\n module.fail_json(msg=str(e), changed=False)",
"def remove_assigned_cells(self):\r\n cells = list(self.cells)\r\n for cell in ifilter(lambda cell: cell.symbol is not None, cells):\r\n cell.remove_group(self)\r\n self.cells.remove(cell)\r\n return len(cells) != len(self.cells)",
"def clear_lines(self):\r\n\r\n # iterate through the stored labels\r\n for cur_label in self.list_labels:\r\n # make sure label exists\r\n if cur_label is not None:\r\n # delete the label\r\n cur_label.deleteLater()\r\n\r\n # clear the list\r\n self.list_labels.clear()",
"def clearLabelMap(self,label=None):\r\n # productive\r\n profprint()\r\n widget = slicer.modules.NeedleFinderWidget\r\n print \"clearing label map\"\r\n self.undoRedo.saveState()\r\n labelImage = self.labelMapNode.GetImageData()\r\n shape = list(labelImage.GetDimensions()).reverse() # ??? this code has no effect, shape=None !!!\r\n labelArray = vtk.util.numpy_support.vtk_to_numpy(labelImage.GetPointData().GetScalars()).reshape(shape)\r\n if not label:\r\n labelArray[:] = 0\r\n else:\r\n labelArray[labelArray==label]=0\r\n self.editUtil.markVolumeNodeAsModified(widget.labelMapNode)",
"def mask_labels(labels):\n def do_one_row(row):\n erase = False\n for i, _ in enumerate(row):\n if erase:\n row[i] = 0\n else:\n if row[i] == 10:\n erase = True\n row[i] = 1\n return row\n\n ret = np.copy(labels)\n return np.apply_along_axis(do_one_row, axis=1, arr=ret)",
"def removeLabelFromPage(self, label, page):\n return self.pm_getSpaceManager().removeLabelFromPage(self._unbox(label), self._unbox(page))",
"def test_keep_labels(self):\n # Create some arbitrary data and labels\n data = array([[1], [2], [3], [4], [5], [6]])\n labels = array([1, 1, 2, 2, 3, 3])\n\n # Create a LabeledCData object\n lcdata = LabeledCData(data, labels)\n\n self.assertTrue(array_equal(lcdata.data, data))\n self.assertTrue(array_equal(lcdata.labels, labels))\n\n # Make sure 3 is in the labels, for contrast\n self.assertIn(3, lcdata.labels)\n\n # Only keep the 1 and 2 labels\n lcdata.keep_data_with_labels([1, 2])\n\n # Make sure 3 has been removed from the labels, for contrast\n self.assertNotIn(3, lcdata.labels)\n\n # Correct answers\n newdata = array([[1], [2], [3], [4]])\n newlabels = array([1, 1, 2, 2])\n\n # Make sure the new data is correct\n self.assertTrue(array_equal(lcdata.data, newdata))\n self.assertTrue(array_equal(lcdata.labels, newlabels))",
"def unset_label(self):\n self.set_label(self.label.prev_str, self.label.prev_color)",
"def delete_label(self, label_key):\n # type: (str) -> bool\n headers = Headers({\"content-type\": \"application/json\", \"accept\": \"application/json\"})\n response_result = self.connection.api_call(\n \"DELETE\",\n [\"v1\", \"datasets\", self.dataset_id, \"resources\", self.id, \"labels\", label_key],\n headers=headers,\n )\n\n if response_result:\n # Sync the latest data from API to prevent inconsistency\n self.refresh()\n\n return True"
] | [
"0.65408283",
"0.6485277",
"0.63599694",
"0.6339318",
"0.6332027",
"0.6300747",
"0.6225751",
"0.6205483",
"0.6164012",
"0.6159746",
"0.6075823",
"0.6052966",
"0.59618044",
"0.58318126",
"0.58288985",
"0.58265567",
"0.5751653",
"0.5742358",
"0.56979746",
"0.5692929",
"0.56562746",
"0.56517595",
"0.56506556",
"0.5623976",
"0.5561791",
"0.5510771",
"0.54874724",
"0.5442874",
"0.5434871",
"0.54117966"
] | 0.8710152 | 1 |
Return the set of datatypes in this cell. Returns | def get_datatypes(self):
datatypes = set()
for element in itertools.chain(self.polygons, self.paths):
datatypes.update(element.datatypes)
for reference in self.references:
datatypes.update(reference.ref_cell.get_datatypes())
return datatypes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_datatypes(self):\n datatypes = set()\n for element in self.elements:\n if isinstance(element, PolygonSet):\n datatypes.update(element.datatypes)\n elif isinstance(element, CellReference) or isinstance(\n element, CellArray):\n datatypes.update(element.ref_cell.get_datatypes())\n return datatypes",
"def data_types(self):\n return self['data_types']",
"def get_types(self):\n return self.column_type",
"def GetCellTypes(self):\n if not self.VTKObject.GetCellTypesArray():\n return None\n return vtkDataArrayToVTKArray(\n self.VTKObject.GetCellTypesArray(), self)",
"def gettypes(self):\n return [str(self.sd.xlate(t[0])) for t in self.sd.types]",
"def get_datatypes(self, tid):\n return self._parametersdict[\"DATATYPES\"].get(tid)",
"def get_types(self):\n return self.types",
"def get_data_types(self):\n data_types = set()\n for er in self.exercise_recordings:\n for data_type in er.data_types:\n if data_type not in data_types:\n data_types.add(data_type)\n return list(data_types)",
"def type_set(self) -> Set[str]:\n typs = {self.type}\n for s in self.segments:\n typs |= s.type_set()\n return typs",
"def dtypes(self) -> List[str]:\n\n return [column.dtype for column in self.plaincolumns]",
"def column_types(self):\n return self._hndl.column_types()",
"def get_texttypes(self):\n texttypes = set()\n for reference in self.references:\n texttypes.update(reference.ref_cell.get_textypes())\n for label in self.labels:\n texttypes.add(label.texttype)\n return texttypes",
"def dtypes(self):\n return self.to_pandas().dtypes",
"def data_types(self):",
"def get_set_types(self):\n if not self._refreshed:\n self.refresh()\n return self._setTypes",
"def types(self) -> list:\n if self._types is None:\n fdist = self.fdist # ranked order\n types_ = list(fdist.type.values)\n self._types = types_\n return self._types",
"def get_types(self) :\n\n return list(self.types)[1:]",
"def _getDTypeList(self):\n return self._dtype",
"def getDatasetTypes(self):\n\n list = []\n for attr in dir(self):\n if attr.startswith(\"map_\"):\n list.append(attr[4:])\n return list",
"def get_type_term_set(self):\n term_set = self._term_set\n if term_set is None:\n term_set = set()\n type_tuples = self.get_type_tuples()\n for p_type in type_tuples:\n term_set.update(p_type)\n self._term_set = term_set\n return term_set",
"def types(self) -> List[str]:\n return self._types",
"def getDataTypes(self, name: unicode) -> List[ghidra.program.model.data.DataType]:\n ...",
"def types():\n sql = \"\"\"SELECT DISTINCT sample_type\n FROM barcodes.sample\n ORDER BY sample_type\"\"\"\n with pm.sql.TRN:\n pm.sql.TRN.add(sql)\n return pm.sql.TRN.execute_fetchflatten()",
"def _variable_types(self):\n return self._variable_single_types + self._variable_array_types",
"def test_get_datatypes(self):\n obs = _get_datatypes(self.metadata_map.ix[:, self.headers])\n exp = ['float8', 'varchar', 'integer']\n self.assertEqual(obs, exp)",
"def data_types(self) -> 'outputs.AwsS3DataConnectorDataTypesResponse':\n return pulumi.get(self, \"data_types\")",
"def type_shapes(self):\n return self._type_shapes",
"def data_all_types(df):\n \n printmd (\"**Type of every column in the data**\")\n print(\"\")\n print(df.dtypes)",
"def data_types():\n\n return ...",
"def readAggregatedSimpleTypes(self):\n types = {}\n # SETs\n for m in re.finditer(\"TYPE (\\w*) = SET (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'SET ' + typetype\n \n # BAGs\n for m in re.finditer(\"TYPE (\\w*) = BAG (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'BAG ' + typetype\n \n # LISTs\n for m in re.finditer(\"TYPE (\\w*) = LIST (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'LIST ' + typetype\n \n # ARRAYs\n for m in re.finditer(\"TYPE (\\w*) = ARRAY (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'ARRAY ' + typetype\n \n # STRING vectors\n for m in re.finditer(\"TYPE (\\w*) = STRING\\((.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'STRING(' + typetype\n \n return types"
] | [
"0.79858327",
"0.7341515",
"0.7284648",
"0.7140661",
"0.70937794",
"0.70203876",
"0.6859303",
"0.6851648",
"0.6838585",
"0.67846173",
"0.6766891",
"0.67228407",
"0.6693372",
"0.6677914",
"0.66178626",
"0.6582833",
"0.6564898",
"0.65541935",
"0.6541892",
"0.6519175",
"0.64853585",
"0.64302635",
"0.64166725",
"0.6412573",
"0.64006233",
"0.63874495",
"0.63799185",
"0.63730025",
"0.636609",
"0.6316717"
] | 0.7746643 | 1 |
Return the set of texttypes in this cell. Returns | def get_texttypes(self):
texttypes = set()
for reference in self.references:
            texttypes.update(reference.ref_cell.get_texttypes())
for label in self.labels:
texttypes.add(label.texttype)
return texttypes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GetCellTypes(self):\n if not self.VTKObject.GetCellTypesArray():\n return None\n return vtkDataArrayToVTKArray(\n self.VTKObject.GetCellTypesArray(), self)",
"def types(self) -> List[str]:\n return self._types",
"def get_text_data_list(self):\n return [self.name, str(self.type)]",
"def doc_types(self):\n return self._extract_set('doc_type')",
"def get_types(self):\n return self.types",
"def type_set(self) -> Set[str]:\n typs = {self.type}\n for s in self.segments:\n typs |= s.type_set()\n return typs",
"def gettypes(self):\n return [str(self.sd.xlate(t[0])) for t in self.sd.types]",
"def etypes(self): # -> list[str]:\n ...",
"def data_types(self):\n return self['data_types']",
"def get_types(self):\n return self.column_type",
"def get_datatypes(self):\n datatypes = set()\n for element in self.elements:\n if isinstance(element, PolygonSet):\n datatypes.update(element.datatypes)\n elif isinstance(element, CellReference) or isinstance(\n element, CellArray):\n datatypes.update(element.ref_cell.get_datatypes())\n return datatypes",
"def get_types(self) :\n\n return list(self.types)[1:]",
"def get_types(self) -> List[str]:\n return sorted(list(self._radii.keys()))",
"def etypes(self) -> Sequence[str]:\n\n return [can_etype[1] for can_etype in self.canonical_etypes]",
"def get_type_term_set(self):\n term_set = self._term_set\n if term_set is None:\n term_set = set()\n type_tuples = self.get_type_tuples()\n for p_type in type_tuples:\n term_set.update(p_type)\n self._term_set = term_set\n return term_set",
"def used_text_keys(self):\n text_func = self._used_text_keys\n args = ()\n kwargs = {'tks': {'tks': []}}\n DataSet._apply_to_texts(text_func, self._meta, args, kwargs)\n return kwargs['tks']['tks']",
"def ntypes(self): # -> list[str]:\n ...",
"def treetype(self):\n\t\treturn self._treetype",
"def get_set_types(self):\n if not self._refreshed:\n self.refresh()\n return self._setTypes",
"def etypes(self): # -> list[None]:\n ...",
"def get_type_list(cls):\n\n from pygments.lexers import get_all_lexers\n return [(name, aliases[0]) for name, aliases, filetypes, mimetypes in get_all_lexers()]",
"def column_types(self):\n return self._hndl.column_types()",
"def text_bases(cls):\n return cls._TEXT_BASES",
"def get_datatypes(self):\n datatypes = set()\n for element in itertools.chain(self.polygons, self.paths):\n datatypes.update(element.datatypes)\n for reference in self.references:\n datatypes.update(reference.ref_cell.get_datatypes())\n return datatypes",
"def getAtomTypes(self):\n return self._raw_data['AMBER_ATOM_TYPE']",
"def getTypesList():\n return Gw2Spidy._request('types')['results']",
"def get_data_types(self):\n data_types = set()\n for er in self.exercise_recordings:\n for data_type in er.data_types:\n if data_type not in data_types:\n data_types.add(data_type)\n return list(data_types)",
"def getMimeTypes(self): #$NON-NLS-1$\r",
"def _get_types(self):\n types = {'word': [constants.PAD, constants.UNK],\n 'char': [constants.PAD, constants.UNK],\n 'tag': [constants.PAD],\n }\n\n for _, filepath in self.directory.items():\n if filepath is not None:\n conll_file = os.path.basename(filepath) # get name of conll file\n types['word'].extend(set(self.conll_parser.words(conll_file)))\n types['char'].extend(set(chain(*[list(w) for w in self.conll_parser.words(conll_file)])))\n types['tag'].extend(set([tag[-1] for tag in self.conll_parser.tagged_words(conll_file)]))\n\n # ensure that we have only unique types\n types['word'] = list(set(types['word']))\n types['char'] = list(set(types['char']))\n types['tag'] = list(set(types['tag']))\n\n return types",
"def getProposalTypesVocab(self):\n list = DisplayList()\n # Acquire the types\n types = self.aq_inner.aq_parent.getProposalTypes()\n for type in types:\n list.add(type, type)\n return list"
] | [
"0.6864288",
"0.67568284",
"0.67244345",
"0.6621044",
"0.66001546",
"0.6592116",
"0.65660393",
"0.6514978",
"0.6501299",
"0.6420463",
"0.63704634",
"0.62760276",
"0.62488717",
"0.62406945",
"0.62165576",
"0.6175571",
"0.6156153",
"0.61535233",
"0.6119918",
"0.6073806",
"0.6062748",
"0.605189",
"0.6027209",
"0.6020691",
"0.5982821",
"0.59519356",
"0.59292686",
"0.5919834",
"0.591973",
"0.5833662"
] | 0.852494 | 0 |
Return the set of classes for the SVG representation of this cell. Returns | def get_svg_classes(self):
ld = set()
lt = set()
for element in itertools.chain(self.polygons, self.paths):
ld.update(zip(element.layers, element.datatypes))
for label in self.labels:
lt.add((label.layer, label.texttype))
for reference in self.references:
ref_cell = reference.ref_cell
if isinstance(ref_cell, Cell):
ref = ref_cell.get_svg_classes()
ld.update(ref[0])
lt.update(ref[1])
return ld, lt | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def classes(self):\n return self.browser.classes(self)",
"def classes(self):\n return self._.d",
"def getClasses(self):\n self._process()\n return self._sets",
"def classes(self):\r\n return self._classes",
"def get_classes(self):\n return self._classes",
"def classes(self):\n raise NotImplementedError(\"Please implement this yourself.\")",
"def get_classes(self):\n query = read_query('structure exploration/classes')\n response = self._submit_query(query)\n\n return [elem['c']['value'].split('/')[-1] for elem in response]",
"def get_class_defs(self):\n return list(self._get_class_defs().values())",
"def get_classes(self):\n out_classes = ()\n classes = super(NamedEntityRecognizerModel, self).get_classes()\n\n for c in classes:\n out_classes += (c[:2],)\n\n return ((self.outside_class, self.outside_class_display),) + out_classes",
"def return_classes(self):\n\n\t\t \n\t\t \n\t\treturn self.classes",
"def get_classes(self):\n return",
"def get_path_class_shapes(self):\n df = self.df_roi\n self.tumor_shape = MultiPolygon([self._get_shape(i) for i in df[df.class_ == \"Tumor\"]['geometry']])\n self.stroma_shape = MultiPolygon([self._get_shape(i) for i in df[df.class_ == \"Stroma\"]['geometry']])\n self.dcis_shape = MultiPolygon([self._get_shape(i) for i in df[df.class_ == \"Other\"]['geometry']]) \n\n # path_class_qupath_names = [\"Tumor\", \"Stroma\", \"Other\"]\n # for path_class in path_class_qupath_names:\n # mpolygon = MultiPolygon([self._get_shape(i) for i in df[df.class_ == path_class]['geometry']])\n\n # # replace name\n # if path_class == \"Other\":\n # path_class = \"dcis\"\n\n # attr_name = path_class.lower() + \"_shape\"\n # setattr(self, path_class, mpolygon)",
"def classes(self):\n return str(self._classes)",
"def classes(self) -> Iterable[GDScriptClass]:\n for item in self._classes_by_type_id.values():\n yield item",
"def constructClassTable(G, classes):\n res = dict((c, set()) for c in classes)\n for v, data in G.nodes(data=True):\n c = data['class']\n if c in classes:\n res[c].add(v)\n return res",
"def class_types(self) -> Set[str]:\n # NOTE: This version is simple, but some dependent classes\n # (notably RawSegment) override this with something more\n # custom.\n return self._class_types",
"def classes(self):\n if self.classname:\n return [self.classname]\n return []",
"def get_classes(self):\n return list(range(self.num_clss))",
"def get_classes_with_colors(self):\n i = 0\n out_classes = ()\n classes = super(NamedEntityRecognizerModel, self).get_classes()\n\n for c in classes:\n if len(c) != 3:\n c += (self.default_colors[i],)\n i += 1\n out_classes += (c,)\n\n return (\n (self.outside_class, self.outside_class_display, self.outside_color),\n ) + out_classes",
"def classes(self) -> List[Any]:\n return list(self.label_counts.keys())",
"def classes(self):\n if not hasattr(self, '_unique_classes'):\n # build when we don't have\n self._unique_classes = self.data['label'].unique()\n self._unique_classes.sort()\n\n ret = self._unique_classes\n return ret",
"def get_category_classes(self):\n\n return self.catbrowser.get_category_classes()",
"def CSSClasses(self):",
"def classes_(self):\n try:\n return self.encoder.classes_\n except:\n return self.classes",
"def classes_(self):\n try:\n return self.encoder.classes_\n except:\n return self.classes",
"def classes(attrs):\n return attrs.get('class', '').split()",
"def classes(self):\n return list(self._classes_generator())",
"def getCellTypes(self):\n sc_data = Utils.convertAnnDataToDf(self.sc_data)\n try:\n self.sc_annot, self.de_dict = Annotate.annotateTree(sc_data, self.refDataset, self.refAnnot)\n except:\n print(\"Columns of annotations should be cell type levels. Additionally, higher levels should contain lower levels bound with ':'. Example structure; level1 (including B-cells), level2 (including B-cells:Naive)\")",
"def class_labels(self):\n return self._class_labels",
"def getFeatureClassNames(self):\n return self.featureClasses.keys()"
] | [
"0.6673467",
"0.64993554",
"0.6337325",
"0.632925",
"0.6292911",
"0.6184021",
"0.6183241",
"0.6177004",
"0.6168098",
"0.6159375",
"0.6154537",
"0.6150669",
"0.6049886",
"0.60440177",
"0.6030368",
"0.59519166",
"0.5938959",
"0.5927353",
"0.5877505",
"0.58711433",
"0.586793",
"0.5827431",
"0.582341",
"0.57922775",
"0.57922775",
"0.5791617",
"0.5702565",
"0.56210244",
"0.56153536",
"0.5605014"
] | 0.7874248 | 0 |
Write an SVG fragment representation of this object. | def to_svg(self, outfile, scaling, precision, attributes):
outfile.write('<g id="')
outfile.write(self.name.replace("#", "_"))
outfile.write('" ')
outfile.write(attributes)
outfile.write(">\n")
for polygon in self.polygons:
polygon.to_svg(outfile, scaling, precision)
for path in self.paths:
path.to_svg(outfile, scaling, precision)
for label in self.labels:
label.to_svg(outfile, scaling, precision)
for reference in self.references:
reference.to_svg(outfile, scaling, precision)
outfile.write("</g>\n") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_svg(\n self,\n outfile,\n scaling=10,\n style=None,\n fontstyle=None,\n background=\"#222\",\n pad=\"5%\",\n precision=None,\n ):\n bb = self.get_bounding_box()\n if bb is None:\n return\n close = True\n if hasattr(outfile, \"__fspath__\"):\n outfile = open(outfile.__fspath__(), \"w\")\n elif isinstance(outfile, (basestring, Path)):\n outfile = open(outfile, \"w\")\n else:\n close = False\n if style is None:\n style = {}\n if fontstyle is None:\n fontstyle = {}\n bb *= scaling\n x = bb[0, 0]\n y = -bb[1, 1]\n w = bb[1, 0] - bb[0, 0]\n h = bb[1, 1] - bb[0, 1]\n if background is not None:\n if isinstance(pad, basestring):\n if pad[-1] == \"%\":\n pad = max(w, h) * float(pad[:-1]) / 100\n else:\n pad = float(pad)\n x -= pad\n y -= pad\n w += 2 * pad\n h += 2 * pad\n outfile.write(\n \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\"\n width=\"{}\" height=\"{}\" viewBox=\"{} {} {} {}\">\n<defs>\n<style type=\"text/css\">\n\"\"\".format(\n numpy.format_float_positional(w, trim=\"0\", precision=precision),\n numpy.format_float_positional(h, trim=\"0\", precision=precision),\n numpy.format_float_positional(x, trim=\"0\", precision=precision),\n numpy.format_float_positional(y, trim=\"0\", precision=precision),\n numpy.format_float_positional(w, trim=\"0\", precision=precision),\n numpy.format_float_positional(h, trim=\"0\", precision=precision),\n )\n )\n ldkeys, ltkeys = self.get_svg_classes()\n for k in ldkeys:\n l, d = k\n if k in style:\n style_dict = style[k]\n else:\n c = \"rgb({}, {}, {})\".format(\n *[\n int(255 * c + 0.5)\n for c in colorsys.hsv_to_rgb(\n (l % 3) / 3.0 + (l % 6 // 3) / 6.0 + (l // 6) / 11.0,\n 1 - ((l + d) % 8) / 12.0,\n 1 - (d % 3) / 4.0,\n )\n ]\n )\n style_dict = {\"stroke\": c, \"fill\": c, \"fill-opacity\": \"0.5\"}\n outfile.write(\".l{}d{} {{\".format(l, d))\n outfile.write(\" \".join(\"{}: {};\".format(*x) for x in style_dict.items()))\n outfile.write(\"}\\n\")\n for k in ltkeys:\n l, t = k\n if k in fontstyle:\n style_dict = fontstyle[k]\n else:\n c = \"rgb({}, {}, {})\".format(\n *[\n int(255 * c + 0.5)\n for c in colorsys.hsv_to_rgb(\n (l % 3) / 3.0 + (l % 6 // 3) / 6.0 + (l // 6) / 11.0,\n 1 - ((l + t) % 8) / 12.0,\n 1 - (t % 3) / 4.0,\n )\n ]\n )\n style_dict = {\"stroke\": \"none\", \"fill\": c}\n outfile.write(\".l{}t{} {{\".format(l, t))\n outfile.write(\" \".join(\"{}: {};\".format(*x) for x in style_dict.items()))\n outfile.write(\"}\\n\")\n outfile.write(\"</style>\\n\")\n for cell in self.get_dependencies(True):\n cell.to_svg(outfile, scaling, precision, \"\")\n outfile.write(\"</defs>\\n\")\n if background is not None:\n outfile.write(\n '<rect x=\"{}\" y=\"{}\" width=\"{}\" height=\"{}\" fill=\"{}\" stroke=\"none\"/>\\n'.format(\n numpy.format_float_positional(x, trim=\"0\", precision=precision),\n numpy.format_float_positional(y, trim=\"0\", precision=precision),\n numpy.format_float_positional(w, trim=\"0\", precision=precision),\n numpy.format_float_positional(h, trim=\"0\", precision=precision),\n background,\n )\n )\n self.to_svg(outfile, scaling, precision, 'transform=\"scale(1 -1)\"')\n outfile.write(\"</svg>\")\n if close:\n outfile.close()",
"def to_svg(self, outfile, scaling, precision):\n if isinstance(self.ref_cell, Cell):\n name = self.ref_cell.name\n else:\n name = self.ref_cell\n transform = \"translate({} {})\".format(\n numpy.format_float_positional(\n scaling * self.origin[0], trim=\"0\", precision=precision\n ),\n numpy.format_float_positional(\n scaling * self.origin[1], trim=\"0\", precision=precision\n ),\n )\n if self.rotation is not None:\n transform += \" rotate({})\".format(\n numpy.format_float_positional(\n self.rotation, trim=\"0\", precision=precision\n )\n )\n if self.x_reflection:\n transform += \" scale(1 -1)\"\n if self.magnification is not None:\n transform += \" scale({})\".format(\n numpy.format_float_positional(\n self.magnification, trim=\"0\", precision=precision\n )\n )\n outfile.write('<use transform=\"')\n outfile.write(transform)\n outfile.write('\" xlink:href=\"#')\n outfile.write(name.replace(\"#\", \"_\"))\n outfile.write('\"/>\\n')",
"def _repr_svg_(self):\n pass",
"def save_svg(string, file_name):\n file_handle = file(file_name, \"w\")\n file_handle.write(string)\n file_handle.close()",
"def write_footer(out):\n\n out.write(\"\"\" </g>\n</svg>\n\"\"\")",
"def to_svg(self, outfile, scaling, precision):\n if isinstance(self.ref_cell, Cell):\n name = self.ref_cell.name\n else:\n name = self.ref_cell\n transform = \"translate({} {})\".format(\n numpy.format_float_positional(\n scaling * self.origin[0], trim=\"0\", precision=precision\n ),\n numpy.format_float_positional(\n scaling * self.origin[1], trim=\"0\", precision=precision\n ),\n )\n if self.rotation is not None:\n transform += \" rotate({})\".format(\n numpy.format_float_positional(\n self.rotation, trim=\"0\", precision=precision\n )\n )\n if self.x_reflection:\n transform += \" scale(1 -1)\"\n mag = (\n \"\"\n if self.magnification is None\n else \" scale({})\".format(\n numpy.format_float_positional(\n self.magnification, trim=\"0\", precision=precision\n )\n )\n )\n for ii in range(self.columns):\n dx = scaling * self.spacing[0] * ii\n for jj in range(self.rows):\n dy = scaling * self.spacing[1] * jj\n outfile.write('<use transform=\"')\n outfile.write(transform)\n outfile.write(\n \" translate({} {})\".format(\n numpy.format_float_positional(\n dx, trim=\"0\", precision=precision\n ),\n numpy.format_float_positional(\n dy, trim=\"0\", precision=precision\n ),\n )\n )\n outfile.write(mag)\n outfile.write('\" xlink:href=\"#')\n outfile.write(name.replace(\"#\", \"_\"))\n outfile.write('\"/>\\n')",
"def to_svg(self, separate=False, include_junctions=False):\n serialize_as_svg(self.output, separate, include_junctions)",
"def write_header(out):\n\n out.write(\"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.0//EN\" \"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd\" [\n<!ENTITY ns_svg \"http://www.w3.org/2000/svg\">\n]>\n<svg xmlns=\"&ns_svg;\" width=\"%d\" height=\"%d\" overflow=\"visible\">\n <g id=\"Layer_1\">\n\"\"\" % (WIDTH, HEIGHT))",
"def export_as_svg(self):\n from ExportCommand import ExportCommand\n\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n file_path, _ = QFileDialog.getSaveFileName(self, \"Export as svg\", os.getcwd(), \"svg file(*.svg)\",\n options=options)\n if file_path:\n cmd = ExportCommand(self.graphicsView.scene(), 'svg')\n cmd.display_message.connect(self.onAddMessage)\n if cmd.execute(file_path):\n QMessageBox.information(self, self.tr('Information'), self.tr('Successfully export to svg file'))\n else:\n QMessageBox.information(self, self.tr('Error'), self.tr('Fail to export to svg file'))",
"def render_svg(svg):\n b64 = base64.b64encode(svg.encode('utf-8')).decode(\"utf-8\")\n html = r'<img src=\"data:image/svg+xml;base64,%s\"/>' % b64\n st.write(html, unsafe_allow_html=True)",
"def getSvgHtml(svgFile, width, height):\n html = '<object type=\"image/svg+xml\" data=\"%s\" width=\"%s\" height=\"%s\"/>'\n return html % (svgFile, width, height)",
"def output_svg(self, string_to_output):\n self._output_object.add_report(string_to_output)",
"def svg(self) -> str:\n data = {\n 'x': self.x,\n 'y': self.y,\n 'width': self.width,\n 'height': self.height,\n 'text_x': self.x + 30,\n 'text_y': self.y + 20,\n 'name': self.person.name\n }\n return PERSON_BOX_TEMPLATE.format(**data)",
"def save_canvas_svg(self, filename):\n canvasvg.saveall(filename, self.canvas)",
"def create_svg(svg_tag, img_width, img_height, out_path):\n script_dir = utils.get_script_dir()\n svg_template_path = utils.join_paths_str(script_dir, \"./templates/template.svg\")\n with open(svg_template_path, \"rt\") as fin:\n with open(out_path, \"wt\") as fout:\n for line in fin:\n fout.write(\n line.replace(\"INSERT_WIDTH\", str(img_width))\n .replace(\"INSERT_HEIGHT\", str(img_height))\n .replace(\"INSERT_OBJECT\", svg_tag)\n )",
"def _repr_svg_(self):\n if not IPythonConsole.ipython_useSVG:\n return None\n mol = self.owner.mol\n keku = IPythonConsole.kekulizeStructures\n size = IPythonConsole.molSize\n opts = IPythonConsole.drawOptions\n return Draw._moltoSVG(\n mol, size, self.aix, \"\", keku, drawOptions=opts, highlightBonds=self.bix\n )",
"def _repr_svg_(self):\n if not IPythonConsole.ipython_useSVG:\n return None\n mol = self.owner.mol\n keku = IPythonConsole.kekulizeStructures\n size = IPythonConsole.molSize\n opts = IPythonConsole.drawOptions\n return Draw._moltoSVG(\n mol, size, self.aix, \"\", keku, drawOptions=opts, highlightBonds=self.bix\n )",
"def dump_graph(self) -> str:\n graph_dot_file = f'{self._name}.dot'\n graph_diagram_file = f'{self._name}.svg'\n write_dot(self._graph, graph_dot_file)\n subprocess.check_output(\n shlex.split(f'dot -Tsvg {graph_dot_file} -o {graph_diagram_file}')\n )\n return graph_diagram_file",
"def svg(self, scale_factor=1., stroke_color=None, opacity=None):\n if self.is_empty:\n return '<g />'\n if stroke_color is None:\n stroke_color = \"#66cc99\" if self.is_valid else \"#ff3333\"\n return '<g>' + \\\n ''.join(p.svg(scale_factor, stroke_color, opacity) for p in self.geoms) + \\\n '</g>'",
"def write_svg(self, filename):\n\n aspect_ratio = self.nx / self.ny\n # Pad the maze all around by this amount.\n padding = 10\n # Height and width of the maze image (excluding padding), in pixels\n height = 500\n width = int(height * aspect_ratio)\n # Scaling factors mapping maze coordinates to image coordinates\n scy, scx = height / self.ny, width / self.nx\n\n def write_wall(f, x1, y1, x2, y2):\n \"\"\"Write a single wall to the SVG image file handle f.\"\"\"\n f.write('\\t<line x1=\"{}\" y1=\"{}\" x2=\"{}\" y2=\"{}\"/>\\n'\n .format(x1, y1, x2, y2),)\n\n def write_circle(file, x_coordinate, y_coordinate, radius, color):\n \"\"\"Write an image to the SVG\"\"\"\n file.write('\\t<circle cx=\"{}\" cy=\"{}\" r=\"{}\" fill=\"{}\"/>\\n'\n .format(x_coordinate, y_coordinate, radius, color))\n\n # Write the SVG image file for maze\n with open(filename, 'w') as f:\n # SVG preamble and styles.\n f.write('<?xml version=\"1.0\" encoding=\"utf-8\"?>')\n f.write('<svg\\n\\txmlns=\"http://www.w3.org/2000/svg\"\\n'\n '\\txmlns:xlink=\"http://www.w3.org/1999/xlink\"\\n')\n f.write('\\twidth=\"{:d}\" height=\"{:d}\" viewBox=\"{} {} {} {}\">'\n .format(width+2*padding, height+2*padding,\n -padding, -padding, width+2*padding, height+2*padding))\n f.write('<defs>\\n<style type=\"text/css\"><![CDATA[line {\\n')\n f.write('\\tstroke: #000000;\\n\\tstroke-linecap: square;\\n\\tstroke-width: 5;\\n}')\n f.write(']]></style>\\n</defs>\\n')\n # Draw the \"South\" and \"East\" walls of each cell, if present (these\n # are the \"North\" and \"West\" walls of a neighbouring cell in\n # general, of course).\n for x in range(self.nx):\n for y in range(self.ny):\n # print(str(x) + \" \" + str(y))\n if self.cell_at(x, y).walls['S']:\n x1, y1, x2, y2 = x*scx, (y+1)*scy, (x+1)*scx, (y+1)*scy\n write_wall(f, x1, y1, x2, y2)\n if self.cell_at(x, y).walls['E']:\n x1, y1, x2, y2 = (x+1)*scx, y*scy, (x+1)*scx, (y+1)*scy\n write_wall(f, x1, y1, x2, y2)\n\n # Draw any circle in the maze\n if self.cell_at(x, y).occupied:\n adjustment = (3*padding) / 2\n _x = x*scx + adjustment\n _y = y * scy + adjustment\n\n if self.cell_at(x, y).is_current_position:\n write_circle(file=f,\n x_coordinate=_x,\n y_coordinate=_y,\n radius=padding,\n color=\"blue\")\n elif self.cell_at(x, y).is_objective:\n write_circle(file=f,\n x_coordinate=_x,\n y_coordinate=_y,\n radius=padding,\n color=\"green\")\n else:\n write_circle(file=f,\n x_coordinate=_x,\n y_coordinate=_y,\n radius=padding,\n color=\"red\")\n\n # Draw the North and West maze border, which won't have been drawn\n # by the procedure above.\n f.write('\\t<line x1=\"0\" y1=\"0\" x2=\"{}\" y2=\"0\"/>\\n'.format(width))\n f.write('\\t<line x1=\"0\" y1=\"0\" x2=\"0\" y2=\"{}\"/>\\n'.format(height))\n f.write('</svg>')",
"def _write(self, stream):\n\n self._img.append(self.make_path())\n self._img.append(self.make_border())\n self._img.append(self.make_text())\n\n ET.ElementTree(self._img).write(stream, encoding=\"UTF-8\", xml_declaration=True)",
"def save(filename, canvas):\n data = write_svg.to_string(canvas).encode('utf-8')\n with gzip.open(filename, 'wb') as f:\n f.write(data)",
"def __merger_svg(self):\n pass",
"def get_svgout(self):\n return tempfile.mktemp(dir=self.tmpdir, suffix='.svg')",
"def write_gml(self, f):\n G = self.graph.copy()\n\n # networkx doesn't like writing non-string attributes to GML\n for u, v in G.edges:\n for key in list(G[u][v].keys()):\n G[u][v][key] = str(G[u][v][key])\n nx.readwrite.gml.write_gml(G, f)",
"def draw_SVG_square(self, (w, h), (x, y), (rx, ry), parent):\n style = self.get_style()\n attr = {\n 'style': style,\n 'height': str(h),\n 'width': str(w),\n 'x': str(x),\n 'y': str(y),\n 'rx': str(rx),\n 'ry': str(ry)\n }\n return inkex.etree.SubElement(parent, inkex.addNS('rect', 'svg'), attr)",
"def _repr_svg_(self):\n try:\n return self.mol._repr_svg_()\n except AttributeError:\n return None",
"def to_xml(self):\n start_str = GeometryTopologyData.__to_xml_vector__(self.start, self.format)\n size_str = GeometryTopologyData.__to_xml_vector__(self.size, self.format)\n structure = super(BoundingBox, self).to_xml()\n\n return '<BoundingBox>%s<Start>%s</Start><Size>%s</Size></BoundingBox>' % (structure, start_str, size_str)",
"def post_process_svg(self):\n post_processor = PostProcessor(svg_path=self.rendered_file_path)\n\n post_processor.post_process(graph_representation=self.graph_representation)\n\n post_processor.write()\n\n self.display.display(\"The graph has been exported to {}\".format(self.rendered_file_path))\n\n return self.rendered_file_path",
"def wrap_in_html(self,svgofmodel):\n html= '''<html>\\n%s\\n%s\\n%s\\n</g></g></g></svg></body></html>\\n'''\n svgbody= '''<body onload=\"javascript:setTimeout("location.reload(true);",%d);\">\\n''' % self.vrefreshms\n svgbody += \"<h4>GeoGad</h4>\"\n svghead= '<svg xmlns=\"http://www.w3.org/2000/svg\" version=\"1.2\" baseProfile=\"tiny\" width=\"%dpx\" height=\"%dpx\">\\n'\n svghead= svghead % (self.vboxX,self.vboxY)\n svghead+= '<rect x=\"1\" y=\"1\" width=\"%d\" height=\"%d\" fill=\"none\" stroke=\"blue\" stroke-width=\"4\"/>\\n'% (self.vboxX,self.vboxY)\n svghead+= '<g fill=\"none\" stroke=\"black\" stroke-width=\"%0.2f\">\\n' % self.vlinewidth\n svghead+= '<g transform=\"scale(%0.2f,%0.2f)\">\\n' % (self.vscaleX,self.vscaleY)\n svghead+= '<g transform=\"translate(%0.2f,%0.2f)\">\\n' % (self.vtranX,self.vtranY)\n return html % (svgbody,svghead,svgofmodel)"
] | [
"0.64034855",
"0.62927806",
"0.6089161",
"0.6064701",
"0.60599434",
"0.60150325",
"0.58418894",
"0.58044386",
"0.5791056",
"0.570158",
"0.5671584",
"0.5624105",
"0.5615185",
"0.5587682",
"0.5575867",
"0.55718195",
"0.55718195",
"0.5566151",
"0.55476576",
"0.55405116",
"0.55103576",
"0.5477267",
"0.5464671",
"0.54343",
"0.543222",
"0.53781575",
"0.5368952",
"0.5362211",
"0.534768",
"0.5346295"
] | 0.678781 | 0 |
Transform a set of polygons. This reference transformation is used to transform the given polygons in place. | def _transform_polygons(self, polygons):
if self.rotation is not None:
ct = numpy.cos(self.rotation * numpy.pi / 180.0)
st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone
if self.x_reflection:
xrefl = numpy.array((1, -1))
if self.magnification is not None:
mag = numpy.array((self.magnification, self.magnification), dtype=float)
if self.origin is not None:
orgn = numpy.array(self.origin)
if isinstance(polygons, dict):
for kk in polygons.keys():
for ii in range(len(polygons[kk])):
if self.x_reflection:
polygons[kk][ii] = polygons[kk][ii] * xrefl
if self.magnification is not None:
polygons[kk][ii] = polygons[kk][ii] * mag
if self.rotation is not None:
polygons[kk][ii] = (
polygons[kk][ii] * ct + polygons[kk][ii][:, ::-1] * st
)
if self.origin is not None:
polygons[kk][ii] = polygons[kk][ii] + orgn
else:
for ii in range(len(polygons)):
if self.x_reflection:
polygons[ii] = polygons[ii] * xrefl
if self.magnification is not None:
polygons[ii] = polygons[ii] * mag
if self.rotation is not None:
polygons[ii] = polygons[ii] * ct + polygons[ii][:, ::-1] * st
if self.origin is not None:
polygons[ii] = polygons[ii] + orgn
return polygons | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _transform_polygons(self, polygons):\n if self.rotation is not None:\n ct = numpy.cos(self.rotation * numpy.pi / 180.0)\n st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone\n if self.magnification is not None:\n mag = numpy.array((self.magnification, self.magnification), dtype=float)\n if self.origin is not None:\n orgn = numpy.array(self.origin)\n if self.x_reflection:\n xrefl = numpy.array((1, -1))\n if isinstance(polygons, dict):\n out_polygons = {}\n for kk in polygons.keys():\n out_polygons[kk] = []\n for ii in range(self.columns):\n for jj in range(self.rows):\n spc = numpy.array([self.spacing[0] * ii, self.spacing[1] * jj])\n for points in polygons[kk]:\n if self.magnification:\n out_polygons[kk].append(points * mag + spc)\n else:\n out_polygons[kk].append(points + spc)\n if self.x_reflection:\n out_polygons[kk][-1] = out_polygons[kk][-1] * xrefl\n if self.rotation is not None:\n out_polygons[kk][-1] = (\n out_polygons[kk][-1] * ct\n + out_polygons[kk][-1][:, ::-1] * st\n )\n if self.origin is not None:\n out_polygons[kk][-1] = out_polygons[kk][-1] + orgn\n else:\n out_polygons = []\n for ii in range(self.columns):\n for jj in range(self.rows):\n spc = numpy.array([self.spacing[0] * ii, self.spacing[1] * jj])\n for points in polygons:\n if self.magnification is not None:\n out_polygons.append(points * mag + spc)\n else:\n out_polygons.append(points + spc)\n if self.x_reflection:\n out_polygons[-1] = out_polygons[-1] * xrefl\n if self.rotation is not None:\n out_polygons[-1] = (\n out_polygons[-1] * ct + out_polygons[-1][:, ::-1] * st\n )\n if self.origin is not None:\n out_polygons[-1] = out_polygons[-1] + orgn\n return out_polygons",
"def recombine(uniques, intersections):\n polygons = []\n for i,u in enumerate(uniques):\n try:\n segs = [seg for seg in u]\n except TypeError: # single seg\n if u is None:\n segs = []\n else:\n segs = [u,]\n\n \n segs.extend([p for p in intersections[i] if p is not None])\n segs.extend([p[i] for p in intersections if p[i] is not None]) # transpose, get the lower triangle\n merged = shapely.ops.linemerge(segs)\n print(\"Merging poly %i with %s segments\"%(i,len(segs)))\n if type(merged) is not shapely.geometry.LineString:\n for seg in segs:\n plt.plot(seg.xy[0], seg.xy[1])\n plt.show()\n \n assert type(merged) is shapely.geometry.LineString\n polygons.append(shapely.geometry.Polygon(merged))\n return polygons",
"def _write_polygons(\n self,\n shapes: Iterable[Polygon],\n emissions: Iterable[float],\n info: EmissionInfo,\n source_group: int,\n ):\n\n # Rasterize the polygon on a grid\n shapes_serie = gpd.GeoSeries(shapes)\n # get polygon bounds\n minx, miny, maxx, maxy = shapes_serie.total_bounds\n # Create a grid for the rasterization\n x = np.arange(minx, maxx, self.polygon_raster_size)\n y = np.arange(miny, maxy, self.polygon_raster_size)\n\n # Get the emission per cell\n average_cells_proportion = (self.polygon_raster_size**2) / shapes_serie.area\n cell_emissions = np.array(emissions) * average_cells_proportion\n\n # WARNING: this might be not exactly mass convserving\n rasterized_emissions = rasterize(\n shapes=zip(shapes, cell_emissions),\n out_shape=(len(x), len(y)),\n transform=from_bounds(minx, miny, maxx, maxy, len(x), len(y)),\n all_touched=False,\n merge_alg=MergeAlg.add,\n )[\n ::-1, :\n ] # flip the y axis\n\n # Get the coordinates of the rasterized polygon\n indices = np.array(np.where(rasterized_emissions)).T\n\n # Write the polygon\n with open(self.file_cadastre, \"a\") as f:\n for i_x, i_y in indices:\n f.write(\n f\"{x[i_x]},{y[i_y]},{info.height},\"\n f\"{self.polygon_raster_size},{self.polygon_raster_size},{info.vertical_extension},\"\n f\"{rasterized_emissions[i_x, i_y]},0,0,0,{source_group},\\n\"\n )",
"def savedxf_polylist(list_of_polygons, filename=None, \n debug=False, save_as='POLYGON', union = False):\n try:\n os.remove('buffer.geojson')\n except:\n pass\n\n GNEW = []\n\n for p in list_of_polygons:\n \n if p.is_valid:\n GNEW += [p]\n if not p.is_valid:\n pnew = p.buffer(0)\n if pnew.is_valid:\n GNEW += [pnew]\n if debug: print('new polygon made from self intersecting polygon, is valid: ',pnew.is_valid)\n else:\n if debug: print('self intersecting polygon thrown out.')\n else: pass\n\n if not GNEW:\n GNEW = [empty()]\n \n if union:\n buffer_obj = unary_union(GNEW)\n else:\n buffer_obj = MultiPolygon(GNEW)\n\n if debug: print('started writing file ...')\n f = open(\"buffer.geojson\", \"wb\")\n f.write(json.dumps(mapping(buffer_obj)))\n f.close()\n if debug: print('finished.')\n\n if debug: print('started conversion of geojson to dxf ...')\n if filename == None:\n filename = 'buffer'\n if debug: print('save as MULTILINESTRING or POLYGON...')\n # --config(\"DXF_WRITE_HATCH\", \"NO\")\n os.system('ogr2ogr -f DXF '+filename+'.dxf buffer.geojson')\n if debug: \n print('finished.')\n print('saved '+filename+'.dxf')",
"def polygons(self):\n if self.type == 'Polygon':\n polygons = [self._geojson['geometry']['coordinates']]\n elif self.type == 'MultiPolygon':\n polygons = self._geojson['geometry']['coordinates']\n return [ [ [_lat_lons_from_geojson(s) for\n s in ring ] for\n ring in polygon] for\n polygon in polygons]",
"def convert_to_polygon_gate(self, transforms, n_vertices=128):\n # FlowJo stores ellipsoid vertex values differently from any other gate.\n # They are stored in the binned \"display space\", so range from 0.0 - 256.0.\n # The binned space is linear over the transform range.\n #\n # To convert to a polygon:\n # 1. Determine center & rotation angle from foci\n # 2. Translate foci & edge vertices such that center is at origin\n # 3. Rotate foci & edge vertices such that major/minor axes are || to x/y axes\n # 4. Determine major axis orientation (x vs y-axis)\n # 5. Use foci & major axis to determine minor axis (2nd FJ point is unreliable)\n # 6. Generate new x, y points from ellipse definition for set of angles\n # 7. Rotate & translate coordinates back to original orientation\n # 8. Scale any dimensions using biex transform\n # 9. Create PolygonGate from the new set of coordinates\n # Find center of ellipse\n foci = copy.deepcopy(self.foci) / 256.0\n center = (foci[0] + foci[1]) / 2.0\n\n # Determine rotation of ellipse\n slope = (foci[1][1] - foci[0][1]) / (foci[1][0] - foci[0][0])\n theta_rad = np.arctan(slope)\n cos, sin = np.cos(theta_rad), np.sin(theta_rad)\n r = np.array(((cos, -sin), (sin, cos)))\n\n # Translate foci & edge vertices to the origin\n foci_origin = foci - center\n edge_vertices_origin = (copy.deepcopy(self.edge_vertices) / 256.0) - center\n\n # According to FlowJo devs, edge vertices are ordered as:\n # 1st & 2nd points are major axis\n # 3rd & 4th points are minor axis\n # Rotate edge vertices\n # Only need are one major & one minor point since the other is symmetric\n foci_rotated = _rotate_point_around_point(foci_origin[0], r)\n rv1 = _rotate_point_around_point(edge_vertices_origin[0], r)\n rv3 = _rotate_point_around_point(edge_vertices_origin[2], r)\n\n # However, I don't trust that the 1st point is always the major\n # axis or if it is always on x or y, so we'll make sure.\n # Use absolute values & find max\n rv1 = np.abs(rv1)\n rv3 = np.abs(rv3)\n rv1_max_pos = rv1.argmax()\n rv3_max_pos = rv3.argmax()\n\n if rv1_max_pos == rv3_max_pos:\n raise FlowJoWSPParsingError(\n \"Cannot determine major axis of FlowJo ellipse gate '%s'\" % self.gate_name\n )\n\n rv1_max_val = rv1[rv1_max_pos]\n rv3_max_val = rv3[rv3_max_pos]\n\n if rv1_max_val >= rv3_max_val:\n # rv1 is major axis (even if a circle)\n a = rv1_max_val\n else:\n # rv3 is major axis\n a = rv3_max_val\n\n # Also, calculate b from foci and found 'a', since the\n # minor vertex stored by FlowJo seems off\n b = np.sqrt(np.abs((foci_rotated[0]) ** 2 - (a ** 2)))\n\n # Calculate set of angles for getting points on ellipse\n angles = [2 * np.pi * (i / n_vertices) for i in range(n_vertices)]\n\n # Calculate x, y coordinates for each of the angles\n # x = a * cos(θ)\n # y = b * sin(θ)\n if rv1_max_pos == 0:\n # major axis is the x-axis\n x = a * np.cos(angles)\n y = b * np.sin(angles)\n else:\n # minor axis is the x-axis\n x = b * np.cos(angles)\n y = a * np.sin(angles)\n\n # rotate ellipse to the original orientation, then translate\n inv_r = np.linalg.inv(r)\n xy = np.vstack([x, y]).T\n\n # this will be the final set of polygon vertices\n xy_rot_trans = np.dot(xy, inv_r) + center\n\n # the final complication is the different scaling of biex transforms\n for i, xform in enumerate(transforms):\n if isinstance(xform, WSPBiexTransform):\n # biex transform is always scaled from 0-4096\n xform_range = 4096.0\n else:\n # all others are scaled from 0-1\n xform_range = 1.0\n\n xy_rot_trans[:, i] *= xform_range\n\n return 
PolygonGate(self.gate_name, self.dimensions, xy_rot_trans, use_complement=self.use_complement)",
"def test_clip_points_by_polygons(self):\n\n # Name input files\n point_name = join(TESTDATA, 'population_5x5_jakarta_points.shp')\n point_layer = read_layer(point_name)\n points = numpy.array(point_layer.get_geometry())\n attrs = point_layer.get_data()\n\n # Loop through polygons\n for filename in ['polygon_0.shp', 'polygon_1.shp', 'polygon_2.shp',\n 'polygon_3.shp', 'polygon_4.shp',\n 'polygon_5.shp', 'polygon_6.shp']:\n\n polygon_layer = read_layer(join(TESTDATA, filename))\n polygon = polygon_layer.get_geometry()[0]\n\n # Clip\n indices = inside_polygon(points, polygon)\n\n # Sanity\n for point in points[indices, :]:\n assert is_inside_polygon(point, polygon)\n\n # Explicit tests\n if filename == 'polygon_0.shp':\n assert len(indices) == 6\n elif filename == 'polygon_1.shp':\n assert len(indices) == 2\n assert numpy.allclose(points[indices[0], :],\n [106.8125, -6.1875])\n assert numpy.allclose(points[indices[1], :],\n [106.8541667, -6.1875])\n assert numpy.allclose(attrs[indices[0]]['value'],\n 331941.6875)\n assert numpy.allclose(attrs[indices[1]]['value'],\n 496445.8125)\n elif filename == 'polygon_2.shp':\n assert len(indices) == 7\n elif filename == 'polygon_3.shp':\n assert len(indices) == 0 # Degenerate\n elif filename == 'polygon_4.shp':\n assert len(indices) == 0 # Degenerate\n elif filename == 'polygon_5.shp':\n assert len(indices) == 8\n elif filename == 'polygon_6.shp':\n assert len(indices) == 6",
"def _rasterize_polygons(polygons, bounds = [[-100, -100], [100, 100]],\n dx = 1, dy = 1):\n try:\n from skimage import draw\n except:\n raise ImportError('The fill function requires the module '\n '\"scikit-image\" to operate. Please retry '\n 'after installing scikit-image:\\n\\n'\n '$ pip install --upgrade scikit-image')\n\n # Prepare polygon array by shifting all points into the first quadrant and\n # separating points into x and y lists\n xpts = []\n ypts = []\n for p in polygons:\n p_array = np.asarray(p)\n x = p_array[:, 0]\n y = p_array[:, 1]\n xpts.append((x-bounds[0][0])/dx - 0.5)\n ypts.append((y-bounds[0][1])/dy - 0.5)\n\n # Initialize the raster matrix we'll be writing to\n xsize = int(np.ceil((bounds[1][0]-bounds[0][0]))/dx)\n ysize = int(np.ceil((bounds[1][1]-bounds[0][1]))/dy)\n raster = np.zeros((ysize, xsize), dtype = np.bool)\n\n # TODO: Replace polygon_perimeter with the supercover version\n for n in range(len(xpts)):\n rr, cc = draw.polygon(ypts[n], xpts[n], shape = raster.shape)\n rrp, ccp = draw.polygon_perimeter(ypts[n], xpts[n],\n shape = raster.shape, clip = False)\n raster[rr, cc] = 1\n raster[rrp, ccp] = 1\n\n return raster",
"def reproject_coordinates(coordinates, inproj, outproj, flat=False):\n if flat:\n return np.array([transform(inproj, outproj, coord[0], coord[1]) for coord in coordinates]).flatten()\n return [list(transform(inproj, outproj, coord[0], coord[1])) for coord in coordinates]",
"def _union_polygons(polygons, precision = 1e-4, max_points = 4000):\n polygons = _merge_floating_point_errors(polygons, tol = precision/1000)\n unioned = gdspy.boolean(polygons, [], operation = 'or',\n precision = precision, max_points = max_points)\n return unioned",
"def generatePolygons():",
"def shape_to_polygons(lines):\n from itertools import tee, izip\n def pairwise(iterable):\n a,b = tee(iterable)\n next(b, None)\n return izip(a, b)\n polygons = [[tuple(lines[0]['shape'])]]\n for a, b in pairwise(lines):\n if a['fid'] != b['fid']:\n polygons.append([])\n polygons[-1].append(tuple(b['shape']))\n return polygons",
"def transforming_coordinates(self, coordinates_lists, transform): \n \n transformed_coordinates_lists = []\n for coordinates_list in coordinates_lists:\n transformed_coordinates_list = []\n for coordinate in coordinates_list:\n coordinate = tuple(coordinate)\n transformed_coordinate = list(transform(coordinate[0], coordinate[1]))\n transformed_coordinates_list.append(transformed_coordinate)\n transformed_coordinates_lists.append(transformed_coordinates_list)\n \n \n return transformed_coordinates_lists",
"def transformed(self, T):\n new = Polygon2()\n new.path = self.path.transformed(Affine2D(T.A))\n return new",
"def get_polygonsets(self, depth=None):\n if not isinstance(self.ref_cell, Cell):\n return []\n if self.rotation is not None:\n ct = numpy.cos(self.rotation * numpy.pi / 180.0)\n st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone\n if self.x_reflection:\n xrefl = numpy.array((1, -1))\n if self.magnification is not None:\n mag = numpy.array((self.magnification, self.magnification), dtype=float)\n if self.origin is not None:\n orgn = numpy.array(self.origin)\n polygonsets = self.ref_cell.get_polygonsets(depth=depth)\n for ps in polygonsets:\n for ii in range(len(ps.polygons)):\n if self.x_reflection:\n ps.polygons[ii] = ps.polygons[ii] * xrefl\n if self.magnification is not None:\n ps.polygons[ii] = ps.polygons[ii] * mag\n if self.rotation is not None:\n ps.polygons[ii] = (\n ps.polygons[ii] * ct + ps.polygons[ii][:, ::-1] * st\n )\n if self.origin is not None:\n ps.polygons[ii] = ps.polygons[ii] + orgn\n return polygonsets",
"def clean_geometries(self, geo_nodes):\n for node in geo_nodes:\n if mc.nodeType(node) != 'mesh':\n node = mc.listRelatives(node, shapes=True, fullPath=True)[0]\n\n # Do the in mesh out mesh connection and the blendshape between\n # a cube and the original geometry\n cube = mc.polyCube()[0]\n cubeShape = mc.listRelatives(cube, s=True)[0]\n mc.connectAttr(\n \"{0}.outMesh\".format(node),\n \"{0}.inMesh\".format(cubeShape),\n f=True\n )\n mc.blendShape(node, cubeShape, w=(0, 1), o='world')\n\n # Rename the old object and all of it's shapes\n # This is a workaround to rename the shapeDeformed as well\n transform = mc.listRelatives(node, parent=True, fullPath=True)[0]\n renamed = mc.rename(\n transform,\n \"{0}_OM\".format(transform.split('|')[-1]),\n ignoreShape=True\n )\n for shape in mc.listRelatives(renamed, s=True, f=True):\n mc.rename(shape, \"{0}_OM\".format(shape.split('|')[-1]))\n\n # Rename the cube and it's shapes to orignial geo name\n new_node = mc.rename(\n cube,\n transform.split('|')[-1],\n ignoreShape=True\n )\n mc.rename(\n mc.listRelatives(new_node, s=True)[0],\n node.split('|')[-1]\n )\n\n # Unparent the old object and parent the new one\n parent = mc.listRelatives(renamed, parent=True, fullPath=True)\n if parent is not None:\n mc.parent(new_node, parent[0])\n mc.parent(renamed, world=True)",
"def toFillPolygons(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass",
"def orient_polygons(values, polygon_offsets, ring_offsets):\n num_rings = len(ring_offsets) - 1\n\n # Compute expected orientation of rings\n expected_ccw = np.zeros(len(ring_offsets) - 1, dtype=np.bool_)\n expected_ccw[polygon_offsets[:-1]] = True\n\n # Compute actual orientation of rings\n is_ccw = np.zeros(num_rings)\n for i in range(num_rings):\n is_ccw[i] = compute_area(values, ring_offsets[i:i + 2]) >= 0\n\n # Compute indices of rings to flip\n flip_inds = np.nonzero(is_ccw != expected_ccw)\n ring_starts = ring_offsets[:-1]\n ring_stops = ring_offsets[1:]\n flip_starts = ring_starts[flip_inds]\n flip_stops = ring_stops[flip_inds]\n\n for i in range(len(flip_starts)):\n flip_start = flip_starts[i]\n flip_stop = flip_stops[i]\n\n xs = values[flip_start:flip_stop:2]\n ys = values[flip_start + 1:flip_stop:2]\n values[flip_start:flip_stop:2] = xs[::-1]\n values[flip_start + 1:flip_stop:2] = ys[::-1]",
"def _make_collections(polygons, opacity=1):\n collection = []\n for color in polygons:\n collection.append(\n Poly3DCollection(\n [p.points_matrix for p in polygons[color]],\n alpha=opacity,\n facecolor=color,\n edgecolors=\"black\",\n )\n )\n return collection",
"def _make_collections(polygons, opacity=1):\n collection = []\n for color in polygons:\n collection.append(Poly3DCollection(\n [p.points_matrix for p in polygons[color]],\n alpha=opacity,\n facecolor=color,\n edgecolors='black'))\n return collection",
"def contains_polygons(self, polygons):\n assert isinstance(polygons, collections.Iterable), \\\n 'Invalid list of polygons'\n merged_poly = None\n geo = self.get_geometry()\n for poly in polygons:\n if merged_poly is None:\n merged_poly = geo.union(poly)\n else:\n merged_poly = merged_poly.union(poly)\n return merged_poly.area == geo.area",
"def _crop_edge_polygons(all_polygons, bboxes,\n left, bottom, right, top,\n precision):\n polygons_in_rect_i = _find_bboxes_in_rect(bboxes, left, bottom, right, top)\n polygons_edge_i = _find_bboxes_on_rect_edge(bboxes, left, bottom, right,\n top)\n polygons_in_rect_no_edge_i = polygons_in_rect_i & (~polygons_edge_i)\n\n # Crop polygons along the edge and recombine them with polygons inside the\n # rectangle\n polygons_edge = all_polygons[polygons_edge_i]\n polygons_in_rect_no_edge = all_polygons[polygons_in_rect_no_edge_i]\\\n .tolist()\n polygons_edge_cropped = _crop_region(polygons_edge, left, bottom, right,\n top, precision = precision)\n polygons_to_process = polygons_in_rect_no_edge + polygons_edge_cropped\n\n return polygons_to_process",
"def _scale_polygon(self, event):\n if not self._selection_completed:\n return\n\n if self._old_box_extents == self._box.extents:\n return\n\n # Create transform from old box to new box\n x1, y1, w1, h1 = self._box._rect_bbox\n old_bbox = self._get_bbox()\n t = (transforms.Affine2D()\n .translate(-old_bbox.x0, -old_bbox.y0)\n .scale(1 / old_bbox.width, 1 / old_bbox.height)\n .scale(w1, h1)\n .translate(x1, y1))\n\n # Update polygon verts. Must be a list of tuples for consistency.\n new_verts = [(x, y) for x, y in t.transform(np.array(self.verts))]\n self._xys = [*new_verts, new_verts[0]]\n self._draw_polygon()\n self._old_box_extents = self._box.extents",
"def _join_polygons(self, selected_polygons):\r\n # Do a spatial join in order to count the number of time lapse polygons intersect each \"cell\" in the raster-like\r\n # polygon template. We are effectively applying the template to a specific set of time lapse polygons, doing the\r\n # count, and creating the raw output. The result is a polygon feature class of raster-like cells with a field\r\n # called Join_Count that shows the number of input time lapse polygons that intersect the cell using the specified\r\n # match_option.\r\n # Create a FieldMappings object for Spatial Join to preserve informational input fields\r\n field_mappings = arcpy.FieldMappings()\r\n for field in FIELDS_TO_PRESERVE:\r\n fmap = arcpy.FieldMap()\r\n fmap.addInputField(self.time_lapse_polygons, field)\r\n fmap.mergeRule = \"First\"\r\n field_mappings.addFieldMap(fmap)\r\n # Do the spatial join\r\n temp_spatial_join_fc = os.path.join(self.scratch_gdb, \"SpatialJoin\")\r\n t0 = time.time()\r\n arcpy.analysis.SpatialJoin(\r\n self.raster_template,\r\n selected_polygons,\r\n temp_spatial_join_fc,\r\n \"JOIN_ONE_TO_ONE\", # Output keeps only one copy of each \"cell\" when multiple time lapse polys intersect it\r\n \"KEEP_COMMON\", # Delete any \"cells\" that don't overlap the time lapse polys being considered\r\n field_mapping=field_mappings, # Preserve some fields from the original data\r\n match_option=\"HAVE_THEIR_CENTER_IN\"\r\n )\r\n self.logger.info(f\"Finished spatial join in {time.time() - t0} seconds.\")\r\n return temp_spatial_join_fc",
"def apply_translate( triangles, translate_tris=(0,0,0) ):\n checkShapeValidity( triangles )\n \n for i in range(len(triangles)): # each tri in triangles\n for j in range(len(triangles[i])): # each point in a tri\n for k in range(len(translate_tris)): # each axis in a point\n triangles[i][j][k] = float(triangles[i][j][k]) + float(translate_tris[k])",
"def rasterize_vector(shp, rows, cols, geoTrans=None, saveto=None, method='within', transform=None):\r\n # Open the shapefile\r\n shp = ogr.Open(shp)\r\n\r\n # Get the layer from the shape\r\n layer = shp.GetLayer()\r\n\r\n # Get the layer's information\r\n lyrSrs = layer.GetSpatialRef().ExportToWkt()\r\n\r\n # Optionally transform to specified transformation\r\n if transform and transform.ExportToWkt() != lyrSrs:\r\n # Get the layer geometry\r\n poly = layer.GetNextFeature()\r\n geom = poly.GetGeometryRef()\r\n\r\n # Transform the geometry.\r\n geom.TransformTo(transform)\r\n\r\n # Create a new layer.\r\n lyr_driver = ogr.GetDriverByName('ESRI Shapefile')\r\n\r\n lyr_driver_name = tempfile.NamedTemporaryFile(suffix='.shp').name\r\n lyr_source = lyr_driver.CreateDataSource(lyr_driver_name)\r\n new_lyr = lyr_source.CreateLayer(lyr_driver_name, transform, geom_type=ogr.wkbPolygon)\r\n\r\n # Add an ID field to tie the geometry to\r\n id_field = ogr.FieldDefn('id', ogr.OFTInteger)\r\n new_lyr.CreateField(id_field)\r\n\r\n # Set the transformed geometry\r\n feature_defn = new_lyr.GetLayerDefn()\r\n feature = ogr.Feature(feature_defn)\r\n feature.SetGeometry(geom)\r\n feature.SetField('id',1)\r\n new_lyr.CreateFeature(feature)\r\n\r\n # Set the existing layer to be the new layer\r\n layer = new_lyr\r\n lyrSrs = transform.ExportToWkt()\r\n\r\n # Create the raster's name\r\n if not saveto:\r\n remove = True\r\n saveto = tempfile.NamedTemporaryFile(suffix='.tif')\r\n saveto = saveto.name\r\n else:\r\n remove = False\r\n\r\n # Create the new raster\r\n driver = gdal.GetDriverByName('GTiff')\r\n outRas = driver.Create(saveto, cols, rows, 1)\r\n outRas.SetProjection(lyrSrs)\r\n outRas.SetGeoTransform(geoTrans)\r\n outRas.GetRasterBand(1).Fill(1)\r\n\r\n # Rasterize the layer\r\n if method.lower() == 'touches':\r\n gdal.RasterizeLayer(outRas,[1],layer,None, None, [0], ['ALL_TOUCHED=TRUE'])\r\n else: # Just default to this.\r\n gdal.RasterizeLayer(outRas,[1],layer,None, None, [0])\r\n arr = outRas.ReadAsArray()\r\n if remove:\r\n os.remove(saveto)\r\n\r\n # Return the numpy array\r\n return arr",
"def transform_geometries(datasource, src_epsg, dst_epsg):\n # Part 1\n src_srs = osr.SpatialReference()\n src_srs.ImportFromEPSG(src_epsg)\n dst_srs = osr.SpatialReference()\n dst_srs.ImportFromEPSG(dst_epsg)\n transformation = osr.CoordinateTransformation(src_srs, dst_srs)\n layer = datasource.GetLayerByIndex(0)\n \n # Part 2\n geoms = []\n layer.ResetReading()\n for feature in layer:\n geom = feature.GetGeometryRef().Clone()\n geom.Transform(transformation)\n geoms.append(geom)\n return geoms",
"def transform(self, results: Dict) -> Dict:\n # gt_polygons -> gt_masks\n if 'gt_polygons' in results.keys():\n gt_polygons = results.pop('gt_polygons')\n gt_polygons = [[gt_polygon] for gt_polygon in gt_polygons]\n gt_masks = PolygonMasks(gt_polygons, *results['img_shape'])\n\n if self.poly2mask:\n gt_masks = gt_masks.to_bitmap()\n\n results['gt_masks'] = gt_masks\n # gt_ignore_flags -> gt_ignored\n if 'gt_ignored' in results.keys():\n gt_ignored = results.pop('gt_ignored')\n results['gt_ignore_flags'] = gt_ignored\n\n return results",
"def reproject_coordinates_batch(aX_in, aY_in, spatial_reference_source, spatial_reference_target=None):\n #Reproject a list of x,y coordinates. \n\n if spatial_reference_target is not None:\n\n pass\n else:\n spatial_reference_target = osr.SpatialReference()\n spatial_reference_target.ImportFromEPSG(4326)\n \n pass\n\n \n if int(osgeo.__version__[0]) >= 3:\n # GDAL 3 changes axis order: https://github.com/OSGeo/gdal/issues/1546\n \n spatial_reference_source.SetAxisMappingStrategy(osgeo.osr.OAMS_TRADITIONAL_GIS_ORDER)\n spatial_reference_target.SetAxisMappingStrategy(osgeo.osr.OAMS_TRADITIONAL_GIS_ORDER)\n\n \n pTransform = osr.CoordinateTransformation( spatial_reference_source, spatial_reference_target)\n\n npoint = len(aX_in)\n x_new=list()\n y_new=list()\n for i in range(npoint):\n x0 = aX_in[i]\n y0 = aY_in[i]\n \n x1,y1, z = pTransform.TransformPoint( x0,y0)\n\n x_new.append(x1)\n y_new.append(y1)\n \n return x_new,y_new",
"def poly_merge(s0, label):\n if s0.geom_type == 'Polygon':\n return s0\n ff = copy(s0)\n try:\n nc = len(s0.geoms)\n buffer_size = 100.0\n\n while ff.geom_type == 'MultiPolygon' and len(ff.geoms) > 1 and buffer_size <= 500.0:\n tmp0 = copy(s0)\n tmp1 = tmp0.buffer(+buffer_size)\n tmp2 = tmp1.buffer(-buffer_size)\n ff = shapely.ops.cascaded_union((tmp2, s0))\n buffer_size += 50.0\n except ValueError:\n print('!!! Error in poly_merge')\n return ff"
] | [
"0.7867194",
"0.5765913",
"0.55117023",
"0.55035955",
"0.5446485",
"0.5436907",
"0.5436537",
"0.5431551",
"0.5428468",
"0.5427553",
"0.5419052",
"0.5384409",
"0.53389496",
"0.53316414",
"0.53243774",
"0.5288421",
"0.52567446",
"0.5184014",
"0.5177287",
"0.5158781",
"0.5148572",
"0.5139445",
"0.5130814",
"0.5119154",
"0.51131374",
"0.51091623",
"0.5101075",
"0.5078337",
"0.5072025",
"0.5063464"
] | 0.7864645 | 1 |
Transform a set of polygons. This reference's transformation is applied to the given polygons. | def _transform_polygons(self, polygons):
if self.rotation is not None:
ct = numpy.cos(self.rotation * numpy.pi / 180.0)
st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone
if self.magnification is not None:
mag = numpy.array((self.magnification, self.magnification), dtype=float)
if self.origin is not None:
orgn = numpy.array(self.origin)
if self.x_reflection:
xrefl = numpy.array((1, -1))
if isinstance(polygons, dict):
out_polygons = {}
for kk in polygons.keys():
out_polygons[kk] = []
for ii in range(self.columns):
for jj in range(self.rows):
spc = numpy.array([self.spacing[0] * ii, self.spacing[1] * jj])
for points in polygons[kk]:
if self.magnification:
out_polygons[kk].append(points * mag + spc)
else:
out_polygons[kk].append(points + spc)
if self.x_reflection:
out_polygons[kk][-1] = out_polygons[kk][-1] * xrefl
if self.rotation is not None:
out_polygons[kk][-1] = (
out_polygons[kk][-1] * ct
+ out_polygons[kk][-1][:, ::-1] * st
)
if self.origin is not None:
out_polygons[kk][-1] = out_polygons[kk][-1] + orgn
else:
out_polygons = []
for ii in range(self.columns):
for jj in range(self.rows):
spc = numpy.array([self.spacing[0] * ii, self.spacing[1] * jj])
for points in polygons:
if self.magnification is not None:
out_polygons.append(points * mag + spc)
else:
out_polygons.append(points + spc)
if self.x_reflection:
out_polygons[-1] = out_polygons[-1] * xrefl
if self.rotation is not None:
out_polygons[-1] = (
out_polygons[-1] * ct + out_polygons[-1][:, ::-1] * st
)
if self.origin is not None:
out_polygons[-1] = out_polygons[-1] + orgn
return out_polygons | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _transform_polygons(self, polygons):\n if self.rotation is not None:\n ct = numpy.cos(self.rotation * numpy.pi / 180.0)\n st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone\n if self.x_reflection:\n xrefl = numpy.array((1, -1))\n if self.magnification is not None:\n mag = numpy.array((self.magnification, self.magnification), dtype=float)\n if self.origin is not None:\n orgn = numpy.array(self.origin)\n if isinstance(polygons, dict):\n for kk in polygons.keys():\n for ii in range(len(polygons[kk])):\n if self.x_reflection:\n polygons[kk][ii] = polygons[kk][ii] * xrefl\n if self.magnification is not None:\n polygons[kk][ii] = polygons[kk][ii] * mag\n if self.rotation is not None:\n polygons[kk][ii] = (\n polygons[kk][ii] * ct + polygons[kk][ii][:, ::-1] * st\n )\n if self.origin is not None:\n polygons[kk][ii] = polygons[kk][ii] + orgn\n else:\n for ii in range(len(polygons)):\n if self.x_reflection:\n polygons[ii] = polygons[ii] * xrefl\n if self.magnification is not None:\n polygons[ii] = polygons[ii] * mag\n if self.rotation is not None:\n polygons[ii] = polygons[ii] * ct + polygons[ii][:, ::-1] * st\n if self.origin is not None:\n polygons[ii] = polygons[ii] + orgn\n return polygons",
"def recombine(uniques, intersections):\n polygons = []\n for i,u in enumerate(uniques):\n try:\n segs = [seg for seg in u]\n except TypeError: # single seg\n if u is None:\n segs = []\n else:\n segs = [u,]\n\n \n segs.extend([p for p in intersections[i] if p is not None])\n segs.extend([p[i] for p in intersections if p[i] is not None]) # transpose, get the lower triangle\n merged = shapely.ops.linemerge(segs)\n print(\"Merging poly %i with %s segments\"%(i,len(segs)))\n if type(merged) is not shapely.geometry.LineString:\n for seg in segs:\n plt.plot(seg.xy[0], seg.xy[1])\n plt.show()\n \n assert type(merged) is shapely.geometry.LineString\n polygons.append(shapely.geometry.Polygon(merged))\n return polygons",
"def polygons(self):\n if self.type == 'Polygon':\n polygons = [self._geojson['geometry']['coordinates']]\n elif self.type == 'MultiPolygon':\n polygons = self._geojson['geometry']['coordinates']\n return [ [ [_lat_lons_from_geojson(s) for\n s in ring ] for\n ring in polygon] for\n polygon in polygons]",
"def _write_polygons(\n self,\n shapes: Iterable[Polygon],\n emissions: Iterable[float],\n info: EmissionInfo,\n source_group: int,\n ):\n\n # Rasterize the polygon on a grid\n shapes_serie = gpd.GeoSeries(shapes)\n # get polygon bounds\n minx, miny, maxx, maxy = shapes_serie.total_bounds\n # Create a grid for the rasterization\n x = np.arange(minx, maxx, self.polygon_raster_size)\n y = np.arange(miny, maxy, self.polygon_raster_size)\n\n # Get the emission per cell\n average_cells_proportion = (self.polygon_raster_size**2) / shapes_serie.area\n cell_emissions = np.array(emissions) * average_cells_proportion\n\n # WARNING: this might be not exactly mass convserving\n rasterized_emissions = rasterize(\n shapes=zip(shapes, cell_emissions),\n out_shape=(len(x), len(y)),\n transform=from_bounds(minx, miny, maxx, maxy, len(x), len(y)),\n all_touched=False,\n merge_alg=MergeAlg.add,\n )[\n ::-1, :\n ] # flip the y axis\n\n # Get the coordinates of the rasterized polygon\n indices = np.array(np.where(rasterized_emissions)).T\n\n # Write the polygon\n with open(self.file_cadastre, \"a\") as f:\n for i_x, i_y in indices:\n f.write(\n f\"{x[i_x]},{y[i_y]},{info.height},\"\n f\"{self.polygon_raster_size},{self.polygon_raster_size},{info.vertical_extension},\"\n f\"{rasterized_emissions[i_x, i_y]},0,0,0,{source_group},\\n\"\n )",
"def generatePolygons():",
"def test_clip_points_by_polygons(self):\n\n # Name input files\n point_name = join(TESTDATA, 'population_5x5_jakarta_points.shp')\n point_layer = read_layer(point_name)\n points = numpy.array(point_layer.get_geometry())\n attrs = point_layer.get_data()\n\n # Loop through polygons\n for filename in ['polygon_0.shp', 'polygon_1.shp', 'polygon_2.shp',\n 'polygon_3.shp', 'polygon_4.shp',\n 'polygon_5.shp', 'polygon_6.shp']:\n\n polygon_layer = read_layer(join(TESTDATA, filename))\n polygon = polygon_layer.get_geometry()[0]\n\n # Clip\n indices = inside_polygon(points, polygon)\n\n # Sanity\n for point in points[indices, :]:\n assert is_inside_polygon(point, polygon)\n\n # Explicit tests\n if filename == 'polygon_0.shp':\n assert len(indices) == 6\n elif filename == 'polygon_1.shp':\n assert len(indices) == 2\n assert numpy.allclose(points[indices[0], :],\n [106.8125, -6.1875])\n assert numpy.allclose(points[indices[1], :],\n [106.8541667, -6.1875])\n assert numpy.allclose(attrs[indices[0]]['value'],\n 331941.6875)\n assert numpy.allclose(attrs[indices[1]]['value'],\n 496445.8125)\n elif filename == 'polygon_2.shp':\n assert len(indices) == 7\n elif filename == 'polygon_3.shp':\n assert len(indices) == 0 # Degenerate\n elif filename == 'polygon_4.shp':\n assert len(indices) == 0 # Degenerate\n elif filename == 'polygon_5.shp':\n assert len(indices) == 8\n elif filename == 'polygon_6.shp':\n assert len(indices) == 6",
"def savedxf_polylist(list_of_polygons, filename=None, \n debug=False, save_as='POLYGON', union = False):\n try:\n os.remove('buffer.geojson')\n except:\n pass\n\n GNEW = []\n\n for p in list_of_polygons:\n \n if p.is_valid:\n GNEW += [p]\n if not p.is_valid:\n pnew = p.buffer(0)\n if pnew.is_valid:\n GNEW += [pnew]\n if debug: print('new polygon made from self intersecting polygon, is valid: ',pnew.is_valid)\n else:\n if debug: print('self intersecting polygon thrown out.')\n else: pass\n\n if not GNEW:\n GNEW = [empty()]\n \n if union:\n buffer_obj = unary_union(GNEW)\n else:\n buffer_obj = MultiPolygon(GNEW)\n\n if debug: print('started writing file ...')\n f = open(\"buffer.geojson\", \"wb\")\n f.write(json.dumps(mapping(buffer_obj)))\n f.close()\n if debug: print('finished.')\n\n if debug: print('started conversion of geojson to dxf ...')\n if filename == None:\n filename = 'buffer'\n if debug: print('save as MULTILINESTRING or POLYGON...')\n # --config(\"DXF_WRITE_HATCH\", \"NO\")\n os.system('ogr2ogr -f DXF '+filename+'.dxf buffer.geojson')\n if debug: \n print('finished.')\n print('saved '+filename+'.dxf')",
"def convert_to_polygon_gate(self, transforms, n_vertices=128):\n # FlowJo stores ellipsoid vertex values differently from any other gate.\n # They are stored in the binned \"display space\", so range from 0.0 - 256.0.\n # The binned space is linear over the transform range.\n #\n # To convert to a polygon:\n # 1. Determine center & rotation angle from foci\n # 2. Translate foci & edge vertices such that center is at origin\n # 3. Rotate foci & edge vertices such that major/minor axes are || to x/y axes\n # 4. Determine major axis orientation (x vs y-axis)\n # 5. Use foci & major axis to determine minor axis (2nd FJ point is unreliable)\n # 6. Generate new x, y points from ellipse definition for set of angles\n # 7. Rotate & translate coordinates back to original orientation\n # 8. Scale any dimensions using biex transform\n # 9. Create PolygonGate from the new set of coordinates\n # Find center of ellipse\n foci = copy.deepcopy(self.foci) / 256.0\n center = (foci[0] + foci[1]) / 2.0\n\n # Determine rotation of ellipse\n slope = (foci[1][1] - foci[0][1]) / (foci[1][0] - foci[0][0])\n theta_rad = np.arctan(slope)\n cos, sin = np.cos(theta_rad), np.sin(theta_rad)\n r = np.array(((cos, -sin), (sin, cos)))\n\n # Translate foci & edge vertices to the origin\n foci_origin = foci - center\n edge_vertices_origin = (copy.deepcopy(self.edge_vertices) / 256.0) - center\n\n # According to FlowJo devs, edge vertices are ordered as:\n # 1st & 2nd points are major axis\n # 3rd & 4th points are minor axis\n # Rotate edge vertices\n # Only need are one major & one minor point since the other is symmetric\n foci_rotated = _rotate_point_around_point(foci_origin[0], r)\n rv1 = _rotate_point_around_point(edge_vertices_origin[0], r)\n rv3 = _rotate_point_around_point(edge_vertices_origin[2], r)\n\n # However, I don't trust that the 1st point is always the major\n # axis or if it is always on x or y, so we'll make sure.\n # Use absolute values & find max\n rv1 = np.abs(rv1)\n rv3 = np.abs(rv3)\n rv1_max_pos = rv1.argmax()\n rv3_max_pos = rv3.argmax()\n\n if rv1_max_pos == rv3_max_pos:\n raise FlowJoWSPParsingError(\n \"Cannot determine major axis of FlowJo ellipse gate '%s'\" % self.gate_name\n )\n\n rv1_max_val = rv1[rv1_max_pos]\n rv3_max_val = rv3[rv3_max_pos]\n\n if rv1_max_val >= rv3_max_val:\n # rv1 is major axis (even if a circle)\n a = rv1_max_val\n else:\n # rv3 is major axis\n a = rv3_max_val\n\n # Also, calculate b from foci and found 'a', since the\n # minor vertex stored by FlowJo seems off\n b = np.sqrt(np.abs((foci_rotated[0]) ** 2 - (a ** 2)))\n\n # Calculate set of angles for getting points on ellipse\n angles = [2 * np.pi * (i / n_vertices) for i in range(n_vertices)]\n\n # Calculate x, y coordinates for each of the angles\n # x = a * cos(θ)\n # y = b * sin(θ)\n if rv1_max_pos == 0:\n # major axis is the x-axis\n x = a * np.cos(angles)\n y = b * np.sin(angles)\n else:\n # minor axis is the x-axis\n x = b * np.cos(angles)\n y = a * np.sin(angles)\n\n # rotate ellipse to the original orientation, then translate\n inv_r = np.linalg.inv(r)\n xy = np.vstack([x, y]).T\n\n # this will be the final set of polygon vertices\n xy_rot_trans = np.dot(xy, inv_r) + center\n\n # the final complication is the different scaling of biex transforms\n for i, xform in enumerate(transforms):\n if isinstance(xform, WSPBiexTransform):\n # biex transform is always scaled from 0-4096\n xform_range = 4096.0\n else:\n # all others are scaled from 0-1\n xform_range = 1.0\n\n xy_rot_trans[:, i] *= xform_range\n\n return 
PolygonGate(self.gate_name, self.dimensions, xy_rot_trans, use_complement=self.use_complement)",
"def shape_to_polygons(lines):\n from itertools import tee, izip\n def pairwise(iterable):\n a,b = tee(iterable)\n next(b, None)\n return izip(a, b)\n polygons = [[tuple(lines[0]['shape'])]]\n for a, b in pairwise(lines):\n if a['fid'] != b['fid']:\n polygons.append([])\n polygons[-1].append(tuple(b['shape']))\n return polygons",
"def _union_polygons(polygons, precision = 1e-4, max_points = 4000):\n polygons = _merge_floating_point_errors(polygons, tol = precision/1000)\n unioned = gdspy.boolean(polygons, [], operation = 'or',\n precision = precision, max_points = max_points)\n return unioned",
"def _rasterize_polygons(polygons, bounds = [[-100, -100], [100, 100]],\n dx = 1, dy = 1):\n try:\n from skimage import draw\n except:\n raise ImportError('The fill function requires the module '\n '\"scikit-image\" to operate. Please retry '\n 'after installing scikit-image:\\n\\n'\n '$ pip install --upgrade scikit-image')\n\n # Prepare polygon array by shifting all points into the first quadrant and\n # separating points into x and y lists\n xpts = []\n ypts = []\n for p in polygons:\n p_array = np.asarray(p)\n x = p_array[:, 0]\n y = p_array[:, 1]\n xpts.append((x-bounds[0][0])/dx - 0.5)\n ypts.append((y-bounds[0][1])/dy - 0.5)\n\n # Initialize the raster matrix we'll be writing to\n xsize = int(np.ceil((bounds[1][0]-bounds[0][0]))/dx)\n ysize = int(np.ceil((bounds[1][1]-bounds[0][1]))/dy)\n raster = np.zeros((ysize, xsize), dtype = np.bool)\n\n # TODO: Replace polygon_perimeter with the supercover version\n for n in range(len(xpts)):\n rr, cc = draw.polygon(ypts[n], xpts[n], shape = raster.shape)\n rrp, ccp = draw.polygon_perimeter(ypts[n], xpts[n],\n shape = raster.shape, clip = False)\n raster[rr, cc] = 1\n raster[rrp, ccp] = 1\n\n return raster",
"def get_polygonsets(self, depth=None):\n if not isinstance(self.ref_cell, Cell):\n return []\n if self.rotation is not None:\n ct = numpy.cos(self.rotation * numpy.pi / 180.0)\n st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone\n if self.x_reflection:\n xrefl = numpy.array((1, -1))\n if self.magnification is not None:\n mag = numpy.array((self.magnification, self.magnification), dtype=float)\n if self.origin is not None:\n orgn = numpy.array(self.origin)\n polygonsets = self.ref_cell.get_polygonsets(depth=depth)\n for ps in polygonsets:\n for ii in range(len(ps.polygons)):\n if self.x_reflection:\n ps.polygons[ii] = ps.polygons[ii] * xrefl\n if self.magnification is not None:\n ps.polygons[ii] = ps.polygons[ii] * mag\n if self.rotation is not None:\n ps.polygons[ii] = (\n ps.polygons[ii] * ct + ps.polygons[ii][:, ::-1] * st\n )\n if self.origin is not None:\n ps.polygons[ii] = ps.polygons[ii] + orgn\n return polygonsets",
"def transforming_coordinates(self, coordinates_lists, transform): \n \n transformed_coordinates_lists = []\n for coordinates_list in coordinates_lists:\n transformed_coordinates_list = []\n for coordinate in coordinates_list:\n coordinate = tuple(coordinate)\n transformed_coordinate = list(transform(coordinate[0], coordinate[1]))\n transformed_coordinates_list.append(transformed_coordinate)\n transformed_coordinates_lists.append(transformed_coordinates_list)\n \n \n return transformed_coordinates_lists",
"def transformed(self, T):\n new = Polygon2()\n new.path = self.path.transformed(Affine2D(T.A))\n return new",
"def reproject_coordinates(coordinates, inproj, outproj, flat=False):\n if flat:\n return np.array([transform(inproj, outproj, coord[0], coord[1]) for coord in coordinates]).flatten()\n return [list(transform(inproj, outproj, coord[0], coord[1])) for coord in coordinates]",
"def toFillPolygons(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass",
"def clean_geometries(self, geo_nodes):\n for node in geo_nodes:\n if mc.nodeType(node) != 'mesh':\n node = mc.listRelatives(node, shapes=True, fullPath=True)[0]\n\n # Do the in mesh out mesh connection and the blendshape between\n # a cube and the original geometry\n cube = mc.polyCube()[0]\n cubeShape = mc.listRelatives(cube, s=True)[0]\n mc.connectAttr(\n \"{0}.outMesh\".format(node),\n \"{0}.inMesh\".format(cubeShape),\n f=True\n )\n mc.blendShape(node, cubeShape, w=(0, 1), o='world')\n\n # Rename the old object and all of it's shapes\n # This is a workaround to rename the shapeDeformed as well\n transform = mc.listRelatives(node, parent=True, fullPath=True)[0]\n renamed = mc.rename(\n transform,\n \"{0}_OM\".format(transform.split('|')[-1]),\n ignoreShape=True\n )\n for shape in mc.listRelatives(renamed, s=True, f=True):\n mc.rename(shape, \"{0}_OM\".format(shape.split('|')[-1]))\n\n # Rename the cube and it's shapes to orignial geo name\n new_node = mc.rename(\n cube,\n transform.split('|')[-1],\n ignoreShape=True\n )\n mc.rename(\n mc.listRelatives(new_node, s=True)[0],\n node.split('|')[-1]\n )\n\n # Unparent the old object and parent the new one\n parent = mc.listRelatives(renamed, parent=True, fullPath=True)\n if parent is not None:\n mc.parent(new_node, parent[0])\n mc.parent(renamed, world=True)",
"def _make_collections(polygons, opacity=1):\n collection = []\n for color in polygons:\n collection.append(\n Poly3DCollection(\n [p.points_matrix for p in polygons[color]],\n alpha=opacity,\n facecolor=color,\n edgecolors=\"black\",\n )\n )\n return collection",
"def contains_polygons(self, polygons):\n assert isinstance(polygons, collections.Iterable), \\\n 'Invalid list of polygons'\n merged_poly = None\n geo = self.get_geometry()\n for poly in polygons:\n if merged_poly is None:\n merged_poly = geo.union(poly)\n else:\n merged_poly = merged_poly.union(poly)\n return merged_poly.area == geo.area",
"def _make_collections(polygons, opacity=1):\n collection = []\n for color in polygons:\n collection.append(Poly3DCollection(\n [p.points_matrix for p in polygons[color]],\n alpha=opacity,\n facecolor=color,\n edgecolors='black'))\n return collection",
"def construct_polygon(self, polygon_longs: List, polygon_lats: List) -> gpd.GeoDataFrame:\n\n polygon_geom = Polygon(zip(polygon_longs, polygon_lats))\n\n crs = {'init': 'epsg:4326'}\n polygon = gpd.GeoDataFrame(index=[0], crs=crs, geometry=[polygon_geom])\n\n polygon.to_file(filename=f'{self.polygon_path}/polygon_{self.postfix}.geojson', driver='GeoJSON')\n polygon.to_file(filename=f'{self.polygon_path}/polygon_{self.postfix}.shp', driver=\"ESRI Shapefile\")\n\n self.monitor.info(\"-> Created area polygon.\")\n return polygon",
"def orient_polygons(values, polygon_offsets, ring_offsets):\n num_rings = len(ring_offsets) - 1\n\n # Compute expected orientation of rings\n expected_ccw = np.zeros(len(ring_offsets) - 1, dtype=np.bool_)\n expected_ccw[polygon_offsets[:-1]] = True\n\n # Compute actual orientation of rings\n is_ccw = np.zeros(num_rings)\n for i in range(num_rings):\n is_ccw[i] = compute_area(values, ring_offsets[i:i + 2]) >= 0\n\n # Compute indices of rings to flip\n flip_inds = np.nonzero(is_ccw != expected_ccw)\n ring_starts = ring_offsets[:-1]\n ring_stops = ring_offsets[1:]\n flip_starts = ring_starts[flip_inds]\n flip_stops = ring_stops[flip_inds]\n\n for i in range(len(flip_starts)):\n flip_start = flip_starts[i]\n flip_stop = flip_stops[i]\n\n xs = values[flip_start:flip_stop:2]\n ys = values[flip_start + 1:flip_stop:2]\n values[flip_start:flip_stop:2] = xs[::-1]\n values[flip_start + 1:flip_stop:2] = ys[::-1]",
"def _join_polygons(self, selected_polygons):\r\n # Do a spatial join in order to count the number of time lapse polygons intersect each \"cell\" in the raster-like\r\n # polygon template. We are effectively applying the template to a specific set of time lapse polygons, doing the\r\n # count, and creating the raw output. The result is a polygon feature class of raster-like cells with a field\r\n # called Join_Count that shows the number of input time lapse polygons that intersect the cell using the specified\r\n # match_option.\r\n # Create a FieldMappings object for Spatial Join to preserve informational input fields\r\n field_mappings = arcpy.FieldMappings()\r\n for field in FIELDS_TO_PRESERVE:\r\n fmap = arcpy.FieldMap()\r\n fmap.addInputField(self.time_lapse_polygons, field)\r\n fmap.mergeRule = \"First\"\r\n field_mappings.addFieldMap(fmap)\r\n # Do the spatial join\r\n temp_spatial_join_fc = os.path.join(self.scratch_gdb, \"SpatialJoin\")\r\n t0 = time.time()\r\n arcpy.analysis.SpatialJoin(\r\n self.raster_template,\r\n selected_polygons,\r\n temp_spatial_join_fc,\r\n \"JOIN_ONE_TO_ONE\", # Output keeps only one copy of each \"cell\" when multiple time lapse polys intersect it\r\n \"KEEP_COMMON\", # Delete any \"cells\" that don't overlap the time lapse polys being considered\r\n field_mapping=field_mappings, # Preserve some fields from the original data\r\n match_option=\"HAVE_THEIR_CENTER_IN\"\r\n )\r\n self.logger.info(f\"Finished spatial join in {time.time() - t0} seconds.\")\r\n return temp_spatial_join_fc",
"def toSubpathPolygons(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass",
"def _scale_polygon(self, event):\n if not self._selection_completed:\n return\n\n if self._old_box_extents == self._box.extents:\n return\n\n # Create transform from old box to new box\n x1, y1, w1, h1 = self._box._rect_bbox\n old_bbox = self._get_bbox()\n t = (transforms.Affine2D()\n .translate(-old_bbox.x0, -old_bbox.y0)\n .scale(1 / old_bbox.width, 1 / old_bbox.height)\n .scale(w1, h1)\n .translate(x1, y1))\n\n # Update polygon verts. Must be a list of tuples for consistency.\n new_verts = [(x, y) for x, y in t.transform(np.array(self.verts))]\n self._xys = [*new_verts, new_verts[0]]\n self._draw_polygon()\n self._old_box_extents = self._box.extents",
"def _crop_edge_polygons(all_polygons, bboxes,\n left, bottom, right, top,\n precision):\n polygons_in_rect_i = _find_bboxes_in_rect(bboxes, left, bottom, right, top)\n polygons_edge_i = _find_bboxes_on_rect_edge(bboxes, left, bottom, right,\n top)\n polygons_in_rect_no_edge_i = polygons_in_rect_i & (~polygons_edge_i)\n\n # Crop polygons along the edge and recombine them with polygons inside the\n # rectangle\n polygons_edge = all_polygons[polygons_edge_i]\n polygons_in_rect_no_edge = all_polygons[polygons_in_rect_no_edge_i]\\\n .tolist()\n polygons_edge_cropped = _crop_region(polygons_edge, left, bottom, right,\n top, precision = precision)\n polygons_to_process = polygons_in_rect_no_edge + polygons_edge_cropped\n\n return polygons_to_process",
"def transform_geometries(datasource, src_epsg, dst_epsg):\n # Part 1\n src_srs = osr.SpatialReference()\n src_srs.ImportFromEPSG(src_epsg)\n dst_srs = osr.SpatialReference()\n dst_srs.ImportFromEPSG(dst_epsg)\n transformation = osr.CoordinateTransformation(src_srs, dst_srs)\n layer = datasource.GetLayerByIndex(0)\n \n # Part 2\n geoms = []\n layer.ResetReading()\n for feature in layer:\n geom = feature.GetGeometryRef().Clone()\n geom.Transform(transformation)\n geoms.append(geom)\n return geoms",
"def apply_translate( triangles, translate_tris=(0,0,0) ):\n checkShapeValidity( triangles )\n \n for i in range(len(triangles)): # each tri in triangles\n for j in range(len(triangles[i])): # each point in a tri\n for k in range(len(translate_tris)): # each axis in a point\n triangles[i][j][k] = float(triangles[i][j][k]) + float(translate_tris[k])",
"def transform(self, results: Dict) -> Dict:\n # gt_polygons -> gt_masks\n if 'gt_polygons' in results.keys():\n gt_polygons = results.pop('gt_polygons')\n gt_polygons = [[gt_polygon] for gt_polygon in gt_polygons]\n gt_masks = PolygonMasks(gt_polygons, *results['img_shape'])\n\n if self.poly2mask:\n gt_masks = gt_masks.to_bitmap()\n\n results['gt_masks'] = gt_masks\n # gt_ignore_flags -> gt_ignored\n if 'gt_ignored' in results.keys():\n gt_ignored = results.pop('gt_ignored')\n results['gt_ignore_flags'] = gt_ignored\n\n return results",
"def __init__(self, sources, material, polygons, xmlnode=None):\n\n max_offset = max([ max([input[0] for input in input_type_array])\n for input_type_array in sources.values()\n if len(input_type_array) > 0])\n\n vcounts = numpy.zeros(len(polygons), dtype=numpy.int32)\n for i, poly in enumerate(polygons):\n vcounts[i] = len(poly) / (max_offset + 1)\n\n if len(polygons) > 0:\n indices = numpy.concatenate(polygons)\n else:\n indices = numpy.array([], dtype=numpy.int32)\n\n super(Polygons, self).__init__(sources, material, indices, vcounts, xmlnode)\n\n if xmlnode is not None: self.xmlnode = xmlnode\n else:\n acclen = len(polygons)\n\n self.xmlnode = E.polygons(count=str(acclen), material=self.material)\n\n all_inputs = []\n for semantic_list in self.sources.values():\n all_inputs.extend(semantic_list)\n for offset, semantic, sourceid, set, src in all_inputs:\n inpnode = E.input(offset=str(offset), semantic=semantic, source=sourceid)\n if set is not None:\n inpnode.set('set', str(set))\n self.xmlnode.append(inpnode)\n\n for poly in polygons:\n self.xmlnode.append(E.p(' '.join(map(str, poly.flatten().tolist()))))"
] | [
"0.785101",
"0.5796514",
"0.559841",
"0.55872154",
"0.55240583",
"0.5515661",
"0.55025566",
"0.5487049",
"0.5435642",
"0.54168725",
"0.54126465",
"0.53939325",
"0.53696126",
"0.5351234",
"0.5342615",
"0.5257946",
"0.5232036",
"0.521499",
"0.52135664",
"0.51902133",
"0.5137937",
"0.511902",
"0.51175386",
"0.5098632",
"0.5092457",
"0.5089627",
"0.50895995",
"0.5088985",
"0.5088155",
"0.50828266"
] | 0.78590536 | 0 |
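The `_transform_polygons` method above is internal to gdspy's `CellArray`; user code normally reaches it indirectly through `get_polygons()`, which applies the same array repetition, rotation, magnification, reflection, and origin shift to every placement. Below is a minimal, hypothetical usage sketch assuming the gdspy 1.x API; the cell name and array parameters are illustrative only.

import gdspy

lib = gdspy.GdsLibrary()
unit = lib.new_cell("UNIT")                      # hypothetical cell name
unit.add(gdspy.Rectangle((0, 0), (1, 1)))        # one square in the referenced cell

# 3 x 2 array with a 5-unit pitch, rotated 90 degrees and shifted to (10, 0).
array = gdspy.CellArray(unit, columns=3, rows=2, spacing=(5, 5),
                        origin=(10, 0), rotation=90)

# get_polygons() applies the same per-placement transformation shown above.
polys = array.get_polygons()
print(len(polys))  # 6: one transformed copy of the square per array position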
Rename an existing cell in the library. | def rename_cell(self, cell, name, update_references=True):
if isinstance(cell, Cell):
old_name = cell.name
if old_name not in self.cells:
raise ValueError(
"[GDSPY] Cell named {0} not present in library.".format(old_name)
)
if self.cells[old_name] is not cell:
raise ValueError(
"[GDSPY] Cell named {0} doesn't match library's.".format(old_name)
)
else:
old_name = cell
if old_name not in self.cells:
raise ValueError(
"[GDSPY] Cell named {0} not present in library.".format(old_name)
)
cell = self.cells[old_name]
if name in self.cells:
raise ValueError(
"[GDSPY] Cell named {0} already present in library. "
"Use `add` to overwrite cells.".format(name)
)
del self.cells[old_name]
self.cells[name] = cell
cell.name = name
if update_references:
return self.replace_references(old_name, cell)
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def name(self, new_name):\n self.rename(new_name)",
"def cellModified(self):\n\n x = self.tableWidget_journals.currentRow()\n y = self.tableWidget_journals.currentColumn()\n if y == self.NAME_COLUMN:\n newName = str(self.tableWidget_journals.item(x, y).text()).strip().encode('raw_unicode_escape')\n # check that no other journal has this name and it is not empty\n update = True\n if newName == \"\":\n update = False\n for c in self.journals:\n if c['name'] == newName:\n update = False\n if update:\n # update source list and database\n cur = self.settings['conn'].cursor()\n cur.execute(\"update journal set name=? where name=?\", (newName, self.journals[x]['name']))\n self.settings['conn'].commit()\n self.journals[x]['name'] = newName\n else: # put the original text in the cell\n self.tableWidget_journals.item(x, y).setText(self.journals[x]['name'])",
"def upgrade_cell(cell):\n cell.setdefault(\"metadata\", NotebookNode())\n cell.id = random_cell_id()\n if cell.cell_type == \"code\":\n cell.pop(\"language\", \"\")\n if \"collapsed\" in cell:\n cell.metadata[\"collapsed\"] = cell.pop(\"collapsed\")\n cell.source = cell.pop(\"input\", \"\")\n cell.execution_count = cell.pop(\"prompt_number\", None)\n cell.outputs = upgrade_outputs(cell.outputs)\n elif cell.cell_type == \"heading\":\n cell.cell_type = \"markdown\"\n level = cell.pop(\"level\", 1)\n cell.source = \"{hashes} {single_line}\".format(\n hashes=\"#\" * level,\n single_line=\" \".join(cell.get(\"source\", \"\").splitlines()),\n )\n elif cell.cell_type == \"html\":\n # Technically, this exists. It will never happen in practice.\n cell.cell_type = \"markdown\"\n return cell",
"def hxlrename():\n run_script(hxlrename_main)",
"def rename(self, newname):\n # set the new column name\n self.colname = newname",
"def rename_node(self, node, name):\r\n\r\n if not name:\r\n raise ValueError(\"No node name provided for rename\")\r\n if name in self.nodes():\r\n raise ValueError(\"Node with name '%s' already exists\" % name)\r\n\r\n old_name = self.node_name(node)\r\n\r\n del self.nodes[old_name]\r\n self.nodes[name] = node",
"def rename(old, new):",
"def rename(old, new):",
"def rename(self, name, new_name):\n renames = {}\n if new_name in self._data.columns:\n msg = \"Cannot rename '{}' into '{}'. Column name already exists!\"\n raise ValueError(msg.format(name, new_name))\n\n self._in_blacklist(new_name)\n self._check_against_weak_dupes(new_name)\n\n if not self._dimensions_comp == 'ignore':\n self.undimensionize([name] + self.sources(name))\n name = self._dims_free_arr_name(name)\n\n for no, s in enumerate(self.sources(name), start=1):\n if '_' in s and s.split('_')[-1].isdigit():\n new_s_name = '{}_{}'.format(new_name, s.split('_')[-1])\n else:\n new_s_name = '{}_{}'.format(new_name, no)\n self._add_all_renames_to_mapper(renames, s, new_s_name)\n\n self._add_all_renames_to_mapper(renames, name, new_name)\n\n self.rename_from_mapper(renames)\n\n if self._dimensions_comp and not self._dimensions_comp == 'ignore':\n self.dimensionize(new_name)\n\n return None",
"def renamed(self, source, dest):\r\n self.__close_and_reload(source, new_filename=dest)",
"def rename(oldname, newname):",
"def rename(self,oldName,newName):\n #--Update references\n fileInfo = self[oldName]\n self[newName] = self[oldName]\n del self[oldName]\n self.table.moveRow(oldName,newName)\n #--FileInfo\n fileInfo.name = newName\n #--File system\n newPath = os.path.join(fileInfo.dir,newName)\n oldPath = os.path.join(fileInfo.dir,oldName)\n renameFile(oldPath,newPath)\n #--Done\n fileInfo.madeBackup = False",
"def rename(self, new_name):\n\n self.__enforce_connected()\n current_url = self.url\n self._set_field(\"name\",new_name)\n self.set_json(self._http_client.update(current_url, self.get_json()))",
"def rename(self, name):\n return self.parent.rename(self, name)",
"def new_cell(self, name, overwrite_duplicate=False, update_references=True):\n cell = Cell(name)\n self.add(cell, False, overwrite_duplicate, update_references)\n return cell",
"def rename(path, new_path):\n fs.rename(path, new_path)",
"def OnRenameTimer(self):\r\n\r\n self.EditLabel(self._current, self._curColumn)",
"def newname(self, newname) :\n\t\ttry :\n\t\t\tself._newname = newname\n\t\texcept Exception as e:\n\t\t\traise e",
"def rename(self, name, overwrite=False):\n return _image.image_rename(self, name, overwrite)",
"def rename(self, name):\n self.name = name",
"def rename_NmTensor(self, tensor: 'NmTensor', new_name: str):\n # Find old name if exists\n old_name = tensor.unique_name\n for custom_name, unique_name in self._nmtensor_naming_dict.items():\n if unique_name == tensor.unique_name:\n old_name = custom_name\n\n if old_name != tensor.unique_name:\n del self._nmtensor_naming_dict[old_name]\n\n if new_name in self._nmtensor_naming_dict:\n raise KeyError(f\"{new_name} already exists in current graph. Please use a unique name\")\n self._nmtensor_naming_dict[new_name] = tensor.unique_name",
"def rename(self, name):\n return _coconut_tail_call(self.__class__, name)",
"def rename_file(self, path, new_name):\n try:\n self.rename_narrative(self._parse_path(path), self.get_userid(), new_name)\n except WorkspaceError as err:\n raise HTTPError(err.http_code, err.message)\n except Exception as err:\n raise HTTPError(\n 500, \"An error occurred while renaming your Narrative: {}\".format(err)\n )",
"def update_cell(self, cell):\n\n if cell.uuid not in self._cells:\n error_str = \"Trying to update a non-existing cell with uuid: \"\\\n + str(cell.uuid)\n raise KeyError(error_str)\n\n if not isinstance(cell, Cell):\n error_str = \"Trying to update an object with the wrong type. \"\\\n + \"Cell expected.\"\n raise TypeError(error_str)\n\n cell_to_update = self._cells[cell.uuid]\n\n cell_to_update.data = cell.data\n cell_to_update.points = cell.points",
"def rename(self, label_value: int, new_name: str) -> None:\n seginfo = self.infos[label_value]\n seginfo.name = new_name\n # propagate state changes\n self._update_state_from_infos()",
"def rename(self, identifier):\n self._line[7] = self._speaker = identifier",
"def update_name(old_name, new_name, phonebook):\n\n phonebook_data = read_phonebook(phonebook)\n\n if not phonebook_data.get(old_name):\n raise NoEntryError(\"This entry does not exist! \"\n \"(Names are case-sensitive.)\")\n\n else:\n print \"Previous entry:\", old_name, phonebook_data[old_name]\n number = phonebook_data[old_name]\n del phonebook_data[old_name]\n phonebook_data[new_name] = number\n print \"New entry:\", new_name, phonebook_data[new_name]\n save(phonebook_data, phonebook)",
"def rename(self, name: str):\n self.doc['name'] = name",
"def rename(self, name=None, destination=None):\n raise NotImplementedError\n return None",
"def rename_experiment(self, experiment_id, new_name):\n return self.dbclient.update_by_id(Tables.EXPERIMENTS, experiment_id, {\n ExperimentAttr.NAME: new_name\n })"
] | [
"0.58167297",
"0.57922983",
"0.5770913",
"0.5732452",
"0.5716103",
"0.5663834",
"0.56498533",
"0.56498533",
"0.56417304",
"0.5641613",
"0.56392765",
"0.558007",
"0.5527694",
"0.55087227",
"0.54747593",
"0.5461296",
"0.53917825",
"0.53753626",
"0.5354214",
"0.5341221",
"0.5337005",
"0.53362316",
"0.5331687",
"0.5327757",
"0.5324774",
"0.5323733",
"0.5302943",
"0.52891415",
"0.52703756",
"0.5265338"
] | 0.8026026 | 0 |
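A short sketch of how the `rename_cell` method above is typically used, assuming the gdspy 1.x `GdsLibrary` API; the cell names are made up for illustration. Note that references holding a `Cell` object keep pointing at the same object, so they see the new name automatically after the rename.

import gdspy

lib = gdspy.GdsLibrary()
inner = lib.new_cell("OLD_NAME")                 # hypothetical names
top = lib.new_cell("TOP")
top.add(gdspy.CellReference(inner))

lib.rename_cell(inner, "NEW_NAME")               # update_references defaults to True
print(sorted(lib.cells))                         # ['NEW_NAME', 'TOP']
print(top.references[0].ref_cell.name)           # 'NEW_NAME' - the reference follows the rename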
Replace cells in all references in the library. All `CellReference` and `CellArray` objects that use `old_cell` are updated to reference `new_cell`. Matching with `old_cell` is by name only. | def replace_references(self, old_cell, new_cell):
if isinstance(old_cell, Cell):
old_name = old_cell.name
else:
old_name = old_cell
if not isinstance(new_cell, Cell) and new_cell in self.cells:
new_cell = self.cells[new_cell]
replacements = 0
for cell in self.cells.values():
for ref in cell.references:
if isinstance(ref.ref_cell, Cell):
if ref.ref_cell.name == old_name:
ref.ref_cell = new_cell
replacements += 1
elif ref.ref_cell == old_name:
ref.ref_cell = new_cell
replacements += 1
return replacements | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rename_cell(self, cell, name, update_references=True):\n if isinstance(cell, Cell):\n old_name = cell.name\n if old_name not in self.cells:\n raise ValueError(\n \"[GDSPY] Cell named {0} not present in library.\".format(old_name)\n )\n if self.cells[old_name] is not cell:\n raise ValueError(\n \"[GDSPY] Cell named {0} doesn't match library's.\".format(old_name)\n )\n else:\n old_name = cell\n if old_name not in self.cells:\n raise ValueError(\n \"[GDSPY] Cell named {0} not present in library.\".format(old_name)\n )\n cell = self.cells[old_name]\n if name in self.cells:\n raise ValueError(\n \"[GDSPY] Cell named {0} already present in library. \"\n \"Use `add` to overwrite cells.\".format(name)\n )\n del self.cells[old_name]\n self.cells[name] = cell\n cell.name = name\n if update_references:\n return self.replace_references(old_name, cell)\n return 0",
"def cell_replace_node(self,c,n_old,n_new):\n for ni in range(self.max_sides):\n if self.cells['nodes'][c,ni] == n_old:\n self.cells['nodes'][c,ni] = n_new\n if self._node_to_cells is not None:\n self._node_to_cells[n_old].remove(c)\n self._node_to_cells[n_new].append(c)",
"def replace(self,object,newObject):\n if object in self.cell.objects:\n objIndex = self.cell.objects.index(object)\n self.cell.objects[objIndex] = newObject\n else:\n objIndex = self.cell.tempObjects.index(object)\n self.cell.tempObjects[objIndex] = newObject\n self.cell.setChanged()",
"def setReference(self, updatedIndices):\n # self.colors[:] = [self.colors[i] for i in updatedIndices]\n self.cellData[:] = [self.cellData[i] for i in updatedIndices]",
"def map (a_data,a_column,a_old,a_new) :\n loc_new_data = a_data\n a_data[a_column].replace(a_old,a_new,inplace=True)",
"def replace_cell_solutions(cell):\n if is_markdown_solution_cell(cell):\n return copy.deepcopy(MARKDOWN_ANSWER_CELL_TEMPLATE)\n elif is_code_cell(cell):\n source = get_source(cell)\n stripped_source = replace_solutions(source)\n new_cell = copy.deepcopy(cell)\n new_cell.source = \"\\n\".join(stripped_source)\n return new_cell\n else:\n return copy.deepcopy(cell)",
"def replace_entry(item_to_replace, new_item):\n db_copy = DATABASE\n\n highlights = db_copy[\"highlights\"]\n for count, item in enumerate(highlights):\n if item == item_to_replace:\n highlights[count] = new_item\n \n db_copy[\"highlights\"] = highlights\n\n utilities.write_new_highlights_json(json.dumps(db_copy))",
"def swap_cells(pl_file_name, cell1, cell2):\r\n line_cell_1 = ''\r\n line_cell_2 = ''\r\n data = []\r\n with open(pl_file_name) as p:\r\n for num, line in enumerate(p):\r\n if cell1 in line:\r\n data = line.split()\r\n if data[0] == cell1:\r\n line_cell_1 = line\r\n if cell2 in line:\r\n data = line.split()\r\n if data[0] == cell2:\r\n line_cell_2 = line\r\n\r\n with open(pl_file_name) as p:\r\n data = p.readlines()\r\n\r\n for i in range(len(data)):\r\n if data[i] == line_cell_1:\r\n data[i] = line_cell_2.replace(cell2, cell1)\r\n if data[i] == line_cell_2:\r\n data[i] = line_cell_1.replace(cell1, cell2)\r\n\r\n with open(pl_file_name, 'w') as p:\r\n p.writelines(data)",
"def process_cell(self, neighbourhood: List[Cell], old_cell: Cell) -> Cell:",
"def replaceRefsById(self,refReplacer):\n log = self.log\n oldIds = set(refReplacer.newIds.keys())\n replCount = {}\n for cell in self.cells:\n objects = cell.getObjects()\n for object in objects.list():\n (iMod,iObj,oldId,objRecords) = object[:4]\n if oldId.lower() in oldIds:\n newId = refReplacer.getNewId(oldId)\n newObject = (iMod,iObj,newId,objRecords)\n objects.replace(object,newObject)\n replCount[oldId] = replCount.get(oldId,0) + 1\n #--Add Records?\n newRecords = refReplacer.getSrcRecords()\n if newRecords:\n selfIds = set([record.getId().lower() for record in self.records if record.getId()])\n log.setHeader(_('Records added:'))\n for newId in sorted(newRecords.keys()):\n if newId not in selfIds:\n self.records.append(newRecords[newId])\n log(newId)\n #--Log\n log.setHeader(_('References replaced:'))\n for oldId in sorted(replCount.keys(),key=lambda a: a.lower()):\n log('%03d %s' % (replCount[oldId], oldId))\n #--Return number of references replaced.\n return sum(replCount.values())",
"def _update_references(self, oldref, newref, key_in_ref):\n keys = self._backreference_keys(oldref, key_in_ref)\n assert(keys is not None)\n self.__update_field_references(oldref, newref,\n list(set(self.__class__.REFERENCE_FIELDS)\n .intersection(keys)))\n if hasattr(self, \"_refs\"):\n # note: keeping the two types of nonfield references separate helps\n # in subclasses where only one must be redefined\n self.__update_dependent_line_references(oldref, newref,\n set(self.__class__.DEPENDENT_LINES)\n .intersection(self._refs.keys())\n .intersection(keys))\n self.__update_other_references(oldref, newref,\n list(set(self.__class__.OTHER_REFERENCES)\n .intersection(self._refs.keys())\n .intersection(keys)))",
"def replace_input(self, current_input_ref, new_input_ref):\n inputs = self.get_recipe_inputs()\n for (input_role_name, input_role) in inputs.items():\n for item in input_role.get(\"items\", []):\n if item.get(\"ref\", None) == current_input_ref:\n item[\"ref\"] = new_input_ref",
"def __replaceArrRefs(self, tnode, replace_table):\n\n if isinstance(tnode, ast.NumLitExp):\n return tnode\n\n elif isinstance(tnode, ast.StringLitExp):\n return tnode\n\n elif isinstance(tnode, ast.IdentExp):\n return tnode\n\n elif isinstance(tnode, ast.ArrayRefExp):\n aref_str = str(tnode)\n if aref_str in replace_table:\n iname = replace_table[aref_str]\n return ast.IdentExp(iname)\n else:\n return tnode\n\n elif isinstance(tnode, ast.FunCallExp):\n tnode.exp = self.__replaceArrRefs(tnode.exp, replace_table)\n tnode.args = [self.__replaceArrRefs(a, replace_table) for a in tnode.args]\n return tnode\n\n elif isinstance(tnode, ast.UnaryExp):\n tnode.exp = self.__replaceArrRefs(tnode.exp, replace_table)\n return tnode\n\n elif isinstance(tnode, ast.BinOpExp):\n tnode.lhs = self.__replaceArrRefs(tnode.lhs, replace_table)\n tnode.rhs = self.__replaceArrRefs(tnode.rhs, replace_table)\n return tnode\n\n elif isinstance(tnode, ast.ParenthExp):\n tnode.exp = self.__replaceArrRefs(tnode.exp, replace_table)\n return tnode\n\n elif isinstance(tnode, ast.ExpStmt):\n if tnode.exp:\n tnode.exp = self.__replaceArrRefs(tnode.exp, replace_table)\n return tnode\n\n elif isinstance(tnode, ast.CompStmt):\n tnode.stmts = [self.__replaceArrRefs(s, replace_table) for s in tnode.stmts]\n return tnode\n\n elif isinstance(tnode, ast.IfStmt):\n tnode.test = self.__replaceArrRefs(tnode.test, replace_table)\n tnode.true_stmt = self.__replaceArrRefs(tnode.true_stmt, replace_table)\n if tnode.false_stmt:\n tnode.false_stmt = self.__replaceArrRefs(\n tnode.false_stmt, replace_table\n )\n return tnode\n\n elif isinstance(tnode, ast.ForStmt):\n if tnode.init:\n tnode.init = self.__replaceArrRefs(tnode.init, replace_table)\n if tnode.test:\n tnode.test = self.__replaceArrRefs(tnode.test, replace_table)\n if tnode.iter:\n tnode.iter = self.__replaceArrRefs(tnode.iter, replace_table)\n tnode.stmt = self.__replaceArrRefs(tnode.stmt, replace_table)\n return tnode\n\n else:\n err(\n \"orio.module.ortildriver.transformation internal error:OrTilDriver: unknown type of AST: %s\"\n % tnode.__class__.__name__\n )",
"def replace_number(self, old, new):\r\n self.numbers[old] = new",
"def referenceIntersections(self, combine=False):\n self.cellReferenceIntersections = modules.separateIntersections(self.cellData[:-1], self.cellData[-1], self.caseInsensitive, combine)",
"def patch_notebooks(notebooks_dir):\n\n nb_convert_config = Config()\n nb_convert_config.NotebookExporter.preprocessors = [\"nbconvert.preprocessors.ClearOutputPreprocessor\"]\n output_remover = nbconvert.NotebookExporter(nb_convert_config)\n for notebookfile in Path(notebooks_dir).glob(\"**/*.ipynb\"):\n if (\n not str(notebookfile.name).startswith(\"test_\")\n and notebookfile.name not in EXCLUDED_NOTEBOOKS\n ):\n nb = nbformat.read(notebookfile, as_version=nbformat.NO_CONVERT)\n found = False\n for cell in nb[\"cells\"]:\n replace_dict = cell.get(\"metadata\", {}).get(\"test_replace\")\n if replace_dict is not None:\n found = True\n for source_value, target_value in replace_dict.items():\n if source_value not in cell[\"source\"]:\n raise ValueError(\n f\"Processing {notebookfile} failed: {source_value} does not exist in cell\"\n )\n cell[\"source\"] = cell[\"source\"].replace(\n source_value, target_value\n )\n cell[\"source\"] = \"# Modified for testing\\n\" + cell[\"source\"]\n print(\n f\"Processed {notebookfile}: {source_value} -> {target_value}\"\n )\n if not found:\n print(f\"No replacements found for {notebookfile}\")\n nb_without_out, _ = output_remover.from_notebook_node(nb)\n with notebookfile.with_name(f\"test_{notebookfile.name}\").open(\"w\", encoding=\"utf-8\") as out_file:\n out_file.write(nb_without_out)",
"def replace(self, old, new):\n self.log('replace({0}, {1})'.format(old, new))\n if old is False:\n return False\n if isinstance(old, str):\n old = old.split('\\n')\n if not isinstance(old, list):\n raise TypeError(\"Parameter 'old' not a 'string' or 'list', is {0}\".format(type(old)))\n if not isinstance(new, str):\n raise TypeError(\"Parameter 'new' not a 'string', is {0}\".format(type(new)))\n local_changes = False\n for this in old:\n if this in self.contents:\n while this in self.contents:\n index = self.contents.index(this)\n self.changed = local_changes = True\n self.contents.remove(this)\n self.contents.insert(index, new)\n self.log('Replaced \"{0}\" with \"{1}\" at line {2}'.format(this, new, index))\n else:\n self.log('\"{0}\" not in {1}'.format(this, self.filename))\n return local_changes",
"def update_module_refs(self, generation):\n self.species_module_ref_map = {}\n\n if Config.blueprint_nodes_use_representatives:\n # For representatives species_module_ref_map becomes: representative -> chosen module\n reps = self.representatives\n for rep, (spc_index, module_index) in self.species_module_index_map.items():\n if rep not in reps: # removes reps that no longer exist\n continue\n self.species_module_ref_map[rep] = generation.module_population.species[spc_index][module_index]\n else:\n for spc_index, module_index in self.species_module_index_map.items():\n if isinstance(module_index, tuple):\n \"\"\"this is an override index. this module is found in a different species\"\"\"\n if not Config.allow_cross_species_mappings:\n raise Exception('Cross species mapping disabled, but received tuple as value in map')\n spc, mod = module_index\n self.species_module_ref_map[spc_index] = generation.module_population.species[spc][mod]\n else:\n self.species_module_ref_map[spc_index] = generation.module_population.species[spc_index][\n module_index]",
"def addr_to_replace(addr, byte_index, addr_idx_map):\n global all_addrs\n if(do_replace_addr(addr)):\n if(addr not in all_addrs):\n all_addrs.append(addr)\n if addr not in addr_idx_map.keys():\n addr_idx_map[addr] = [byte_index,]\n else:\n addr_idx_map[addr].append(byte_index)\n return",
"def update_cell_nodes(self):\n self.cells['nodes'] = -1\n\n for c in range(self.Ncells()):\n # consider two edges at a time, and find the common node\n for i,(ja,jb) in enumerate(circular_pairs(self.cell_to_edges(c))):\n for n in self.edges['nodes'][ja,:]: \n if n in self.edges['nodes'][jb]:\n self.cells['nodes'][c,i] = n\n break",
"def replace(self, old, new, count=None):\n return asarray(replace(self, old, new, count))",
"def diffFromReplace(self, replace_graph, *, new_replaces_old=True):\n if new_replaces_old:\n replace = {old:new for new, _, old in replace_graph}\n else:\n replace = {old:new for old, _, new in replace_graph}\n\n def iri_replace(t):\n return tuple(replace[e] if e in replace else e for e in t)\n\n add, rem, same = [self.__class__() for _ in range(3)]\n for t in self:\n nt = iri_replace(t)\n if nt != t:\n add.add(nt), rem.add(t)\n else:\n same.add(t)\n\n return add, rem, same",
"def getObjectMap(self,oldRefs):\n objMap = {} #--objMap[cellId][oldIObj] = newIObj\n #--Old cells\n for oldCell in oldRefs.cells:\n cellId = oldCell.getId()\n newCell = self.cells_id.get(cellId)\n #--Cell deleted?\n if not newCell:\n objMap[cellId] = -1\n continue\n cellObjMap = {}\n newObjects = newCell.getObjects().list()\n nextObjectIndex = {}\n #--Old Objects\n for oldObject in oldCell.getObjects().list():\n (iMod,oldIObj,objId) = oldObject[:3]\n if iMod: continue #--Skip mods to masters\n #--New Objects\n objIndex = nextObjectIndex.get(objId,0)\n newIObj = -1 #--Delete by default\n while objIndex < len(newObjects):\n newObject = newObjects[objIndex]\n objIndex += 1\n if newObject[0]: continue #--Skip mods to masters\n if newObject[2] == objId:\n newIObj = newObject[1]\n break\n nextObjectIndex[objId] = objIndex\n #--Obj map has changed?\n if newIObj != oldIObj:\n cellObjMap[oldIObj] = (newIObj,objId)\n #--Save mapping for this cell?\n if cellObjMap: objMap[cellId] = cellObjMap\n #--Done\n return objMap",
"def overwrite_field(self,cells=None,edges=None,source='depth_max',target='depth_mean'):\n if cells is not None:\n self.cells[target][cells]=self.cells[source][cells]\n if edges is not None:\n self.edges[target][edges]=self.edges[source][edges]",
"def update(self, new):\n return self.replace(None, new)",
"def update_cell(self, cell):\n\n if cell.uuid not in self._cells:\n error_str = \"Trying to update a non-existing cell with uuid: \"\\\n + str(cell.uuid)\n raise KeyError(error_str)\n\n if not isinstance(cell, Cell):\n error_str = \"Trying to update an object with the wrong type. \"\\\n + \"Cell expected.\"\n raise TypeError(error_str)\n\n cell_to_update = self._cells[cell.uuid]\n\n cell_to_update.data = cell.data\n cell_to_update.points = cell.points",
"def updateCells(cell_positions):\n # Build a set of canditates for live cells at the next generation, instead of looking through the whole grid\n # These will be dead neighbours of living cells\n possible_future_cells = set()\n # Make sets of cells to add and remove at the end of the check\n cells_remove = set()\n cells_add = set()\n for cell in cell_positions:\n # Get adjacent squares\n neighbours_dict = cellNeighbours(cell)\n number_live_neighbours = 0\n # Check which of these corresponds to another living cell\n for square in neighbours_dict.values():\n if square in cell_positions:\n number_live_neighbours+=1\n else:\n possible_future_cells.add(square)\n\n # Any live cell with fewer than two live neighbours dies, as if caused by under-population\n if number_live_neighbours<2:\n cells_remove.add(cell)\n # Any live cell with two or three live neighbours lives on to the next generation\n # do nothing\n # Any live cell with more than three live neighbours dies, as if by overcrowding\n elif number_live_neighbours>3:\n cells_remove.add(cell)\n # Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction\n for cell_candidate in possible_future_cells:\n cell_candidate_neighbours = cellNeighbours(cell_candidate).values()\n # Count number of live neighbours\n count = 0\n for square in cell_candidate_neighbours:\n if square in cell_positions:\n count+=1\n if count == 3:\n cells_add.add(cell_candidate)\n # Update cell_positions by removing dead cells and adding new-born cells\n for cell in cells_add:\n cell_positions.add(cell)\n for cell in cells_remove:\n cell_positions.remove(cell)\n # Return the update live cell list\n return cell_positions",
"def apply_new_mask(ifgs, mask_old, mask_new):\n\n \n for ifg_n, ifg in enumerate(ifgs): # Loop through each source\n ifg_r2 = col_to_ma(ifg, mask_old) # turn it from a row vector into a rank 2 masked array \n ifg_r2_new_mask = ma.array(ifg_r2, mask = mask_new) # apply the new mask \n ifg_r1_new_mask = ma.compressed(ifg_r2_new_mask) # convert to row vector \n if ifg_n == 0: # if it's the first ifg.. \n n_pixs_new = ifg_r1_new_mask.shape[0] # get the new number of pixels \n ifgs_new_mask = np.zeros((ifgs.shape[0], n_pixs_new)) # initiate an array of the correct size\n ifgs_new_mask[ifg_n, :] = ifg_r1_new_mask # put the row vector into the array\n return ifgs_new_mask",
"def update_replace(self, replaceObj, task_graph, **kwargs):\n ContextCompositeNode.update_replace(self, replaceObj, task_graph,\n **kwargs)\n # replace the numbers from the context\n if 'map' in self.conf and 'iternum' in kwargs:\n for i in range(len(self.conf['map'])):\n val = kwargs['iternum']\n map_obj = self.conf['map'][i]\n xpath = map_obj['xpath']\n expr = parse(xpath)\n expr.update(replaceObj, val)",
"def changeBaseElements(self, change = None, swap = None,\\\n cell = 1, verbose = 1):\n\n if (change is not None) and (swap is not None):\n string = \"Cant use both change and swap at the same time\"\n ut.infoPrint(string)\n\n elif change is not None:\n if type(change[\"from\"]) == str: change[\"from\"] = bytes(change[\"from\"], \"utf-8\")\n if type(change[\"to\"]) == str: change[\"to\"] = bytes(change[\"to\"], \"utf-8\")\n\n if cell == 1:\n self.mass_1[self.spec_1 == change[\"from\"]] = change[\"mass\"]\n self.spec_1[self.spec_1 == change[\"from\"]] = change[\"to\"]\n elif cell == 2:\n self.mass_2[self.spec_2 == change[\"from\"]] = change[\"mass\"]\n self.spec_2[self.spec_2 == change[\"from\"]] = change[\"to\"]\n else:\n return\n \n if verbose > 0:\n string = \"Changing elements: %s --> %s and updating mass to: %.4f for cell %i\"\\\n % (change[\"from\"].decode(\"utf-8\"), change[\"to\"].decode(\"utf-8\"),\\\n change[\"mass\"], cell)\n ut.infoPrint(string)\n\n elif swap is not None:\n if type(swap[\"swap_1\"]) == str: swap[\"swap_1\"] = bytes(swap[\"swap_1\"], \"utf-8\")\n if type(swap[\"swap_2\"]) == str: swap[\"swap_2\"] = bytes(swap[\"swap_2\"], \"utf-8\")\n\n if cell == 1:\n mass1 = self.mass_1[self.spec_1 == swap[\"swap_1\"]][0]\n spec1 = self.spec_1[self.spec_1 == swap[\"swap_1\"]][0]\n mask1 = self.spec_1 == swap[\"swap_1\"]\n\n mass2 = self.mass_1[self.spec_1 == swap[\"swap_2\"]][0]\n spec2 = self.spec_1[self.spec_1 == swap[\"swap_2\"]][0]\n mask2 = self.spec_1 == swap[\"swap_2\"]\n\n self.mass_1[mask1] = mass2\n self.spec_1[mask1] = spec2\n self.mass_1[mask2] = mass1\n self.spec_1[mask2] = spec1\n\n elif cell == 2:\n mass1 = self.mass_2[self.spec_2 == swap[\"swap_1\"]][0]\n spec1 = self.spec_2[self.spec_2 == swap[\"swap_1\"]][0]\n mask1 = self.spec_2 == swap[\"swap_1\"]\n\n mass2 = self.mass_2[self.spec_2 == swap[\"swap_2\"]][0]\n spec2 = self.spec_2[self.spec_2 == swap[\"swap_2\"]][0]\n mask2 = self.spec_2 == swap[\"swap_2\"]\n\n self.mass_2[mask1] = mass2\n self.spec_2[mask1] = spec2\n self.mass_2[mask2] = mass1\n self.spec_2[mask2] = spec1\n\n else:\n return\n \n if verbose > 0:\n string = \"Swaping elements: %s and %s and swaping masses: %.4f to %.4f for cell %i\"\\\n % (swap[\"swap_1\"].decode(\"utf-8\"), swap[\"swap_2\"].decode(\"utf-8\"),\\\n mass1, mass2, cell)\n ut.infoPrint(string)\n\n else:\n return"
] | [
"0.60519004",
"0.5878847",
"0.5818164",
"0.57240033",
"0.5468772",
"0.5437272",
"0.5320144",
"0.5285266",
"0.52246344",
"0.5099337",
"0.505342",
"0.5046812",
"0.50462395",
"0.5032131",
"0.4985268",
"0.49477512",
"0.49297217",
"0.48797628",
"0.4874498",
"0.48279476",
"0.48227584",
"0.48141432",
"0.48099604",
"0.47874466",
"0.4762018",
"0.47530976",
"0.4725214",
"0.47222206",
"0.47210512",
"0.46960807"
] | 0.7644548 | 0 |
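A hedged usage sketch for the `replace_references` method above, again assuming the gdspy 1.x API and illustrative cell names. References are matched by the old cell's name, whether `ref_cell` holds a `Cell` object or a plain name string.

import gdspy

lib = gdspy.GdsLibrary()
old = lib.new_cell("BLOCK_V1")                   # hypothetical names
new = lib.new_cell("BLOCK_V2")
top = lib.new_cell("TOP")
top.add(gdspy.CellReference(old))
top.add(gdspy.CellArray(old, columns=2, rows=2, spacing=(10, 10)))

# Every reference whose target is named "BLOCK_V1" is retargeted to BLOCK_V2.
count = lib.replace_references("BLOCK_V1", new)
print(count)  # 2: one CellReference and one CellArray were updated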
Extract a cell from this GDSII file and include it in the current global library, including referenced dependencies. | def extract(self, cell, overwrite_duplicate=False):
warnings.warn(
"[GDSPY] extract and the use of the global library is deprecated.",
category=DeprecationWarning,
stacklevel=2,
)
import gdspy
cell = self.cells.get(cell, cell)
gdspy.current_library.add(
cell, include_dependencies=True, overwrite_duplicate=overwrite_duplicate
)
return cell | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extract(self, cell):\n cell = self.cell_dict.get(cell, cell)\n current_library.add(cell)\n current_library.add(cell.get_dependencies(True))\n return cell",
"def import_gds(filename, cellname = None, flatten = False):\n gdsii_lib = gdspy.GdsLibrary()\n gdsii_lib.read_gds(filename)\n top_level_cells = gdsii_lib.top_level()\n if cellname is not None:\n if cellname not in gdsii_lib.cells:\n raise ValueError('[PHIDL] import_gds() The requested cell '\n '(named %s) is not present in file %s' \\\n % (cellname,filename))\n topcell = gdsii_lib.cells[cellname]\n elif cellname is None and len(top_level_cells) == 1:\n topcell = top_level_cells[0]\n elif cellname is None and len(top_level_cells) > 1:\n raise ValueError('[PHIDL] import_gds() There are multiple top-level '\n 'cells, you must specify `cellname` to select of '\n 'one of them')\n\n if flatten == False:\n D_list = []\n c2dmap = {}\n for cell in gdsii_lib.cells.values():\n D = Device(name = cell.name)\n D.polygons = cell.polygons\n D.references = cell.references\n D.name = cell.name\n for label in cell.labels:\n rotation = label.rotation\n if rotation is None:\n rotation = 0\n l = D.add_label(text = label.text,\n position = np.asfarray(label.position),\n magnification = label.magnification,\n rotation = rotation*180/np.pi,\n layer = (label.layer, label.texttype))\n l.anchor = label.anchor\n c2dmap.update({cell:D})\n D_list += [D]\n\n for D in D_list:\n # First convert each reference so it points to the right Device\n converted_references = []\n for e in D.references:\n ref_device = c2dmap[e.ref_cell]\n if isinstance(e, gdspy.CellReference):\n dr = DeviceReference(\n device = ref_device,\n origin = e.origin,\n rotation = e.rotation,\n magnification = e.magnification,\n x_reflection = e.x_reflection\n )\n dr.owner = D\n converted_references.append(dr)\n elif isinstance(e, gdspy.CellArray):\n dr = CellArray(\n device = ref_device,\n columns = e.columns,\n rows = e.rows,\n spacing = e.spacing,\n origin = e.origin,\n rotation = e.rotation,\n magnification = e.magnification,\n x_reflection = e.x_reflection,\n )\n dr.owner = D\n converted_references.append(dr)\n D.references = converted_references\n # Next convert each Polygon\n temp_polygons = list(D.polygons)\n D.polygons = []\n for p in temp_polygons:\n D.add_polygon(p)\n\n topdevice = c2dmap[topcell]\n return topdevice\n\n elif flatten == True:\n D = Device('import_gds')\n polygons = topcell.get_polygons(by_spec = True)\n\n for layer_in_gds, polys in polygons.items():\n D.add_polygon(polys, layer = layer_in_gds)\n return D",
"def add_cells(self):\n # To delete all common cells\n self.delete_cells()\n if self.removed_last_bracket:\n new_lib = self.base_doc + self.cell_doc + '\\n}\\n'\n else:\n end_part = self.base_doc[-4:] \n if '}' in end_part:\n new_lib = self.base_doc[0:-4] + '\\n\\t\\t' + self.cell_doc + '\\n}\\n'\n # print(new_lib)\n try: \n with open(self.output_file, \"w\") as file_doc:\n file_doc.write(new_lib)\n return True\n except:\n return False",
"def import_data(self):\n\n self.worksheet = (\n xlrd.open_workbook(filename=self.source).sheet_by_index(0)\n )\n # Import conversion data from worksheet and store as scipy arrays\n self.T_exp = np.array(\n self.worksheet.col_values(0, start_rowx=4, end_rowx=None)\n ) + 273.15\n self.HCout_raw = np.array(\n self.worksheet.col_values(4, start_rowx=4, end_rowx=None)\n )\n self.HCin_raw = np.array(\n self.worksheet.col_values(8, start_rowx=4, end_rowx=None)\n )\n self.eta_exp = (\n (self.HCin_raw - self.HCout_raw) / self.HCin_raw\n )\n self.T_model = np.linspace(\n self.T_exp[0] - 50, self.T_exp[-1] + 50, 25\n )\n self.T_array = self.T_model",
"def openCif(self, filename):\r\n cf = CifFile.ReadCif(filename)\r\n \r\n #Assuming all data is in one outer block like NIST examples:\r\n data = cf[cf.keys()[0]]\r\n \r\n #Create a Crystollographic Unit Cell\r\n a = data['_cell_length_a']\r\n b = data['_cell_length_b']\r\n c = data['_cell_length_c']\r\n \r\n alpha = data['_cell_angle_alpha']\r\n gamma = data['_cell_angle_gamma']\r\n beta = data['_cell_angle_beta']\r\n \r\n spaceGroupInt = int(data['_symmetry_Int_Tables_number'])\r\n spaceGroup = SpaceGroups.GetSpaceGroup(spaceGroupInt)\r\n \r\n unitcell = Cell(spaceGroup, 0,0,0, a, b, c, alpha, gamma, beta)\r\n \r\n atomLabels = data['_atom_site_label']\r\n atomSymbol = data['_atom_site_type_symbol']\r\n xPositions = data['_atom_site_fract_x']\r\n yPositions = data['_atom_site_fract_y']\r\n zPositions = data['_atom_site_fract_z']\r\n \r\n atoms = [] #for the cell window\r\n for i in range(len(atomLabels)):\r\n #unitcell.generateAtoms((float(xPositions[i]), float(yPositions[i]), float(zPositions[i])), atomLabels[i])\n\r\n aData = [atomLabels[i], 0, float(xPositions[i]), float(yPositions[i]), float(zPositions[i])]\r\n #--Added to atomData: single ion anisotropy, spin magnitude, valence\r\n aData.append(0.0)#Dx\r\n aData.append(0.0)#Dy\r\n aData.append(0.0)#Dz\r\n aData.append(1)#Spin Magnitude\r\n aData.append('')#valence\r\n #-------------------------------------------------------------------\r\n atoms.append(aData)\r\n \r\n self.atomTable.SetValue(i, 0, atomLabels[i])\r\n self.atomTable.SetValue(i, 2, xPositions[i])\r\n self.atomTable.SetValue(i, 3, yPositions[i])\r\n self.atomTable.SetValue(i, 4, zPositions[i])\r\n \r\n #Create a Magnetic Cell\r\n self.MagCell = MagneticCell(unitcell, 1,1,1, spaceGroup)\r\n\r\n\r\n Na = 1 #Cif files only contain 1 unit cell\r\n Nb = 1\r\n Nc = 1\r\n \r\n #self.cellChange(spaceGroupInt, a, b, c, alpha, beta, gamma, magNa = Na, magNb = Nb, magNc = Nc, cutNa = Na, cutNb = Nb, cutNc = Nc, atomData = atoms)\n self.updateCell(spaceGroupInt, a, b, c, alpha, beta, gamma, magNa = Na, magNb = Nb, magNc = Nc, cutNa = Na, cutNb = Nb, cutNc = Nc, atomData = atoms)\n self.refreshGUI()\n \n \r\n #send signal to the cell window to show the info that has been loaded and to vtkWindow to draw it\r\n n = self.atomTable.GetNumberRows()\r\n for i in range(n):\r\n print self.atomTable.GetValue(i, 0)\r\n send(signal = \"File Load\", sender = \"Session\", spaceGroup = spaceGroupInt, a = a, b = b, c = c, alpha = alpha, beta = beta, gamma = gamma, magNa = Na, magNb = Nb, magNc = Nc, cutNa = Na, cutNb = Nb, cutNc = Nc)",
"def format_script_for_cell(path):\n header = '\\n# Cell content replaced by load magic replacement.\\n'\n with open(str(path), encoding='utf8') as f:\n solution = f.read()\n if not solution:\n raise RuntimeError('Solution {} has no content.'.format(path))\n return header + solution",
"def add(\n self,\n cell,\n include_dependencies=True,\n overwrite_duplicate=False,\n update_references=True,\n ):\n if isinstance(cell, Cell):\n cell_set = set([cell])\n if include_dependencies:\n cell_set.update(cell.get_dependencies(True))\n else:\n cell_set = set(cell)\n if include_dependencies:\n for c in cell:\n cell_set.update(c.get_dependencies(True))\n for c in cell_set:\n if (\n not overwrite_duplicate\n and c.name in self.cells\n and self.cells[c.name] is not c\n ):\n raise ValueError(\n \"[GDSPY] Cell named {0} already present in library.\".format(c.name)\n )\n if (\n overwrite_duplicate\n and update_references\n and c.name in self.cells\n and self.cells[c.name] is not c\n ):\n self.replace_references(c.name, c)\n self.cells[c.name] = c\n return self",
"def find_extra_content(cell_text):\n for line in cell_text.split('\\n'):\n m = re.match('#\\s?%load.*', line)\n if not m and line:\n raise RuntimeError('Solution cell has extra content: {}'.format(cell_text))",
"def insert_needed_import(note, exer_name):\n txt_import = ''\n if exer_name in ('lp_duality', 'lp_interactive', 'lp_two_phases'):\n txt_interactive = open(PATH_UTILS + 'interactive_simplex_note.md', 'r', encoding='utf-8').read()\n note['cells'] += [nb.v4.new_markdown_cell(txt_interactive)]\n note.cells[-1].metadata = {\"init_cell\": True, \"editable\": False, \"deletable\": False}\n txt_import = open(PATH_UTILS + 'interactive_simplex.py', 'r', encoding='utf-8').read()\n else:\n txt_pulp = open(PATH_UTILS + 'pulp_note.md', 'r', encoding='utf-8').read()\n note['cells'] += [nb.v4.new_markdown_cell(txt_pulp)]\n note.cells[-1].metadata = {\"init_cell\": True, \"editable\": False, \"deletable\": False, \"tags\": ['run_start']}\n txt_import = open(PATH_UTILS + 'import_pulp.md', 'r', encoding='utf-8').read()\n note['cells'] += [nb.v4.new_code_cell(txt_import)]\n note.cells[-1].metadata = {\"init_cell\": True, \"editable\": False, \"deletable\": False, \"tags\": ['run_start']}",
"def read_xd_master_file(path, errorpointer):\n filepointer = open(path, 'r')\n for line in filepointer.readlines():\n if 'TITLE' in line:\n compound_name = line.partition('!')[2].lstrip().rstrip()\n if 'CELL' in line:\n cell = [float(i) for i in line.split(\" \") if '.' in i]\n break\n filepointer.close()\n try:\n return compound_name, cell\n except:\n errorpointer.write(path + '\\n')\n return None, None",
"def import_heat_data(self):\n worksheet = (\n xlrd.open_workbook(filename=self.filename_heat).sheet_by_index(0)\n ) \n self.exh.corrected_reading = np.array(worksheet.col_values(0,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx)) \n self.exh.datum = worksheet.cell_value(2,4) # manometer datum (in) \n self.exh.pressure_drop = ( (self.exh.corrected_reading -\n self.exh.datum) * 2. * self.H2O_kPa ) \n # pressure drop across heat exchanger (kPa)\n self.cummins.torque = np.array(worksheet.col_values(1,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx))\n self.exh.T_inlet_array = np.array(worksheet.col_values(2,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx)) \n self.exh.T_outlet_array = np.array(worksheet.col_values(3,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx)) \n self.cool.T_inlet_array = np.array(worksheet.col_values(5,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx)) \n self.cool.T_outlet_array = np.array(worksheet.col_values(4,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx))",
"def _import_source_data(self, source_file: str) -> None:\n with open(source_file, 'r') as csv_file:\n reader = csv.DictReader(csv_file)\n for row in reader:\n self.cell_map.append(\n Cell(\n datamap_id=None,\n cell_key=row['cell_key'],\n cell_value=None, # have no need of a value in dm\n cell_reference=row['cell_reference'],\n template_sheet=row['template_sheet'],\n bg_colour=row['bg_colour'],\n fg_colour=row['fg_colour'],\n number_format=row['number_format'],\n verification_list=None))",
"def _import_bh_(self):",
"def __init__(self, link, geom_col='geometry'):\n self.df = gpd.read_file(link)\n (self.df['x'], self.df['y']) = self._add_coordinate_data(self.df, geom_col)\n\n return None",
"def importIGG():\n df = load_file(\"data-luminex-igg\")\n df = pd.melt(df, id_vars=[\"subject\"])\n\n df[\"variable\"] = df[\"variable\"].str.replace(\"IgG.\", \"\", regex=False)\n\n return df",
"def geo2cell(geofile, posfile):",
"def to_import_string(self):\n cell = self.cell or \"\"\n return (\n cell\n + \"//\"\n + self.package\n + (\":\" + self.name if self.name is not None else \"\")\n )",
"def read_cells(filename):\n\n import pandas as pd\n\n min_x = -1.77\n min_y = 174.0\n min_z = -183.0\n\n size_x = 0.972\n size_y = 3.69\n size_z = 0.976\n\n frame = pd.read_csv(filename, skiprows=3)\n # frame = pd.read_csv(filename)\n\n# print(\"X range:\",min(frame['Position X']), max(frame['Position X']), \"dynamic range:\", max(frame['Position X'])-min(frame['Position X']))\n# print(\"Y range:\",min(frame['Position Y']), max(frame['Position Y']), \"dynamic range:\", max(frame['Position Y'])-min(frame['Position Y']))\n# print(\"Z range:\",min(frame['Position Z']), max(frame['Position Z']), \"dynamic range:\", max(frame['Position Z'])-min(frame['Position Z']))\n#\n # will need to check IMARIS for correspondence between exported um files and pixel values\n # X and Z on csv files are my X and Y on resliced images\n\n frame[\"Pixel X\"] = (frame['Position X'] - min_x) / size_x\n frame[\"Pixel X\"] = frame[\"Pixel X\"].round().astype(int)\n\n frame[\"Pixel Y\"] = (frame['Position Z'] - min_z) / size_z\n frame[\"Pixel Y\"] = frame[\"Pixel Y\"].round().astype(int)\n\n frame[\"Pixel Z\"] = (frame['Position Y'] - min_y) / size_y\n frame[\"Pixel Z\"] = frame[\"Pixel Z\"].round().astype(int)\n\n print(\"X pixel range:\", min(frame[\"Pixel X\"]), max(\n frame[\"Pixel X\"]), \"dynamic range:\", max(frame[\"Pixel X\"]) - min(frame[\"Pixel X\"]))\n print(\"Y pixel range:\", min(frame[\"Pixel Y\"]), max(\n frame[\"Pixel Y\"]), \"dynamic range:\", max(frame[\"Pixel Y\"]) - min(frame[\"Pixel Y\"]))\n print(\"Z pixel range:\", min(frame[\"Pixel Z\"]), max(\n frame[\"Pixel Z\"]), \"dynamic range:\", max(frame[\"Pixel Z\"]) - min(frame[\"Pixel Z\"]))\n# print(frame)\n frame.to_csv(\"frame.csv\")\n return frame",
"def import_gds(\n gdspath: Union[str, Path],\n cellname: Optional[str] = None,\n flatten: bool = False,\n snap_to_grid_nm: Optional[int] = None,\n name: Optional[str] = None,\n decorator: Optional[Callable] = None,\n gdsdir: Optional[Union[str, Path]] = None,\n safe_cell_names: bool = False,\n **kwargs,\n) -> Component:\n gdspath = Path(gdsdir) / Path(gdspath) if gdsdir else Path(gdspath)\n gdshash = gdspy.gdsii_hash(gdspath)\n if not gdspath.exists():\n raise FileNotFoundError(f\"No file {gdspath!r} found\")\n\n metadata_filepath = gdspath.with_suffix(\".yml\")\n\n gdsii_lib = gdspy.GdsLibrary()\n gdsii_lib.read_gds(str(gdspath))\n top_level_cells = gdsii_lib.top_level()\n cellnames = [c.name for c in top_level_cells]\n\n if cellname is not None:\n if cellname not in gdsii_lib.cells:\n raise ValueError(\n f\"cell {cellname} is not in file {gdspath} with cells {cellnames}\"\n )\n topcell = gdsii_lib.cells[cellname]\n elif cellname is None and len(top_level_cells) == 1:\n topcell = top_level_cells[0]\n elif cellname is None and len(top_level_cells) > 1:\n raise ValueError(\n f\"import_gds() There are multiple top-level cells in {gdspath!r}, \"\n f\"you must specify `cellname` to select of one of them among {cellnames}\"\n )\n\n if name:\n if name in CACHE:\n raise ValueError(\n f\"name = {name!r} already on cache. \"\n \"Please, choose a different name or set name = None. \"\n )\n else:\n topcell.name = name\n\n if flatten:\n component = Component(name=name or cellname or cellnames[0])\n polygons = topcell.get_polygons(by_spec=True)\n\n for layer_in_gds, polys in polygons.items():\n component.add_polygon(polys, layer=layer_in_gds)\n\n component.name = (\n get_name_short(f\"{component.name}_{gdshash}\")\n if safe_cell_names\n else get_name_short(component.name)\n )\n\n else:\n D_list = []\n cell_to_device = {}\n for c in gdsii_lib.cells.values():\n D = Component(name=c.name)\n D.paths = c.paths\n D.polygons = c.polygons\n D.references = c.references\n D.name = c.name\n for label in c.labels:\n rotation = label.rotation\n if rotation is None:\n rotation = 0\n label_ref = D.add_label(\n text=label.text,\n position=np.asfarray(label.position),\n magnification=label.magnification,\n rotation=rotation * 180 / np.pi,\n layer=(label.layer, label.texttype),\n )\n label_ref.anchor = label.anchor\n\n D.name = (\n get_name_short(f\"{D.name}_{gdshash}\")\n if safe_cell_names\n else get_name_short(D.name)\n )\n D.unlock()\n\n cell_to_device.update({c: D})\n D_list += [D]\n\n for D in D_list:\n # First convert each reference so it points to the right Device\n converted_references = []\n for e in D.references:\n ref_device = cell_to_device[e.ref_cell]\n if isinstance(e, gdspy.CellReference):\n dr = DeviceReference(\n device=ref_device,\n origin=e.origin,\n rotation=e.rotation,\n magnification=e.magnification,\n x_reflection=e.x_reflection,\n )\n dr.owner = D\n converted_references.append(dr)\n elif isinstance(e, gdspy.CellArray):\n dr = CellArray(\n device=ref_device,\n columns=e.columns,\n rows=e.rows,\n spacing=e.spacing,\n origin=e.origin,\n rotation=e.rotation,\n magnification=e.magnification,\n x_reflection=e.x_reflection,\n )\n dr.owner = D\n converted_references.append(dr)\n D.references = converted_references\n\n # Next convert each Polygon\n # temp_polygons = list(D.polygons)\n # D.polygons = []\n # for p in temp_polygons:\n # D.add_polygon(p)\n\n # Next convert each Polygon\n temp_polygons = list(D.polygons)\n D.polygons = []\n for p in temp_polygons:\n if snap_to_grid_nm:\n points_on_grid = 
snap_to_grid(p.polygons[0], nm=snap_to_grid_nm)\n p = gdspy.Polygon(\n points_on_grid, layer=p.layers[0], datatype=p.datatypes[0]\n )\n D.add_polygon(p)\n component = cell_to_device[topcell]\n cast(Component, component)\n\n name = name or component.name\n component.name = name\n\n if metadata_filepath.exists():\n logger.info(f\"Read YAML metadata from {metadata_filepath}\")\n metadata = OmegaConf.load(metadata_filepath)\n\n for port_name, port in metadata.ports.items():\n if port_name not in component.ports:\n component.add_port(\n name=port_name,\n midpoint=port.midpoint,\n width=port.width,\n orientation=port.orientation,\n layer=port.layer,\n port_type=port.port_type,\n )\n\n component.settings = OmegaConf.to_container(metadata.settings)\n\n component.name = name\n\n if decorator:\n component_new = decorator(component)\n component = component_new or component\n if flatten:\n component.flatten()\n component.info.update(**kwargs)\n component.lock()\n return component",
"def loadFromFile(cls , filename):\n if FortIO.isFortranFile( filename ):\n return EclGrid( filename )\n else:\n return EclGrid.loadFromGrdecl( filename )",
"def process_load_magic(path, cell):\n modified = False\n # Find any load magics\n load_magics = find_load_magics_in_cell(cell)\n\n # Replace load magics with file contents\n for magic_string in load_magics:\n path = Path(path)\n script_path = path.parent / magic_string.split('load ')[1]\n formatted_script = format_script_for_cell(script_path)\n cell_str = get_cell_content_as_string(cell)\n find_extra_content(cell_str)\n cell['source'] = cell_str + formatted_script\n modified = True\n\n return modified",
"def import_graphics_section(self, filename_suffix='gra'):\n pass",
"def _read_cell_direct(cls):\n\n cell_data = {}\n cell_columns = cls._get_columns(CELL_MANIFEST)\n cell_psvs = cls._get_component_psvs(CELL_MANIFEST)\n\n for cell_psv in cell_psvs:\n for row in gzip.GzipFile(fileobj=io.BytesIO(cls._read_s3_url(cell_psv))):\n row_dict = dict(zip(cell_columns, row.strip().split(b'|')))\n cell_data[row_dict[\"cellkey\"].decode()] = {k: v.decode() for\n k, v in row_dict.items()}\n total_umis = cell_data[row_dict[\"cellkey\"].decode()][\"total_umis\"]\n cell_data[row_dict[\"cellkey\"].decode()][\"total_umis\"] = (total_umis if total_umis == \"nan\"\n else str(float(total_umis)))\n\n return cell_data",
"def cell(self):\n return self._cell",
"def import_gpx(file):\n\n gpx_file = open(file, 'r')\n gpx_out = gpxpy.parse(gpx_file)\n\n return gpx_out",
"def import_data_helper(self): \n if len(self.components) == 1:\n hapi.fetch(TableName = self.tablename, M = self.components[0][0], I = self.components[0][1], numin = self.min_x, numax = self.max_x)\n else: \n global_id = []\n for c in self.components:\n global_id.append(hapi.ISO[c][0])\n hapi.fetch_by_ids(TableName = self.tablename, iso_id_list = global_id, numin = self.min_x, numax = self.max_x)",
"def import_grid(file_name):\n\n return FileReader(file_name=file_name).grid",
"def build_import_table(self):\n self.section_alignment = 4096\n pe = pefile.PE(data=self.mem_dump.dump)\n IAT_addr = pe.OPTIONAL_HEADER.DATA_DIRECTORY[1].VirtualAddress\n print 'IAT_addr: ' + hex(IAT_addr)\n raw_size = self.import_table.raw_memory_size()\n jump_table_size = self.import_table.get_jump_table_size()\n\n print \"raw_size: \", `raw_size`\n print \"jump table size: \", `jump_table_size`\n\n size = ((raw_size+jump_table_size) / self.section_alignment + 1) * self.section_alignment\n print size\n byte_array = bytearray(size)\n IID_pos = 0\n content_pos = (len(self.import_table.dlls) + 1) * 20\n\n jump_table_pos = IID_pos + raw_size\n\n print \"jump table position: \", hex(jump_table_pos)\n\n for dll in self.import_table.dlls:\n print dll\n thunk_position = content_pos + len(dll) + 1\n byte_array[IID_pos + 12:IID_pos + 16] = pack('<L', content_pos + IAT_addr)\n byte_array[IID_pos + 16:IID_pos + 20] = pack('<L', thunk_position + IAT_addr)\n\n print 'Dll position: ', content_pos\n byte_array[content_pos:content_pos + len(dll)] = dll\n names_pos = thunk_position + (len(self.import_table.dlls[dll]) + 1) * 4\n\n for function_name in self.import_table.dlls[dll]:\n self.import_table.set_thunk_addr(dll, function_name, thunk_position + IAT_addr)\n byte_array[thunk_position:thunk_position + 4] = pack('<L', names_pos + IAT_addr)\n func_name = '\\x00\\x00' + function_name\n func_name_len = len(function_name) + 2\n byte_array[names_pos:names_pos + func_name_len] = func_name\n\n\n # Write the jump table entry \n byte_array[jump_table_pos] = 0xFF\n byte_array[jump_table_pos+1] = 0x25\n byte_array[jump_table_pos+2:jump_table_pos+6] = pack(\"<L\", thunk_position + IAT_addr + self.mem_dump.base_address)\n\n self.import_table.set_jump_addr(dll, function_name, jump_table_pos + IAT_addr)\n\n # Accumulate the new counters\n thunk_position += 4\n names_pos += func_name_len\n jump_table_pos += 6\n\n IID_pos += 20\n content_pos = names_pos + 1\n\n self.mem_dump.append_memory_before_end(byte_array, IAT_addr)",
"def exportECL(self, fname):\r\n\r\n # TODO add consistency of dimensions across the inputs\r\n self.ne, self.nn, self.nz = np.array(self.Grid.GetDimensions()) - 1 # ECLIPSE\r\n filename, ext = os.path.splitext(fname)\r\n if self.GridType == \"vtkStructuredGrid\":\r\n with io.open(filename + \".GRDECL\", 'w', newline='\\r\\n') as f:\r\n f.write('-- Generated [\\n')\r\n f.write('-- Format : ECLIPSE keywords (grid geometry and properties) (ASCII)\\n')\r\n # f.write('-- Exported by : Petrel 2013.7 (64-bit) Schlumberger\\n'\r\n f.write('-- Exported by : ReGrid v.' + version + \"\\n\")\r\n f.write('-- User name : ' + getpass.getuser() + \"\\n\")\r\n f.write('-- Date : ' + datetime.now().strftime(\"%A, %B %d %Y %H:%M:%S\") + \"\\n\")\r\n f.write('-- Project : ' + \"ReGrid project\\n\")\r\n f.write('-- Grid : ' + \"Description\\n\")\r\n f.write('-- Generated ]\\n\\n')\r\n\r\n f.write('SPECGRID -- Generated : ReGrid\\n')\r\n f.write(' %i %i %i 1 F /\\n\\n' % (self.ne, self.nn, self.nz))\r\n f.write('COORDSYS -- Generated : ReGrid\\n')\r\n f.write(' 1 4 /\\n\\n') # what is this line?\r\n\r\n f.write('COORD -- Generated : ReGrid\\n')\r\n nz = self.nz\r\n fstr = str(\" \")\r\n\r\n for iy in range(self.nn):\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(0)\r\n fstr = self.printCOORDS(f, p0, fstr)\r\n p1 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(4)\r\n fstr = self.printCOORDS(f, p1, fstr)\r\n # outside edge on far x\r\n p2 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(1)\r\n fstr = self.printCOORDS(f, p2, fstr)\r\n p3 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(5)\r\n fstr = self.printCOORDS(f, p3, fstr)\r\n # outside edge on far y\r\n for ix in range(self.ne):\r\n p8 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(3)\r\n fstr = self.printCOORDS(f, p8, fstr)\r\n p9 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(7)\r\n fstr = self.printCOORDS(f, p9, fstr)\r\n # outside edge on far northeast\r\n p14 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(2)\r\n fstr = self.printCOORDS(f, p14, fstr)\r\n p15 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(6)\r\n fstr = self.printCOORDS(f, p15, fstr)\r\n f.write(fstr)\r\n fstr = \" \"\r\n f.write(\" /\")\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n\r\n f.write('ZCORN -- Generated : ReGrid\\n')\r\n for iz in range(self.nz):\r\n for iy in range(self.nn):\r\n # front face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(0)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(1)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n # back face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(3)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(2)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n # bottom layer\r\n for iy in range(self.nn):\r\n # front face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(4)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(5)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n # back face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(7)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(6)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n f.write(fstr)\r\n 
fstr = \" \"\r\n f.write(\" /\")\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n f.write('ACTNUM -- Generated : ReGrid\\n')\r\n\r\n c = -999\r\n N = 0\r\n for iac in self.ActiveCells.flatten(order='F'):\r\n if iac == c:\r\n N += 1\r\n else:\r\n if c != -999:\r\n fstr = self.printAC(f, c, N, fstr)\r\n c = iac\r\n N = 1\r\n fstr = self.printAC(f, c, N, fstr)\r\n f.write(fstr)\r\n f.write(\" /\")\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n else:\r\n print(\"Only structured grids can be converted to ECLIPSE files\")",
"def dereference(self):\n offset = headers.calculateRelativeAddress(self, self['Name'])\n return self.p.p.new(IMAGE_IMPORT_HINT, __name__='ImportName', offset=offset)"
] | [
"0.7146233",
"0.5359825",
"0.53328305",
"0.52091736",
"0.5137022",
"0.51339316",
"0.5118252",
"0.5001852",
"0.49921566",
"0.4971133",
"0.49391878",
"0.4921173",
"0.49046072",
"0.48940232",
"0.48588583",
"0.48488533",
"0.48184195",
"0.4751385",
"0.47495297",
"0.4736816",
"0.47112766",
"0.47019655",
"0.46877936",
"0.4683013",
"0.46646816",
"0.46633863",
"0.46613854",
"0.46469417",
"0.46201837",
"0.46176943"
] | 0.6714337 | 1 |
Write the specified cell to the file. | def write_cell(self, cell, timestamp=None):
cell.to_gds(self._outfile, self._res, timestamp)
return self | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_cell(self, cell):\n self._outfile.write(cell.to_gds(self._res))\n return self",
"def writeCell(hoja, fila, columna, dato, formato=''):\r\n if formato:\r\n hoja.write(fila, columna, dato, formato)\r\n else:\r\n hoja.write(fila, columna, dato)",
"def write_cell(self, sheet_name, row, column, value, color='black'):\n if isinstance(row, int) and isinstance(column, int):\n try:\n cell_obj = self.wb[sheet_name].cell(row, column)\n cell_obj.font = Font(color=self.RGBDict[color], bold=True)\n cell_obj.value = value\n self.wb.save(self.filename)\n logger.info(\"Excel文件[{}]表单[{}]写入数据[{}]\".format(self.filename, sheet_name, value))\n except Exception as e:\n logger.error(\"Excel文件[{}]表单[{}]写入数据错误\\n错误信息:{}\".format(self.filename, sheet_name, e))\n raise e\n else:\n logger.error(\"Excel文件写入数据错误\\n错误信息:{}\".format('row and column must be type int'))\n raise TypeError('row and column must be type int')",
"def write(self, fname):\n pass",
"def setOutCell(outSheet, col, row, value):\n\n def _getOutCell(outSheet, colIndex, rowIndex):\n \"\"\" HACK: Extract the internal xlwt cell representation. \"\"\"\n row = outSheet._Worksheet__rows.get(rowIndex)\n if not row: return None\n\n cell = row._Row__cells.get(colIndex)\n return cell\n\n # HACK to retain cell style.\n previousCell = _getOutCell(outSheet, col, row)\n # END HACK, PART I\n\n outSheet.write(row, col, value)\n\n # HACK, PART II\n if previousCell:\n newCell = _getOutCell(outSheet, col, row)\n if newCell:\n newCell.xf_idx = previousCell.xf_idx\n # END HACK",
"def write(self, filename):\n pass",
"def write(self, filename):\n pass",
"def write_to_file(self, filename: str) -> None:",
"def cell_file(self, filename, append=False):\n if filename.startswith(\"~\"):\n filename = os.path.expanduser(filename)\n filename = os.path.abspath(filename)\n # Create the path of the file if dirs don't exist:\n path = os.path.dirname(os.path.abspath(filename))\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n # Create or append to file:\n if not append:\n message = \"Created file '%s'.\" % filename\n if os.path.isfile(self.code):\n message = \"Overwrote file '%s'.\" % filename\n else:\n message = \"Appended on file '%s'.\" % filename\n try:\n if append:\n fp = open(filename, \"a\")\n else:\n fp = open(filename, \"w\")\n fp.write(self.code)\n fp.close()\n self.kernel.Print(message)\n except Exception as e:\n self.kernel.Error(str(e))\n self.evaluate = False",
"def write_gds(self, outfile, cells=None, timestamp=None):\n if isinstance(outfile, basestring):\n outfile = open(outfile, 'wb')\n close = True\n else:\n close = False\n now = datetime.datetime.today() if timestamp is None else timestamp\n name = self.name if len(self.name) % 2 == 0 else (self.name + '\\0')\n outfile.write(\n struct.pack('>19h', 6, 0x0002, 0x0258, 28, 0x0102, now.year,\n now.month, now.day, now.hour, now.minute, now.second,\n now.year, now.month, now.day, now.hour, now.minute,\n now.second, 4 + len(name), 0x0206) +\n name.encode('ascii') + struct.pack('>2h', 20, 0x0305) +\n _eight_byte_real(self.precision / self.unit) +\n _eight_byte_real(self.precision))\n if cells is None:\n cells = self.cell_dict.values()\n else:\n cells = [self.cell_dict.get(c, c) for c in cells]\n for cell in cells:\n outfile.write(cell.to_gds(self.unit / self.precision))\n outfile.write(struct.pack('>2h', 4, 0x0400))\n if close:\n outfile.close()",
"def write_to_cell(table, row, col, text):\n\n\ttable.cell(row, col).paragraphs[0].runs[0].text = text",
"def write_towhee_coord(self, filename):\n with open(filename, 'w') as f:\n df = self.contents[['X', 'Y', 'Z']].copy()\n np.savetxt(f, df.values, fmt=\" %20.15f\"*3)",
"def write_file(self):\n if self._write_file == None:\n return\n\n try:\n out = file(self._write_file, \"w\")\n except IOError, e:\n print e\n sys.exit(1)\n out.writelines(\"A cases\") \n out.close()",
"def writetofile(self,direction,value):\r\n output = str(\"{},{} \\n\".format(direction,value))\r\n self.new_file.write(output)",
"def save_GRID( self , filename ):\n self._fwrite_GRID( filename )",
"def w(self, value):\n self.oFile.write(value)",
"def write( self, NewFilename='', Integer=True ):\n try:\n if NewFilename != '':\n self.name=NewFilename\n Output = open( self.name, 'w' )\n Output.write( 'ncols\\t\\t %d\\n' % self.ncols )\n Output.write( 'nrows\\t\\t %d\\n' % self.nrows )\n Output.write( 'xllcorner\\t\\t %f\\n' % self.xllcorner)\n Output.write( 'yllcorner\\t\\t %f\\n' % self.yllcorner)\n Output.write( 'cellsize\\t\\t %f\\n' % self.cellsize)\n if Integer:\n Output.write( 'NODATA_value\\t\\t %d\\n' % int(self.nodata) )\n else:\n Output.write( 'NODATA_value\\t\\t %f\\n' % self.nodata )\n for row in range( self.nrows-1,-1,-1 ):\n record = []\n for col in range( self.ncols ):\n if Integer:\n record.append( str( int( round( self.data[row,col]) ) ) )\n else:\n record.append( str(self.data[row,col]) )\n Output.write( string.join(record, ' ')+'\\n' )\n Output.close()\n except:\n print \"Error writing grid ::\", self.name",
"def setCell(self, row = None, column = None, value = None, *, cell = None):\n\n\t\t\t\tif (cell is None):\n\t\t\t\t\tcell = self.getCell(row = row, column = column)\n\n\t\t\t\tif (value is None):\n\t\t\t\t\tvalue = \"\"\n\n\t\t\t\t#Write Value\n\t\t\t\tfor _cell in self.ensure_container(cell):\n\t\t\t\t\t_cell.value = f\"{value}\" #Make sure input is a valid ascii",
"def filewrite(self, filename):\n io.write(self, filename)",
"def write_file(file_name, table):\r\n \r\n savectr=len(table)\r\n try:\r\n with open (file_name, 'wb') as objFile:\r\n pickle.dump(table,objFile) #pickle my 2D list\r\n print ('{} CD(s) saved into {}.\\n'.format(savectr,file_name))\r\n except PermissionError as e:\r\n print('Not enough rights to create/modify ' + file_name + '.') #if unable pickle data due to permission issues\r\n print ()\r\n print (e, e.__doc__, sep='\\n')\r\n print ()\r\n except IOError as e:\r\n print ('I/O error({0}): {1}'.format(e.errno,e.strerror))#if unable to pickle data due to IO errors such as disk space issues\r\n print ()\r\n print (e, e.__doc__, sep='\\n')\r\n print ()\r\n except pickle.PickleError as e:\r\n print ('Unable to write data into ' + file_name + '.') #if unable to pickle 2D list, exception handling for pickling errors\r\n print ()\r\n print (e, e.__doc__, sep='\\n')\r\n print ()",
"def write_to_file(self, time):\n if Parameters.instance().use_ages:\n nb_age_groups = len(Parameters.instance().age_proportions)\n else:\n nb_age_groups = 1\n if Parameters.instance().use_ages:\n if self.spatial_output: # Separate output line for each cell\n for cell in self.population.cells:\n for age_i in range(0, nb_age_groups):\n data = {s: 0 for s in list(InfectionStatus)}\n for inf_status in data:\n data_per_inf_status =\\\n cell.compartment_counter.retrieve()[inf_status]\n data[inf_status] += data_per_inf_status[age_i]\n # Age groups are numbered from 1 to the total number\n # of age groups (thus the +1):\n data[\"age_group\"] = age_i+1\n data[\"time\"] = time\n data[\"cell\"] = cell.id\n data[\"location_x\"] = cell.location[0]\n data[\"location_y\"] = cell.location[1]\n self.writer.write(data)\n else: # Summed output across all cells in population\n data = {s: 0 for s in list(InfectionStatus)}\n for cell in self.population.cells:\n for age_i in range(0, nb_age_groups):\n for inf_status in list(InfectionStatus):\n data_per_inf_status =\\\n cell.compartment_counter.retrieve()[inf_status]\n data[inf_status] += data_per_inf_status[age_i]\n data[\"age_group\"] = age_i+1\n data[\"time\"] = time\n self.writer.write(data)\n else: # If age not considered, age_group not written in csv\n if self.spatial_output: # Separate output line for each cell\n for cell in self.population.cells:\n data = {s: 0 for s in list(InfectionStatus)}\n for k in data:\n data[k] += sum(cell.compartment_counter.retrieve()[k])\n data[\"time\"] = time\n data[\"cell\"] = cell.id\n data[\"location_x\"] = cell.location[0]\n data[\"location_y\"] = cell.location[1]\n self.writer.write(data)\n else: # Summed output across all cells in population\n data = {s: 0 for s in list(InfectionStatus)}\n for cell in self.population.cells:\n for k in data:\n # Sum across age compartments\n data[k] += sum(cell.compartment_counter.retrieve()[k])\n data[\"time\"] = time\n self.writer.write(data)",
"def write_table(table, file_path):\n\n\twith open(file_path, 'w') as file:\n\t\tfile.write(table)",
"def write_to(self, filename):\n with open(filename, 'w') as f:\n for xx, yy, zz, ww in zip(self.x, self.y, self.field, self.weight):\n f.write(\"%s %s %s %s\\n\" % (xx, yy, zz, ww))\n logger.info(\"Written data into file {0}\".format(filename))",
"def write_to_file(fib_details: dict):\n pass # TODO: Replace with implementation!",
"def write_to_sheet(self):\n for i, row in enumerate(self.ws[self.range_name]):\n if isinstance(self.data[i], tuple):\n self.data[i] = list(self.data[i])\n for j, cell in enumerate(row):\n cell.value = self.data[i][j]",
"def write_CASTEP_cell(CASTEP_cell,filename):\n f=open(seedname+\".cell\",\"r\")\n castep_cell = f.readlines()\n global hashes\n f=StringIO.StringIO()\n f.write(\"%BLOCK LATTICE_CART\\n\")\n for i in xrange(3):\n f.write(\"{0[0]:>20.15f} {0[1]:>20.15f} {0[2]:>20.15f}\\n\".format(\n (CASTEP_cell[\"lattvec\"][:,i]*10).tolist()))\n f.write(\"%ENDBLOCK LATTICE_CART\\n\")\n f.write(\"\\n\")\n f.write(\"%BLOCK POSITIONS_FRAC\\n\")\n k = 0\n for i in xrange(len(CASTEP_cell[\"numbers\"])):\n for j in xrange(CASTEP_cell[\"numbers\"][i]):\n l = k + j\n f.write(\"{0}\".format(\"\".join(CASTEP_cell[\"elements\"][i]))) \n f.write(\"{0[0]:>20.15f} {0[1]:>20.15f} {0[2]:>20.15f}\\n\".format(\n CASTEP_cell[\"positions\"][:,l].tolist()))\n k += j + 1\n f.write(\"%ENDBLOCK POSITIONS_FRAC\\n\")\n\n # Copy everything after '%ENDBLOCK POSITIONS_FRAC'\n for index, line in enumerate(castep_cell):\n if '%ENDBLOCK POSITIONS_FRAC' in line.upper():\n index_end = index\n for i in xrange(index_end+1,len(castep_cell)):\n f.write(castep_cell[i])\n with open(filename,\"w\") as finalf:\n finalf.write(f.getvalue())\n f.close()",
"def save_to_file(self, file_path):\n if file_path:\n f = open(file_path, 'w')\n for row in range(self.rows):\n f.write(''.join(self.data[row]) + '\\n')\n f.close()",
"def save_to_file(self, file_path):\n if file_path:\n f = open(file_path, 'w')\n for row in range(self.rows):\n f.write(''.join(self.data[row]) + '\\n')\n f.close()",
"def write_gds(self, outfile, cells=None, timestamp=None, binary_cells=None):\n close = True\n if hasattr(outfile, \"__fspath__\"):\n outfile = open(outfile.__fspath__(), \"wb\")\n elif isinstance(outfile, (basestring, Path)):\n outfile = open(outfile, \"wb\")\n else:\n close = False\n now = datetime.datetime.today() if timestamp is None else timestamp\n name = self.name if len(self.name) % 2 == 0 else (self.name + \"\\0\")\n outfile.write(\n struct.pack(\n \">5H12h2H\",\n 6,\n 0x0002,\n 0x0258,\n 28,\n 0x0102,\n now.year,\n now.month,\n now.day,\n now.hour,\n now.minute,\n now.second,\n now.year,\n now.month,\n now.day,\n now.hour,\n now.minute,\n now.second,\n 4 + len(name),\n 0x0206,\n )\n + name.encode(\"ascii\")\n + struct.pack(\">2H\", 20, 0x0305)\n + _eight_byte_real(self.precision / self.unit)\n + _eight_byte_real(self.precision)\n )\n if cells is None:\n cells = self.cells.values()\n else:\n cells = [self.cells.get(c, c) for c in cells]\n if len(cells) == 0:\n warnings.warn(\"[GDSPY] Creating a GDSII file without any cells.\")\n for cell in cells:\n cell.to_gds(outfile, self.unit / self.precision, timestamp=timestamp)\n if binary_cells is not None:\n for bc in binary_cells:\n outfile.write(bc)\n outfile.write(struct.pack(\">2H\", 4, 0x0400))\n if close:\n outfile.close()",
"def write(cls, experiment: Experiment):\n cls.__mutex.acquire()\n os.makedirs('./temp', exist_ok=True)\n worksheet = cls.__workbook.add_worksheet(experiment.name)\n for i, value in enumerate(experiment.values.items()):\n worksheet.write(0, i, value[0])\n worksheet.write_column(1, i, value[1])\n if experiment.model == 'accuracy':\n # cls.__add_accuracy_plot(worksheet, value)\n cls.test(worksheet, value)\n\n pass\n\n if experiment.model == 'performance':\n cls.test(worksheet, value)\n pass\n # cls.__add_accuracy_plot(worksheet, value)\n\n cls.__mutex.release()"
] | [
"0.7893535",
"0.7167007",
"0.64020705",
"0.6233088",
"0.6106569",
"0.60965765",
"0.60965765",
"0.605748",
"0.59968483",
"0.59903365",
"0.5989713",
"0.5946001",
"0.59330434",
"0.58675724",
"0.5825177",
"0.58209854",
"0.58193207",
"0.57958156",
"0.5771627",
"0.5768062",
"0.57342947",
"0.5719415",
"0.57104045",
"0.5702582",
"0.56932694",
"0.5671611",
"0.5655174",
"0.5655174",
"0.5650916",
"0.56448686"
] | 0.74768186 | 1 |
Write the specified binary cells to the file. | def write_binary_cells(self, binary_cells):
for bc in binary_cells:
self._outfile.write(bc)
return self | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_binary(self, path):\n return",
"def write_df_to_binary(file_name_mask, df):\n write_matrix_to_binary(file_name_mask + '-value.bin', df.values)\n with open(file_name_mask + '-name.txt', 'w') as f:\n f.write(\"\\t\".join(df.index))\n f.write(\"\\n\")\n f.write(\"\\t\".join(df.columns))\n f.write(\"\\n\")",
"def write_gds(self, outfile, cells=None, timestamp=None, binary_cells=None):\n close = True\n if hasattr(outfile, \"__fspath__\"):\n outfile = open(outfile.__fspath__(), \"wb\")\n elif isinstance(outfile, (basestring, Path)):\n outfile = open(outfile, \"wb\")\n else:\n close = False\n now = datetime.datetime.today() if timestamp is None else timestamp\n name = self.name if len(self.name) % 2 == 0 else (self.name + \"\\0\")\n outfile.write(\n struct.pack(\n \">5H12h2H\",\n 6,\n 0x0002,\n 0x0258,\n 28,\n 0x0102,\n now.year,\n now.month,\n now.day,\n now.hour,\n now.minute,\n now.second,\n now.year,\n now.month,\n now.day,\n now.hour,\n now.minute,\n now.second,\n 4 + len(name),\n 0x0206,\n )\n + name.encode(\"ascii\")\n + struct.pack(\">2H\", 20, 0x0305)\n + _eight_byte_real(self.precision / self.unit)\n + _eight_byte_real(self.precision)\n )\n if cells is None:\n cells = self.cells.values()\n else:\n cells = [self.cells.get(c, c) for c in cells]\n if len(cells) == 0:\n warnings.warn(\"[GDSPY] Creating a GDSII file without any cells.\")\n for cell in cells:\n cell.to_gds(outfile, self.unit / self.precision, timestamp=timestamp)\n if binary_cells is not None:\n for bc in binary_cells:\n outfile.write(bc)\n outfile.write(struct.pack(\">2H\", 4, 0x0400))\n if close:\n outfile.close()",
"def write_to_binary_file(self, filename):\n\n self.octree.writeBinary(str.encode(filename))",
"def bin_writer(fpath, fname, data):\n path = fpath + fname + '.dat'\n with open(path, 'ab') as file:\n for row in data:\n file.write(row.encode('utf-8'))\n return None",
"def write_bin(file, binary, buffer=None, append=True):\n\n # Get current stream, default or not.\n stream = cp.cuda.get_current_stream()\n\n if buffer is None:\n buffer = cp.asnumpy(binary)\n else:\n binary.get(out=buffer)\n\n if append is True:\n mode = \"ab\"\n else:\n mode = \"wb\"\n\n with open(file, mode) as f:\n stream.synchronize()\n buffer.tofile(f)",
"def write_cell(self, cell):\n self._outfile.write(cell.to_gds(self._res))\n return self",
"def write_subactors_file_binary(filename, G, nodelist, profile, colnames):\n assert(len(nodelist) == G.GetNodes())\n assert(len(profile) >= G.GetNodes())\n binattrs = ['gender', 'public']\n # rename gender to male for binary attribute\n binattr_names = ['male' if x == 'gender' else x for x in binattrs] \n with open(filename, 'w') as f:\n f.write(' '.join(binattr_names) + '\\n')\n for i in nodelist:\n for attr in binattrs:\n val = profile[i][colnames[attr]]\n val = val if val in ['0','1'] else 'NA'\n f.write(val)\n if attr == binattrs[-1]:\n f.write('\\n')\n else:\n f.write(' ' )",
"def write_file(self):\n print 'Writing '+self.name+' binary...'\n if self.vals is not None:\n if len(self.vals) == self.size:\n stream = self.pack_mem()\n with open(self.name+'.bin','wb') as f:\n f.write(stream)\n print 'File written: '+self.name+'.bin'\n else:\n print 'Error: input array for '+self.name+'is not the right '+\\\n 'size (should be '+str(self.size)+'). Skipping.'\n else:\n print 'No array provided, skipping.'",
"def tabser(filename, body, data):\n # XXX checksums ignored\n head = Struct(\"!BiHBxxxB\")\n body = Struct(body)\n # foot = Struct(\"!4s\")\n\n buffer = bytearray([0] * (2 ** 16))\n head.pack_into(buffer, 0, 0, int(time()), len(data), body.size, 0),\n offset = head.size\n for row in data:\n body.pack_into(buffer, offset, *row, 0)\n offset += body.size\n else:\n print(\"write %d rows\" % len(data))\n # offset = 2 ** 16 - foot.size\n # foot.pack_into(buffer, offset, bytes([0, 0, 0, 0]))\n with open(filename, \"wb\") as f:\n f.write(buffer)",
"def write_gds(self, outfile, cells=None, timestamp=None):\n if isinstance(outfile, basestring):\n outfile = open(outfile, 'wb')\n close = True\n else:\n close = False\n now = datetime.datetime.today() if timestamp is None else timestamp\n name = self.name if len(self.name) % 2 == 0 else (self.name + '\\0')\n outfile.write(\n struct.pack('>19h', 6, 0x0002, 0x0258, 28, 0x0102, now.year,\n now.month, now.day, now.hour, now.minute, now.second,\n now.year, now.month, now.day, now.hour, now.minute,\n now.second, 4 + len(name), 0x0206) +\n name.encode('ascii') + struct.pack('>2h', 20, 0x0305) +\n _eight_byte_real(self.precision / self.unit) +\n _eight_byte_real(self.precision))\n if cells is None:\n cells = self.cell_dict.values()\n else:\n cells = [self.cell_dict.get(c, c) for c in cells]\n for cell in cells:\n outfile.write(cell.to_gds(self.unit / self.precision))\n outfile.write(struct.pack('>2h', 4, 0x0400))\n if close:\n outfile.close()",
"def save_bin(data, file_path):\n np.save(file_path, data)",
"def _save_binary(file_name, data):\n with open(file_name, \"wb\") as f:\n cp.dump(data, f)",
"def write_matrix_to_binary(file_name, val):\n with open(file_name, 'wb') as file:\n nrow = val.shape[0]\n ncol = val.shape[1]\n file.write(int32_to_bytes(nrow) + int32_to_bytes(ncol) + val.astype(float).tobytes(order='C'))",
"def write_cells_shp(self,shpname,extra_fields=[],overwrite=True):\n # assemble a numpy struct array with all of the info \n # seems that having an object references in there is unstable,\n # so pass geometries in a list separately.\n base_dtype =[('poly_id1',np.int32),\n ('area',np.float64),\n ('volume',np.float64),\n ('depth_mean',np.float64)]\n\n try:\n cell_depths_max = self.cell_depths_max()\n extra_fields.append( ('depth_max',np.float64, lambda i: cell_depths_max[i]) )\n except:\n pass\n\n for efi in range(len(extra_fields)):\n fname,fdata=extra_fields[efi]\n base_dtype.append( (fname,fdata.dtype) )\n\n cell_data = np.zeros(self.Ncells(), dtype=base_dtype)\n\n for efi in range(len(extra_fields)):\n fname,fdata=extra_fields[efi]\n cell_data[fname]=fdata\n\n self.update_cell_edges()\n\n cell_geoms = [None]*self.Ncells()\n \n cell_data['depth_mean'] = self.cell_depths()\n cell_data['area']=self.cells_area()\n cell_data['volume']=cell_data['depth_mean']*cell_data['area']\n cell_data['poly_id1'] = 1+np.arange(self.Ncells())\n\n for poly_id in range(self.Ncells()):\n if poly_id % 500 == 0:\n print( \"%0.2g%%\"%(100.*poly_id/self.Ncells()) )\n\n # older code put this together manually.\n cell_geoms[poly_id]=self.cell_polygon(poly_id)\n\n print( cell_data.dtype )\n wkb2shp.wkb2shp(shpname,input_wkbs=cell_geoms,fields=cell_data,\n overwrite=overwrite)",
"def save_bin(words,data,fname):\n\n out=open(fname,\"wb\")\n\n rows,dims=data.shape\n out.write(\"{} {}\\n\".format(rows,dims).encode(\"utf-8\"))\n counter=0\n\n for i,w in enumerate(words):\n out.write(w.encode(\"utf-8\"))\n out.write(\" \".encode(\"utf-8\"))\n out.write(struct.pack(\"{}f\".format(dims),*data[i,:]))\n counter+=1\n \n out.close()\n print(\"Model saved to\",fname,file=sys.stderr)",
"def binary_write(iring, file_ext='out', *args, **kwargs):\n return BinaryFileWriteBlock(iring, file_ext, *args, **kwargs)",
"def save_fits(data, fname):\n\tcols = fits.ColDefs(np.copy(data)) # This is somehow necessary.\n\ttbhdu = fits.BinTableHDU.from_columns(cols)\n\ttbhdu.writeto(fname, clobber=True)\n\t\n\treturn",
"def write_binary(self, data, ensure=False):\n if ensure:\n self.dirpath().ensure(dir=1)\n with self.open(\"wb\") as f:\n f.write(data)",
"def writeCell(hoja, fila, columna, dato, formato=''):\r\n if formato:\r\n hoja.write(fila, columna, dato, formato)\r\n else:\r\n hoja.write(fila, columna, dato)",
"def save_to(self, f: BinaryIO):\n raise NotImplementedError",
"def write(filename, data, extname=None, extver=None, header=None,\n clobber=False, ignore_empty=False, units=None, table_type='binary',\n names=None, write_bitcols=False, compress=None, tile_dims=None,\n **keys):\n if keys:\n import warnings\n warnings.warn(\n \"The keyword arguments '%s' are being ignored! This warning \"\n \"will be an error in a future version of `fitsio`!\" % keys,\n DeprecationWarning, stacklevel=2)\n\n kwargs = {\n 'clobber': clobber,\n 'ignore_empty': ignore_empty\n }\n with FITS(filename, 'rw', **kwargs) as fits:\n fits.write(data,\n table_type=table_type,\n units=units,\n extname=extname,\n extver=extver,\n compress=compress,\n header=header,\n names=names,\n write_bitcols=write_bitcols,\n tile_dims=tile_dims)",
"def write_file(self):\r\n # -open file for writing\r\n f_fbob = open(self.fn_path, 'w')\r\n\r\n # -write header\r\n f_fbob.write('%s\\n' % (self.heading))\r\n\r\n # -write sections 1 & 2 : NOTE- what about NOPRINT?\r\n f_fbob.write('%10i%10i%10i%10i\\n' % (self.nqfb, self.nqcfb,\r\n self.nqtfb, self.iufbobsv))\r\n f_fbob.write('%10e\\n' % (self.tomultfb)) # check format\r\n\r\n # -write sections 3-5 looping through observations groups\r\n c = 0\r\n for i in range(self.nqfb):\r\n # while (i < self.nqfb):\r\n # write section 3\r\n f_fbob.write('{:10d}{:10d}\\n'.format(self.nqobfb[i],\r\n self.nqclfb[i]))\r\n\r\n # Loop through observation times for the groups\r\n for j in range(self.nqobfb[i]):\r\n # -write section 4\r\n f_fbob.write(\r\n '{}{:10d}{:10.4g}{}{:10.4g}\\n'.format(self.obsnam[c],\r\n self.irefsp[c],\r\n self.toffset[c], ' ',\r\n self.flwobs[c]))\r\n c += 1 # index variable\r\n\r\n # -write section 5 - NOTE- need to adjust factor for muliple obs same cell\r\n for j in range(abs(self.nqclfb[i])):\r\n if self.nqclfb[\r\n i] < 0: # set factor to 1.0 for all cells in group\r\n self.factor[i, :] = 1.0\r\n f_fbob.write('{:10d}{:10d}{:10d}{}{:10f}\\n'\r\n .format(self.layer[i, j], (self.row[i, j]),\r\n self.column[i, j],\r\n ' ', self.factor[\r\n i, j])) # note- is 10f good enough here?\r\n\r\n f_fbob.close()\r\n #\r\n # swm: BEGIN hack for writing standard file\r\n sfname = self.fn_path # swm:hack\r\n sfname += '_ins' # swm: hack\r\n # write header\r\n f_ins = open(sfname, 'w') # swm: hack for standard file\r\n f_ins.write('jif @\\n') # swm: hack for standard file\r\n f_ins.write('StandardFile 0 1 %s\\n' % (\r\n self.nqtfb)) # swm: hack for standard file\r\n for i in range(0, self.nqtfb):\r\n f_ins.write(\r\n '{}\\n'.format(self.obsnam[i])) # swm: hack for standard file\r\n\r\n f_ins.close()\r\n # swm: END hack for writing standard file\r\n\r\n return",
"def write(self, fileW):\n fileW.wByte(self.b)\n fileW.wByte(self.g)\n fileW.wByte(self.r)\n fileW.wByte(self.a)",
"def write(self, data, units=None, extname=None, extver=None,\n compress=None, tile_dims=None, header=None, names=None,\n table_type='binary', write_bitcols=False, **keys):\n\n if keys:\n import warnings\n warnings.warn(\n \"The keyword arguments '%s' are being ignored! This warning \"\n \"will be an error in a future version of `fitsio`!\" % keys,\n DeprecationWarning, stacklevel=2)\n\n isimage = False\n if data is None:\n isimage = True\n elif isinstance(data, numpy.ndarray):\n if data.dtype.fields == None: # noqa - probably should be is None\n isimage = True\n\n if isimage:\n self.write_image(data, extname=extname, extver=extver,\n compress=compress, tile_dims=tile_dims,\n header=header)\n else:\n self.write_table(data, units=units,\n extname=extname, extver=extver, header=header,\n names=names,\n table_type=table_type,\n write_bitcols=write_bitcols)",
"def writeElems(fil, elems1, eofs=1, nofs=1):\n #pyFormex uses the same convention for hexahedral elements as ABAQUS\n #Gambit uses a different convention\n #function currently only for hexahedral mesh\n elems = elems1.copy()\n elems[:,2] = elems1[:,3]\n elems[:,3] = elems1[:,2]\n\n elems[:,6] = elems1[:,7]\n elems[:,7] = elems1[:,6]\n \n fil.write(' ELEMENTS/CELLS 2.2.30\\n')\n for i,e in enumerate(elems+nofs):\n fil.write('%8d %2d %2d %8d%8d%8d%8d%8d%8d%8d\\n %8d\\n' % ((i+eofs,4,8)+tuple(e)))\n fil.write('ENDOFSECTION\\n')",
"def write_file(file_name, table):\r\n \r\n savectr=len(table)\r\n try:\r\n with open (file_name, 'wb') as objFile:\r\n pickle.dump(table,objFile) #pickle my 2D list\r\n print ('{} CD(s) saved into {}.\\n'.format(savectr,file_name))\r\n except PermissionError as e:\r\n print('Not enough rights to create/modify ' + file_name + '.') #if unable pickle data due to permission issues\r\n print ()\r\n print (e, e.__doc__, sep='\\n')\r\n print ()\r\n except IOError as e:\r\n print ('I/O error({0}): {1}'.format(e.errno,e.strerror))#if unable to pickle data due to IO errors such as disk space issues\r\n print ()\r\n print (e, e.__doc__, sep='\\n')\r\n print ()\r\n except pickle.PickleError as e:\r\n print ('Unable to write data into ' + file_name + '.') #if unable to pickle 2D list, exception handling for pickling errors\r\n print ()\r\n print (e, e.__doc__, sep='\\n')\r\n print ()",
"def _saveBinaryData(self, file, with_axis=None):\n if with_axis is not None:\n data = self._data_with_axis(with_axis)\n numpy.save(file, data)\n else:\n numpy.save(file, self.data)",
"def save(self,outPath=None):\n if (not self.canSave or self.skipObjRecords): raise StateError(_(\"Insufficient data to write file.\"))\n if not outPath:\n fileInfo = self.fileInfo\n outPath = os.path.join(fileInfo.dir,fileInfo.name)\n out = file(outPath,'wb')\n #--Tes3 Record\n self.tes3.changed = 1\n self.tes3.hedr.changed = 1\n self.tes3.hedr.numRecords = len(self.records) #--numRecords AFTER TES3 record\n self.tes3.getSize()\n self.tes3.dump(out)\n #--Size Cell Records\n cntRecords = 0\n progress = self.progress\n progress.setMax(len(self.cells))\n progress(0.0,'Saving '+self.fileInfo.name)\n for record in self.cells:\n record.getSize()\n #--Progress\n cntRecords += 1\n progress(cntRecords)\n #--Other Records\n for record in self.records:\n record.getSize() #--Should already be done, but just in case.\n record.dump(out)\n out.close()",
"def write_raw_text(self, path='.'):\n cells = self.get_cells()\n arrays = []\n for cell in cells:\n arrays.append(cell.data)\n array = np.concatenate(arrays)\n fn = os.path.join(path, self.label + '.txt')\n fmt = []\n p = re.compile('(\\w)(\\d+)')\n for key, value in self.datatype:\n m = p.search(value)\n if m:\n kind, size = m.groups()\n # strings\n if kind == 'S':\n add = '%{}c'.format(size)\n # integers\n elif kind in ['u', 'i']:\n add = '%d'\n else:\n add = '%.8e'\n else:\n add = '%.8e'\n fmt.append(add)\n np.savetxt(fn, array, fmt=fmt, delimiter='\\t')\n return"
] | [
"0.65858525",
"0.6368011",
"0.6283617",
"0.622919",
"0.6155533",
"0.6013805",
"0.5996172",
"0.5971584",
"0.59386206",
"0.5921192",
"0.591766",
"0.5843438",
"0.58248633",
"0.5801343",
"0.578895",
"0.5729028",
"0.57093227",
"0.5703158",
"0.56399626",
"0.56187046",
"0.56164056",
"0.5616271",
"0.560932",
"0.5608996",
"0.55241644",
"0.55212003",
"0.55159926",
"0.5445883",
"0.5433054",
"0.54202205"
] | 0.8135084 | 0 |
Return the unit and precision used in the GDS stream file. | def get_gds_units(infile):
close = True
if hasattr(infile, "__fspath__"):
infile = open(infile.__fspath__(), "rb")
elif isinstance(infile, (basestring, Path)):
infile = open(infile, "rb")
else:
close = False
unit = precision = None
for rec_type, data in _raw_record_reader(infile):
# UNITS
if rec_type == 0x03:
db_user = _eight_byte_real_to_float(data[4:12])
db_meters = _eight_byte_real_to_float(data[12:])
unit = db_meters / db_user
precision = db_meters
break
if close:
infile.close()
return (unit, precision) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_precision(self):\n ...",
"def unit_of_measurement(self) -> str:\n return FPS",
"def unit_of_measurement(self) -> str:\n return FPS",
"def unit_of_measurement(self):\n return self._metadata[1]",
"def GetDataPrecision():\n return _gmat_py.GmatBase_GetDataPrecision()",
"def GmatBase_GetDataPrecision():\n return _gmat_py.GmatBase_GetDataPrecision()",
"def _prec(self):\n prec = int(self.get_par(\"precision\"))\n if prec == 0:\n return \"f\"\n else:\n return \".{0}f\".format(prec)",
"def getSize(self, precision=1):\n kilo = 1000.0\n if self.size < kilo:\n return str(self.size) + \"B\"\n elif self.size < kilo ** 2:\n return str(round(self.size / kilo, precision)) + \"K\"\n elif self.size < kilo ** 3:\n return str(round(self.size / (kilo ** 2), precision)) + \"M\"\n elif self.size < kilo ** 4:\n return str(round(self.size / (kilo ** 3), precision)) + \"G\"\n else:\n return str(round(self.size / (kilo ** 4), precision)) + \"T\"",
"def unit_of_measurement(self) -> str:\n return MS",
"def unit_of_measurement(self):\r\n return self._sensor_cfg[1]",
"def unit_of_measurement(self):\r\n return self._sensor_cfg[1]",
"def get_observed_precision(self):\n return self.get_precision()",
"def unit_of_measurement(self) -> str:\n return self._unit_of_measurement",
"def unit_of_measurement(self):\n return self.device.unit()",
"def unit_of_measurement(self):\n return self._config.get(CONF_UNIT_OF_MEASUREMENT)",
"def unit_of_measurement(self) -> str:\n return self._unit",
"def GetTimePrecision():\n return _gmat_py.GmatBase_GetTimePrecision()",
"def unit_of_measurement(self):\n return self.sensor_type[\"unit\"]",
"def unit_of_measurement(self):\n return self._unit_of_measurement",
"def unit_of_measurement(self):\n return self._unit_of_measurement",
"def unit_of_measurement(self):\n return self._unit_of_measurement",
"def unit_of_measurement(self):\n return self._unit_of_measurement",
"def unit_of_measurement(self):\n return self._unit_of_measurement",
"def unit_of_measurement(self):\n return self._unit_of_measurement",
"def unit_of_measurement(self):\n return self._unit_of_measurement",
"def unit_of_measurement(self):\n return self._unit_of_measurement",
"def unit_of_measurement(self):\n return self._unit_of_measurement",
"def unit_of_measurement(self):\n return self._unit_of_measurement",
"def unit_of_measurement(self):\n return self._unit_of_measurement",
"def unit_of_measurement(self):\n return self._unit_of_measurement"
] | [
"0.66406333",
"0.62469673",
"0.62469673",
"0.62245446",
"0.6159991",
"0.6155099",
"0.6046847",
"0.60281473",
"0.60199654",
"0.597692",
"0.597692",
"0.5961308",
"0.5957242",
"0.59303707",
"0.59269",
"0.5922502",
"0.5920388",
"0.58822894",
"0.5881604",
"0.5881604",
"0.5881604",
"0.5881604",
"0.5881604",
"0.5881604",
"0.5881604",
"0.5881604",
"0.5881604",
"0.5881604",
"0.5881604",
"0.5881604"
] | 0.6897266 | 0 |
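For context on the get_gds_units record above: the UNITS record (type 0x03) carries two eight-byte reals, the database unit expressed in user units (db_user) and in meters (db_meters), and the function returns (unit, precision) = (db_meters / db_user, db_meters). Below is a small usage sketch plus a decode of the excess-64 real format that the referenced _eight_byte_real_to_float helper implements; the decode follows the GDSII specification rather than the exact helper, the import assumes the snippet matches gdspy's module-level function, and the file name is made up.

    import struct
    import gdspy  # assumption: the record above matches gdspy's get_gds_units

    def eight_byte_real_to_float(value):
        # GDSII excess-64 real: 1 sign bit, 7-bit base-16 exponent biased by 64,
        # 56-bit fraction; value = +/- fraction * 16**(exponent - 64)
        short1, short2, long3 = struct.unpack('>HHL', value)
        exponent = (short1 & 0x7F00) // 256 - 64
        mantissa = (((short1 & 0x00FF) * 65536 + short2) * 4294967296 + long3) / 72057594037927936.0
        return -mantissa * 16 ** exponent if short1 & 0x8000 else mantissa * 16 ** exponent

    unit, precision = gdspy.get_gds_units('layout.gds')  # hypothetical file name
    # A typical layout stores db_user = 1e-3 and db_meters = 1e-9 in UNITS, giving
    # unit = 1e-9 / 1e-3 = 1e-6 (1 um user unit) and precision = 1e-9 (1 nm).
    print(unit, precision)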
Load all cells from a GDSII stream file in binary format. | def get_binary_cells(infile):
close = True
if hasattr(infile, "__fspath__"):
infile = open(infile.__fspath__(), "rb")
elif isinstance(infile, (basestring, Path)):
infile = open(infile, "rb")
else:
close = False
cells = {}
name = None
cell_data = None
for rec_type, data in _raw_record_reader(infile):
# BGNSTR
if rec_type == 0x05:
cell_data = [data]
# STRNAME
elif rec_type == 0x06:
cell_data.append(data)
if str is not bytes:
if data[-1] == 0:
name = data[4:-1].decode("ascii")
else:
name = data[4:].decode("ascii")
else:
if data[-1] == "\0":
name = data[4:-1]
else:
name = data[4:]
# ENDSTR
elif rec_type == 0x07:
cell_data.append(data)
cells[name] = b"".join(cell_data)
cell_data = None
elif cell_data is not None:
cell_data.append(data)
if close:
infile.close()
return cells | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load(datastream):",
"def load_rbc( fname, skiprows, nx, ny ):\n C = numpy.loadtxt( fname, skiprows=skiprows ) \n cell_frames = [ C[i].reshape(( nx,ny )) for i in range( 5000-skiprows ) ]\n return cell_frames",
"def load_binary_data(self, encoding='utf8'):\n\n # TODO use smart_open again when https://github.com/RaRe-Technologies/smart_open/issues/207 will be fixed\n with open(self.file_name, 'rb') as f:\n self.load_model_params(f)\n self.load_dict(f, encoding=encoding)\n self.load_vectors(f)",
"def read_binned(run, bin_scheme):\n\n fname=get_binned_file(run,bin_scheme)\n print(\"reading:\",fname)\n return fitsio.read(fname)",
"def read_scil_b0():\n dipy_home = os.path.join(os.path.expanduser('~'), '.dipy')\n file = pjoin(dipy_home,\n 'datasets_multi-site_all_companies',\n '3T',\n 'GE',\n 'b0.nii.gz')\n\n return nib.load(file)",
"def _load_file(self):\n getLogger(__name__).debug(\"Loading {} in {} mode.\".format(self.filename, self.mode))\n try:\n kwargs = {'driver': 'H5FD_CORE'} if self.in_memory else {}\n self.file = tables.open_file(self.filename, mode='a' if self.mode == 'write' else 'r', **kwargs)\n except (IOError, OSError):\n raise\n\n # get important cal params\n self.nominal_wavelength_bins = self.nyquist_wavelengths()\n\n # get the beam image\n self.beamImage = self.file.get_node('/beammap/map').read()\n self._flagArray = self.file.get_node('/beammap/flag') # The absence of .read() here is correct\n self.nXPix, self.nYPix = self.beamImage.shape\n\n # get the photontable\n self.photonTable = self.file.get_node('/photons/photontable')",
"def load_bin(file_path):\n return np.load(file_path)",
"def load_clips():\n try:\n with open(DATA_FILE, 'r') as f:\n return msgpack.unpack(f, encoding='utf-8')\n except IOError:\n return {}",
"def _loadBinaryData(self, filename, with_axis=None):\n \n self.set_data_writable()\n _data = numpy.load(filename)\n self.data = self._extract_data_with_axis(_data, with_axis)\n self.set_data_protected()",
"def binary_read(filenames, gulp_size, gulp_nframe, dtype, *args, **kwargs):\n return BinaryFileReadBlock(filenames, gulp_size, gulp_nframe, dtype, *args, **kwargs)",
"def _load(self):\n # Extract the ASCII header (5 first lines)\n with open(self._xst_bin, 'rb') as f:\n header = list(islice(f, 0, 5))\n assert header[0] == b'HeaderStart\\n',\\\n 'Wrong header start'\n assert header[-1] == b'HeaderStop\\n',\\\n 'Wrong header stop'\n header = [s.decode('utf-8') for s in header]\n hd_size = sum([len(s) for s in header])\n\n # Parse informations into a metadata dictionnary\n keys = ['freq', 'ma', 'accu']\n search = ['Freq.List', 'Mr.List', 'accumulation']\n types = ['float64', 'int', 'int']\n for key, word, typ in zip(keys, search, types):\n for h in header:\n if word in h:\n self.meta[key] = np.array(\n h.split('=')[1].split(','),\n dtype=typ\n )\n\n # Deduce the dtype for decoding\n n_ma = self.meta['ma'].size\n n_sb = self.meta['freq'].size\n dtype = np.dtype(\n [('jd', 'float64'),\n ('data', 'complex64', (n_sb, n_ma*n_ma*2 + n_ma))]\n )\n\n # Decoding the binary file\n tmp = np.memmap(\n filename=self._xst_bin,\n dtype='int8',\n mode='r',\n offset=hd_size\n )\n decoded = tmp.view(dtype)\n\n self.data = decoded['data'] / self.meta['accu']\n self.time = Time(decoded['jd'], format='jd', precision=0)\n\n return",
"def _loadBinaryData_compressed(self, filename, with_axis=None): \n self.set_data_writable() \n _data = numpy.load(filename)[\"data\"]\n self.data = self._extract_data_with_axis(_data, with_axis)\n self.set_data_protected()",
"def read_syn_data():\n dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n folder = pjoin(dipy_home, 'syn_test')\n t1_name = pjoin(folder, 't1.nii.gz')\n b0_name = pjoin(folder, 'b0.nii.gz')\n\n md5_dict = {'t1': '701bda02bb769655c7d4a9b1df2b73a6',\n 'b0': 'e4b741f0c77b6039e67abb2885c97a78'}\n\n check_md5(t1_name, md5_dict['t1'])\n check_md5(b0_name, md5_dict['b0'])\n\n t1 = nib.load(t1_name)\n b0 = nib.load(b0_name)\n return t1, b0",
"def loadIHex(self, file):\n segmentdata = b''\n currentAddr = 0\n startAddr = 0\n offsetAddr = 0\n lines = file.readlines()\n for l in lines:\n if l[0] != ':': raise BSLException(\"File Format Error\\n\")\n l = l.strip() #fix CR-LF issues...\n length = int(l[1:3],16)\n address = int(l[3:7],16)\n field_type = int(l[7:9], 16)\n check = int(l[-2:],16)\n if field_type == 0x00:\n if currentAddr != offsetAddr + address:\n if segmentdata:\n self.segments.append(Segment(startAddr, segmentdata))\n startAddr = currentAddr = offsetAddr + address\n segmentdata = b''\n for i in range(length):\n segmentdata += bytes([int(l[9+2*i:11+2*i],16)])\n currentAddr = length + currentAddr\n elif field_type == 0x02:\n if segmentdata:\n self.segments.append(Segment(startAddr, segmentdata))\n offsetAddr = int(l[9:13],16)*16\n startAddr = currentAddr = offsetAddr\n segmentdata = b''\n elif field_type in (0x01, 0x03, 0x04, 0x05):\n pass\n else:\n sys.stderr.write(\"Ignored unknown field (type 0x%02x) in ihex file.\\n\" % field_type)\n if segmentdata:\n self.segments.append( Segment(startAddr, segmentdata) )\n\n if DEBUG:\n sys.stderr.write(\"loadIHex\\n\")\n for segment in self.segments:\n sys.stderr.write(\" Segment(startadress = 0x%04x, len = %i)\\n\" % (segment.startaddress, len(segment)))",
"def read(self, fname):\r\n self.header = {}\r\n self.resetvals()\r\n infile = self._open(fname, \"rb\")\r\n self._readheader(infile)\r\n # Compute image size\r\n try:\r\n self.dim1 = int(self.header['NumberOfRowsInFrame'])\r\n self.dim2 = int(self.header['NumberOfColsInFrame'])\r\n self.bpp = int(self.header['BitsPerPixel'])\r\n except:\r\n raise Exception(\"GE file\", str(fname) + \\\r\n \"is corrupt, cannot read it\")\r\n\r\n # More than one image can be saved in a GE file\r\n # Will only load the first one\r\n\r\n\r\n # Go to the beginning of the file\r\n infile.seek(0)\r\n infile.seek(self.header['HeaderSizeInBytes'] + self.header['UserHeaderSizeInBytes'])\r\n\r\n ReadBytes = self.dim1 * self.dim2 * (self.bpp / 8)\r\n block = infile.read(ReadBytes)\r\n block = N.fromstring(block, N.uint16)\r\n\r\n infile.close()\r\n\r\n try:\r\n self.data = N.reshape(block, [self.dim2, self.dim1])\r\n except:\r\n print len(block), self.dim2, self.dim1\r\n raise IOError, \\\r\n 'Size spec in GE-header does not match size of image data field'\r\n\r\n self.bytecode = self.data.dtype.type\r\n self.pilimage = None\r\n return self",
"def test_read_binary(self):\n meshes = stlreader.get_data(self.stl_bin_file)\n print(meshes[0][0], file=sys.stderr)\n name, vertices, polygons = meshes[0]\n self.assertEqual(name, \"{}#{}\".format(os.path.basename(self.stl_bin_file), 0))\n self.assertTrue(len(vertices) > 0)\n self.assertTrue(len(polygons) > 0)\n polygon_ids = list()\n for a, b, c in polygons.itervalues():\n polygon_ids += [a, b, c]\n self.assertItemsEqual(set(vertices.keys()), set(polygon_ids))",
"def load_pickle(path: Path):\n # Before investing significant time processing, ensure server is up\n with get_session() as sess:\n _b = sess.execute(sa.select(sch.Batch).limit(1))\n\n data = pickle.load(open(path, 'rb'))\n required_cols = ['from_name', 'from_email', 'raw_from_string',\n 'to_name', 'to_email', 'raw_to_string',\n 'cc_name', 'cc_email', 'raw_cc_string',\n 'subject',\n 'date', 'raw_date_string',\n 'message_id',\n 'in_reply_to', 'refs',\n 'body_text', 'flagged_abuse',\n 'filename',\n # time_stamp is when it was imported... not important for us.\n #'time_stamp',\n ]\n # Will raise error if any columns not found\n data = data[required_cols]\n\n entities = collections.defaultdict(lambda: {})\n flush_count = [0]\n with get_session() as sess:\n clean_for_ingest(sess)\n\n resource = f'ocean-{os.path.basename(path)}'\n sch.Batch.cls_reset_resource(resource, session=sess)\n\n batch = sch.Batch(resource=resource)\n sess.add(batch)\n\n # The `db_get_*()` functions return the id for the chosen Entity, as an\n # optimization\n def db_get_message(id):\n rid = entities['message'].get(id)\n if rid is None:\n rid = sch.Entity(name=f'Message {id}',\n type=sch.EntityTypeEnum.message, attrs={},\n batch=batch)\n sess.add(rid)\n flush_count[0] += 1\n entities['message'][id] = rid\n return rid\n def db_get_user(name, email):\n id = f'{name} <{email}>'\n rid = entities['user'].get(id)\n if rid is None:\n rid = sch.Entity(name=id, type=sch.EntityTypeEnum.person,\n batch=batch,\n attrs={\n 'name': name,\n 'email': email,\n })\n sess.add(rid)\n flush_count[0] += 1\n entities['user'][id] = rid\n return rid\n\n for m_idx, m in tqdm.tqdm(data.iterrows(), desc='importing messages',\n total=len(data)):\n\n # No date --> useless\n if m['raw_date_string'] is None:\n continue\n\n def user_resolve(prefix):\n if m[f'{prefix}_name'] is None:\n return None\n name = m[f'{prefix}_name']\n email = m[f'{prefix}_email']\n return db_get_user(name, email)\n frm = user_resolve('from')\n to = user_resolve('to')\n cc = user_resolve('cc')\n\n try:\n message_time = date_field_resolve(m['date'], m['raw_date_string'])\n except:\n raise ValueError(f\"Bad date: {m['message_id']} {m['date']} {m['raw_date_string']}\")\n\n def fixnull(v):\n \"Some ocean data has \\x00 bytes... remove those\"\n if not isinstance(v, str):\n return v\n return v.replace('\\x00', '<NULL>')\n message = db_get_message(m['message_id'])\n message.attrs.update({\n 'origin_filename': fixnull(m['filename']),\n 'subject': fixnull(m['subject']),\n 'body_text': fixnull(m['body_text']),\n 'flagged_abuse': m['flagged_abuse'],\n 'time': message_time.timestamp(), # float for JSON\n })\n\n if frm is not None:\n message.obs_as_dst.append(sch.Observation(src=frm, batch=batch,\n type=sch.ObservationTypeEnum.message_from,\n time=message_time))\n if to is not None:\n message.obs_as_src.append(sch.Observation(dst=to, batch=batch,\n type=sch.ObservationTypeEnum.message_to,\n time=message_time))\n if cc is not None:\n message.obs_as_src.append(sch.Observation(dst=cc, batch=batch,\n type=sch.ObservationTypeEnum.message_cc,\n time=message_time))\n for r in m['refs']:\n message.obs_as_src.append(sch.Observation(\n dst=db_get_message(r['ref']), batch=batch,\n type=sch.ObservationTypeEnum.message_ref,\n time=message_time))\n\n if flush_count[0] > 10000:\n sess.flush()\n flush_count[0] = 0\n\n print(f'Finished with batch {batch.id}; committing')",
"def _read_data(self):\n with self._open(self.filename, 'rb') as f:\n try:\n f.seek(self._offset_data, self._offset_whence)\n except IOError:\n print('Error: hedp.io.HamamatsuFile seeking outside of file limits.')\n print(' Failed to parse file.')\n print(\" Either the 'offset' or 'dtype' input arguments must be wrong!\")\n raise\n except:\n raise\n\n data_len = np.prod(self.shape)*np.dtype(self._dtype).itemsize\n data_str = f.read(data_len)\n if data_len != len(data_str):\n print(data_len, len(data_str))\n raise ValueError('File ended before all data was read. Probably wrong offset or dtype!')\n\n\n self.data = np.fromstring(data_str, dtype=self._dtype).reshape(self.shape[::-1])\n self.data = np.ndarray.astype(self.data, 'float32')\n\n #self.data = np.fromfile(f, dtype=self._dtype,\n # count=np.prod(self.shape)).reshape(self.shape[::-1])",
"def read_xsf(filename):\n f = open(filename)\n lines = f.readlines()\n\n found_datagrid = None\n first_data_line = None\n number_data_lines = None\n xiter = yiter = ziter = 0\n\n for idx, line in enumerate(lines):\n if found_datagrid is None:\n if \"BEGIN_BLOCK_DATAGRID_3D\" in line:\n found_datagrid = idx\n code = lines[idx+1].strip()\n\n # The specific formatting may, similar to .cube files.\n # So better to be specific.\n if code != \"3D_PWSCF\":\n raise Exception(\"This .xsf parser can only read .xsf files\"\n \" generated by Quantum ESPRESSO\")\n else:\n if idx == found_datagrid + 3:\n grid_dimensions = [int(line.split()[0]), int(line.split()[1]),\n int(line.split()[2])]\n data = np.zeros(grid_dimensions, dtype=np.float64)\n\n # Quantum ESPRESSO writes with 6 entries per line.\n number_data_lines = int(np.ceil(np.prod(grid_dimensions) / 6))\n last_entry = int(np.prod(data) % 6)\n first_data_line = found_datagrid + 8\n\n if first_data_line is not None:\n if first_data_line <= idx < number_data_lines+first_data_line:\n dataline = line.split()\n if idx == number_data_lines+first_data_line-1:\n number_entries = last_entry\n else:\n number_entries = 6\n\n # Each line contains 6 entries, except the last.\n for i in range(0, number_entries):\n data[xiter, yiter, ziter] = float(dataline[i])\n xiter += 1\n if xiter == grid_dimensions[0]:\n xiter = 0\n yiter += 1\n if yiter == grid_dimensions[1]:\n yiter = 0\n ziter += 1\n return data, {}",
"def load_binary_ROCKSTAR(path, \n snapid=0, \n filename_prefix='', \n filename_suffix='', \n n_files=1, \n read_ascii=False,\n myprops='all',\n include_desc_info=True):\n #print '\\nREADING ROCKSTAR BINARY HALO CATALOG\\n####################################\\n'\n\n #get header_size (where to start when reading binary), size of halo information for one halo, data type structure, data type of particle information\n #such asnumber of halos, numer of particles, box size, particle mass and particle type\n header_size, halo_struct_size, dt, dt_header_info, bytes_to_header_info = get_dtype_sarray('ROCKSTAR_binary')\n \n particle_ID_dict = {}\n count=0\n #Load binary files\n for nfile in range(0,n_files,1):\n #read the ascii file format as pandas object (optional) \n if read_ascii==True:\n print('++++++++++++++++++\\nReading ... ', path+filename_prefix+str(nfile)+'.ascii',) \n dt_ascii=ha_lib.get_dtype_sarray('ROCKSTAR_ASCII')\n data_ascii = ha_lib.df_to_sarray(pd.read_csv(path+filename_prefix+str(nfile)+'.ascii', comment='#', names=[k[0] for k in dt_ascii], sep=' ', dtype=dt_ascii))\n \n print('--> succesfully!\\nHalo catalog:\\n', data_ascii, '\\n') \n \n #print 'Reading ... ', path+filename_prefix+str(nfile)+filename_suffix, \n f = open(path+filename_prefix+str(nfile)+filename_suffix,'rb')\n \n f.seek(bytes_to_header_info)\n particle_info = np.fromfile(f, dt_header_info, 1)[0]\n\n #print '--> succesfully!'\n if particle_info['n_halos']>0: \n #print '\\ntot num halos:\\t\\t', particle_info['n_halos'], '\\ntot num particles:\\t', particle_info['tot_n_particles'], '\\nparticle mass:\\t\\t', format(particle_info['m_particles'], '0.8e'), '\\n'\n\n f.seek(header_size)\n data_this_file = np.fromfile(f, dt, particle_info['n_halos'])\n \n #print data_this_file, '\\n'\n byte_loc=header_size+halo_struct_size*particle_info['n_halos']\n \n f.seek(byte_loc)\n \n #Read particle IDs as block of n_particles x 8 bytes\n #print 'Reading particle IDs at byte loc:', byte_loc, 'bytes',\n particle_IDs = np.fromfile(f, np.int64, particle_info['tot_n_particles'])\n #print '--> successfully!\\n'\n \n #connect haloid with particles bound to that halo\n particle_ID_dict.update({haloid: {'particle_IDs': particle_IDs[sum(data_this_file['n_particles'][:i]) : sum(data_this_file['n_particles'][:i])+data_this_file['n_particles'][i]]} for (i, haloid, descID) in zip(range(0,data_this_file['haloid'].size,1),data_this_file['haloid'],data_this_file['descIndex'])})\n\n if count==0:\n #print 'here create data!'\n data = np.zeros((data_this_file.size,),dtype=np.dtype([(k, data_this_file.dtype[k]) for k in data_this_file.dtype.names]))\n data = data_this_file\n count+=1\n else:\n data = np.append(data, data_this_file, axis=0) \n\n f.close()\n \n #print('SNAPID:', snapid, 'SUCESSFULLY READ!\\n') \n\n if myprops!='all':\n data=data[myprops]\n \n import numpy.lib.recfunctions as rcfuncs\n data = rcfuncs.append_fields([data], ['snapid'] , [np.zeros(data.size,)], dtypes=['i4'], usemask=False)\n data['snapid']= snapid\n \n if include_desc_info==True:\n #get descIndex\n data.sort(order=['haloid'], axis=0)\n data['descIndex']= get_descIndex(path, snapid) \n\n particle_info_dict={snapid: {'n_halos': data.size, 'n_particles': sum(data['n_particles'])}}\n\n return data, particle_ID_dict, particle_info_dict",
"def readData():\n\tN = 800\n\tD = 28*28\n\tX = np.zeros((N, D), dtype=np.uint8)\n\n\tf = open(\"data/a012_images.dat\", 'rb')\n\n\tfor i in range(0, N):\n\t\tX[i, :] = np.fromstring(f.read(D), dtype='uint8')\n\n\tf.close()\n\n\treturn X",
"def read_isbi2013_2shell():\n dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n folder = pjoin(dipy_home, 'isbi2013')\n fraw = pjoin(folder, 'phantom64.nii.gz')\n fbval = pjoin(folder, 'phantom64.bval')\n fbvec = pjoin(folder, 'phantom64.bvec')\n\n md5_dict = {'data': '42911a70f232321cf246315192d69c42',\n 'bval': '90e8cf66e0f4d9737a3b3c0da24df5ea',\n 'bvec': '4b7aa2757a1ccab140667b76e8075cb1'}\n\n check_md5(fraw, md5_dict['data'])\n check_md5(fbval, md5_dict['bval'])\n check_md5(fbvec, md5_dict['bvec'])\n\n bvals, bvecs = read_bvals_bvecs(fbval, fbvec)\n\n gtab = gradient_table(bvals, bvecs)\n img = nib.load(fraw)\n return img, gtab",
"def load(self, fh):\n\t\t# first we read all nodes\n\t\tfor line in filter(lambda l: l.startswith('+NODE:'), fh):\n\t\t\tname = line.rstrip()[line.find('\"') + 1:line.rfind('\"')]\n\t\t\tline = line.rstrip()[line.rfind('\"') + 2:].split(',')\n\t\t\tpan = ''.join(reversed(line[:2])).replace('?', '')\n\t\t\tpan = '0x' + pan.lower() if pan else ''\n\t\t\taddress = ''.join(reversed(line[2:4])).replace('?', '')\n\t\t\taddress = '0x' + address.lower() if address else ''\n\t\t\tself.nodes.append_unique(Node(pan, address, name))\n\n\t\t# but they are at the end of file so we return to begining now:\n\t\tfh.seek(0)\n\t\tfor line in filter(lambda l: l.startswith('+FRAM:'), fh):\n\t\t\tload, time = (line[19:].split(',')[1:], #load\n\t\t\t\t\t\t\tline[6:18].split(',')) #time\n\t\t\tload = [int(x, 16) for x in load] #hex->int\n\t\t\tself.append(Packet(load, time))\n\t\t\t\n\t\tself.changed(False)\n\t\tself.nodes.changed(False)",
"def load_stream(self, stream):\n # load the batches\n for batch in self.serializer.load_stream(stream):\n yield batch\n\n # load the batch order indices or propagate any error that occurred in the JVM\n num = read_int(stream)\n if num == -1:\n error_msg = UTF8Deserializer().loads(stream)\n raise RuntimeError(\n \"An error occurred while calling \"\n \"ArrowCollectSerializer.load_stream: {}\".format(error_msg)\n )\n batch_order = []\n for i in range(num):\n index = read_int(stream)\n batch_order.append(index)\n yield batch_order",
"def load_data_BCI(self, list_channels=[2, 7, 1, 8, 4, 5, 3, 6]):\r\n self.number_channels = len(list_channels)\r\n self.list_channels = list_channels\r\n \r\n # load raw data into numpy array\r\n self.raw_eeg_data = np.loadtxt(self.path, \r\n delimiter=',',\r\n skiprows=7,\r\n usecols=list_channels)\r\n\r\n #expand the dimmension if only one channel \r\n if self.number_channels == 1:\r\n self.raw_eeg_data = np.expand_dims(self.raw_eeg_data, \r\n axis=1)",
"def read(self):\n try:\n f = open(self.datfile, 'r')\n except:\n print('ERROR: data file not found!')\n exit()\n\n # Get rid of the header\n for _ in range(self.header_length):\n f.readline()\n\n # Read nuclide mass data\n for line in f:\n ls = line.strip()\n n, z, ebind = ls.split()\n nuclide = BindingNuclide(n, z, ebind)\n self.nuclides.append(nuclide)\n\n f.close()",
"def LoadBatch(filename):",
"def load_bc(self):\r\n\r\n # Open the file and read all the lines.\r\n array = np.loadtxt(self.bc_file)\r\n\r\n # Convert the columns to appropriate type.\r\n self.beta = array[:, 0]\r\n self.code = array[:, 1].astype(int)",
"def read_sm_product(filepath):\n # check the files are udp files\n if os.path.basename(filepath)[14:17] != 'UDP':\n raise ValueError('{} is not a UDP file'.format(filepath))\n\n # Open the data file for reading\n try:\n file = open(filepath, 'rb')\n except IOError:\n logging.exception('file {} does not exist'.format(filepath))\n raise\n\n # Read first unsigned int32, containing number of datapoints to iterate over\n n_grid_points = np.fromfile(file, dtype=np.uint32, count=1)[0]\n logging.debug('Data file contains {} data points'.format(n_grid_points))\n logging.debug('Reading file... ')\n data = np.fromfile(file, dtype=datatype, count=n_grid_points)\n file.close()\n logging.debug('Done')\n\n return data",
"def read_grid(self, file_path=None):\n print('[info] reading the grid ...')\n if not file_path:\n file_path = os.path.join(self.directory, 'grid.dat')\n if not os.path.exists(file_path):\n file_path = os.path.join(self.directory, 'grid.txt')\n # test if file written in binary format\n textchars = bytearray({7, 8, 9, 10, 12, 13, 27}\n | set(range(0x20, 0x100)) - {0x7f})\n is_binary_string = lambda bytes: bool(bytes.translate(None, textchars))\n infile = open(file_path, 'rb')\n binary_format = is_binary_string(infile.read(1024))\n infile.close()\n if binary_format:\n with open(file_path, 'rb') as infile:\n # x-direction\n nx = struct.unpack('i', infile.read(4))[0]\n x = numpy.array(struct.unpack('d' * (nx + 1),\n infile.read(8 * (nx + 1))))\n # y-direction\n ny = struct.unpack('i', infile.read(4))[0]\n y = numpy.array(struct.unpack('d' * (ny + 1),\n infile.read(8 * (ny + 1))))\n self.grid = numpy.array([x, y])\n else:\n with open(file_path, 'r') as infile:\n n_cells = numpy.array([int(n)\n for n in infile.readline().strip().split()])\n coords = numpy.loadtxt(infile, dtype=numpy.float64)\n self.grid = numpy.array(numpy.split(coords,\n numpy.cumsum(n_cells[:-1] + 1)))\n if self.grid.size == 2:\n print('\\tgrid-size: {}x{}'.format(self.grid[0].size - 1,\n self.grid[1].size - 1))\n elif self.grid.size == 3:\n print('\\tgrid-size: {}x{}x{}'.format(self.grid[0].size - 1,\n self.grid[1].size - 1,\n self.grid[2].size - 1))"
] | [
"0.59665906",
"0.5666531",
"0.5652383",
"0.56487894",
"0.5564757",
"0.5411691",
"0.53955466",
"0.5385155",
"0.538372",
"0.53774667",
"0.52971804",
"0.5276019",
"0.52390313",
"0.52172196",
"0.5202131",
"0.51881504",
"0.5182322",
"0.5163801",
"0.5156424",
"0.5144767",
"0.513413",
"0.51071304",
"0.51051724",
"0.51021653",
"0.50914013",
"0.50771564",
"0.5075937",
"0.5067689",
"0.5065712",
"0.5061349"
] | 0.61892736 | 0 |
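The get_binary_cells record above never parses geometry: it slices the stream at BGNSTR / STRNAME / ENDSTR records and returns the raw bytes of each cell keyed by name, which is handy for copying cells between files without a full load. A small usage sketch, again assuming the gdspy module-level helper and a made-up file name:

    import gdspy  # assumption: the record above matches gdspy's get_binary_cells

    cells = gdspy.get_binary_cells('layout.gds')  # hypothetical file name
    for name, raw in cells.items():
        # each value is the untouched byte stream of one cell, BGNSTR through ENDSTR
        print('{}: {} bytes'.format(name, len(raw)))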
Sets the product of this SubscriptionProductRetirement. | def product(self, product):
self._product = product | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def product(self, product):\n self._product = product",
"def product(self, product):\n if product is None:\n raise ValueError(\"Invalid value for `product`, must not be `None`\") # noqa: E501\n\n self._product = product",
"def set_product(self, product):\n self.single_selection_from_static_kendo_dropdown(self.product_kendo_dropdown_locator, product)",
"def product_revision(self, product_revision):\n\n self._product_revision = product_revision",
"def product_id(self, product_id):\n self._product_id = product_id",
"def product_id(self, product_id):\n\n self._product_id = product_id",
"def product_id(self, product_id):\n\n self._product_id = product_id",
"def set_sms_product(self, product):\n self.single_selection_from_static_kendo_dropdown(self.sms_product_kendo_dropdown_locator, product)",
"def product_version(self, product_version):\n\n self._product_version = product_version",
"def product_name(self, product_name):\n\n self._product_name = product_name",
"def product_config(self, product_config):\n\n self._product_config = product_config",
"def target_product(self, target_product):\n\n self._target_product = target_product",
"def set_product_in_received_charges_grid(self, product):\n full_product_name = product + \"_\" + self.random_string_generator(3)\n self.set_value_in_received_charges_grid_column(self.product_column_name, full_product_name)\n return full_product_name",
"def product_id(self, product_id):\n if product_id is None:\n raise ValueError(\"Invalid value for `product_id`, must not be `None`\") # noqa: E501\n\n self._product_id = product_id",
"def remove_product(self, product):\n try:\n sp = SubscriptionProduct.objects.get(subscription=self, product=product)\n sp.delete()\n except SubscriptionProduct.DoesNotExist:\n pass\n else:\n self.contact.add_product_history(self, product, \"D\")",
"def addProduct(self, product):\n self._checkDeleted()\n product._checkDeleted()\n\n productPath = self.productSearch.productClient.product_path(\n project=self.productSearch.projectId, location=self.productSearch.location, product=product.productId)\n\n self.productSearch.productClient.add_product_to_product_set(name=self.productSetPath, product=productPath)",
"def set_received_charges_grid_product_name(self, product_name):\n if product_name != \"\":\n self.set_product(product_name)\n else:\n self.set_value_in_grid_column(self.received_charges_grid_div_id, self.product_column_name, self.random_string_generator(6), True)",
"def product(self, product_id):\r\n return products.Product(self, product_id)",
"def set_adjustment_charge_product(self, product_name_prefix):\n product_name = product_name_prefix + self.random_string_generator(size=4)\n self.set_value_into_input_field(self.create_adjustment_charge_product_textbox_locator, product_name)",
"def setGeneProduct(self, *args):\n return _libsbml.GeneProductRef_setGeneProduct(self, *args)",
"def product_type(self, product_type):\n if product_type is None:\n raise ValueError(\"Invalid value for `product_type`, must not be `None`\") # noqa: E501\n\n self._product_type = product_type",
"def add_product(self, product):\n return self._make_post_request(self._urls['products'],\n data=dict(name=product))",
"def products(self, products):\n\n self._products = products",
"def products(self, products):\n\n self._products = products",
"async def set_property(self, product_type: ProductType, serial_no: str, name: str, value: Any) -> None:\n await self._send_message_get_response(OutgoingMessage(OutgoingMessageType.set_property, domain=product_type.name, serial_no=serial_no, name=name, value=value))",
"def product(self):\n return self._product",
"def product(self):\n return self._product",
"def product(self):\n return self._product",
"def new_product(self, product_price=None, lead_time=None):\n self.is_sold = False\n\n if not lead_time:\n lead_time = self.default_lead_time\n\n if not product_price:\n product_price = self.default_product_price\n\n self.remaining_slots = lead_time\n self.product_price = product_price",
"def product_count(self, product_count: int):\n if product_count is None:\n raise ValueError(\"Invalid value for `product_count`, must not be `None`\")\n\n self._product_count = product_count"
] | [
"0.77392894",
"0.7601882",
"0.7119301",
"0.7109555",
"0.69029176",
"0.6886196",
"0.6886196",
"0.6858822",
"0.6649319",
"0.64404374",
"0.6351997",
"0.6307819",
"0.62698454",
"0.601716",
"0.6005919",
"0.59450114",
"0.59232247",
"0.5884955",
"0.58693993",
"0.5860039",
"0.5857742",
"0.58534324",
"0.5838857",
"0.5838857",
"0.57660085",
"0.5581151",
"0.5581151",
"0.5581151",
"0.554996",
"0.5514564"
] | 0.78371936 | 0 |
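The product setter above, like the respect_terminiation_periods_enabled and target_product accessors in the records that follow, is a generated-SDK attribute wrapper; the underlying pattern is simply a @property pair over a private attribute. A minimal sketch of that pattern (the None check mirrors one of the negatives and is an assumption, not necessarily what the real SubscriptionProductRetirement class does):

    class SubscriptionProductRetirement(object):
        # illustrative only; the generated class carries extra serialization machinery

        def __init__(self):
            self._product = None

        @property
        def product(self):
            return self._product

        @product.setter
        def product(self, product):
            if product is None:  # assumed validation, mirroring a negative above
                raise ValueError('Invalid value for `product`, must not be `None`')
            self._product = product

    retirement = SubscriptionProductRetirement()
    retirement.product = 42
    print(retirement.product)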
Gets the respect_terminiation_periods_enabled of this SubscriptionProductRetirement. | def respect_terminiation_periods_enabled(self):
return self._respect_terminiation_periods_enabled | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def respect_terminiation_periods_enabled(self, respect_terminiation_periods_enabled):\n\n self._respect_terminiation_periods_enabled = respect_terminiation_periods_enabled",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def day_periods(self) -> localedata.LocaleDataDict:\n return self._data['day_periods']",
"def periods(self) -> localedata.LocaleDataDict:\n try:\n return self._data['day_periods']['stand-alone']['wide']\n except KeyError:\n return localedata.LocaleDataDict({}) # pragma: no cover",
"def period(self):\n return self.__period",
"def getPeriod(self):\n return StripePeriod(self.base.get(\"period\", []))",
"def __get_period(self):\n return self.__period",
"def get_periods():\n return [\n relativedelta(),\n relativedelta(days=6),\n relativedelta(months=1),\n relativedelta(months=3),\n relativedelta(years=1),\n relativedelta(years=5)\n ]",
"def failing_periods(self) -> 'outputs.DynamicThresholdFailingPeriodsResponse':\n return pulumi.get(self, \"failing_periods\")",
"def get_interval(self):\n return self._period",
"def service_endpoint_policies(self) -> Optional[Sequence['outputs.ServiceEndpointPolicyResponse']]:\n return pulumi.get(self, \"service_endpoint_policies\")",
"def real_period(self):\n return max(\n self.period * self.PERIOD_MARGIN_FACTOR -\n (self.max_lag if self.max_lag else self.lag * self.LAG_MARGIN_FACTOR),\n 0.0)",
"def current_period(self):\n return self._current_period",
"def number_of_evaluation_periods(self) -> float:\n return pulumi.get(self, \"number_of_evaluation_periods\")",
"def renewal_period(self) -> Optional[float]:\n return pulumi.get(self, \"renewal_period\")",
"def expected_last_period_end(self):\n return self._expected_last_period_end",
"def kind(self):\n return DateValueTypes.PERIOD",
"def get_rates(self):\n rates = np.empty(len(self.periods))\n for index, element in enumerate(self.periods):\n rates[index] = self.periods[element]['price']\n return(pd.Series(rates, self.periods.keys()))",
"def block_period_consumption(self):\n return self._safe_value(VAR_BLOCKPERIODCONSUMPTION, float)"
] | [
"0.74990064",
"0.59558636",
"0.59558636",
"0.59558636",
"0.59558636",
"0.59558636",
"0.59558636",
"0.59558636",
"0.59558636",
"0.59558636",
"0.59558636",
"0.59558636",
"0.59558636",
"0.52868",
"0.52440816",
"0.51808107",
"0.51157266",
"0.51113236",
"0.50006104",
"0.49916717",
"0.4969711",
"0.48591822",
"0.48164082",
"0.48126093",
"0.4797861",
"0.4776214",
"0.4766286",
"0.46966928",
"0.46208766",
"0.45986018"
] | 0.7897476 | 0 |
Sets the respect_terminiation_periods_enabled of this SubscriptionProductRetirement. | def respect_terminiation_periods_enabled(self, respect_terminiation_periods_enabled):
self._respect_terminiation_periods_enabled = respect_terminiation_periods_enabled | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def respect_terminiation_periods_enabled(self):\n return self._respect_terminiation_periods_enabled",
"def set_atr_periods(self, periods: int = 100):\n h, l, c_prev = self.data.High, self.data.Low, pd.Series(self.data.Close).shift(1)\n tr = np.max([h - l, (c_prev - h).abs(), (c_prev - l).abs()], axis=0)\n atr = pd.Series(tr).rolling(periods).mean().bfill().values\n self.__atr = atr",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def payee_grace_period_processing_enabled(self, payee_grace_period_processing_enabled):\n\n self._payee_grace_period_processing_enabled = payee_grace_period_processing_enabled",
"def period_types(self, period_types):\n\n self._period_types = period_types",
"def periods(self) -> localedata.LocaleDataDict:\n try:\n return self._data['day_periods']['stand-alone']['wide']\n except KeyError:\n return localedata.LocaleDataDict({}) # pragma: no cover",
"def allows_recurring_payments(self, allows_recurring_payments):\n\n self._allows_recurring_payments = allows_recurring_payments",
"def AverageTrueRangeStopLoss(self, timeperiod = 14, multiplier = 2):\r\n stopLoss = ta.ATR(self.data.high, self.data.low, self.data.close, timeperiod)\r\n \r\n plus_dm = ta.PLUS_DM(self.data.high,self.data.low, timeperiod)\r\n minus_dm = ta.MINUS_DM(self.data.high,self.data.low, timeperiod)\r\n \r\n if plus_dm > minus_dm:\r\n stopLoss = self.data.close - multiplier * stopLoss\r\n else:\r\n stopLoss = self.data.close + multiplier * stopLoss\r\n \r\n\r\n stopLoss.dropna(inplace=True) \r\n \r\n return stopLoss",
"def period(self, period):\n\n self._period = period",
"def period(self, period):\n\n self._period = period",
"def _update_reporting_rate(supply_point, report_period, products_managed, base_level):\n late_cutoff = report_period.window_date + \\\n timedelta(days=settings.LOGISTICS_DAYS_UNTIL_LATE_PRODUCT_REPORT)\n\n # Filtering on base_level is not necessary for ProductReport because the supply_point\n # should tell what the base_level is (base_level will be HSA if the supply_point\n # is an hsa, and base_level will be FACILITY if the supply_point is a facility).\n # So since this is already a big query, it's better to not include the filter\n # for performance.\n reports_in_range = ProductReport.objects.filter(\n supply_point=supply_point,\n report_type__code=Reports.SOH,\n report_date__gte=report_period.period_start,\n report_date__lte=report_period.period_end,\n )\n period_rr = get_or_create_singular_model(\n ReportingRate,\n supply_point=supply_point,\n date=report_period.window_date,\n base_level=base_level,\n )[0]\n period_rr.total = 1\n period_rr.reported = 1 if reports_in_range else period_rr.reported\n if reports_in_range:\n first_report_date = reports_in_range.order_by('report_date')[0].report_date\n period_rr.on_time = first_report_date <= late_cutoff or period_rr.on_time\n\n if not period_rr.complete:\n # check for completeness (only if not already deemed complete)\n # unfortunately, we have to walk all avaialable\n # transactions in the period every month\n # in order to do this correctly.\n this_months_reports = ProductReport.objects.filter(\n supply_point=supply_point,\n report_type__code=Reports.SOH,\n report_date__gte=report_period.window_date,\n report_date__lte=report_period.period_end,\n )\n\n found = set(this_months_reports.values_list(\"product\", flat=True).distinct())\n period_rr.complete = 0 if found and (products_managed - found) else \\\n (1 if found else 0)\n # sanity check a weird bug where something was complete but not reported:\n # https://sentry.io/organizations/dimagi/issues/3257281095/\n if period_rr.complete:\n period_rr.reported = 1\n\n period_rr.save()",
"def update_period(self):\n return 0.1",
"def failing_periods(self) -> 'outputs.DynamicThresholdFailingPeriodsResponse':\n return pulumi.get(self, \"failing_periods\")",
"def update_to_termination(self, max_holding_days=None,\r\n max_obs_days=None, last_day_to_activate=None, use_half_life=False):\r\n if max_obs_days is None:\r\n max_obs_days = self.MAX_OBS_DAYS\r\n\r\n if max_holding_days is None:\r\n max_holding_days = self.MAX_HLD_DAYS\r\n\r\n if use_half_life:\r\n half_life, hl_pvalue = self._calculate_half_life()\r\n if half_life is None or half_life <= 0:\r\n self.is_terminated = True\r\n return\r\n if half_life * 2 > max_holding_days:\r\n self.is_terminated = True\r\n return\r\n if hl_pvalue > 0.05:\r\n self.is_terminated = True\r\n return\r\n\r\n activate_date_n_direction = self.find_activate_date_n_direction(max_obs_days=max_obs_days,\r\n last_day_to_activate=last_day_to_activate)\r\n if activate_date_n_direction:\r\n\r\n self.is_activated = True\r\n self.activate_date, activate_date_rel_idx, self.type = activate_date_n_direction\r\n if self.type == 'long':\r\n long_leg = self.pair[0]\r\n short_leg = self.pair[1]\r\n exit_signal = self._data_dict['below_mean'] == -1\r\n else:\r\n long_leg = self.pair[1]\r\n short_leg = self.pair[0]\r\n exit_signal = self._data_dict['above_mean'] == -1\r\n exit_idxs = np.argwhere(exit_signal).flatten()\r\n exit_date_rel_idxs = exit_idxs[exit_idxs > activate_date_rel_idx]\r\n # find exit date\r\n\r\n if not len(exit_date_rel_idxs):\r\n exit_date_rel_idx = activate_date_rel_idx + max_holding_days\r\n exit_reason = 'max.holding.days'\r\n else:\r\n exit_date_rel_idx = exit_date_rel_idxs[0]\r\n exit_reason = 'reverted'\r\n if exit_date_rel_idx > activate_date_rel_idx + max_holding_days: # reversion on the last day is still reversion\r\n exit_date_rel_idx = activate_date_rel_idx + max_holding_days\r\n exit_reason = 'max.holding.days'\r\n\r\n # get forward return\r\n # example: day 1 activate; day 2 buy at close price; day 10 exit signal, day 11 sell at close price\r\n # need the forward return of day 2 to day 10\r\n sl = slice(self._identified_date_id + activate_date_rel_idx + 1,\r\n self._identified_date_id + exit_date_rel_idx + 1)\r\n forward_returns = self.dl['FRTN1P', [long_leg, short_leg]][:, sl]\r\n # start from the the day after the activate date\r\n forward_returns[np.isnan(forward_returns)] = 0\r\n # wealth after deducting the cost when initiating the position\r\n wealth = np.cumprod(1 + forward_returns, axis=1) * np.array(\r\n [[1 / (1 + self.TRANSACTION_COST)], [1 / (1 - self.TRANSACTION_COST)]])\r\n wealth = np.c_[([1, 1], wealth)] # start from the the activate day (wealth = 1)\r\n # one day forward wealth\r\n holding_returns = (wealth[0] - wealth[1]) # start from the the activate date\r\n # one day forward pair return\r\n\r\n # check stoploss point\r\n stop_loss_idxs = np.argwhere(holding_returns <= self.STOP_LOSS)\r\n # day x, the 1 day forward return <= stoploss. The stoploss signal is detected on day x + 1\r\n stop_loss_idx = stop_loss_idxs[0][0] + 1 if len(stop_loss_idxs) else 99999\r\n\r\n # get delisting information and check for delisting\r\n # delist = ~self._data_dict['in_flag'][activate_date_rel_idx + 2: exit_date_rel_idx + 1]\r\n delist = ~self._data_dict['in_flag'][activate_date_rel_idx + 2: exit_date_rel_idx + 2]\r\n # start from the the second day after the activate day\r\n delist_idxs = np.argwhere(delist)\r\n delist_idx = delist_idxs[0][0] if len(delist_idxs) else 99999\r\n # if delist_idx == 0, then delisting happens the second day after the activate day which corresponds to a index\r\n # value of 2 relative to the activate date. 
But we need not adjust delist_idx to 2 because we can assume\r\n # on day 1 we successfully clear the position (at its close price). The 1 day forward return on day 2\r\n # and day 1 thus should not be counted toward the pair performance\r\n\r\n breakpoint = None # by default, no breaks caused by either stop loss or delisting.\r\n if min(delist_idx, stop_loss_idx) != 99999:\r\n if min(delist_idx, stop_loss_idx) == delist_idx:\r\n exit_reason = 'delist'\r\n breakpoint = delist_idx\r\n else:\r\n if stop_loss_idx < len(holding_returns):\r\n exit_reason = 'stop.loss'\r\n breakpoint = stop_loss_idx\r\n\r\n self.exit_reason = exit_reason\r\n self.open_date = self._get_date(activate_date_rel_idx + 1)\r\n if breakpoint is not None:\r\n exit_date_rel_idx = activate_date_rel_idx + breakpoint\r\n self.exit_date = self._get_date(exit_date_rel_idx)\r\n self.close_date = self._get_date(exit_date_rel_idx + 1)\r\n self.holding_days = exit_date_rel_idx - activate_date_rel_idx\r\n if breakpoint is not None:\r\n self._measure_performance(wealth[:, :breakpoint + 1], holding_returns[:breakpoint + 1])\r\n else:\r\n self._measure_performance(wealth, holding_returns)\r\n\r\n self.is_terminated = True",
"def real_period(self):\n return max(\n self.period * self.PERIOD_MARGIN_FACTOR -\n (self.max_lag if self.max_lag else self.lag * self.LAG_MARGIN_FACTOR),\n 0.0)",
"def expected_last_period_end(self, expected_last_period_end):\n\n self._expected_last_period_end = expected_last_period_end",
"def collapse(self, periods=None, **kwargs):\n for period in periods or self._periods:\n self._backend.collapse(period, **kwargs)",
"def __set_period(self, period):\n if not isinstance(period, int):\n raise TypeError('The period should be an integer')\n if period <= 0:\n raise ValueError('The period should be a natural number')\n self.__period = period",
"def checkpoint_period_set(self):\n raise Exception(\"TODO\")"
] | [
"0.73718745",
"0.45488954",
"0.44557628",
"0.44557628",
"0.44557628",
"0.44557628",
"0.44557628",
"0.44557628",
"0.44557628",
"0.44557628",
"0.44557628",
"0.44557628",
"0.44557628",
"0.44557628",
"0.42839718",
"0.4270166",
"0.40364996",
"0.4027975",
"0.39874214",
"0.39431196",
"0.39431196",
"0.3899132",
"0.3890515",
"0.3885506",
"0.38753322",
"0.3864665",
"0.38548604",
"0.38443998",
"0.3842124",
"0.3820573"
] | 0.8498492 | 0 |
Gets the target_product of this SubscriptionProductRetirement. | def target_product(self):
return self._target_product | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def target_for_product(self, product):\n for target, products in self._products_by_target.items():\n if product in products:\n return target\n return None",
"def getTarget(self):\n return self._target",
"def getTarget(self):\n\n return self._target",
"def target_resource(self):\n return self._target_resource",
"def product(self):\n return self._product",
"def product(self):\n return self._product",
"def product(self):\n return self._product",
"def get_target(self):\n return self._target",
"def getTarget(self):\n return self.Target",
"def getProduct(self, *args):\n return _libsbml.Reaction_getProduct(self, *args)",
"def target(self):\n return self._target",
"def target(self):\n return self._target",
"def target(self):\n return self._target",
"def target(self):\n return self._target",
"def target(self):\n return self._properties.get('target')",
"def Target(self):\n return self._target",
"def product_id(self):\n return self._product_id",
"def product_id(self):\n return self._product_id",
"def product_id(self):\n return self._product_id",
"def product_id(self):\n return self._product_id",
"def target_product(self, target_product):\n\n self._target_product = target_product",
"def get_target(self, ):\n return self.get_parameter('target')",
"def target(self):\n return self.__target",
"def target(self):\n return self.__target",
"def target_resource_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"target_resource_id\")",
"def target_resource_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"target_resource_id\")",
"def get_card_product(self):\n\n return self.card_product",
"def target( self ) :\n\n return( self.__target )",
"def _target(self):\n return self.__target",
"def target(self) :\n\t\ttry :\n\t\t\treturn self._target\n\t\texcept Exception as e:\n\t\t\traise e"
] | [
"0.679874",
"0.65475947",
"0.65354234",
"0.6525121",
"0.6492427",
"0.6492427",
"0.6492427",
"0.638465",
"0.6314479",
"0.616897",
"0.61486644",
"0.61486644",
"0.61486644",
"0.61486644",
"0.6129035",
"0.60792124",
"0.6019437",
"0.6019437",
"0.6019437",
"0.6019437",
"0.60076684",
"0.60051125",
"0.594989",
"0.594989",
"0.5890792",
"0.5890792",
"0.58888894",
"0.58790237",
"0.58569455",
"0.5806063"
] | 0.8075166 | 0 |
Sets the target_product of this SubscriptionProductRetirement. | def target_product(self, target_product):
self._target_product = target_product | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def target_resource(self, target_resource):\n self._target_resource = target_resource",
"def product(self, product):\n\n self._product = product",
"def product(self, product):\n\n self._product = product",
"def target_product(self):\n return self._target_product",
"def product(self, product):\n self._product = product",
"def setTarget(self, target):\n\n self._target = target",
"def product_revision(self, product_revision):\n\n self._product_revision = product_revision",
"def product_id(self, product_id):\n\n self._product_id = product_id",
"def product_id(self, product_id):\n\n self._product_id = product_id",
"def product_id(self, product_id):\n self._product_id = product_id",
"def target(self, target):\n\n self._target = target",
"def set_product(self, product):\n self.single_selection_from_static_kendo_dropdown(self.product_kendo_dropdown_locator, product)",
"def SetTarget(self, entity):\n\t\tself.target = entity",
"def target_id(self, target_id):\n\n self._target_id = target_id",
"def product(self, product):\n if product is None:\n raise ValueError(\"Invalid value for `product`, must not be `None`\") # noqa: E501\n\n self._product = product",
"def set_sms_product(self, product):\n self.single_selection_from_static_kendo_dropdown(self.sms_product_kendo_dropdown_locator, product)",
"def target_version_id(self, target_version_id):\n\n self._target_version_id = target_version_id",
"def product_version(self, product_version):\n\n self._product_version = product_version",
"def target(self, target) :\n\t\ttry :\n\t\t\tself._target = target\n\t\texcept Exception as e:\n\t\t\traise e",
"def target(self, target) :\n\t\ttry :\n\t\t\tself._target = target\n\t\texcept Exception as e:\n\t\t\traise e",
"def target_configuration(self, target_configuration):\n\n self._target_configuration = target_configuration",
"def target_industry(self, target_industry):\n\n self._target_industry = target_industry",
"def target_contact(self, target_contact):\n \n self._target_contact = target_contact",
"def target_region(self, target_region):\n\n self._target_region = target_region",
"def target_id(self, target_id: str):\n\n self._target_id = target_id",
"def product_config(self, product_config):\n\n self._product_config = product_config",
"def target_element_id(self, target_element_id):\n\n self._target_element_id = target_element_id",
"def target_microversion_id(self, target_microversion_id):\n\n self._target_microversion_id = target_microversion_id",
"def target_description(self, target_description):\n\n self._target_description = target_description",
"def target(self, target):\n self.__target = float(target)"
] | [
"0.6490736",
"0.6366156",
"0.6366156",
"0.633028",
"0.62527555",
"0.6134227",
"0.6102009",
"0.59970754",
"0.59970754",
"0.59861636",
"0.5946594",
"0.5928907",
"0.58098286",
"0.57920307",
"0.5782366",
"0.57533026",
"0.57472503",
"0.56921405",
"0.56693083",
"0.56693083",
"0.56607616",
"0.56053096",
"0.5570115",
"0.5566127",
"0.5559968",
"0.5530783",
"0.55096835",
"0.5493087",
"0.54669654",
"0.54445"
] | 0.84785706 | 0 |
Creates a module for converting to two theta | def pixels_two_theta_module(id=None, datatype=None, action=None,
version='0.0', fields=[], xtype=None, **kwargs):
icon = {
'URI': config.IMAGES + config.ANDR_FOLDER + "twotheta.png",
'image': config.IMAGES + config.ANDR_FOLDER + "twotheta_image.png",
'terminals': {
'input': (-12, 16, -1, 0),
'output': (48, 16, 1, 0),
}
}
xtype = 'AutosizeImageContainer'
terminals = [
dict(id='input',
datatype=datatype,
use='in',
description='data',
required=True,
multiple=False,
),
dict(id='output',
datatype=datatype,
use='out',
description='data with two theta',
),
]
# pixels_per_degree=80.0, qzero_pixel=309, instr_resolution=1e-6
fields = {
"pixels_per_degree": {
"type":"float",
"label": "pixels per degree",
"name": "pixels_per_degree",
"value": 52.8,
},
"qzero_pixel": {
"type":"float",
"label": "qzero pixel",
"name": "qzero_pixel",
"value": 358,
},
"instr_resolution": {
"type":"float",
"label": "instrument resolution",
"name": "instr_resolution",
"value": 1e-6,
},
"ax_name": {
"type": "List",
"label": "Name of pixel axis",
"name": "ax_name",
"value": 'xpixel',
"choices": ['xpixel', 'ypixel']
}
}
# Combine everything into a module.
module = Module(id=id,
name='Pixels to two theta',
version=version,
description=action.__doc__,
icon=icon,
terminals=terminals,
fields=fields,
action=action,
xtype=xtype,
**kwargs
)
return module | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def theta():\n pass",
"def theta(a, b):\n \n \n def norm_vec(x):\n norm_out = sqrt(dot(x, x))\n return norm_out\n \n theta = acos(dot(a, b) / (norm_vec(a) * norm_vec(b))) * 180 / pi\n \n print theta",
"def world_to_tanp(self, ra, dec):\n x, y = ra, dec\n return x, y",
"def class AlphaTheta(object):\n pass",
"def get_theta(p1,p2):\r\n \r\n dy = p1[1] - p2[1]\r\n dx = p1[0] - p2[0]\r\n theta = atan2(dy,dx)\r\n return theta",
"def theta_v_time():\n pass",
"def tanp_to_world(self, x, y):\n ra, dec = x, y\n return ra, dec",
"def T2(params):\n\t# handle the base frame, eqn 3.9, p36\n\tt = np.array([\n\t\t[1.0, 0.0, 0.0, 0.0],\n\t\t[0.0, 1.0, 0.0, 0.0],\n\t\t[0.0, 0.0, 1.0, 0.0],\n\t\t[0.0, 0.0, 0.0, 1.0]\n\t])\n\tfor i, p in enumerate(params):\n\t\tt = t.dot(rot(*p))\n\treturn t",
"def model(theta, x):\n\tw, b = theta\n\treturn w * x + b",
"def op_rotation(theta):\n C2 = np.cos(2 * theta)\n S2 = np.sin(2 * theta)\n rot = np.array([[1, 0, 0, 0],\n [0, C2, S2, 0],\n [0, -S2, C2, 0],\n [0, 0, 0, 1]])\n return rot",
"def to_thermo(x,y):\n\tp = p_from_y(y)\n\tT_C = T_from_xp(x,p) - C_to_K\n\treturn T_C, p",
"def generate_theta_sv(ar):\n theta_out = []\n\n mu_y = 0\n while abs(mu_y) < 0.5:\n mu_y = np.random.uniform(-6, 6)\n\n theta_out.append(mu_y) # Expected return.\n theta_out.append(np.random.uniform(0, 3)) # Mu_s.\n theta_out.append(np.random.uniform(0, 0.7)) # Phi_1\n theta_out.append(np.random.uniform(0, 0.7)) # Phi_2\n\n # Initialize autoregressive coefficients in lag polynomial.\n #p = [-1, -1, 1]\n\n #while sum(abs(np.roots(p)) < 1.2) != 0:\n #p = [np.random.randn() for i in range(ar)]\n #p = [np.random.uniform(0, 1)*-1 for i in range(ar)]\n #p.append(1)\n\n #theta_out.append(-1*p[1]) # Phi_1\n #theta_out.append(-1*p[0]) # Phi_2\n #theta_out.append(-1*p[1])\n #theta_out.append(-1*p[0])\n #theta_out.append(np.random.rand()*2) # Sigma_eta.\n\n return theta_out",
"def theta(lam, gam, p):\n #lam = lam - 1e-15\n return np.pi - np.arccos(np.divide(-1 + lam*np.cos(2*np.pi*p ), w(lam, gam, p) ) )",
"def RotationY(theta):\n\n return Rotation([0., 1., 0.], theta)",
"def theta(flag, S, K, t, r, sigma, q): \n\n b = r-q\n\n return numerical_theta(flag, S, K, t, r, sigma, b, f)",
"def convert_tan(node, **kwargs):\n return create_basic_op_node('Tan', node, kwargs)",
"def _model(self, t, theta, period, tmpid):\n template = self.templates[tmpid]\n phase = (t / period - theta[2]) % 1\n return theta[0] + theta[1] * template(phase)",
"def ab2rhotheta(a, b):\n \"\"\" also : y - ax - b = 0 \"\"\"\n \"\"\" y*sin(theta) + x*cos(theta) - rho = 0 \"\"\"\n #print(\"a: %f b: %f\" % (a, b))\n theta = math.atan(a) + math.pi/2.0\n rho = b*math.sin(theta)\n #print(\"a: %f b: %f rho: %f theta: %f\" % (a, b, rho, theta))\n return (rho, theta)",
"def theta(point_a, point_b):\r\n dx = point_b[0] - point_a[0]\r\n dy = point_b[1] - point_a[1]\r\n\r\n if abs(dx) < 1.e-6 and abs(dy) < 1.e-6:\r\n return 360\r\n else:\r\n t = dy/(abs(dx) + abs(dy))\r\n\r\n if dx < 0:\r\n t = 2 - t\r\n elif dy < 0:\r\n t += 4\r\n\r\n if t == 0:\r\n return 360\r\n\r\n return t*90",
"def convert_tanh(node, **kwargs):\n return create_basic_op_node('Tanh', node, kwargs)",
"def dec2theta(dec: float) -> float:\n return np.pi / 2.0 - np.pi / 180.0 * dec",
"def create_tangent_angles_equal(self):\n\n self.text_mirror = TextMobject(r\"Specular reflection\")\n self.text_mirror.move_to(4.0 * RIGHT + 2.0 * UP)\n\n self.tex_derive_ti_tr = TexMobject(r\"\\theta_{i}\", r\"=\", r\"\\theta_{r}\", r\"=\", r\"\\theta_{0}\")\n self.tex_derive_ti_tr[0].set_color(self.tex_theta_in_color)\n self.tex_derive_ti_tr[2].set_color(self.tex_theta_ref_color)\n self.tex_derive_ti_tr[4].set_color(RED)\n self.tex_derive_ti_tr.move_to(4.0 * RIGHT + 1.0 * UP)\n\n self.tex_derive_tan_tin_tan_tr = TexMobject(r\"90^{\\circ}\", r\"-\", r\"\\theta_{i}\",\n r\"=\",\n r\"90^{\\circ}\", r\"-\", r\"\\theta_{r}\",\n r\"=\", r\"\\theta_{0}'\")\n for i in range(0,3):\n self.tex_derive_tan_tin_tan_tr[ i].set_color(self.tex_theta_in_color)\n self.tex_derive_tan_tin_tan_tr[4+i].set_color(self.tex_theta_ref_color)\n self.tex_derive_tan_tin_tan_tr[8].set_color(RED)\n self.tex_derive_tan_tin_tan_tr.move_to(4.0 * RIGHT + 0.0 * UP)\n\n self.theta_0 = TexMobject(r\"\\theta_{0}\"). set_color(RED)\n self.theta_0_d = TexMobject(r\"\\theta_{0}'\").set_color(RED)",
"def world_to_tanp(self, ra, dec):\n tpc = self._default_tpcorr if self._tpcorr is None else self._tpcorr\n v2, v3 = self._world_to_v23(ra, dec)\n x, y = tpc.v2v3_to_tanp(v2, v3)\n return x, y",
"def thetaCal(opposite, adjacent):\n opposite = opposite * (-1)\n theta = math.atan2(opposite, adjacent) # * (180 / 3.1415)\n theta = math.degrees(theta)\n theta = round(theta, 2)\n\n if theta < 0:\n theta = 180 + theta\n theta = theta + 180\n theta = round(theta, 2)\n return theta",
"def t2v(T):\n x = T[0, 2]\n y = T[1, 2]\n theta = np.arctan2(T[1, 0], T[0, 0])\n v = np.array([x, y, theta])\n return v",
"def theta_rule(a, u, h, t, n, th):\n \n Dt = t[n+1] - t[n]\n num = (1.0 - (1-th)*a*Dt)*u[n] + (1-th)*a*Dt*h(t[n]) + th*a*Dt*h(t[n+1])\n den = 1 + th*a*Dt\n \n return num/den",
"def twist(self, theta):\n self.G_indv = li.expm(self.xi_hat * theta)",
"def _tf2_ ( self , *args ) :\n ##\n if not hasattr ( self , '_wo2' ) : self._wo2 = _WO2_ ( self )\n if not self._wo2 : self._wo2 = _WO2_ ( self )\n ## \n _wo = self._wo2\n fun = ROOT.TF2 ( funID () , _wo , *args )\n fun.SetNpx ( 100 ) \n fun.SetNpy ( 100 ) \n #\n return fun",
"def T(params, phi):\n\t# handle the base frame, eqn 3.9, p36\n\tt = np.array([\n\t\t[cos(phi), -sin(phi), 0.0, 0.0],\n\t\t[sin(phi), cos(phi), 0.0, 0.0],\n\t\t[0.0, 0.0, 1.0, 0.0],\n\t\t[0.0, 0.0, 0.0, 1.0]\n\t])\n\tfor i, p in enumerate(params):\n\t\tt = t.dot(rot(*p))\n\treturn t",
"def sentence_encoding_rnn_phi(t1, t2):\n return (t1.leaves(), t2.leaves())"
] | [
"0.7062045",
"0.62151426",
"0.61780596",
"0.6039193",
"0.5918481",
"0.58948064",
"0.57671696",
"0.5749727",
"0.5746482",
"0.5715399",
"0.5696348",
"0.56941783",
"0.56172824",
"0.5604744",
"0.5569989",
"0.5564663",
"0.55641603",
"0.5526037",
"0.5512258",
"0.5491512",
"0.54909927",
"0.54890823",
"0.5465623",
"0.5461398",
"0.5456362",
"0.54449725",
"0.5444146",
"0.54397774",
"0.54241955",
"0.5419632"
] | 0.6354712 | 1 |
Builds and compiles an LSTM model with the provided hyperparameters | def build_lstm_model(num_features,
embedding_size=None,
kernel_size=None,
filters=None,
pool_size=None,
lstm_output_size=None):
# Embedding
if embedding_size is None:
embedding_size = 64
# Convolution
if kernel_size is None:
kernel_size = 5
if filters is None:
filters = 64
if pool_size is None:
pool_size = 4
# LSTM
if lstm_output_size is None:
lstm_output_size = 70
print('Build model...')
lstm_model = models.lstm(num_features,
embedding_size=embedding_size,
kernel_size=kernel_size,
filters=filters,
pool_size=pool_size,
lstm_output_size=lstm_output_size)
return lstm_model | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_model():\n model = Sequential()\n\n # declare the sizes of the layers (1d input and output)\n layers = [1, 50, 100, 1]\n\n # first hidden layer, using linear activation (not specified)\n model.add(LSTM(layers[1], input_shape=(None, layers[0]), return_sequences=True))\n model.add(Dropout(0.2))\n\n # second hidden layer\n model.add(LSTM(layers[2], return_sequences=False))\n model.add(Dropout(0.2))\n\n # third hidden layer\n model.add(Dense(layers[3]))\n model.add(Activation(\"linear\"))\n\n # compile using MSE as loss function for regression, RMSPROP as optimiser\n model.compile(loss=\"mse\", optimizer=\"RMSProp\", metrics=['accuracy'])\n\n # return the model\n return model",
"def LSTM_train(X_train, Y_train, X_dev, Y_dev, R_train, R_dev, hyperparams):",
"def build_model(input_shape):\r\n model = keras.Sequential()\r\n\r\n # 2 LSTM layers\r\n model.add(keras.layers.LSTM(64, input_shape=input_shape, return_sequences=True))\r\n model.add(keras.layers.LSTM(64))\r\n\r\n # feed into dense layer\r\n model.add(keras.layers.Dense(64, activation='relu'))\r\n model.add(keras.layers.Dropout(0.3))\r\n\r\n # output layer (with softmax)\r\n model.add(keras.layers.Dense(10, activation='softmax'))\r\n\r\n return model",
"def build_lstm11(embeddings, shape, settings):\n model = Sequential()\n model.add(\n Embedding(\n embeddings.shape[0],\n embeddings.shape[1],\n input_length=shape['max_length'],\n trainable=False,\n weights=[embeddings],\n mask_zero=False\n )\n )\n model.add(TimeDistributed(Dense(shape['n_hidden'], use_bias=False), name='td9a'))\n model.add(Bidirectional(LSTM(shape['n_hidden'], return_sequences=True,\n recurrent_dropout=settings['dropout'],\n dropout=settings['dropout']), name='bidi9a'))\n # model.add(GlobalMaxPool1D())\n # model.add(BatchNormalization())\n # model.add(Dropout(settings['dropout'] / 2.0))\n\n # model.add(TimeDistributed(Dense(shape['n_hidden'], use_bias=False), name='td9b'))\n model.add(Bidirectional(LSTM(shape['n_hidden'] // 2, return_sequences=True,\n recurrent_dropout=settings['dropout'],\n dropout=settings['dropout']), name='bidi9b'))\n model.add(Bidirectional(LSTM(shape['n_hidden'] // 2, return_sequences=True,\n recurrent_dropout=settings['dropout'],\n dropout=settings['dropout']), name='bidi9c'))\n model.add(GlobalMaxPool1D(name='mp9'))\n model.add(BatchNormalization(name='bn9'))\n model.add(Dropout(settings['dropout'] / 2.0, name='drop9b'))\n\n model.add(Dense(shape['n_class'], activation='sigmoid', name='den9b'))\n xprint('build_lstm9: embeddings=%s shape=%s' % (dim(embeddings), shape))\n return model",
"def _build_model(self, input_dim=0):\n model = Sequential()\n model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2, return_sequences=True,\n input_shape=(self.trace_size, input_dim)))\n model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2, return_sequences=True))\n model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2, return_sequences=True))\n\n # A Dense layer is used as the output for the network.\n model.add(TimeDistributed(Dense(input_dim, activation='softmax')))\n if self.gpus > 1:\n model = keras.utils.multi_gpu_model(model, gpus=self.gpus)\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n return model",
"def model_create_lstm(input_dim, output_dim, n_features, n_houses, x_train, y_train, x_test, y_test, early=None):\r\n model = Sequential()\r\n for _ in range(nn_hparams['num_layers']):\r\n model.add(LSTM(nn_hparams['units'], activation=nn_hparams['activation'], input_shape=(input_dim,n_features), return_sequences=True))\r\n model.add(Dropout(nn_hparams['dropout']))\r\n model.add(Flatten())\r\n model.add(Dense(y_train.shape[1]*y_train.shape[2]))\r\n custom_optimizer = getattr(optimizers, nn_hparams['optimizer'])(lr=nn_hparams['learning_rate'], beta_1=nn_hparams['beta_1'], beta_2=nn_hparams['beta_2'])\r\n model.compile(optimizer=custom_optimizer, loss=nn_hparams['loss'])\r\n y_train = y_train.reshape((y_train.shape[0], y_train.shape[1]*y_train.shape[2]))\r\n y_test = y_test.reshape((y_test.shape[0], y_test.shape[1]*y_test.shape[2]))\r\n if early:\r\n model.fit(x_train, y_train, validation_data=(x_test,y_test), epochs=nn_hparams['epochs'], verbose=1, callbacks=[early])\r\n else:\r\n model.fit(x_train, y_train, validation_data=(x_test,y_test), epochs=nn_hparams['epochs'], verbose=1)\r\n model_loss = model.evaluate(x_train, y_train, verbose=0)\r\n \r\n return model, model_loss",
"def BuildLSTMLayer(batch_size, seq_length, num_inputs, num_nodes):\n weights = RandomVar(\n LSTMCellWeightsShape(num_inputs, num_nodes), name='weights')\n m = array_ops.zeros([batch_size, num_nodes], name='init_m')\n c = array_ops.zeros([batch_size, num_nodes], name='init_c')\n x_seq, pad_seq = RandomInputs(batch_size, seq_length, num_inputs)\n\n out_seq = LSTMLayer('lstm', weights, m, c, x_seq, pad_seq)\n return out_seq, [weights]",
"def build_lstm_nnet(X, base_config, mid_layers_config, model_loss, optimizer):\n n_input_neurons = X.shape[1]\n\n model = Sequential()\n model.add(\n LSTM(\n base_config[\"first_layer\"][\"mult\"] * n_input_neurons,\n input_shape=(n_input_neurons, X.shape[2]),\n return_sequences=True,\n )\n )\n model.add(Dropout(rate=base_config[\"first_layer\"][\"dropout_rate\"]))\n\n for i in range(mid_layers_config[\"n_layers\"]):\n model.add(\n LSTM(mid_layers_config[\"mult\"] * n_input_neurons, return_sequences=True)\n )\n model.add(Dropout(rate=mid_layers_config[\"dropout_rate\"]))\n\n model.add(LSTM(base_config[\"last_layer\"][\"mult\"] * n_input_neurons))\n model.add(Dropout(rate=base_config[\"last_layer\"][\"dropout_rate\"]))\n # TO DO : parametrize this\n model.add(Dense(1))\n\n if model_loss == \"max_error\":\n model_loss = max_error_loss\n\n model.compile(loss=model_loss, optimizer=optimizer)\n\n return model",
"def make_model():\n hidden_size = 256\n model = Sequential()\n model.add(LSTM(hidden_size, return_sequences=True,\n input_shape=(config.seq_length, 256)))\n model.compile(loss='categorical_crossentropy', optimizer='adam',\n metrics=['categorical_accuracy'])\n return model",
"def build_lstm8(embeddings, shape, settings):\n model = Sequential()\n model.add(\n Embedding(\n embeddings.shape[0],\n embeddings.shape[1],\n input_length=shape['max_length'],\n trainable=False,\n weights=[embeddings],\n mask_zero=False,\n name='eembed'\n )\n )\n model.add(TimeDistributed(Dense(shape['n_hidden'], use_bias=False, name='td8')))\n model.add(Bidirectional(LSTM(shape['n_hidden'], return_sequences=True,\n recurrent_dropout=settings['dropout'],\n dropout=settings['dropout']), name='bidi'))\n model.add(Flatten(name='flaaten'))\n model.add(BatchNormalization())\n model.add(Dropout(settings['dropout'] / 2.0))\n model.add(Dense(shape['n_class'], activation='sigmoid'))\n xprint('build_lstm8: embeddings=%s shape=%s' % (dim(embeddings), shape))\n return model",
"def build_model(look_back_window_size):\n strategy = tf.distribute.MirroredStrategy()\n print('Number of devices: {}'.format(strategy.num_replicas_in_sync))\n\n with strategy.scope():\n model = Sequential()\n model.add(LSTM(units=50, input_shape=(look_back_window_size, 1), return_sequences=False, name=\"lstm-1\"))\n model.add(Dense(25, activation='relu', name=\"dense-1\"))\n model.add(Dense(5, activation='relu', name=\"dense-2\"))\n model.add(Dense(1, name=\"dense-3\"))\n model.compile(loss='mse', optimizer='adam')\n\n print(model.summary()) # Summary to console as text\n plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True) # Graphical View\n\n return model",
"def lstm(self):\n # Model.\n model = Sequential()\n model.add(LSTM(2048, return_sequences=True,\n input_shape=self.input_shape,\n dropout=0.0))\n #model.add(Flatten()) #qiao_added\n # model.add(Dense(1024, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(2048, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Flatten())\n model.add(Dense(self.nb_classes, activation='softmax'))\n return model",
"def __init__(self, layers, in_dim, lstm_dim, word_vocab, model, pre_trained=None):\n self._model = model\n if pre_trained is None:\n self._E = model.add_lookup_parameters((len(word_vocab), in_dim))\n else:\n self._E = model.lookup_parameters_from_numpy(pre_trained)\n self._fwd_RNN_first = dy.VanillaLSTMBuilder(layers, in_dim, lstm_dim, model)\n self._bwd_RNN_first = dy.VanillaLSTMBuilder(layers, in_dim, lstm_dim, model)\n self._classifier = LinearClassifier(2 * lstm_dim, 3, model)\n self._w2i = word_vocab",
"def makemod(LSTM_layers, LSTM_sizes, Dense_layers, text_designation, vocab_size, x_train, y_train, val_size=0.1,\n num_epochs=25, batch_size=False, loss_type=\"categorical_crossentropy\", opt=\"adam\"):\n if not batch_size:\n batch_size = \"No\"\n for lstmlayer in LSTM_layers:\n for lstmsize in LSTM_sizes:\n for denselayer in Dense_layers:\n NAME = f\"{text_designation}-model, {lstmlayer} layer(s) of {lstmsize} LSTM Nodes, \" \\\n f\"{denselayer} Dense, {num_epochs} Ep, {batch_size} Bat, \" \\\n f\"{val_size*100}% Val\"\n model = Sequential()\n for l in range(lstmlayer - 1):\n model.add(LSTM(lstmsize, return_sequences=True, input_shape=(x_train.shape[1], x_train.shape[2])))\n model.add(LSTM(lstmsize, input_shape=(x_train.shape[1], x_train.shape[2])))\n for l in range(denselayer):\n model.add(Dense(vocab_size, activation='relu'))\n model.add(Dense(vocab_size, activation='softmax'))\n print(model.summary())\n # Log the model\n tb = TensorBoard(log_dir=f\"logs\\logs\\{NAME}\")\n # Compile model\n model.compile(loss=loss_type, optimizer=opt, metrics=[\"accuracy\"])\n es = EarlyStopping(monitor='val_loss', patience=10, verbose=1, restore_best_weights=True)\n model.fit(x_train, y_train, epochs=num_epochs, batch_size=100, validation_split=val_size, shuffle=True,\n verbose=2, callbacks=[tb, es])\n print(\"Model {} created\".format(NAME))\n # Save Model\n model.save(f\"models\\models\\{NAME}\")\n print(\"Model {} saved\".format(NAME))",
"def build(self):\n sequence_input = Input(shape=(self.max_sequence_length, ), dtype='int32')\n embedded_sequences = self.embedding_layer(sequence_input)\n x = Conv1D(128, 5, activation='relu')(embedded_sequences)\n x = MaxPooling1D(5)(x)\n x = Conv1D(128, 5, activation='relu')(x)\n x = MaxPooling1D(5)(x)\n x = Flatten()(x)\n x = Dense(128, activation='relu')(x)\n\n y = Bidirectional(LSTM(50, dropout=0.2, recurrent_dropout=0.2))(embedded_sequences)\n z = concatenate([x, y])\n preds = Dense(6, activation='softmax')(z)\n self.model = Model(sequence_input, preds)",
"def __init__(self, incoming, n_units,\n W_ci=tf.zeros, W_ig=tf.zeros, W_og=tf.zeros, W_fg=tf.zeros,\n b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,\n a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, a_out=tf.identity,\n c_init=tf.zeros, h_init=tf.zeros, learn_c_init=False, learn_h_init=False, forgetgate=True,\n output_dropout=False, store_states=False, return_states=False, precomp_fwds=False,\n tickerstep_biases=None, learn_tickerstep_biases=True, name='LSTM'):\n super(LSTMLayer, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n self.n_units = n_units\n self.lstm_inlets = ['ci', 'ig', 'og', 'fg']\n if return_states:\n store_states = True\n \n #\n # Initialize weights and biases\n #\n \n # Turn W inits into lists [forward_pass, backward_pass]\n W_ci, W_ig, W_og, W_fg = [v[:2] if isinstance(v, list) else [v, v] for v in [W_ci, W_ig, W_og, W_fg]]\n \n # Make W and b tf variables\n W_ci, W_ig, W_og, W_fg = [\n [tofov(v[0], shape=[self.incoming_shape[-1], n_units], var_params=dict(name=n + '_fwd')),\n tofov(v[1], shape=[n_units, n_units], var_params=dict(name=n + '_bwd'))]\n for v, n in zip([W_ci, W_ig, W_og, W_fg], ['W_ci', 'W_ig', 'W_og', 'W_fg'])]\n b_ci, b_ig, b_og, b_fg = [tofov(v, shape=[n_units], var_params=dict(name=n)) for v, n in\n zip([b_ci, b_ig, b_og, b_fg], ['b_ci', 'b_ig', 'b_og', 'b_fg'])]\n \n # Pack weights for fwd and bwd connections\n W_fwd_conc = tf.concat(axis=1, values=[W[0] for W in [W_ci, W_ig, W_og, W_fg]])\n W_bwd_conc = tf.concat(axis=1, values=[W[1] for W in [W_ci, W_ig, W_og, W_fg]])\n \n if not forgetgate:\n print(\"Warning: Setting forgetgate to 0 has not been tested yet, please set the W and b manually \"\n \"to not-trainable tensorflow variables!\")\n \n def a_fg(x):\n return tf.ones(x.get_shape().as_list())\n \n # Initialize bias for tickersteps\n if tickerstep_biases is not None:\n self.W_tickers = OrderedDict(zip_longest(self.lstm_inlets,\n [tofov(tickerstep_biases, shape=[n_units],\n var_params=dict(name='W_tickers_' + g,\n trainable=learn_tickerstep_biases))\n for g in self.lstm_inlets]))\n else:\n self.W_tickers = None\n \n #\n # Create mask for output dropout\n # apply dropout to n_units dimension of outputs, keeping dropout mask the same for all samples,\n # sequence positions, and pixel coordinates\n #\n output_shape = self.get_output_shape()\n if output_dropout:\n out_do_mask = tf.ones(shape=[output_shape[0], output_shape[-1]],\n dtype=tf.float32)\n out_do_mask = tf.nn.dropout(out_do_mask, keep_prob=1. 
- output_dropout,\n noise_shape=[1, output_shape[-1]])\n \n def out_do(x):\n \"\"\"Function for applying dropout mask to outputs\"\"\"\n if output_dropout:\n return out_do_mask * x\n else:\n return x\n \n # Redefine a_out to include dropout (sneaky, sneaky)\n a_out_nodropout = a_out\n \n def a_out(x):\n return a_out_nodropout(out_do(x))\n \n #\n # Handle initializations for h (hidden states) and c (cell states) as Variable\n #\n h_init = out_do(tofov(h_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_h_init)))\n c_init = tofov(c_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_c_init))\n \n # Initialize lists to store LSTM activations and cell states later\n h = [h_init]\n c = [c_init]\n \n self.precomp_fwds = precomp_fwds\n self.store_states = store_states\n self.return_states = return_states\n \n self.W_fwd = OrderedDict(zip(self.lstm_inlets, [W[0] for W in [W_ci, W_ig, W_og, W_fg]]))\n self.W_bwd = OrderedDict(zip(self.lstm_inlets, [W[1] for W in [W_ci, W_ig, W_og, W_fg]]))\n \n self.W_fwd_conc = W_fwd_conc\n self.W_bwd_conc = W_bwd_conc\n self.a = OrderedDict(zip(self.lstm_inlets, [a_ci, a_ig, a_og, a_fg]))\n self.a['out'] = a_out\n self.b = OrderedDict(zip(self.lstm_inlets, [b_ci, b_ig, b_og, b_fg]))\n self.h = h\n self.c = c\n self.external_rec = None\n \n self.ig = []\n self.og = []\n self.ci = []\n self.fg = []\n \n self.out = tf.expand_dims(h_init, 1)\n self.name = name",
"def set_model(self, n_lstm_layers=3, n_lstm_nodes=150, n_dense_1=1, n_nodes_dense_1=300, n_dense_2=4, n_nodes_dense_2=200, dropout_rate=0.1, learning_rate=0.001, batch_norm=True, batch_momentum=0.99):\n\n input_objects = keras.layers.Input(shape=(len(self.low_level_vars), len(self.low_level_vars[0])), name='input_objects') \n input_global = keras.layers.Input(shape=(len(self.high_level_vars),), name='input_global')\n lstm = input_objects\n decay = 0.2\n for i_layer in range(n_lstm_layers):\n #lstm = keras.layers.LSTM(n_lstm_nodes, activation='tanh', kernel_regularizer=keras.regularizers.l2(decay), recurrent_regularizer=keras.regularizers.l2(decay), bias_regularizer=keras.regularizers.l2(decay), return_sequences=(i_layer!=(n_lstm_layers-1)), name='lstm_{}'.format(i_layer))(lstm)\n lstm = keras.layers.LSTM(n_lstm_nodes, activation='tanh', return_sequences=(i_layer!=(n_lstm_layers-1)), name='lstm_{}'.format(i_layer))(lstm)\n\n #inputs to dense layers are output of lstm and global-event variables. Also batch norm the FC layers\n dense = keras.layers.concatenate([input_global, lstm])\n for i in range(n_dense_1):\n dense = keras.layers.Dense(n_nodes_dense_1, activation='relu', kernel_initializer='he_uniform', name = 'dense1_%d' % i)(dense)\n if batch_norm:\n dense = keras.layers.BatchNormalization(name = 'dense_batch_norm1_%d' % i)(dense)\n dense = keras.layers.Dropout(rate = dropout_rate, name = 'dense_dropout1_%d' % i)(dense)\n\n for i in range(n_dense_2):\n dense = keras.layers.Dense(n_nodes_dense_2, activation='relu', kernel_initializer='he_uniform', name = 'dense2_%d' % i)(dense)\n #add droput and norm if not on last layer\n if batch_norm and i < (n_dense_2 - 1):\n dense = keras.layers.BatchNormalization(name = 'dense_batch_norm2_%d' % i)(dense) \n if i < (n_dense_2 - 1):\n dense = keras.layers.Dropout(rate = dropout_rate, name = 'dense_dropout2_%d' % i)(dense)\n\n output = keras.layers.Dense(1, activation = 'sigmoid', name = 'output')(dense)\n #optimiser = keras.optimizers.Nadam(lr = learning_rate)\n optimiser = keras.optimizers.Adam(lr = learning_rate)\n\n model = keras.models.Model(inputs = [input_global, input_objects], outputs = [output])\n model.compile(optimizer = optimiser, loss = 'binary_crossentropy')\n self.model = model",
"def __init__(self, incoming, n_units,\n W_ci=tf.zeros, W_ig=tf.zeros, W_og=tf.zeros, W_fg=tf.zeros,\n b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,\n a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, a_out=tf.identity,\n c_init=tf.zeros, h_init=tf.zeros, learn_c_init=False, learn_h_init=False, forgetgate=True,\n output_dropout=False, store_states=False, return_states=False, precomp_fwds=False,\n tickerstep_biases=None, learn_tickerstep_biases=True, name='LSTM'):\n super(LSTMLayerGetNetInput, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n self.n_units = n_units\n self.lstm_inlets = ['ci', 'ig', 'og', 'fg']\n if return_states:\n store_states = True\n \n #\n # Initialize weights and biases\n #\n \n # Turn W inits into lists [forward_pass, backward_pass]\n W_ci, W_ig, W_og, W_fg = [v[:2] if isinstance(v, list) else [v, v] for v in [W_ci, W_ig, W_og, W_fg]]\n \n # Make W and b tf variables\n W_ci, W_ig, W_og, W_fg = [\n [tofov(v[0], shape=[self.incoming_shape[-1], n_units], var_params=dict(name=n + '_fwd')),\n tofov(v[1], shape=[n_units, n_units], var_params=dict(name=n + '_bwd'))]\n for v, n in zip([W_ci, W_ig, W_og, W_fg], ['W_ci', 'W_ig', 'W_og', 'W_fg'])]\n b_ci, b_ig, b_og, b_fg = [tofov(v, shape=[n_units], var_params=dict(name=n)) for v, n in\n zip([b_ci, b_ig, b_og, b_fg], ['b_ci', 'b_ig', 'b_og', 'b_fg'])]\n \n # Pack weights for fwd and bwd connections\n W_fwd_conc = tf.concat(axis=1, values=[W[0] for W in [W_ci, W_ig, W_og, W_fg]])\n W_bwd_conc = tf.concat(axis=1, values=[W[1] for W in [W_ci, W_ig, W_og, W_fg]])\n \n if not forgetgate:\n print(\"Warning: Setting forgetgate to 0 has not been tested yet, please set the W and b manually \"\n \"to not-trainable tensorflow variables!\")\n \n def a_fg(x):\n return tf.ones(x.get_shape().as_list())\n \n # Initialize bias for tickersteps\n if tickerstep_biases is not None:\n self.W_tickers = OrderedDict(zip_longest(self.lstm_inlets,\n [tofov(tickerstep_biases, shape=[n_units],\n var_params=dict(name='W_tickers_' + g,\n trainable=learn_tickerstep_biases))\n for g in self.lstm_inlets]))\n else:\n self.W_tickers = None\n \n #\n # Create mask for output dropout\n # apply dropout to n_units dimension of outputs, keeping dropout mask the same for all samples,\n # sequence positions, and pixel coordinates\n #\n output_shape = self.get_output_shape()\n if output_dropout:\n out_do_mask = tf.ones(shape=[output_shape[0], output_shape[-1]],\n dtype=tf.float32)\n out_do_mask = tf.nn.dropout(out_do_mask, keep_prob=1. 
- output_dropout,\n noise_shape=[1, output_shape[-1]])\n \n def out_do(x):\n \"\"\"Function for applying dropout mask to outputs\"\"\"\n if output_dropout:\n return out_do_mask * x\n else:\n return x\n \n # Redefine a_out to include dropout (sneaky, sneaky)\n a_out_nodropout = a_out\n \n def a_out(x):\n return a_out_nodropout(out_do(x))\n \n #\n # Handle initializations for h (hidden states) and c (cell states) as Variable\n #\n h_init = out_do(tofov(h_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_h_init)))\n c_init = tofov(c_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_c_init))\n \n # Initialize lists to store LSTM activations and cell states later\n h = [h_init]\n c = [c_init]\n \n self.precomp_fwds = precomp_fwds\n self.store_states = store_states\n self.return_states = return_states\n \n self.W_fwd = OrderedDict(zip(self.lstm_inlets, [W[0] for W in [W_ci, W_ig, W_og, W_fg]]))\n self.W_bwd = OrderedDict(zip(self.lstm_inlets, [W[1] for W in [W_ci, W_ig, W_og, W_fg]]))\n \n self.W_fwd_conc = W_fwd_conc\n self.W_bwd_conc = W_bwd_conc\n self.a = OrderedDict(zip(self.lstm_inlets, [a_ci, a_ig, a_og, a_fg]))\n self.a['out'] = a_out\n self.b = OrderedDict(zip(self.lstm_inlets, [b_ci, b_ig, b_og, b_fg]))\n self.h = h\n self.c = c\n self.external_rec = None\n \n self.ig = []\n self.og = []\n self.ci = []\n self.fg = []\n \n self.out = tf.expand_dims(h_init, 1)\n self.name = name\n \n self.cur_net_fwd = dot_product(tf.zeros(self.incoming_shape[:1] + self.incoming_shape[2:]),\n tf.zeros(self.W_fwd_conc.shape.as_list()))",
"def __init__(\n self,\n n_timesteps: int = 80,\n n_features: int = 5,\n n_LSTM_layers: int = 2,\n LSTM_size: int = 64,\n random_seed: Optional[int] = None\n ):\n\n self.n_timesteps = n_timesteps\n self.n_features = n_features\n self.random_seed = random_seed\n\n self.model = self._define_model(n_LSTM_layers, LSTM_size)",
"def lstm_classifier(**kwargs):\n input_vector_size = kwargs.get('input_vector_size', 128)\n dense_size = kwargs.get('dense_size', 20)\n output = kwargs.get('label_size', 2)\n timesteps = 1\n xav_init = tf.contrib.layers.xavier_initializer()\n adam = optimizers.Adam(lr=0.01)\n sgd = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)\n ##########\n\n model = Sequential()\n model.add(CuDNNLSTM(64))\n model.add(Dense(20, activation='softmax', \n kernel_initializer='glorot_normal',\n activity_regularizer=regularizers.l2(0.001)))\n model.add(Dropout(0.2))\n model.add(Dense(20, activation='softmax', \n kernel_initializer='glorot_normal',\n activity_regularizer=regularizers.l2(0.001)))\n model.add(Dropout(0.2))\n model.add(Dense(2, activation='softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n \n return model",
"def create_model(sequence_length, chars_length):\n model = Sequential()\n model.add(LSTM(1024, input_shape=(sequence_length, chars_length)))\n model.add(Dense(64))\n model.add(Dense(chars_length))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='RMSprop')\n return model",
"def __init__(self, incoming, n_units,\n W_ci=tf.zeros, W_ig=tf.zeros, W_og=tf.zeros, W_fg=tf.zeros,\n b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,\n a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, a_out=tf.identity,\n c_init=tf.zeros, h_init=tf.zeros, learn_c_init=False, learn_h_init=False, forgetgate=True,\n output_dropout=False, store_states=False, return_states=False, precomp_fwds=False,\n tickerstep_biases=None, learn_tickerstep_biases=True, name='LSTM'):\n super(LSTMLayerSetNetInput, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n self.n_units = n_units\n self.lstm_inlets = ['ci', 'ig', 'og', 'fg']\n if return_states:\n store_states = True\n \n # Make W and b tf variables\n W_ci, W_ig, W_og, W_fg = [\n tofov(v, shape=[n_units, n_units], var_params=dict(name=n + '_bwd'))\n for v, n in zip([W_ci, W_ig, W_og, W_fg], ['W_ci', 'W_ig', 'W_og', 'W_fg'])]\n b_ci, b_ig, b_og, b_fg = [tofov(v, shape=[n_units], var_params=dict(name=n)) for v, n in\n zip([b_ci, b_ig, b_og, b_fg], ['b_ci', 'b_ig', 'b_og', 'b_fg'])]\n \n # Pack weights for bwd connections\n W_bwd_conc = tf.concat(axis=1, values=[W_ci, W_ig, W_og, W_fg])\n \n if not forgetgate:\n print(\"Warning: Setting forgetgate to 0 has not been tested yet, please set the W and b manually \"\n \"to not-trainable tensorflow variables!\")\n \n def a_fg(x):\n return tf.ones(x.get_shape().as_list())\n \n # Initialize bias for tickersteps\n if tickerstep_biases is not None:\n self.W_tickers = OrderedDict(zip_longest(self.lstm_inlets,\n [tofov(tickerstep_biases, shape=[n_units],\n var_params=dict(name='W_tickers_' + g,\n trainable=learn_tickerstep_biases))\n for g in self.lstm_inlets]))\n else:\n self.W_tickers = None\n \n #\n # Create mask for output dropout\n # apply dropout to n_units dimension of outputs, keeping dropout mask the same for all samples,\n # sequence positions, and pixel coordinates\n #\n output_shape = self.get_output_shape()\n if output_dropout:\n out_do_mask = tf.ones(shape=[output_shape[0], output_shape[-1]],\n dtype=tf.float32)\n out_do_mask = tf.nn.dropout(out_do_mask, keep_prob=1. 
- output_dropout,\n noise_shape=[1, output_shape[-1]])\n \n def out_do(x):\n \"\"\"Function for applying dropout mask to outputs\"\"\"\n if output_dropout:\n return out_do_mask * x\n else:\n return x\n \n # Redefine a_out to include dropout (sneaky, sneaky)\n a_out_nodropout = a_out\n \n def a_out(x):\n return a_out_nodropout(out_do(x))\n \n #\n # Handle initializations for h (hidden states) and c (cell states) as Variable\n #\n h_init = out_do(tofov(h_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_h_init)))\n c_init = tofov(c_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_c_init))\n \n # Initialize lists to store LSTM activations and cell states later\n h = [h_init]\n c = [c_init]\n \n self.precomp_fwds = precomp_fwds\n self.store_states = store_states\n self.return_states = return_states\n \n self.W_fwd = OrderedDict(zip(self.lstm_inlets, [None, None, None, None]))\n self.W_bwd = OrderedDict(zip(self.lstm_inlets, [W_ci, W_ig, W_og, W_fg]))\n \n self.W_fwd_conc = None\n self.W_bwd_conc = W_bwd_conc\n self.a = OrderedDict(zip(self.lstm_inlets, [a_ci, a_ig, a_og, a_fg]))\n self.a['out'] = a_out\n self.b = OrderedDict(zip(self.lstm_inlets, [b_ci, b_ig, b_og, b_fg]))\n self.h = h\n self.c = c\n self.external_rec = None\n \n self.ig = []\n self.og = []\n self.ci = []\n self.fg = []\n \n self.out = tf.expand_dims(h_init, 1)\n self.name = name",
"def __build_mol_to_latent_model(self):\n\n # Input tensor (MANDATORY)\n encoder_inputs = Input(shape=self.input_shape, name=\"Encoder_Inputs\")\n\n x = encoder_inputs\n\n # The two encoder layers, number of cells are halved as Bidirectional\n encoder = Bidirectional(\n LSTM(\n self.lstm_dim // 2,\n return_sequences=True,\n return_state=True, # Return the states at end of the batch\n name=\"Encoder_LSTM_1\",\n )\n )\n\n x, state_h, state_c, state_h_reverse, state_c_reverse = encoder(x)\n\n if self.bn:\n x = BatchNormalization(momentum=self.bn_momentum, name=\"BN_1\")(x)\n\n encoder2 = Bidirectional(\n LSTM(\n self.lstm_dim // 2,\n return_state=True, # Return the states at end of the batch\n name=\"Encoder_LSTM_2\",\n )\n )\n\n _, state_h2, state_c2, state_h2_reverse, state_c2_reverse = encoder2(x)\n\n # Concatenate all states of the forward and the backward LSTM layers\n states = Concatenate(axis=-1, name=\"Concatenate_1\")(\n [\n state_h,\n state_c,\n state_h2,\n state_c2,\n state_h_reverse,\n state_c_reverse,\n state_h2_reverse,\n state_c2_reverse,\n ]\n )\n\n if self.bn:\n states = BatchNormalization(momentum=self.bn_momentum, name=\"BN_2\")(states)\n\n # A non-linear recombination\n neck_relu = Dense(\n self.codelayer_dim, activation=self.h_activation, name=\"Codelayer_Relu\"\n )\n neck_outputs = neck_relu(states)\n\n if self.bn:\n neck_outputs = BatchNormalization(\n momentum=self.bn_momentum, name=\"BN_Codelayer\"\n )(neck_outputs)\n\n # Add Gaussian noise to \"spread\" the distribution of the latent variables during training\n neck_outputs = GaussianNoise(self.noise_std, name=\"Gaussian_Noise\")(\n neck_outputs\n )\n\n # Define the model\n self.__mol_to_latent_model = Model(encoder_inputs, neck_outputs)\n\n # Name it!\n self.mol_to_latent_model.name = \"mol_to_latent_model\"",
"def create_LSTM_LSTM_model(feats2d, shapes, model_settings, is_training):\n\n if is_training:\n dropout_prob = model_settings['dropout_prob'] \n\n # Get dimensions\n lstm_size = model_settings['lstm_size']\n\n batch_size = tf.shape(feats2d)[0] \n feats2d = tf.reshape(feats2d, shape=[batch_size,-1,model_settings['feature_width']]) # features are of shape [max seq length for batch, 40]\n seq_lengths = shapes[:,0] # all shapes are [seq_length, 40], we extract seq_length\n\n # First LSTM \n\n # LSTM cells\n cell_fw = tf.contrib.rnn.LSTMCell(lstm_size, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(lstm_size, state_is_tuple=True)\n\n # Bi-directional RNN (+ Dropout)\n (output_fw, output_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, feats2d, \n sequence_length=seq_lengths, \n dtype=tf.float32)\n\n # TODO: make predictions after every 64 time slices\n\n concat_rnn = tf.concat([state_fw[0], state_bw[0]], axis=1)\n\n if is_training:\n first_dropout = tf.nn.dropout(concat_rnn, dropout_prob)\n else:\n first_dropout = concat_rnn\n\n # Second LSTM \n # TODO\n\n # Logits Layer\n num_classes = model_settings['num_classes']\n logits = tf.layers.dense(inputs=first_dropout, units=num_classes)\n \n if is_training:\n return logits, dropout_prob\n else:\n return logits",
"def create_model(self, model_input, vocab_size, num_frames, **unused_params):\n lstm_size = FLAGS.lstm_cells\n\n feature_size = model_input.get_shape().as_list()[2]\n sequence_length = model_input.get_shape().as_list()[1]\n\n # start_token is important!\n start_token = tf.zeros_like(tf.expand_dims(model_input[:, 0, :], axis=1), dtype=tf.float32)\n input_sequence = tf.concat( [start_token, model_input[:, :-1, :]], axis=1)\n output_sequence = model_input[:, :, :]\n\n # fc-relu\n # input_sequence = tf.reshape(input_sequence, [-1, feature_size])\n # fc1 = tf.contrib.layers.fully_connected(input_sequence, lstm_size, activation_fn=tf.nn.relu)\n # input_sequence = tf.reshape(fc1, [-1, sequence_length, lstm_size])\n\n cell = tf.contrib.rnn.BasicLSTMCell(lstm_size)\n outputs, state = tf.nn.dynamic_rnn(\n cell=cell, \n inputs=input_sequence, \n sequence_length=None,\n parallel_iterations=128,\n dtype=tf.float32) # output = (batch, num_frames, lstm_size)\n\n # fc-linear\n outputs = tf.reshape(outputs, [-1, lstm_size])\n fc2 = tf.contrib.layers.fully_connected(outputs, feature_size, activation_fn=None)\n outputs = tf.reshape(fc2, [-1, sequence_length, feature_size])\n\n loss = tf.nn.l2_loss(outputs - output_sequence)\n\n dummy_pooled = tf.reduce_sum(model_input,axis=[1])\n dummy_output = slim.fully_connected(\n dummy_pooled, vocab_size, activation_fn=tf.nn.sigmoid,\n weights_regularizer=slim.l2_regularizer(1e-8))\n\n return {\"predictions\": dummy_output, \"loss\": loss}",
"def _build_model(self):\n # expand the input dimensions to match the required input shape\n # if using the predict model, the batch size is fixed to size 1\n if self._statefulness == False: \n sequence_state_shape = []\n sequence_state_shape.append((None,)+self._state_shape[0])\n sequence_state_shape.append((None, self._state_shape[1]))\n sequence_state_shape.append((None, self._state_shape[2]))\n \n conv_inputs = keras.Input(shape = sequence_state_shape[0])\n # phase_inputs = keras.Input(shape = sequence_state_shape[1])\n elapsed_time_inputs = keras.Input(shape = sequence_state_shape[2])\n else:\n batch_state_shape = []\n batch_state_shape.append((1,1)+ self._state_shape[0])\n batch_state_shape.append((1,1, self._state_shape[1]))\n batch_state_shape.append((1,1, self._state_shape[2]))\n \n conv_inputs = keras.Input(batch_shape = batch_state_shape[0])\n # phase_inputs = keras.Input(batch_shape = batch_state_shape[1])\n elapsed_time_inputs = keras.Input(batch_shape = batch_state_shape[2])\n \n #conv layers \n c1 = layers.TimeDistributed(layers.Conv2D(filters = 4, kernel_size = 2, strides = (2,2), padding = \"same\", activation = 'relu'))(conv_inputs)\n c2 = layers.TimeDistributed(layers.Conv2D(filters = 8, kernel_size = 2, strides = (2,2), padding = \"same\", activation = 'relu'))(c1)\n flat = layers.TimeDistributed(layers.Flatten())(c2)\n \n #combine elapsed time and green time layer\n # combined_green = layers.concatenate([phase_inputs, elapsed_time_inputs])\n # green_dense = layers.TimeDistributed(layers.Dense(10, activation='relu'))(combined_green)\n \n #combine green layer with conv layer, LSTM and output \n # all_combined = layers.concatenate([green_dense, flat])\n all_combined = layers.concatenate([elapsed_time_inputs, flat])\n lstm = layers.LSTM(96, activation='tanh', return_sequences=True, stateful = self._statefulness)(all_combined)\n dense = layers.Dense(32, activation='relu')(lstm)\n dense = layers.Dense(16, activation='relu')(dense)\n outputs = layers.Dense(self._output_dim, activation='linear')(dense)\n \n # model = keras.Model(inputs = [conv_inputs, phase_inputs, elapsed_time_inputs], outputs = outputs, name='CNN_with_LSTM') \n model = keras.Model(inputs = [conv_inputs, elapsed_time_inputs], outputs = outputs, name='CNN_with_LSTM') \n model.compile(loss=losses.mean_squared_error, optimizer=Adam(lr=self._learning_rate))\n \n return model",
"def build_model(hype_space):\n print(\"Hyperspace:\")\n print(hype_space)\n\n input = Input(shape=(MAXLEN_SEQ, int(hype_space['embed_dim']) ))\n\n profiles_input = Input(shape=(MAXLEN_SEQ, NB_FEATURES,))\n x1 = concatenate([input, profiles_input])\n x2 = concatenate([input, profiles_input])\n inp = [input, profiles_input]\n\n x1 = Dense(1200, activation=\"relu\")(x1)\n x1 = Dropout(0.5)(x1)\n\n # x1 = Bidirectional(CuDNNGRU(units=100, return_sequences=True))(x1)\n # Defining a bidirectional LSTM using the embedded representation of the inputs\n x2 = Bidirectional(CuDNNGRU(units=500, return_sequences=True))(x2)\n # x2 = Dropout(0.5)(x2)\n x2 = Bidirectional(CuDNNGRU(units=100, return_sequences=True))(x2)\n # x2 = Dropout(0.5)(x2)\n COMBO_MOVE = concatenate([x1, x2])\n w = Dense(500, activation=\"relu\")(COMBO_MOVE) # try 500\n w = Dropout(0.4)(w)\n w = tcn.TCN(return_sequences=True)(w)\n\n y = TimeDistributed(Dense(NB_CLASSES_Q8, activation=\"softmax\"))(w)\n\n # Defining the model as a whole and printing the summary\n model = Model(inp, y)\n # model.summary()\n\n # Setting up the model with categorical x-entropy loss and the custom accuracy function as accuracy\n adamOptimizer = Adam(lr=0.001, beta_1=0.8, beta_2=0.8, epsilon=None, decay=0.0001, amsgrad=False)\n model.compile(optimizer=adamOptimizer, loss=\"categorical_crossentropy\", metrics=[accuracy])\n\n return model",
"def BuildKerasModel(\n sequence_length: int, num_classes: int, lstm_size: int, num_layers: int,\n dnn_size: int, atomizer: atomizers.AtomizerBase):\n code_in = keras.layers.Input(\n shape=(sequence_length,), dtype='int32', name='code_in')\n x = keras.layers.Embedding(\n # Note the +1 on atomizer.vocab_size to accommodate the padding character.\n input_dim=atomizer.vocab_size + 1, input_length=sequence_length,\n output_dim=lstm_size, name='embedding')(code_in)\n for i in range(num_layers):\n x = keras.layers.LSTM(\n lstm_size, implementation=1, return_sequences=True,\n go_backwards=not i)(x)\n x = keras.layers.LSTM(lstm_size, implementation=1)(x)\n x = keras.layers.Dense(dnn_size, activation='relu')(x)\n outs = [\n keras.layers.Dense(1, activation='sigmoid',\n name=reachability.NumberToLetters(i))(x)\n for i in range(num_classes)\n ]\n\n model = keras.models.Model(input=code_in, outputs=outs)\n model.compile(loss='binary_crossentropy', metrics=['accuracy'],\n optimizer='adam')\n model.summary()\n return model",
"def __init__(self, input_dim, hidden_dim, output_dim):\r\n super(LstmEstimator, self).__init__()\r\n \r\n # The LSTM takes track features as inputs, and outputs hidden states\r\n # with dimensionality hidden_dim\r\n self.lstm = nn.LSTM(input_dim, hidden_dim)\r\n \r\n self.hidden2target = nn.Linear(hidden_dim, output_dim)",
"def build_lstm(self, keep_prob):\n def get_cell():\n if self.kernel == 'LSTM':\n cell = tf.contrib.rnn.BasicLSTMCell(self.num_hidden_units)\n print('LSTM is using...')\n elif self.kernel == 'GRU': # GRU RNN\n cell = tf.contrib.rnn.GRUCell(self.num_hidden_units)\n print('GRU is using...')\n else:\n raise AttributeError\n cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)\n return cell\n lstm_cell = get_cell()\n init_state = lstm_cell.zero_state(self.batch_size, dtype=tf.float32)\n return lstm_cell, init_state"
] | [
"0.7327266",
"0.7019922",
"0.69401413",
"0.6916434",
"0.6855566",
"0.6760684",
"0.6749975",
"0.66563517",
"0.6652487",
"0.6617321",
"0.6601682",
"0.6585494",
"0.6581302",
"0.65048313",
"0.6483536",
"0.64814115",
"0.6456501",
"0.6426442",
"0.63948613",
"0.6370045",
"0.63666123",
"0.63460505",
"0.63441384",
"0.63113904",
"0.62833935",
"0.62701595",
"0.62634385",
"0.6255065",
"0.62522626",
"0.6235265"
] | 0.70667225 | 1 |
Builds and compiles a GRU model with the provided hyperparameters | def build_gru_model(num_features,
embedding_size=None,
kernel_size=None,
filters=None,
pool_size=None,
gru_output_size=None):
# Embedding
if embedding_size is None:
embedding_size = 64
# Convolution
if kernel_size is None:
kernel_size = 5
if filters is None:
filters = 64
if pool_size is None:
pool_size = 4
# GRU
if gru_output_size is None:
gru_output_size = 70
print('Build model...')
gru_model = models.gru(num_features,
embedding_size=embedding_size,
kernel_size=kernel_size,
filters=filters,
pool_size=pool_size,
gru_output_size=gru_output_size)
return gru_model | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compile_gru_model(input_dim=101, output_dim=4563, recur_layers=3, nodes=1000,\n conv_context=11, conv_border_mode='valid', conv_stride=2,\n initialization='glorot_uniform', batch_norm=True, num_gpu=1):\n logger.info(\"Building gru model\")\n # Main acoustic input\n acoustic_input = Input(shape=(None, input_dim), name='acoustic_input')\n\n # Setup the network\n #conv_1d = Conv1D(nodes, conv_context, name='conv_1d',\n # padding='same', strides=conv_stride,\n # kernel_initializer=initialization,\n # activation='relu')(acoustic_input)\n conv_1d = Convolution1D(nodes, conv_context, name='conv1d',\n border_mode=conv_border_mode,\n subsample_length=conv_stride, init=initialization,\n activation='relu')(acoustic_input)\n if batch_norm:\n output = normalization.BatchNormalization(name='bn_conv_1d')(conv_1d, training=True)\n else:\n output = conv_1d\n\n for r in range(recur_layers):\n # output = GRU(nodes, activation='relu',\n # name='rnn_{}'.format(r + 1), init=initialization,\n # return_sequences=True)(output)\n output = Bidirectional(GRU(nodes, return_sequences=True),name='bi_lstm_{}'.format(r + 1))(output)\n if batch_norm:\n bn_layer = normalization.BatchNormalization(name='bn_rnn_{}'.format(r + 1),moving_mean_initializer='zeros')\n output = bn_layer(output, training=True)\n\n network_output = TimeDistributed(Dense(\n output_dim+1, name='dense', activation='softmax', init=initialization,\n ))(output)\n model = Model(input=acoustic_input, output=network_output)\n #model.conv_output_length = lambda x: conv_output_length(\n # x, conv_context, conv_border_mode, conv_stride)\n # model = ParallelModel(model, num_gpu)\n return model",
"def compile_model(network):\n # Get our network parameters.\n max_depth = network['max_depth']\n base_score = network['base_score']\n colsample_bylevel = network['colsample_bylevel']\n colsample_bytree = network['colsample_bytree']\n gamma = network['gamma']\n learning_rate = network['learning_rate']\n min_child_weight = network['min_child_weight']\n tree_method = network['tree_method']\n\n model = xgb.XGBRegressor(nthread=-1, n_estimators=5000,\n # booster=booster,\n max_depth=max_depth,\n base_score=base_score,\n colsample_bylevel=colsample_bylevel,\n colsample_bytree=colsample_bytree,\n gamma=gamma,\n learning_rate=learning_rate,\n min_child_weight=min_child_weight,\n tree_method=tree_method)\n\n return model",
"def model_create_gru(input_dim, output_dim, n_features, n_houses, x_train, y_train, x_test, y_test, early=None):\r\n model = Sequential()\r\n for _ in range(nn_hparams['num_layers']):\r\n model.add(GRU(nn_hparams['units'], activation=nn_hparams['activation'], input_shape=(input_dim,n_features), return_sequences=True))\r\n model.add(Dropout(nn_hparams['dropout']))\r\n model.add(Flatten())\r\n model.add(Dense(y_train.shape[1]*y_train.shape[2]))\r\n custom_optimizer = getattr(optimizers, nn_hparams['optimizer'])(lr=nn_hparams['learning_rate'], beta_1=nn_hparams['beta_1'], beta_2=nn_hparams['beta_2'])\r\n model.compile(optimizer=custom_optimizer, loss=nn_hparams['loss'])\r\n y_train = y_train.reshape((y_train.shape[0], y_train.shape[1]*y_train.shape[2]))\r\n y_test = y_test.reshape((y_test.shape[0], y_test.shape[1]*y_test.shape[2]))\r\n if early:\r\n model.fit(x_train, y_train, validation_data=(x_test,y_test), epochs=nn_hparams['epochs'], verbose=1, callbacks=[early])\r\n else:\r\n model.fit(x_train, y_train, validation_data=(x_test,y_test), epochs=nn_hparams['epochs'], verbose=1)\r\n model_loss = model.evaluate(x_train, y_train, verbose=0)\r\n \r\n return model, model_loss",
"def build_model(self, filtered_model_params: Dict) -> XGBRegressor:\n model = XGBRegressor(\n max_depth=filtered_model_params[\"max_depth\"],\n learning_rate=filtered_model_params[\"learning_rate\"],\n n_estimators=filtered_model_params[\"n_estimators\"],\n verbosity=filtered_model_params[\"verbosity\"],\n # objective=filtered_model_params[\"objective\"],\n booster=filtered_model_params[\"booster\"],\n tree_method=filtered_model_params[\"tree_method\"],\n n_jobs=filtered_model_params[\"n_jobs\"],\n gamma=filtered_model_params[\"gamma\"],\n min_child_weight=filtered_model_params[\"min_child_weight\"],\n max_delta_step=filtered_model_params[\"max_delta_step\"],\n subsample=filtered_model_params[\"subsample\"],\n colsample_bytree=filtered_model_params[\"colsample_bytree\"],\n colsample_bylevel=filtered_model_params[\"colsample_bylevel\"],\n colsample_bynode=filtered_model_params[\"colsample_bynode\"],\n reg_alpha=filtered_model_params[\"reg_alpha\"],\n reg_lambda=filtered_model_params[\"reg_lambda\"],\n scale_pos_weight=filtered_model_params[\"scale_pos_weight\"],\n base_score=filtered_model_params[\"base_score\"],\n random_state=filtered_model_params[\"random_state\"],\n # missing=np.nan,\n num_parallel_tree=filtered_model_params[\"num_parallel_tree\"],\n # monotone_constraints=filtered_model_params[\"monotone_constraints\"],\n # interaction_constraints=filtered_model_params[\"interaction_constraints\"],\n importance_type=filtered_model_params[\"importance_type\"]\n )\n return model",
"def __init__(\n self,\n input_size,\n hidden_size,\n num_layers=1,\n bidirectional=False,\n dropout=0,\n **kwargs\n ):\n super(GRU, self).__init__(\n 'gru', input_size, hidden_size,\n num_layers, bidirectional, dropout, **kwargs\n )",
"def build_model(self):\n input_pencil = tf.keras.Input((128,128,3))\n # generator's output\n gen_image = self.gan_generator.model(input_pencil)\n # generator's output\n x = self.gan_discriminator.model([input_pencil,gen_image])\n model = tf.keras.Model(input_pencil,[x,gen_image])\n # compiling the model\n model.compile(loss=['hinge', 'mae'], optimizer = self.optimizer,loss_weights=[1,100], metrics=['accuracy'])\n self.model = model",
"def model_build(self):\n\n # Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!\n X_input = Input(self.inputData[0].shape)\n\n '''\n # CONV -> BN -> RELU Block applied to X\n X = Conv2D(8, (8, 8), name='conv0')(X_input)\n X = BatchNormalization(name='bn0')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool0')(X)\n X = Dropout(0.1, name='dropout0')(X)\n\n X = Conv2D(16, (16, 16), name='conv1')(X)\n X = BatchNormalization(name='bn1')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool1')(X)\n X = Dropout(0.1, name='dropout1')(X)\n\n X = Conv2D(16, (32, 32), name='conv2')(X)\n X = BatchNormalization(name='bn2')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool2')(X)\n X = Dropout(0.1, name='dropout2')(X)\n' '''\n\n X = Dense(500, activation='relu', name='fc0')(X_input)\n X = Dropout(0.1, name='dropout1')(X)\n X = Dense(500, activation='relu', name='fc1')(X)\n X = Dropout(0.1, name='dropout2')(X)\n X = Dense(3, activation='softmax', name='fc2')(X)\n\n # Create model. This creates your Keras model instance, you'll use this instance to train/test the model.\n self.model = Model(inputs=X_input, outputs=X, name='acouModel')",
"def build_model(hyperparameters):\r\n model = keras.Sequential()\r\n\r\n model.add(layers.BatchNormalization(input_shape=[hyperparameters['input_size']]))\r\n model.add(layers.Dense(hyperparameters['nodes'], activation='relu'))\r\n model.add(layers.BatchNormalization())\r\n model.add(layers.Dropout(hyperparameters['dropout_value']))\r\n model.add(layers.Dense(hyperparameters['nodes'], activation='sigmoid'))\r\n model.add(layers.BatchNormalization())\r\n model.add(layers.Dropout(hyperparameters['dropout_value']))\r\n model.add(layers.Dense(hyperparameters['nodes'], activation='relu'))\r\n model.add(layers.BatchNormalization())\r\n model.add(layers.Dropout(hyperparameters['dropout_value']))\r\n model.add(layers.Dense(hyperparameters['nodes'], activation='sigmoid'))\r\n model.add(layers.BatchNormalization())\r\n model.add(layers.Dropout(hyperparameters['dropout_value']))\r\n model.add(layers.Dense(5, activation='softmax'))\r\n\r\n model.compile(optimizer=keras.optimizers.Adam(learning_rate=hyperparameters['learning_rate']),\r\n loss='categorical_crossentropy',\r\n metrics=['categorical_accuracy'])\r\n\r\n return model",
"def build_model(cls, args):\n base_architecture(args) \n return StyleGANGeneratorPretrain(args)",
"def build_model(data_tensor, reuse, training):\n with tf.variable_scope('cnn', reuse=reuse):\n with tf.variable_scope('input', reuse=reuse):\n conv_aux = {\n 'pretrained': os.path.join(\n 'weights',\n 'gabors_for_contours_7.npy'),\n 'pretrained_key': 's1',\n 'nonlinearity': 'square'\n }\n x = conv.conv_layer(\n bottom=data_tensor,\n name='gabor_input',\n stride=[1, 1, 1, 1],\n padding='SAME',\n trainable=training,\n use_bias=True,\n aux=conv_aux)\n layer_hgru = hgru.hGRU(\n 'hgru_1',\n x_shape=x.get_shape().as_list(),\n timesteps=8,\n h_ext=15,\n strides=[1, 1, 1, 1],\n padding='SAME',\n # aux={'gamma': False},\n train=training)\n h2 = layer_hgru.build(x)\n\n with tf.variable_scope('readout_1', reuse=reuse):\n activity = conv.conv_layer(\n bottom=h2,\n name='pre_readout_conv',\n num_filters=2,\n kernel_size=1,\n trainable=training,\n use_bias=False)\n pool_aux = {'pool_type': 'max'}\n activity = pooling.global_pool(\n bottom=activity,\n name='pre_readout_pool',\n aux=pool_aux)\n activity = normalization.batch(\n bottom=activity,\n name='readout_1_bn',\n training=training)\n\n with tf.variable_scope('readout_2', reuse=reuse):\n activity = tf.layers.flatten(\n activity,\n name='flat_readout')\n activity = tf.layers.dense(\n inputs=activity,\n units=2)\n return activity, h2",
"def build_model():\n pretrained_model = VGG16(input_shape=(fixed_size[0], fixed_size[1], 3), weights='imagenet', include_top=False)\n # We will not train the layers imported.\n for layer in pretrained_model.layers:\n layer.trainable = False\n transfer_learning_model = Sequential()\n transfer_learning_model.add(pretrained_model)\n transfer_learning_model.add(Flatten())\n transfer_learning_model.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))\n transfer_learning_model.add(Dropout(0.5))\n transfer_learning_model.add(Dense(3, activation='softmax'))\n transfer_learning_model.summary()\n opt = Adam(learning_rate=.0003)\n transfer_learning_model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n return transfer_learning_model",
"def build_model(self):\n self.g12 = G12(conv_dim=self.g_conv_dim)\n init_weights(self.g12, init_type='normal')\n self.g21 = G21(conv_dim=self.g_conv_dim)\n init_weights(self.g21, init_type='normal')\n self.d1 = D1(conv_dim=self.d_conv_dim, use_labels=self.use_labels)\n init_weights(self.d1, init_type='normal')\n self.d2 = D2(conv_dim=self.d_conv_dim, use_labels=self.use_labels)\n init_weights(self.d2, init_type='normal')\n self.dreid = DSiamese(class_count=self.num_classes_market)\n\n g_params = list(self.g12.parameters()) + list(self.g21.parameters())\n d_params = list(self.d1.parameters()) + list(self.d2.parameters())\n dr_params = list(self.dreid.parameters())\n\n self.g_optimizer = optim.Adam(g_params, self.lr, [self.beta1, self.beta2])\n self.d_optimizer = optim.Adam(d_params, self.lr, [self.beta1, self.beta2])\n self.dr_optimizer = optim.Adam(dr_params, self.lr, [self.beta1, self.beta2])\n\n if torch.cuda.is_available():\n self.g12.cuda()\n self.g21.cuda()\n self.d1.cuda()\n self.d2.cuda()\n self.dreid.cuda()",
"def build_model(self):\n self.G = Generator(self.g_conv_dim)\n self.D = Discriminator(self.d_conv_dim, self.c_dim)\n self.generator = Generator(self.g_conv_dim).train(False)\n\n self.G = nn.DataParallel(self.G)\n self.D = nn.DataParallel(self.D)\n\n # For Adam (Unofficial)\n # self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr, [self.beta1, self.beta2])\n # self.d_optimizer = torch.optim.Adam(self.D.parameters(), self.d_lr, [self.beta1, self.beta2])\n\n # For RMSprop(Official)\n self.g_optimizer = torch.optim.RMSprop(self.G.parameters(), lr=0.0001)\n self.d_optimizer = torch.optim.RMSprop(self.D.parameters(), lr=0.0001)\n\n self.accumulate(self.generator, self.G.module, 0)\n # self.print_network(self.G, 'G')\n # self.print_network(self.D, 'D')\n \n self.G.to(self.device)\n self.D.to(self.device)\n self.generator.to(self.device)\n\n # weight init\n self.G.apply(self.weights_init)\n self.D.apply(self.weights_init)\n self.generator.apply(self.weights_init)",
"def build_model():\n with tf.name_scope('placeholders'):\n real_data_int = tf.placeholder(tf.int32, shape=[None, picture_size])\n x_true = 2 * ((tf.cast(real_data_int, tf.float32) / 255.) - .5)\n z = tf.placeholder(tf.float32, [None, input_dim])\n if use_JL:\n JL = tf.placeholder(tf.float32, [d_last_layer_size, JL_dim])\n P_non_normalized = tf.placeholder(tf.float32, [JL_dim, n_projections])\n P_non_normalized_SWD = tf.placeholder(tf.float32, [picture_size, n_projections])\n else:\n JL = None\n P_non_normalized = tf.placeholder(tf.float32, [d_last_layer_size, n_projections])\n P_non_normalized_SWD = tf.placeholder(tf.float32, [picture_size, n_projections])\n\n x_generated = generator(z, n_features_first=n_features_first_g,\n n_features_reduction_factor=n_features_reduction_factor, min_features=64,\n BN=BN, power=power, extra_layer=extra_layer_g,\n init_method=init_method, n_features_image=n_features_image)\n\n d_pred_true, d_last_true = discriminator(x_true, reuse=False, n_features_last=n_features_last_d,\n n_features_increase_factor=n_features_reduction_factor,\n min_features=min_features, d_BN=d_BN, power=power,\n n_features_image=n_features_image, init_method=init_method)\n d_pred_gen, d_last_gen = discriminator(x_generated, reuse=True, n_features_last=n_features_last_d,\n n_features_increase_factor=n_features_reduction_factor,\n min_features=min_features, d_BN=d_BN, power=power,\n n_features_image=n_features_image, init_method=init_method)\n\n # define generator loss (big part taken from SWG)\n with tf.name_scope('g_loss'):\n # apply the Johnson-Lindenstrauss map, if wanted, to the flattened array\n if use_JL:\n JL_true = tf.matmul(d_last_true, JL)\n JL_gen = tf.matmul(d_last_gen, JL)\n else:\n JL_true = d_last_true\n JL_gen = d_last_gen\n\n # next project the samples (images). After being transposed, we have tensors\n # of the format: [[projected_image1_proj1, projected_image2_proj1, ...],\n # [projected_image1_proj2, projected_image2_proj2, ...],...]\n # Each row has the projections along one direction. This makes it easier for the sorting that follows.\n # first normalize the random normal vectors to lie in the sphere\n P = tf.nn.l2_normalize(P_non_normalized, axis=0)\n\n projected_true = tf.transpose(tf.matmul(JL_true, P))\n projected_fake = tf.transpose(tf.matmul(JL_gen, P))\n\n sorted_true, true_indices = tf.nn.top_k(input=projected_true, k=batch_size)\n sorted_fake, fake_indices = tf.nn.top_k(input=projected_fake, k=batch_size)\n\n # For faster gradient computation, we do not use sorted_fake to compute\n # loss. 
Instead we re-order the sorted_true so that the samples from the\n # true distribution go to the correct sample from the fake distribution.\n\n # It is less expensive (memory-wise) to rearrange arrays in TF.\n # Flatten the sorted_true from dim [n_projections, batch_size].\n flat_true = tf.reshape(sorted_true, [-1])\n\n # Modify the indices to reflect this transition to an array.\n # new index = row + index\n rows = np.asarray([batch_size * np.floor(i * 1.0 / batch_size) for i in range(n_projections * batch_size)])\n rows = rows.astype(np.int32)\n flat_idx = tf.reshape(fake_indices, [-1, 1]) + np.reshape(rows, [-1, 1])\n\n # The scatter operation takes care of reshaping to the rearranged matrix\n shape = tf.constant([batch_size * n_projections])\n rearranged_true = tf.reshape(tf.scatter_nd(flat_idx, flat_true, shape), [n_projections, batch_size])\n\n generator_loss = tf.reduce_mean(tf.square(projected_fake - rearranged_true))\n\n # get the sliced Wasserstein distance (SWD) (since SWD and JLSWD are not comparable)\n with tf.name_scope('SWD'):\n P_SWD = tf.nn.l2_normalize(P_non_normalized_SWD, axis=0)\n\n projected_true_SWD = tf.transpose(tf.matmul(x_true, P_SWD))\n projected_fake_SWD = tf.transpose(tf.matmul(x_generated, P_SWD))\n\n sorted_true_SWD, true_indices_SWD = tf.nn.top_k(input=projected_true_SWD, k=batch_size)\n sorted_fake_SWD, fake_indices_SWD = tf.nn.top_k(input=projected_fake_SWD, k=batch_size)\n\n flat_true_SWD = tf.reshape(sorted_true_SWD, [-1])\n flat_idx_SWD = tf.reshape(fake_indices_SWD, [-1, 1]) + np.reshape(rows, [-1, 1])\n\n rearranged_true_SWD = tf.reshape(tf.scatter_nd(flat_idx_SWD, flat_true_SWD, shape),\n [n_projections, batch_size])\n\n SWD = tf.reduce_mean(tf.square(projected_fake_SWD - rearranged_true_SWD))\n\n # define the discriminator loss\n with tf.name_scope('d_loss'):\n d_true_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_pred_true), logits=d_pred_true)\n d_fake_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d_pred_gen), logits=d_pred_gen)\n discriminator_loss = tf.reduce_mean(d_true_loss + d_fake_loss)\n\n with tf.name_scope('g_optimizer'):\n generator_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')\n g_optimizer = tf.train.AdamOptimizer(learning_rate, beta1=0.5)\n g_train = g_optimizer.minimize(generator_loss, var_list=generator_vars)\n\n with tf.name_scope('d_optimizer'):\n discriminator_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')\n d_optimizer = tf.train.AdamOptimizer(learning_rate, beta1=0.5)\n d_train = d_optimizer.minimize(discriminator_loss, var_list=discriminator_vars)\n\n return real_data_int, z, x_generated, JL, P_non_normalized, P_non_normalized_SWD, SWD, g_train, d_train",
"def build_model():\n # noise for soise sampling in NCE\n noise = build_unigram_noise(\n torch.FloatTensor(corpus.vocab.idx2count)\n )\n\n norm_term = 'auto' if args.norm_term == -1 else args.norm_term\n # setting up NCELoss modules\n if args.index_module == 'linear':\n criterion = IndexLinear(\n args.emsize,\n ntoken,\n noise=noise,\n noise_ratio=args.noise_ratio,\n norm_term=norm_term,\n loss_type=args.loss,\n reduction='none',\n )\n model = RNNModel(\n ntoken, args.emsize, args.nhid, args.nlayers,\n criterion=criterion, dropout=args.dropout,\n )\n elif args.index_module == 'gru':\n if args.nlayers != 1:\n logger.warning('Falling into one layer GRU due to Index_GRU supporting')\n nce_criterion = IndexGRU(\n ntoken, args.emsize, args.nhid,\n args.dropout,\n noise=noise,\n noise_ratio=args.noise_ratio,\n norm_term=norm_term,\n )\n model = GenModel(\n criterion=nce_criterion,\n )\n else:\n logger.error('The index module [%s] is not supported yet' % args.index_module)\n raise(NotImplementedError('index module not supported'))\n\n if args.cuda:\n model.cuda()\n\n logger.info('model definition:\\n %s', model)\n return model",
"def rnn_gru(\n nclass,\n input_shape=(187, 1),\n recurrent_layers=[64, 128],\n dense_layers=[64, 16],\n dropout=0.2,\n binary=False,\n):\n if not binary:\n loss = losses.sparse_categorical_crossentropy\n last_activation = activations.softmax\n else:\n loss = losses.binary_crossentropy\n last_activation = activations.sigmoid\n return_sequences = True\n\n inp = Input(shape=input_shape)\n x = inp\n for i, neurons in enumerate(recurrent_layers):\n x = GRU(neurons, return_sequences=return_sequences)(x)\n x = Dropout(rate=dropout)(x)\n return_sequences = False\n for i, neurons in enumerate(dense_layers):\n x = Dense(neurons, name=f\"dense_{i+1}\", activation=\"relu\")(x)\n x = Dense(nclass, name=\"Output\", activation=last_activation)(x)\n\n model = models.Model(inputs=inp, outputs=x)\n opt = optimizers.Adam(0.001)\n model.compile(optimizer=opt, loss=loss, metrics=[\"acc\"])\n model.summary()\n return model",
"def GRU(previous_hidden_state, x):\n # R Gate\n r = tf.sigmoid(tf.matmul(x, Wr) + \\\n tf.matmul(previous_hidden_state, Ur) + br)\n # U Gate\n u = tf.sigmoid(tf.matmul(x, Wu) + \\\n tf.matmul(previous_hidden_state, Uu) + bu)\n # Final Memory cell\n c = tf.tanh(tf.matmul(x, Wh) + \\\n tf.matmul( tf.multiply(r, previous_hidden_state), Uh) + bh)\n # Current Hidden state\n current_hidden_state = tf.multiply( (1 - u), previous_hidden_state ) + \\\n tf.multiply( u, c )\n return current_hidden_state",
"def __init__(\n self,\n model_type,\n num_features,\n num_classes,\n reparam_mode,\n prior_mode,\n latent_size,\n sample_size=1,\n num_layers=2,\n struct_dropout_mode=(\"standard\", 0.6),\n dropout=True,\n with_relu=True,\n val_use_mean=True,\n reparam_all_layers=True,\n normalize=True,\n is_cuda=False,\n ):\n super(GNN, self).__init__()\n self.model_type = model_type\n self.num_features = num_features\n self.num_classes = num_classes\n self.normalize = normalize\n self.reparam_mode = reparam_mode\n self.prior_mode = prior_mode\n self.struct_dropout_mode = struct_dropout_mode\n self.dropout = dropout\n self.latent_size = latent_size\n self.sample_size = sample_size\n self.num_layers = num_layers\n self.with_relu = with_relu\n self.val_use_mean = val_use_mean\n self.reparam_all_layers = reparam_all_layers\n self.is_cuda = is_cuda\n self.device = torch.device(self.is_cuda if isinstance(self.is_cuda, str) else \"cuda\" if self.is_cuda else \"cpu\")\n\n self.init()",
"def build_model(self):\n import tensorflow as tf\n \n y = tf.nn.relu(tf.matmul(self.variables[\"input_observation\"], self.variables[\"W1\"]) + \n self.variables[\"b1\"], name=\"y1\")\n \n for i in range(self.n_hidden-1):\n y = tf.nn.relu(tf.matmul(y, self.variables[\"W\"+str(i+2)]) + \n self.variables[\"b\"+str(i+2)], name=\"y\"+str(i+2))\n \n self.variables[\"y\"] = [tf.matmul(y, self.variables[\"Wo_0\"]) + self.variables[\"bo_0\"]]\n for i in range(1, len(self.output_size)):\n self.variables[\"y\"] += [tf.matmul(y, self.variables[\"Wo_%s\"%i]) + self.variables[\"bo_%s\"%i]]",
"def build_model_gurobipy(resite, params: Dict):\n\n from gurobipy import Model\n from resite.models.gurobipy_utils import minimize_deployed_capacity, capacity_bigger_than_existing, \\\n generation_bigger_than_load_proportion, create_generation_y_dict\n\n data = resite.data_dict\n load = data[\"load\"].values\n regions = resite.regions\n tech_points_tuples = list(resite.tech_points_tuples)\n time_slices = define_time_slices(params[\"time_resolution\"], resite.timestamps)\n\n model = Model()\n\n # - Parameters - #\n load_perc_per_region = dict(zip(regions, params[\"perc_per_region\"]))\n\n # - Variables - #\n # Portion of capacity at each location for each technology\n y = model.addVars(tech_points_tuples, lb=0., ub=1., name=lambda k: 'y_%s_%s_%s' % (k[0], k[1], k[2]))\n # Create generation dictionary for building speed up\n generation_potential_df = data[\"cap_factor_df\"] * data[\"cap_potential_ds\"]\n region_generation_y_dict = \\\n create_generation_y_dict(y, regions, resite.tech_points_regions_ds, generation_potential_df)\n\n # - Constraints - #\n # Impose a certain percentage of the load to be covered over each time slice\n generation_bigger_than_load_proportion(model, region_generation_y_dict, load, regions, time_slices,\n load_perc_per_region)\n # Percentage of capacity installed must be bigger than existing percentage\n existing_cap_percentage_ds = data[\"existing_cap_ds\"].divide(data[\"cap_potential_ds\"])\n capacity_bigger_than_existing(model, y, existing_cap_percentage_ds, tech_points_tuples)\n\n # - Objective - #\n # Minimize the capacity that is deployed\n obj = minimize_deployed_capacity(model, y, data[\"cap_potential_ds\"])\n\n resite.instance = model\n resite.y = y\n resite.obj = obj",
"def build_model(\n data_tensor,\n reuse,\n training,\n output_shape,\n data_format='NHWC'):\n if isinstance(output_shape, list):\n output_shape = output_shape[-1]\n elif isinstance(output_shape, dict):\n output_shape = output_shape['output']\n output_normalization_type = 'batch_norm_original'\n ff_kernel_size = (5, 5)\n ff_nl = tf.nn.elu\n data_tensor, long_data_format = tf_fun.interpret_data_format(\n data_tensor=data_tensor,\n data_format=data_format)\n\n # Build model\n with tf.variable_scope('gammanet', reuse=reuse):\n conv_aux = {\n 'pretrained': os.path.join(\n 'weights',\n 'gabors_for_contours_11.npy'),\n 'pretrained_key': 's1',\n 'nonlinearity': 'square'\n }\n activity = conv.conv_layer(\n bottom=data_tensor,\n name='gabor_input',\n stride=[1, 1, 1, 1],\n padding='SAME',\n trainable=training,\n use_bias=True,\n aux=conv_aux)\n layer_hgru = hgru.hGRU(\n 'hgru_1',\n x_shape=activity.get_shape().as_list(),\n timesteps=8,\n h_ext=15,\n strides=[1, 1, 1, 1],\n padding='SAME',\n aux={'reuse': False, 'constrain': False},\n train=training)\n h2 = layer_hgru.build(activity)\n h2 = normalization.batch_contrib(\n bottom=h2,\n name='hgru_bn',\n training=training)\n mask = np.load('weights/cardena_mask.npy')[None, :, :, None]\n activity = h2 * mask\n with tf.variable_scope('cv_readout', reuse=reuse):\n activity = tf.reduce_mean(activity, reduction_indices=[1, 2])\n activity = tf.layers.dense(activity, output_shape)\n if long_data_format is 'channels_first':\n activity = tf.transpose(activity, (0, 2, 3, 1))\n extra_activities = {\n }\n if activity.dtype != tf.float32:\n activity = tf.cast(activity, tf.float32)\n # return [activity, h_deep], extra_activities\n return activity, extra_activities",
"def create_model(self, model_input, vocab_size, num_frames, is_training=True, **unused_params):\n gru_size = FLAGS.gru_cells\n number_of_layers = FLAGS.gru_layers\n backward = FLAGS.gru_backward\n random_frames = FLAGS.gru_random_sequence\n iterations = FLAGS.iterations\n \n if random_frames:\n num_frames_2 = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)\n model_input = utils.SampleRandomFrames(model_input, num_frames_2,\n iterations)\n \n if backward:\n model_input = tf.reverse_sequence(model_input, num_frames, seq_axis=1) \n \n stacked_GRU = tf.contrib.rnn.MultiRNNCell(\n [\n tf.contrib.rnn.GRUCell(gru_size)\n for _ in range(number_of_layers)\n ], state_is_tuple=False)\n\n loss = 0.0\n with tf.variable_scope(\"RNN\"):\n outputs, state = tf.nn.dynamic_rnn(stacked_GRU, model_input,\n sequence_length=num_frames,\n dtype=tf.float32)\n\n aggregated_model = getattr(video_level_models,\n FLAGS.video_level_classifier_model)\n return aggregated_model().create_model(\n model_input=state,\n vocab_size=vocab_size,\n is_training=is_training,\n **unused_params)",
"def build_model(input_shape, X_train, arch=\"VGG16\", loss=\"sparse_categorical_crossentropy\", learning_rate=[0.0005, 0.0001, 0.00002]):\n # select model architecture\n if arch == \"VGG16\":\n model = models.VGG16(input_shape, num_layers=num_labels)\n elif arch = \"VGG16_twist\":\n model = models.VGG16_twst(input_shape, num_layers=num_labels)\n elif arch = \"VGG11\":\n model = VGG11(input_shape, X_train, num_layers=num_labels)\n\n # learning rate constant decay\n learning_rate_fn = keras.optimizers.schedules.PiecewiseConstantDecay(\n BOUNDARIES, learning_rate)\n\n model.summary()\n # compile model\n optimiser = tf.optimizers.Adam(learning_rate=learning_rate_fn)\n model.compile(optimizer=optimiser,\n # loss=loss,\n loss=tf.keras.losses.SparseCategoricalCrossentropy(),\n metrics=[\"accuracy\"])\n return model",
"def build_model(hype_space):\n print(\"Hyperspace:\")\n print(hype_space)\n\n input = Input(shape=(MAXLEN_SEQ, int(hype_space['embed_dim']) ))\n\n profiles_input = Input(shape=(MAXLEN_SEQ, NB_FEATURES,))\n x1 = concatenate([input, profiles_input])\n x2 = concatenate([input, profiles_input])\n inp = [input, profiles_input]\n\n x1 = Dense(1200, activation=\"relu\")(x1)\n x1 = Dropout(0.5)(x1)\n\n # x1 = Bidirectional(CuDNNGRU(units=100, return_sequences=True))(x1)\n # Defining a bidirectional LSTM using the embedded representation of the inputs\n x2 = Bidirectional(CuDNNGRU(units=500, return_sequences=True))(x2)\n # x2 = Dropout(0.5)(x2)\n x2 = Bidirectional(CuDNNGRU(units=100, return_sequences=True))(x2)\n # x2 = Dropout(0.5)(x2)\n COMBO_MOVE = concatenate([x1, x2])\n w = Dense(500, activation=\"relu\")(COMBO_MOVE) # try 500\n w = Dropout(0.4)(w)\n w = tcn.TCN(return_sequences=True)(w)\n\n y = TimeDistributed(Dense(NB_CLASSES_Q8, activation=\"softmax\"))(w)\n\n # Defining the model as a whole and printing the summary\n model = Model(inp, y)\n # model.summary()\n\n # Setting up the model with categorical x-entropy loss and the custom accuracy function as accuracy\n adamOptimizer = Adam(lr=0.001, beta_1=0.8, beta_2=0.8, epsilon=None, decay=0.0001, amsgrad=False)\n model.compile(optimizer=adamOptimizer, loss=\"categorical_crossentropy\", metrics=[accuracy])\n\n return model",
"def build_model(self):\r\n self.images, self.labels = self.dataloader.get_model_inputs()\r\n\r\n model = SimpleModel(self.images, self.labels, output_dim=F.output_dim, scope='source_regressor')\r\n self.out, _ = model.get_model()\r\n self.get_loss()",
"def build_model(self):\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n\n if self.config.optimizer == 'sgd':\n self.optimizer = tf.keras.optimizers.SGD(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'rms':\n self.optimizer = tf.keras.optimizers.RMSprop(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adam':\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adagrad':\n self.optimizer = tf.keras.optimizers.Adagrad(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adadelta':\n self.optimizer = tf.keras.optimizers.Adadelta(learning_rate=self.config.learning_rate)\n else:\n raise NotImplementedError(\"No support for %s optimizer\" % self.config.optimizer)\n \n if self.config.optimizer in ['rms', 'adagrad', 'adadelta']:\n with tf.device('cpu:0'):\n self.model.def_parameters()\n else:\n self.model.def_parameters()\n\n self.config.summary()\n self.config.summary_hyperparameter(self.model.model_name)",
"def build_model(train_inputs,train_labels,model_params,model_mode='classification',\n model_type='naive_bayes'):\n if model_mode == \"classification\":\n if model_type == \"naive_bayes\":\n model = GaussianNB()\n if model_type == \"knn\":\n model = KNeighborsClassifier(n_neighbors=50)\n if model_type == \"svm\":\n model = SVC(kernel='poly', degree =27, coef0 =1, C=5)\n if model_type == \"decision_tree\":\n model = DecisionTreeClassifier(min_samples_split=45,min_samples_leaf=45,criterion=\"gini\")\n #model = RandomForestClassifier(n_estimators=500, n_jobs=-1)\n\n if model_mode == \"regression\":\n if model_type == \"knn\":\n model = KNeighborsRegressor()\n if model_type == \"svm\":\n model = SVR()\n if model_type == \"decision_tree\":\n model = DecisionTreeRegressor()\n\n\n model.fit(train_inputs, train_labels)\n # for name, score in zip(train_inputs.columns,model.feature_importances_):\n # print(name, score)\n\n return model",
"def build_model(self):\n \n # initalizing generators\n self.g12 = G12(conv_dim=self.numGenFilter, domainA_channels = self.domainA_channels, domainB_channels = self.domainB_channels)\n self.g21 = G21(conv_dim=self.numGenFilter, domainA_channels = self.domainA_channels, domainB_channels = self.domainB_channels)\n \n # initializing discriminators\n self.d1 = D1(conv_dim=self.numDiscFilter, domainA_channels = self.domainA_channels, use_labels=self.use_labels)\n self.d2 = D2(conv_dim=self.numDiscFilter, domainB_channels = self.domainB_channels, use_labels=self.use_labels)\n \n\n g_params = list(self.g12.parameters()) + list(self.g21.parameters())\n d_params = list(self.d1.parameters()) + list(self.d2.parameters())\n \n self.g_optimizer = optim.Adam(g_params, self.lr, [self.beta1, self.beta2])\n self.d_optimizer = optim.Adam(d_params, self.lr, [self.beta1, self.beta2])\n \n if torch.cuda.is_available():\n self.g12.cuda()\n self.g21.cuda()\n self.d1.cuda()\n self.d2.cuda()",
"def _build_model(self):\n if self.weight_function is None:\n self.weight_function = default_weight_function\n\n tf.reset_default_graph()\n\n # Placeholders for the inputs\n self.x0 = tf.placeholder(\n shape=[None, self.num_features],\n dtype=self.dtype,\n name=\"x0\"\n )\n self.x1 = tf.placeholder(\n shape=[None, self.num_features],\n dtype=self.dtype,\n name=\"x1\"\n )\n # Placeholder for the real classes\n self.y0 = tf.placeholder(\n shape=[None, 1],\n dtype=self.dtype,\n name=\"y0\"\n )\n # Placeholder for the weights\n self.w0 = tf.placeholder(\n shape=[None, ],\n dtype=self.dtype,\n name=\"w0\"\n )\n\n # Drop placeholder\n self.should_drop = tf.placeholder(tf.bool, name=\"drop\")\n\n # Regularization\n regularizer = tf.keras.regularizers.l2(self.weight_regularization)\n\n # Input_Dropout\n in0 = tf.layers.dropout(inputs=self.x0,\n rate=self.input_dropout,\n training=self.should_drop\n )\n\n in1 = tf.layers.dropout(inputs=self.x1,\n rate=self.input_dropout,\n training=self.should_drop\n )\n\n # Constructing the feature creation part of the net\n nn0 = tf.layers.dense(\n inputs=in0,\n units=self.hidden_layers[0],\n activation=self.feature_activation,\n use_bias=self.feature_bias,\n kernel_initializer=self.kernel_initializer,\n kernel_regularizer=regularizer,\n name=\"nn_hidden_0\"\n )\n\n # By giving nn1 the same name as nn0 and using the flag reuse=True,\n # the weights and biases of all neurons in each branch are identical\n nn1 = tf.layers.dense(\n inputs=in1,\n units=self.hidden_layers[0],\n activation=self.feature_activation,\n use_bias=self.feature_bias,\n kernel_initializer=self.kernel_initializer,\n kernel_regularizer=regularizer,\n name=\"nn_hidden_0\",\n reuse=True\n )\n\n # Layer Dropout\n nn0 = tf.layers.dropout(inputs=nn0,\n rate=self.dropout,\n training=self.should_drop\n )\n nn1 = tf.layers.dropout(inputs=nn1,\n rate=self.dropout,\n training=self.should_drop\n )\n\n for i in range(1, len(self.hidden_layers)):\n nn0 = tf.layers.dense(\n inputs=nn0,\n units=self.hidden_layers[i],\n activation=self.feature_activation,\n use_bias=self.feature_bias,\n kernel_initializer=self.kernel_initializer,\n kernel_regularizer=regularizer,\n name=\"nn_hidden_\" + str(i)\n )\n nn1 = tf.layers.dense(\n inputs=nn1,\n units=self.hidden_layers[i],\n activation=self.feature_activation,\n use_bias=self.feature_bias,\n kernel_initializer=self.kernel_initializer,\n kernel_regularizer=regularizer,\n name=\"nn_hidden_\" + str(i),\n reuse=True\n )\n\n # Layer Dropout\n nn0 = tf.layers.dropout(inputs=nn0,\n rate=self.dropout,\n training=self.should_drop\n )\n nn1 = tf.layers.dropout(inputs=nn1,\n rate=self.dropout,\n training=self.should_drop\n )\n\n # Creating antisymmetric features for the ranking\n self.nn = (nn0 - nn1) / 2.\n\n self.nn = tf.layers.dense(\n inputs=self.nn,\n units=1,\n activation=self.ranking_activation,\n use_bias=False,\n kernel_initializer=self.kernel_initializer,\n kernel_regularizer=regularizer,\n name=\"nn_rank\"\n )\n\n self.nn_cls = tf.layers.dense(\n inputs=nn0 / 2.,\n units=1,\n activation=self.ranking_activation,\n use_bias=False,\n kernel_initializer=self.kernel_initializer,\n kernel_regularizer=regularizer,\n name=\"nn_rank\",\n reuse=True\n )\n\n nn_out = tf.identity(\n input=self.nn,\n name=\"nn\"\n )",
"def get_model_fn(num_gpus, variable_strategy, num_workers):\n\n def _hg_model_fn(features, labels, mode, params):\n \"\"\" HG model body.\n\n Support single host, one or more GPU training. Parameter distribution can\n be either one of the following scheme.\n 1. CPU is the parameter server and manages gradient updates.\n 2. Parameters are distributed evenly across all GPUs, and the first GPU\n manages gradient updates.\n\n Args:\n features: a list of tensors, one for each tower\n labels: a list of tensors, one for each tower\n mode: ModeKeys.TRAIN or EVAL\n params: Hyperparameters suitable for tuning\n Returns:\n A EstimatorSpec object.\n \"\"\"\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n weight_decay = params.weight_decay\n momentum = params.momentum\n decay_factor = params.decay_factor\n decay_step = params.decay_step\n init_learning_rate = params.init_learning_rate\n num_stacks = params.num_stacks\n num_joints = params.num_joints\n\n tower_features = features\n if mode == tf.estimator.ModeKeys.PREDICT:\n if num_gpus < 1:\n tower_labels = [None]\n else:\n tower_labels = [None for i in range(num_gpus)]\n else:\n tower_labels = labels\n\n tower_losses = []\n tower_gradvars = []\n tower_preds = []\n\n # channels first (NCHW) is normally optimal on GPU and channels last (NHWC)\n # on CPU. The exception is Intel MKL on CPU which is optimal with\n # channels_last.\n data_format = params.data_format\n if not data_format:\n if num_gpus == 0:\n data_format = 'channels_last'\n else:\n data_format = 'channels_first'\n\n if num_gpus == 0:\n num_devices = 1\n device_type = 'cpu'\n else:\n num_devices = num_gpus\n device_type = 'gpu'\n\n for i in range(num_devices):\n worker_device = '/{}:{}'.format(device_type, i)\n if variable_strategy == 'CPU':\n device_setter = utils.local_device_setter(\n worker_device=worker_device)\n elif variable_strategy == 'GPU':\n device_setter = utils.local_device_setter(\n ps_device_type='gpu',\n worker_device=worker_device,\n ps_strategy=tf.contrib.training.GreedyLoadBalancingStrategy(\n num_gpus, tf.contrib.training.byte_size_load_fn))\n if mode == tf.estimator.ModeKeys.TRAIN:\n batch_size = params.train_batch_size / num_devices\n else:\n batch_size = params.eval_batch_size / num_devices\n\n with tf.variable_scope('hg', reuse=bool(i != 0)):\n with tf.name_scope('tower_%d' % i) as name_scope:\n with tf.device(device_setter):\n loss, gradvars, preds = _tower_fn(\n mode, weight_decay, tower_features[i][0], tower_labels[i],\n data_format, params.batch_norm_decay,\n params.batch_norm_epsilon, params.num_stacks, params.num_out, params.n_low, params.num_joints, batch_size,params.seq_length)\n tower_losses.append(loss)\n tower_gradvars.append(gradvars)\n tower_preds.append(preds)\n if i == 0:\n # Only trigger batch_norm moving mean and variance update from\n # the 1st tower. 
Ideally, we should grab the updates from all\n # towers but these stats accumulate extremely fast so we can\n # ignore the other stats from the other towers without\n # significant detriment.\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,\n name_scope)\n\n if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:\n\n # Now compute global loss and gradients.\n gradvars = []\n with tf.name_scope('gradient_averaging'):\n all_grads = {}\n for grad, var in itertools.chain(*tower_gradvars):\n if grad is not None:\n all_grads.setdefault(var, []).append(grad)\n for var, grads in six.iteritems(all_grads):\n # Average gradients on the same device as the variables\n # to which they apply.\n with tf.device(var.device):\n if len(grads) == 1:\n avg_grad = grads[0]\n else:\n avg_grad = tf.multiply(tf.add_n(grads), 1. / len(grads))\n gradvars.append((avg_grad, var))\n\n # Device that runs the ops to apply global gradient updates.\n consolidation_device = '/gpu:0' if variable_strategy == 'GPU' else '/cpu:0'\n with tf.device(consolidation_device):\n\n learning_rate = tf.train.exponential_decay(init_learning_rate, tf.train.get_global_step(), decay_step, decay_factor, staircase=True, name= 'learning_rate')\n\n loss = tf.reduce_mean(tower_losses, name='loss')\n\n examples_sec_hook = utils.ExamplesPerSecondHook(\n params.train_batch_size, every_n_steps=10)\n\n tensors_to_log = {'learning_rate': learning_rate, 'loss': loss}\n\n logging_hook = tf.train.LoggingTensorHook(\n tensors=tensors_to_log, every_n_iter=100)\n\n train_hooks = [logging_hook, examples_sec_hook]\n\n optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate)\n\n if params.sync:\n optimizer = tf.train.SyncReplicasOptimizer(\n optimizer, replicas_to_aggregate=num_workers)\n sync_replicas_hook = optimizer.make_session_run_hook(params.is_chief)\n train_hooks.append(sync_replicas_hook)\n\n # Create single grouped train op\n train_op = [\n optimizer.apply_gradients(\n gradvars, global_step=tf.train.get_global_step())\n ]\n \n train_op.extend(update_ops)\n train_op = tf.group(*train_op)\n\n predictions = {\n 'heatmaps':\n tf.concat([p['heatmaps'] for p in tower_preds], axis=0),\n 'images':\n tf.concat([i for i in tower_features], axis=0)\n }\n if mode==tf.estimator.ModeKeys.EVAL:\n hm = predictions['heatmaps']\n stacked_labels = tf.concat(labels[0][0][0], axis=0)\n \n gt_labels = tf.transpose(stacked_labels,[1,0,3,4,2])\n\n joint_accur = []\n for j in range(params.seq_length):\n for i in range(params.num_joints):\n joint_accur.append(_pck_hm(hm[j,:,-1, :, :,i], gt_labels[j,:, :, :, i], params.eval_batch_size/num_devices))\n accuracy = tf.stack(joint_accur)\n metrics = {'Mean Pixel Error': tf.metrics.mean(accuracy)}\n tf.logging.info('Accuracy op computed')\n else:\n metrics = None\n \n else:\n train_op = None\n loss = None\n train_hooks = None\n metrics = None\n predictions = {\n 'heatmaps':\n tf.concat([p['heatmaps'] for p in tower_preds], axis=0),\n 'images':\n tf.concat([i for i in tower_features], axis=0)\n }\n \n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op,\n training_hooks=train_hooks,\n eval_metric_ops=metrics)\n\n return _hg_model_fn"
] | [
"0.73912466",
"0.66649204",
"0.6635141",
"0.6230592",
"0.6221744",
"0.61874807",
"0.61704993",
"0.61428016",
"0.6135985",
"0.61158645",
"0.6077031",
"0.6056821",
"0.60392755",
"0.60282505",
"0.5926871",
"0.5913023",
"0.5907081",
"0.5886617",
"0.58645344",
"0.58603835",
"0.58461463",
"0.58201605",
"0.58162594",
"0.5810863",
"0.580266",
"0.58007926",
"0.58003867",
"0.57909703",
"0.5790854",
"0.5783487"
] | 0.7483657 | 0 |
Create bitmap from given unicode character, return image file object. | def create_unicode_image(unicode_character):
# Check the cache
if unicode_character in unicode_cache.keys():
return unicode_cache[unicode_character]
# Initialize canvas and font parameters
# Credit: JackNova (until URL)
width = 10
height = 20
    background_color = (0, 0, 0)
    font_size = 20
    font_color = (255, 255, 255)
unicode_text = unicode_character
    im = Image.new("RGB", (width, height), background_color)
    draw = ImageDraw.Draw(im)
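    # Render the character in white on the black canvas using the bundled Hack font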
unicode_font = ImageFont.truetype("Hack-Regular.ttf", font_size)
    draw.text((0, 0), unicode_text, font=unicode_font, fill=font_color)
# https://stackoverflow.com/a/22612295
# Return the image as a file object
unicode_file = BytesIO()
im.save(unicode_file, format='PNG')
    # Cache the character bitmap
unicode_cache[unicode_character] = unicode_file
return unicode_file | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fromascii(self, *args, **kwargs):\n return _image.image_fromascii(self, *args, **kwargs)",
"def get_tile_bitmap(self, char):\n if char == '#':\n return self.tiles[0:32, 0:32, :]\n elif char == 'b':\n return self.tiles[0:32, 128:160, :]\n elif char == 'd':\n return self.tiles[64:96, 128:160, :]\n elif char == 'w':\n return self.tiles[96:128, 128:160, :]\n elif char == 'a':\n return self.tiles[96:128, 160:192, :]\n elif char == 'q':\n return self.tiles[32:64, 128:160, :]\n elif char == 'p':\n return self.tiles[64:96, 192:224, :]\n elif char == 'x':\n return self.tiles[128:160, 128:160, :]\n elif char == 'y':\n return self.tiles[192:224, 96:128, :]\n elif char == 'z':\n return self.tiles[160:192, 96:128, :]\n elif char == 'm':\n return self.tiles[96:128, 224:256, :]\n elif char == 's':\n return self.tiles[32:64, 0:32, :]\n else:\n return self.tiles[32:64, 64:96, :]",
"def decode_file(source, palette):\n\n (CHRStart, CHRSize) = get_CHR_data_position(source)\n charRowCount = CHRSize // 256 # 16 characters/row\n\n img = Image.new(\"P\", (128, charRowCount * 8), 0)\n img.putpalette(itertools.chain.from_iterable(palette))\n\n source.seek(CHRStart)\n for (y, pixelRow) in enumerate(decode_pixel_rows(source, charRowCount)):\n for (x, value) in enumerate(pixelRow):\n img.putpixel((x, y), value)\n\n return img",
"def generate(self, chars, format='png'):\n im = self.generate_image(chars)\n out = BytesIO()\n im.save(out, format=format)\n out.seek(0)\n return out",
"def char_image(self, chars: str, font_path: Path, filter_: str = 'median') -> Image:\n target = chars[1]\n params = self.font_params.get(font_path.stem, {})\n\n chars = [params.get(c, c)[-1] for c in chars]\n chars = ''.join(chars)\n\n size_high = params.get('pt', 42) + 1\n size_low = size_high - 4\n font_size = randint(size_low, size_high)\n\n font = ImageFont.truetype(str(font_path), size=font_size)\n size = font.getsize(chars)\n size = ImageSize(size[0], size[1])\n\n image = Image.new('L', CONTEXT_SIZE, color='black')\n\n left = (CONTEXT_SIZE.width - size.width) // 2\n left = left if left > 0 else 0\n\n top = (CONTEXT_SIZE.height - size.height) // 2\n top = top if top > 0 else 0\n\n draw = ImageDraw.Draw(image)\n draw.text((left, top), chars, font=font, fill='white')\n\n soot = params.get('soot', 0.3)\n soot = params.get('soot_sm', soot) if target in TINY_PUNCT else soot\n image = add_soot(image, soot)\n\n filter_ = params.get('filter', filter_)\n image = filter_image(image, filter_)\n\n image = image.point(lambda x: ON if x > 128 else OFF)\n\n return image",
"def print_image_as_unicode(image_file, **kwargs):\n char_set = kwargs['char_set']\n x256_mode = kwargs['x256']\n height = 20 # height of unicode character\n width = 10 # width of the unicode characters we are using\n # Credit ElTero and ABM (https://stackoverflow.com/a/7051075)\n if image_file == '-':\n source = sys.stdin.buffer\n image_file = BytesIO()\n image_file.write(source.read())\n im = Image.open(image_file)\n imgwidth, imgheight = im.size\n\n for row in range(imgheight//height):\n last_avg_color = np.array([0,0,0])\n for column in range(imgwidth//width):\n box = (column*width, row*height, (column+1)*width, (row+1)*height)\n cropped = im.crop(box)\n lowest_value = 100000\n lowest_unicode = None\n for unicode in char_set:\n unicode = chr(unicode)\n dissimilarity = compare(create_unicode_image(unicode), cropped)\n if dissimilarity < lowest_value:\n lowest_value = dissimilarity\n lowest_unicode = unicode\n if x256_mode:\n # Credit: Ruan B. (until URL)\n avg_color_per_row = np.average(cropped, axis=0)\n avg_color = np.average(avg_color_per_row, axis=0)[:3]\n x256_color = str(x256.from_rgb(*avg_color))\n # https://stackoverflow.com/a/43112217\n composite_color = np.average(np.array([avg_color,\n last_avg_color]),\n axis=0)\n x256_bg_color = str(x256.from_rgb(*avg_color))\n if lowest_unicode == chr(32):\n print('\\033[48;5;{0}m{1}\\033[0m'.format(x256_color,\n chr(32)), end='')\n else:\n print('\\033[38;5;{0}m\\033[48;5;{1}m'.format(x256_color,\n x256_bg_color) + \n '{0}\\033[0m'.format(lowest_unicode), end='')\n last_avg_color = avg_color\n else:\n print(lowest_unicode, end='')\n if x256_mode:\n print('\\x1b[39m', end='\\r\\n')\n else:\n print('', end='\\r\\n')",
"def get_img_by_char(char, base_path='../../dataset/nums'):\n opdict = {'+': 10, '-': 11, '*': 12, '/': 13, '=': 14, '(': 15, ')': 16}\n if char in opdict.keys():\n char = opdict[char]\n path = os.path.join(base_path, str(char))\n files = os.listdir(path)\n\n rdm = random.randint(0, len(files) - 1)\n\n if rdm >= len(files):\n print(path, len(files), rdm)\n\n file = files[rdm]\n path = os.path.join(path, file)\n return cv2.imread(path, cv2.IMREAD_GRAYSCALE)",
"def create_char(self, location, bitmap):\n if not (0 <= location <= 7):\n raise ValueError('Only locations 0-7 are valid.')\n if len(bitmap) != 8:\n raise ValueError('Bitmap should have exactly 8 rows.')\n\n # Store previous position\n pos = self.cursor_pos\n\n # Write character to CGRAM\n self.command(_LCD_SETCGRAMADDR | location << 3)\n for row in bitmap:\n self._send(row, _RS_DATA)\n\n # Restore cursor pos\n self.cursor_pos = pos",
"def create_char(self, location, bitmap):\n assert 0 <= location <= 7, 'Only locations 0-7 are valid.'\n assert len(bitmap) == 8, 'Bitmap should have exactly 8 rows.'\n\n # Store previous position\n pos = self.cursor_pos\n\n # Write character to CGRAM\n self.command(self.LCD_SETCGRAMADDR | location << 3)\n for row in bitmap:\n self._send(row, self.RS_DATA)\n\n # Restore cursor pos\n self.cursor_pos = pos",
"def spew_to_image(bitstream, img_name):\n pixel_width = int(IMAGE_FULL_X / NUM_BITS_X)\n pixel_height = int(IMAGE_FULL_Y / NUM_BITS_Y)\n\n bits_per_pixel = 1\n\n total_length = NUM_BITS_X * NUM_BITS_Y * bits_per_pixel\n\n if len(bitstream) != total_length:\n print(f'Bitstream is not correct length. Expected {total_length},', end='');\n print(f'but found {len(bitstream)}')\n exit(0)\n\n is_binary = all(ch in('01') for ch in bitstream)\n if not is_binary:\n print(f'Bitstream must be all 0s and 1s. {bitstream} has other chars')\n exit(0)\n\n img = Image.new('RGB', (IMAGE_FULL_X,IMAGE_FULL_Y), BLACK)\n draw = ImageDraw.Draw(img)\n\n for i in range(len(bitstream)):\n y_start = int(i/NUM_BITS_X) * pixel_height\n y_end = y_start + pixel_height\n\n x_start = (i % NUM_BITS_X) * pixel_width\n x_end = x_start + pixel_width\n\n color = 'white' if int( bitstream[i] )==1 else 'black'\n\n draw.rectangle( ((x_start,y_start),\n (x_end,y_end)), fill=color)\n # print(f'({x_start},{y_start}) to ({x_end},{y_end}) = {bitstream[i]}')\n img.save(img_name, 'PNG')",
"def init_bitmap(self, input_file):\n script_path = os.path.dirname(os.path.realpath(__file__))\n input_file_path = os.path.join(script_path, input_file)\n with open(input_file_path, \"rb\") as bitmap_file:\n reader = csv.reader(bitmap_file)\n for row in reader:\n if row[0] not in self.FONT.keys():\n self.FONT[str(row[0])] = []\n self.FONT[str(row[0])].append(row[1] + self._spacer)\n else:\n self.FONT[str(row[0])].append(row[1] + self._spacer)",
"def LoadPicture(filename):\n return Bitmap(filename)",
"def unpack_mono_bitmap(bitmap):\n # Allocate a bytearray of sufficient size to hold the glyph bitmap.\n data = bytearray(bitmap.rows * bitmap.width)\n\n # Iterate over every byte in the glyph bitmap. Note that we're not\n # iterating over every pixel in the resulting unpacked bitmap --\n # we're iterating over the packed bytes in the input bitmap.\n for y in range(bitmap.rows):\n for byte_index in range(bitmap.pitch):\n\n # Read the byte that contains the packed pixel data.\n byte_value = bitmap.buffer[y * bitmap.pitch + byte_index]\n\n # We've processed this many bits (=pixels) so far. This determines\n # where we'll read the next batch of pixels from.\n num_bits_done = byte_index * 8\n\n # Pre-compute where to write the pixels that we're going\n # to unpack from the current byte in the glyph bitmap.\n rowstart = y * bitmap.width + byte_index * 8\n\n # Iterate over every bit (=pixel) that's still a part of the\n # output bitmap. Sometimes we're only unpacking a fraction of a byte\n # because glyphs may not always fit on a byte boundary. So we make sure\n # to stop if we unpack past the current row of pixels.\n for bit_index in range(min(8, bitmap.width - num_bits_done)):\n\n # Unpack the next pixel from the current glyph byte.\n bit = byte_value & (1 << (7 - bit_index))\n\n # Write the pixel to the output bytearray. We ensure that `off`\n # pixels have a value of 0 and `on` pixels have a value of 1.\n data[rowstart + bit_index] = 1 if bit else 0\n\n return data",
"def _get_image(x):\n return b64encode(x).decode('ascii')",
"def createNew(string):\n image=Image.open('imageGenerator/images/images.jpg').convert('RGBA')\n\n fnt = ImageFont.truetype(\"fonts/arial.ttf\", 25)\n d = ImageDraw.Draw(image)\n\n d.text((10,10), string, font=fnt, fill=(255,255,255,128))\n\n d.text((10,60), \"World\", fill=(255,255,255,255))\n file=byt()\n image.save(file,'jpeg')\n return file.getvalue()",
"def wx2PIL(self, bitmap):\n size = tuple(bitmap.GetSize())\n try:\n buf = size[0]*size[1]*3*\"\\x00\"\n bitmap.CopyToBuffer(buf)\n except:\n del buf\n buf = bitmap.ConvertToImage().GetData()\n return Image.frombuffer(\"RGB\", size, buf, \"raw\", \"RGB\", 0, 1)",
"def get_letter_image(self, letter):\n\t\tassert len(letter) == 1\n\t\treturn self._tileset.get_tile(self._letter_mapping[letter])",
"def read_text(text=\"刘菁我爱你\"):\n\tpygame.init()\n\tfont = pygame.font.Font('/System/Library/Fonts/Supplemental/Songti.ttc', 26)\n\trtext = font.render(text, True, (0, 0, 0), (255, 255, 255))\n\n\tif os.path.exists(CHAR_IMG):\n\t\tos.remove(CHAR_IMG)\n\tpygame.image.save(rtext, CHAR_IMG)\n\t\n\timg = cv2.imread(CHAR_IMG)\n\timg = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)/255\n\n\treturn img",
"def testBinaryImage():\n ALIEN = \"0\"*8 + \"11011011\"*2 + \"0\"*8 + \"00001000\" + \\\n \"01000010\" + \"01111110\" + \"0\"*8\n # this function is imported from cs5png.py\n NUM_ROWS = 8\n NUM_COLS = 8\n binaryIm( ALIEN, NUM_COLS, NUM_ROWS )\n # that should create a file, binary.png, in this\n # directory with the 8x8 image...",
"def unpack_mono_bitmap(bitmap):\n # Allocate a bytearray of sufficient size to hold the glyph bitmap.\n data = bytearray(bitmap.rows * bitmap.width)\n\n # Iterate over every byte in the glyph bitmap. Note that we're not\n # iterating over every pixel in the resulting unpacked bitmap --\n # we're iterating over the packed bytes in the input bitmap.\n for row in range(bitmap.rows):\n for byte_index in range(bitmap.pitch):\n\n # Read the byte that contains the packed pixel data.\n byte_value = bitmap.buffer[row * bitmap.pitch + byte_index]\n\n # We've processed this many bits (=pixels) so far. This\n # determines where we'll read the next batch of pixels from.\n num_bits_done = byte_index * 8\n\n # Pre-compute where to write the pixels that we're going\n # to unpack from the current byte in the glyph bitmap.\n rowstart = row * bitmap.width + byte_index * 8\n\n # Iterate over every bit (=pixel) that's still a part of the\n # output bitmap. Sometimes we're only unpacking a fraction of\n # a byte because glyphs may not always fit on a byte boundary.\n # So we make sure to stop if we unpack past the current row\n # of pixels.\n for bit_index in range(min(8, bitmap.width - num_bits_done)):\n\n # Unpack the next pixel from the current glyph byte.\n bit = byte_value & (1 << (7 - bit_index))\n\n # Write the pixel to the output bytearray. We ensure that\n # `off` pixels have a value of 0 and `on` pixels have a\n # value of 1.\n data[rowstart + bit_index] = 1 if bit else 0\n\n return data",
"def byte2img(filename):\n try:\n with open(filename, 'r') as f:\n arr = []\n for line in f:\n vals = line.split()\n del vals[0]\n arr.append(vals)\n \n max_len = max([len(vals) for vals in arr])\n \n new_arr = []\n for vals in arr:\n new_arr.append([val.replace('?', '0') for val in vals])\n \n for vals in new_arr:\n if '?' in vals:\n print(vals)\n \n hexstring = ''.join(list(itertools.chain.from_iterable(new_arr)))\n \n byte_arr = bytearray.fromhex(hexstring)\n width = 1024\n rem = len(byte_arr) % width\n byte_arr_len = len(byte_arr) - rem\n byte_arr = byte_arr[:byte_arr_len]\n byte_arr = np.asarray(byte_arr)\n np_arr = np.reshape(byte_arr, (len(byte_arr)//width, width))\n np_arr = np.uint8(np_arr)\n img = Image.fromarray(np_arr)\n return img\n except Exception as error:\n logging.error(traceback.format_exc())",
"def create_image_with_string(s, size, color, fontsize=10):\n from PIL import Image\n # from PIL import ImageFont\n from PIL import ImageDraw\n img = Image.new(\"RGB\", size, \"white\")\n\n draw = ImageDraw.Draw(img)\n# font = ImageFont.truetype('FreeMono', 10)\n options = [\n '/usr/local/texlive/2015/texmf-dist/fonts/truetype/public/gnu-freefont/FreeMono.ttf',\n '/usr/share/fonts/truetype/freefont/FreeMono.ttf']\n font = None\n for f in options:\n if os.path.exists(f):\n font = ImageFont.truetype(f, fontsize)\n break\n else:\n logger.info('Could not find any font in %r' % options)\n \n draw.text((0, 0), s, color, font=font)\n data = get_png(img)\n return data",
"def _CreateClipboardImage( self ):\n bmap = None\n\n fd, name = tempfile.mkstemp( '.png' )\n try:\n os.close( fd )\n if self.CreatePrintImage( name ):\n bmap = wx.Image( name, wx.BITMAP_TYPE_PNG ).ConvertToBitmap()\n finally:\n os.remove( name )\n\n return bmap",
"def get_letter_image(self, letter): # pragma: no cover\n\t\traise NotImplementedError()",
"def get_letter_image(self, letter):\n\t\tassert len(letter) == 1\n\t\treturn ImageRegion(self._tileset, self._bound_rects[letter])",
"def base64_to_PIL(string):\n try:\n base64_data = base64.b64decode(string)\n img = Image.open(BytesIO(base64_data)).convert('RGB')\n return img\n except:\n return None",
"def BitmapFromIcon(*args, **kwargs):\n val = _gdi_.new_BitmapFromIcon(*args, **kwargs)\n return val",
"def from_bytes(data):\n\tstream = Gio.MemoryInputStream.new_from_bytes(GLib.Bytes.new(data))\n\tpixbuf = GdkPixbuf.Pixbuf.new_from_stream(stream)\n\treturn pixbuf",
"def text2image(text, size):\n fontPath = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), \"simsun.ttc\"))\n image = Image.new(\"RGB\", size, (255, 255, 255)) # mode, size(width, height), bg color\n draw = ImageDraw.Draw(image)\n font = ImageFont.truetype(fontPath, 10)\n # 第一个个参数为距离左上角的坐标, fill参数为填充字体的颜色\n draw.text((0, 0), text, font=font, fill=\"black\", align=\"left\")\n return pil2cv(image)",
"def __add_char__(self,character,bitmap):\n # if self.training_page is None:\n # self.__create_blank_page__()\n\n char_height,char_width = bitmap.shape\n\n # do we have too many characters for this row?\n # if so - flush\n if (self.column_pointer+char_width) >= self.width-spacing:\n self.__write_out_row__()\n\n # self.character_heights.append(bitmap.shape[0])\n\n\n self.row_bitmaps.append(bitmap)\n self.row_characters.append(character)\n self.column_pointer += char_width + spacing\n\n # self.__box_file_update__()"
] | [
"0.5853663",
"0.58179283",
"0.56746536",
"0.5479672",
"0.5382288",
"0.53489006",
"0.53442067",
"0.53410566",
"0.53409886",
"0.52899945",
"0.52466315",
"0.52246463",
"0.5191945",
"0.5159506",
"0.5151667",
"0.5137356",
"0.51334125",
"0.5129038",
"0.5070787",
"0.50635946",
"0.5045427",
"0.5029963",
"0.50208503",
"0.5018598",
"0.50134623",
"0.49855512",
"0.49672717",
"0.49427912",
"0.49413",
"0.49334776"
] | 0.777532 | 0 |
Ingest a file and slice it into 10x20 bitmaps which are compared with bitmaps of unicode characters. The most similar character is printed with the x256 color that is most like the average color for the 10x20 bitmap slice. | def print_image_as_unicode(image_file, **kwargs):
char_set = kwargs['char_set']
x256_mode = kwargs['x256']
height = 20 # height of unicode character
width = 10 # width of the unicode characters we are using
# Credit ElTero and ABM (https://stackoverflow.com/a/7051075)
if image_file == '-':
source = sys.stdin.buffer
image_file = BytesIO()
image_file.write(source.read())
im = Image.open(image_file)
imgwidth, imgheight = im.size
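    # Walk the image in width x height cells; each cell is rendered as one character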
for row in range(imgheight//height):
last_avg_color = np.array([0,0,0])
for column in range(imgwidth//width):
box = (column*width, row*height, (column+1)*width, (row+1)*height)
cropped = im.crop(box)
lowest_value = 100000
lowest_unicode = None
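            # Brute-force search: render each candidate character and keep the least dissimilar one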
for unicode in char_set:
unicode = chr(unicode)
dissimilarity = compare(create_unicode_image(unicode), cropped)
if dissimilarity < lowest_value:
lowest_value = dissimilarity
lowest_unicode = unicode
if x256_mode:
# Credit: Ruan B. (until URL)
avg_color_per_row = np.average(cropped, axis=0)
avg_color = np.average(avg_color_per_row, axis=0)[:3]
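                # Map the cell's average RGB to the nearest xterm-256 palette index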
x256_color = str(x256.from_rgb(*avg_color))
# https://stackoverflow.com/a/43112217
composite_color = np.average(np.array([avg_color,
last_avg_color]),
axis=0)
                x256_bg_color = str(x256.from_rgb(*composite_color))
if lowest_unicode == chr(32):
print('\033[48;5;{0}m{1}\033[0m'.format(x256_color,
chr(32)), end='')
else:
print('\033[38;5;{0}m\033[48;5;{1}m'.format(x256_color,
x256_bg_color) +
'{0}\033[0m'.format(lowest_unicode), end='')
last_avg_color = avg_color
else:
print(lowest_unicode, end='')
if x256_mode:
print('\x1b[39m', end='\r\n')
else:
print('', end='\r\n') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, file_path=\"data/\", batch_size=1, img_size=[128, 2048], max_text_len=256):\r\n\r\n # filePath needs to be a folder\r\n assert file_path[-1]=='/'\r\n\r\n self.current_index = 0\r\n self.batch_size = batch_size\r\n self.img_size = img_size\r\n self.samples = []\r\n\r\n # metadata for words in words.txt\r\n f=open(file_path+'lines.txt')\r\n\r\n\r\n chars = set()\r\n bad_samples = []\r\n #bad_samples_reference = ['a01-117-05-02.png', 'r06-022-03-05.png']\r\n for line in f:\r\n # ignore comment line\r\n if not line or line[0]=='#':\r\n continue\r\n # line = (name, status, graylevel, components, (x y w h), grammatical tag, ground_truth)\r\n line_split = line.strip().split(' ')\r\n assert len(line_split) >= 9\r\n\r\n # get path + name of a file\r\n file_name_split = line_split[0].split('-')\r\n file_name = file_path + 'lines/' + file_name_split[0] + '/' + file_name_split[0] + '-' + file_name_split[1] + '/' + line_split[0] + '.png'\r\n\r\n\t\t\t# GT text are columns starting at 9, cut off words that are too long\r\n target_text = line_split[8:]\r\n target_text = \"\".join(target_text)\r\n\r\n target_text = target_text.replace(\"|\", \" \")\r\n # get all characters present in the dataset.\r\n chars = chars.union(set(list(target_text)))\r\n\r\n\t\t\t# check if image is not empty\r\n if not os.path.getsize(file_name):\r\n bad_samples.append(line_split[0] + '.png')\r\n continue\r\n\r\n # put sample into list\r\n self.samples.append(Sample(target_text, file_name))\r\n\r\n # some images in the IAM dataset are known to be damaged, don't show warning for them\r\n #if set(bad_samples) != set(bad_samples_reference):\r\n # print(\"Warning, damaged images found:\", bad_samples)\r\n\r\n #print(\"Damaged images expected:\", bad_samples_reference)\r\n\r\n # split into training and validation set: 95% - 5%\r\n split_index = int(0.95 * len(self.samples))\r\n self.train_samples = self.samples[:split_index]\r\n self.validation_samples = self.samples[split_index:]\r\n\r\n # put words into lists\r\n self.train_words = [x.target_text for x in self.train_samples]\r\n self.validation_words = [x.target_text for x in self.validation_samples]\r\n\r\n # number of randomly chosen samples per epoch of training\r\n self.train_samples_per_epoch = len(self.train_samples)//2\r\n\r\n # start with train set\r\n self.train_set()\r\n\r\n \t# sorted list of all chars in dataset\r\n self.char_list = sorted(list(chars))",
"def main():\n args = parse_args(sys.argv[1:])\n dir = args.directory\n wild = args.wildcard\n rec = args.recursive\n nb_clusters = args.clusters\n bricks = args.bricks\n\n path_for_display = os.path.abspath(dir)\n\n all_paths = None\n if rec:\n all_paths = Path(dir).rglob(wild)\n path_for_display += \" (recursive)\"\n else:\n all_paths = Path(dir).glob(wild)\n path_for_display += \" (non recursive)\"\n\n f_sizes = []\n\n for path in all_paths:\n\n full_path = os.path.join(path.parent, path.name)\n byte_size = os.path.getsize(full_path)\n # print(full_path, byte_size)\n f_sizes.append(byte_size)\n\n f_sizes = np.array(f_sizes)\n # print(f_sizes)\n\n min_byte_length = np.amin(f_sizes)\n max_byte_length = np.amax(f_sizes)\n mean_byte_length = np.mean(f_sizes)\n std_byte_length = np.std(f_sizes)\n median_byte_length = np.median(f_sizes)\n\n histo, bin_edges = np.histogram(f_sizes, nb_clusters)\n histo = histo.astype(\"float32\")\n histo_normalized = (np.copy(histo) / histo.max() * bricks).astype(\"uint32\")\n\n print(\"[BYTESPREAD REPORT]\")\n print()\n print(\"Date: \", datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\"))\n print(\"Directory: \", path_for_display)\n print(\"Filename match: \", wild)\n print(\"Number of files: \", f_sizes.shape[0])\n print(\"Smallest file: \", byteToHumanReadable(min_byte_length))\n print(\"Largest file: \", byteToHumanReadable(max_byte_length))\n print(\"Average size: \", byteToHumanReadable(mean_byte_length))\n print(\"Standard deviation: \", byteToHumanReadable(std_byte_length))\n print(\"Median size: \", byteToHumanReadable(median_byte_length))\n print(\"Histogram intervals: \", byteToHumanReadable(bin_edges[1] - bin_edges[0]))\n print(\"Histogram:\")\n print()\n\n for i in range(0, len(bin_edges)-1):\n lower_bound = byteToHumanReadable(bin_edges[i])\n upper_bound = byteToHumanReadable(bin_edges[i+1])\n print(\"|\", \"▓\" * histo_normalized[i], \"[{}-{}], {} files\".format(lower_bound, upper_bound, math.ceil(histo[i])))",
"def render(filename,i):\n print('running render')\n A = np.genfromtxt(filename,skip_header=1,dtype=float,delimiter=',')\n img = np.array(A[i,:],copy=True)\n print(img.shape)\n img = img.reshape(28,28)\n img = 255 - img\n print(img.shape)\n plt.imshow(img, cmap=\"gray\", vmin=0, vmax=255)\n plt.savefig(\"img\" + str(i)+\"render\"+ \".png\")",
"def sort(self):\n\n img_files = os.listdir(self.path)\n\n img_list = {}\n\n for img_file in img_files:\n filename = os.path.join(self.path, img_file)\n\n try:\n img = Image.open(filename)\n except:\n continue\n\n print \"Analyzing %s\" % img_file\n\n points = self.points(img.size[0], img.size[1])\n key = \"\"\n for point in points:\n\n # Get the average color for each point\n ave_points = self.diamond_points(point[0], point[1])\n red = 0\n green = 0\n blue = 0\n for ave_point in ave_points:\n try:\n rgb = img.getpixel(ave_point)\n red += rgb[0]\n green += rgb[1]\n blue += rgb[2]\n except IndexError:\n pass\n red /= len(ave_points)\n green /= len(ave_points)\n blue /= len(ave_points)\n\n # Bitdepths:\n # 12 bit - 4096 colors, range 0-F, divide by 16\n # 9 bit - 512 colors, range 0-7, divide by 32\n # 6 bit - 64 colors, range 0-3, divide by 64\n # 3 bit - 8 colors, range 0-1, divide by 128\n\n if self.num_colors == 8:\n div = 128\n elif self.num_colors == 64:\n div = 64\n elif self.num_colors == 512:\n div = 32\n elif self.num_colors == 4096:\n div = 16\n else:\n self.usage()\n\n # Lower the bitdepth\n red = int(red / div)\n green = int(green / div)\n blue = int(blue / div)\n\n # Add to the key\n key += \"%x%x%x\" % (red, green, blue)\n\n # Add the key if needed\n if key not in img_list:\n img_list[key] = []\n\n # Add the file to the list\n img_list[key].append(img_file)\n\n # Go through and rename the files, based on the img_list dictionary\n # and the prefix\n num = 1\n for img in sorted(img_list.iterkeys()):\n for filename in sorted(img_list[img]):\n name, ext = os.path.splitext(filename)\n new_filename = \"%s%04d%s\" % (self.prefix, num, ext)\n full_filename = os.path.join(self.path, filename)\n full_new_filename = os.path.join(self.path, new_filename)\n if os.path.isfile(full_new_filename):\n print \"File %s exists - aborting!\" % full_new_filename\n return\n\n os.rename(full_filename, full_new_filename)\n print \"Renamed %s to %s.\" % (filename, new_filename)\n num += 1",
"def pixdump( source, start=None, end=None, length=None, width=64, height=None, palette=None ):\n\n for line in pixdump_iter( source, start, end, length, width, height, palette ):\n print( line )",
"def _handle_image_descriptors(self):\n while self.file_content[self.data_idx] == 0x2c:\n img_left = self.file_content[self.data_idx + 1] + \\\n (self.file_content[self.data_idx + 2] << 8)\n img_top = self.file_content[self.data_idx + 3] + \\\n (self.file_content[self.data_idx + 4] << 8)\n img_width = self.file_content[self.data_idx+5] + \\\n (self.file_content[self.data_idx + 6] << 8)\n #img_height = self.file_content[self.data_idx+7] + \\\n # (self.file_content[self.data_idx + 8] << 8)\n flags = self.file_content[self.data_idx + 9]\n local_col_table_flag = (flags & 0b10000000) != 0\n #interlace_flag = (flags & 0b01000000) != 0\n self.data_idx = self.data_idx + 10\n if local_col_table_flag:\n # read local color table\n print('read local color table. Not implemented yet')\n\n self.lzw_min_code_sz = self.file_content[self.data_idx]\n self.data_idx = self.data_idx + 1\n\n pix_xix = img_left\n pix_yix = img_top\n subblock_data = []\n while self.file_content[self.data_idx] != 0:\n subblock_sz = self.file_content[self.data_idx]\n self.data_idx = self.data_idx + 1\n subblock_data += self.file_content[self.data_idx:self.data_idx + subblock_sz]\n self.data_idx = self.data_idx + subblock_sz\n self.data_idx = self.data_idx + 1\n dec_data = self.decode_subblock(subblock_data)\n for dat in dec_data:\n self.output_image[pix_xix][pix_yix][0] = self.color_table[dat][0]\n self.output_image[pix_xix][pix_yix][1] = self.color_table[dat][1]\n self.output_image[pix_xix][pix_yix][2] = self.color_table[dat][2]\n pix_xix = pix_xix + 1\n if pix_xix == img_left + img_width:\n pix_xix = img_left\n pix_yix = pix_yix + 1",
"def decompose(self, file_name):\n print(\"[+] Decompose started...\")\n with open(file_name, \"rb\") as image_file:\n\n # We check if the directory chunks doesn't exist, then, we create it\n if not path.exists(\"./chunks/\"):\n makedirs(\"chunks\")\n \n to_print = b64.b64encode(image_file.read()).decode('utf-8')\n size = len(to_print)\n re_size = self.verify_size_content(self.divide(size))\n content = \"\"\n i = 0\n\n print(\"[+] FILENAME: \" + str(file_name))\n print(\"[+] \" + str(re_size))\n print(\"[+] SIZE: \" + str(size))\n \n while to_print:\n content = to_print[:re_size['chunck']]\n title = md5(content[:300].encode()).hexdigest()\n self.map[i] = title\n self.chunk_array.append({title: content})\n print(\"> chunck: \" + title)\n\n system(\"mkdir ../chunks/\")\n # Optionnal, to saved the chunks\n with open(\"../chunks/\" + title, \"w+\") as file:\n file.write(content)\n # Optionnal, to saved the chunks\n to_print = to_print[re_size['chunck']:]\n i += 1\n print(\"[+] Decompose done.\")\n print(\"-------\")",
"def process(file_name):\n img=Image.open(str(file_name))\n cim_resized = img.resize((40,40), resample=Image.LANCZOS)\n n = cim_resized.convert('L')\n cropped = np.array(n).astype(np.float64)\n im=Image.fromarray(cropped)\n im.show()\n normalized_cropped_image = cropped - np.mean(cropped)\n normalized_cropped_image = normalized_cropped_image.reshape((-1, image_size, image_size, num_channels)).astype(np.float32)\n predicted_arr = predict(normalized_cropped_image)\n label = ''.join(['' if int(x[0]) == 10 else str(x[0]) for x in list(predicted_arr)])\n print 'LABEL: ' + label",
"def init_bitmap(self, input_file):\n script_path = os.path.dirname(os.path.realpath(__file__))\n input_file_path = os.path.join(script_path, input_file)\n with open(input_file_path, \"rb\") as bitmap_file:\n reader = csv.reader(bitmap_file)\n for row in reader:\n if row[0] not in self.FONT.keys():\n self.FONT[str(row[0])] = []\n self.FONT[str(row[0])].append(row[1] + self._spacer)\n else:\n self.FONT[str(row[0])].append(row[1] + self._spacer)",
"def unpack_mraw_frame_10bit(file,n_pixels,start_frame=0):\n \n start_byte = start_frame*n_pixels*10/8\n file.seek(start_byte)\n image = []\n \n n_bytes = n_pixels*10/8\n \n int_array = np.fromfile(file,count=n_bytes,dtype=np.uint8)\n \n bytes_1 = int_array[::5]\n bytes_2 = int_array[1::5] \n bytes_3 = int_array[2::5]\n bytes_4 = int_array[3::5] \n bytes_5 = int_array[4::5]\n\n \n # Here 4 pixels from the image are shared between 5 bytes of data like\n #\n # | byte 1 | byte 2 | byte 3 | byte 4 | byte 5 |\n # |o o o o o o o o | o o | o o o o o o | o o o o | o o o o | o o o o o o | o o | o o o o o o o o|\n # | Pixel 1 | Pixel 2 | Pixel 3 | Pixel 4 |\n #\n # byte 2 is shared between pixel and we need only the right-most bits for pixel 2 and\n # only the left most bits for pixel 1. \n \n # right-most bits of byte 2 = Most significant bits of Pixel 2\n # left-most bits of byte 2 = Least significant bits of Pixel 1\n \n pix_1 = np.array(4.0*bytes_1 + np.right_shift(bytes_2,6),dtype=np.uint16)\n pix_2 = np.array(16.0*np.bitwise_and(bytes_2,0b111111) + np.right_shift(bytes_3,4),dtype=np.uint16)\n pix_3 = np.array(64.0*np.bitwise_and(bytes_3,0b1111) + np.right_shift(bytes_4,2),dtype=np.uint16)\n pix_4 = np.array(256.0*np.bitwise_and(bytes_4,0b11) + bytes_5,dtype=np.uint16)\n #try:\n image = (np.dstack([pix_1,pix_2,pix_3,pix_4])).reshape((1,n_pixels))[0]\n #except:\n # image = np.zeros(n_pixels)\n return image",
"def av_color(file):\n\ttry:\n\t\timage = Image.open(file)\n\t\tw, h = image.size\n\t\tpixels = image.getcolors(w * h)\n\t\tmost_frequent_pixel = pixels[0]\n\t\tfor count, colour in pixels:\n\t\t\tif count > most_frequent_pixel[0]:\n\t\t\t\tmost_frequent_pixel = (count, colour)\n\t\tdbg = int('0x%02x%02x%02x' % most_frequent_pixel[1], 16)\n\t\tprint(dbg)\n\t\treturn dbg\n\texcept Exception as e:\n\t\tprint('[!Error!] in AV COLOR')\n\t\tprint(e)\n\t\treturn 0xB46BCF",
"def decode_file(source, palette):\n\n (CHRStart, CHRSize) = get_CHR_data_position(source)\n charRowCount = CHRSize // 256 # 16 characters/row\n\n img = Image.new(\"P\", (128, charRowCount * 8), 0)\n img.putpalette(itertools.chain.from_iterable(palette))\n\n source.seek(CHRStart)\n for (y, pixelRow) in enumerate(decode_pixel_rows(source, charRowCount)):\n for (x, value) in enumerate(pixelRow):\n img.putpixel((x, y), value)\n\n return img",
"def createGlyphs(input):\r\n\r\n command_open = subprocess.Popen([\"start\", \"cmd\",\"/k\", 'echo {}'.format(input)], shell = True)\r\n\r\n time.sleep(2) #time for window to appear\r\n\r\n #print win32gui.FindWindow(None, \"C:\\Windows\\system32\\cmd.exe\")\r\n hwnd = win32gui.FindWindow(None, \"C:\\Windows\\system32\\cmd.exe\")\r\n win32gui.SetForegroundWindow(hwnd)\r\n bbox = win32gui.GetWindowRect(hwnd)\r\n img = ImageGrab.grab(bbox)\r\n #img.show()\r\n\r\n length_of_chars = len(input) * 8 # +2 to compensate for the quotation marks\r\n combined_chars = img.crop((10,34,length_of_chars+9,45))\r\n #combined_chars.show()\r\n\r\n #nonetype error was caused by chaining the .show() wiutth the rest of the stuff\r\n\r\n chars = {x:\"\" for x in input}\r\n\r\n for i, each in enumerate(range(8,combined_chars.size[0]+9,8)): #starts from 1, and +8 to compensate for the -8 below VVV\r\n #if i not in bad_indexes: #this is to avoid the first and last double quotation marks\r\n current_char = input[i]\r\n \r\n indiv_char = combined_chars.crop((each-8,0,each,11))\r\n \r\n w, h = indiv_char.size #should be 8 wide by 9 high\r\n\r\n for i2, pixel in enumerate(indiv_char.getdata()): #tuple values can either be (0, 0, 0) or (192,192,192) for the default terminal colours\r\n if pixel == (192,192,192): \r\n chars[current_char] += u\"\\u2588\"\r\n\r\n else:\r\n chars[current_char] += \" \"\r\n\r\n if i2 % w == w-1: # we want it too look decent so overflow is neeeded onto the next lines\r\n # ^^^ before it was i2 % w == 0, but that lead to a trail behind character, so whats before 0? -1! so w-1!!!\r\n chars[current_char] += \"\\n\"\r\n\r\n chars[current_char] = chars[current_char][:-1] #this is to remove the last \"\\n\"\r\n\r\n return chars",
"def generate_tamil_images(label_file, fonts_dir, output_dir):\r\n with io.open(label_file, 'r', encoding='utf-8') as f:\r\n labels = f.read().splitlines()\r\n\r\n image_dir = os.path.join(output_dir, 'images')\r\n if not os.path.exists(image_dir):\r\n os.makedirs(os.path.join(image_dir))\r\n\r\n # Get a list of the fonts.\r\n fonts = glob.glob(os.path.join(fonts_dir, '*.ttf'))\r\n\r\n labels_csv = io.open(os.path.join(output_dir, 'labels-map.csv'), 'w',\r\n encoding='utf-8')\r\n\r\n total_count = 0\r\n prev_count = 0\r\n folder_count = 0\r\n for character in labels:\r\n folder_count += 1\r\n #print('%s: %d',character,len(character))\r\n # Print image count roughly every 5000 images.\r\n if total_count - prev_count > 5000:\r\n prev_count = total_count\r\n print('{} images generated...'.format(total_count))\r\n mychar = character\r\n for font in fonts:\r\n total_count += 1\r\n image = Image.new('L', (IMAGE_WIDTH, IMAGE_HEIGHT), color='white')\r\n font = ImageFont.truetype(font, int(80/len(mychar)))\r\n drawing = ImageDraw.Draw(image)\r\n w, h = drawing.textsize(mychar, font=font)\r\n drawing.text(\r\n ((IMAGE_WIDTH-w)/2, (IMAGE_HEIGHT-h)/2),\r\n mychar,\r\n fill=(0),\r\n font=font\r\n )\r\n file_string = '{}_{}.jpg'.format(mychar, total_count)\r\n #file_path = os.path.join(image_dir, 'f'+str(folder_count))\r\n file_path = os.path.join(image_dir, file_string)\r\n orgFile = file_path\r\n image.save(file_path)\r\n \r\n\r\n #directory creation\r\n mvDir = os.path.join(image_dir, mychar + '_' + str(folder_count))\r\n if not os.path.exists(mvDir):\r\n os.makedirs(mvDir)\r\n \r\n for i in range(DISTORTION_COUNT):\r\n total_count += 1\r\n dist_string = '{}_{}.jpg'.format(mychar, total_count)\r\n #file_path = os.path.join(image_dir, 'f'+str(folder_count))\r\n file_path = os.path.join(image_dir, dist_string)\r\n arr = numpy.array(image)\r\n\r\n distorted_array = elastic_distort(\r\n arr, alpha=random.randint(30, 36),\r\n sigma=random.randint(5, 6)\r\n )\r\n distorted_image = Image.fromarray(distorted_array)\r\n distorted_image.save(file_path)\r\n img = Image.open(file_path)\r\n dr = ImageDraw.Draw(img)\r\n cor = (0,0, IMAGE_WIDTH,IMAGE_HEIGHT)\r\n dr.rectangle(cor, outline=\"white\", width=5)\r\n img.save(file_path)\r\n distLoc = os.path.join(mvDir, dist_string)\r\n shutil.move(file_path, distLoc)\r\n labels_csv.write(u'{},{}\\n'.format(mychar, distLoc))\r\n origLoc = os.path.join(mvDir, file_string)\r\n newPath = os.path.join(mvDir, origLoc)\r\n shutil.move(orgFile, newPath)\r\n labels_csv.write(u'{},{}\\n'.format(mychar, newPath))\r\n \r\n print('Finished generating {} images.'.format(total_count))\r\n labels_csv.close()",
"def batch_analysis(filename: str) -> None:\n \n infile = open(filename, \"r\")\n word_list_new = []\n for line in infile:\n word_list = line.split()\n for word in word_list:\n if word != '':\n word_list_new += [word]\n load_img = load_image(word_list_new[0])\n for filter in range(len(word_list)-2):\n load_img = filter_array(load_img, word_list[filter+2])\n save_as(load_img, word_list_new[1])\n word_list_new = [] #reset the line to empty\n infile.close()\n # Now build the list of distinct words.\n word_list_new = list(word_list)\n return word_list_new",
"def readTiles(self):\n TileImage = Image.open(self.Filename).convert(\"RGB\")\n TileIW, TileIH = TileImage.size\n TilesetW, TilesetH = TileIW // self.TileWidth, TileIH // self.TileHeight\n\n for y in range(TilesetH):\n for x in range(TilesetW):\n box = self.TileWidth * x, self.TileHeight * y, self.TileWidth * (x+1), self.TileHeight * (y+1)\n tile = TileImage.crop(box)\n self.List.append(tile)\n\n str = tile.tostring()\n if not str in self.TileDict:\n #print(\"add tile: \", str)\n self.TileDict[str] = len(self.List) - 1\n print(\"tile count: {}, unique count: {}\".format(len(self.List),len(self.TileDict.values())))",
"def main():\n stats = []\n start = timer()\n\n for file_name in get_dataset():\n\n # load image and ground truth detection mask\n img = cv2.imread(settings.PATH + file_name)\n ground_truth_mask = cv2.imread(settings.PATH_GT_MASKS + file_name)\n\n # Find list of barcode regions (rotated rectangle) within image\n barcode_regions, debug_img = find_barcodes(img)\n barcode_regions_mask = np.zeros(img.shape, np.uint8)\n barcode_images = None\n result = []\n\n # Decode barcode regions\n for barcode_region in barcode_regions:\n\n # Decode barcode image\n barcode_img = barcode_region.extract_from(img)\n barcode_mask = barcode_region.get_mask(img)\n debug_img = barcode_region.draw(debug_img)\n\n # Combine masks from multiple detected regions\n barcode_regions_mask += barcode_mask\n\n # Decode barcode\n decoded = pyzbar.decode(barcode_img)\n\n # Keep result for logging\n data = \", \".join([d.data.decode(\"utf-8\") for d in decoded])\n result.append({\"data\": data, \"region\": barcode_region.json()})\n\n if settings.SHOW_IMAGE:\n barcode_images = img_concat(barcode_images, barcode_img)\n\n # Jaccard_accuracy = intersection over union of the two binary masks\n jaccard_accuracy = 0\n if ground_truth_mask is not None:\n r = barcode_regions_mask.max(axis=-1).astype(bool)\n u = ground_truth_mask.max(axis=-1).astype(bool)\n jaccard_accuracy = float((r & u).sum()) / (r | u).sum()\n stats.append(jaccard_accuracy)\n\n # Log result\n logger.info(\n \"Image processed\",\n file_name=file_name,\n jaccard_accuracy=jaccard_accuracy,\n success=jaccard_accuracy > 0.5,\n result=result,\n )\n\n # In debug mode show visualization of detection algorithm\n if settings.SHOW_IMAGE:\n\n # Add alpha channel\n debug_img = cv2.cvtColor(debug_img, cv2.COLOR_BGR2BGRA)\n if barcode_images is not None:\n barcode_images = cv2.cvtColor(barcode_images, cv2.COLOR_BGR2BGRA)\n\n # Overlay error mask\n # Pixel-wise difference between ground truth and detected barcodes\n if ground_truth_mask is not None:\n error_img = np.zeros(debug_img.shape, np.uint8)\n error_img[r & u] = np.array([0, 0, 0, 0], dtype=np.uint8)\n error_img[np.logical_xor(r, u)] = np.array(\n [0, 0, 255, 1], dtype=np.uint8\n )\n debug_img = cv2.addWeighted(debug_img, 1, error_img, 0.5, 0)\n\n # Append barcode pictures to the right\n debug_img = img_concat(debug_img, barcode_images, axis=1)\n\n # Show visualization\n cv2.namedWindow(\"img\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"img\", debug_img)\n cv2.waitKey(0)\n\n # Calculate final stats\n end = timer()\n accuracy = np.array(stats).mean()\n successes = np.where(np.array(stats) > 0.5)[0]\n logger.info(\n \"Final stats\",\n accuracy=accuracy,\n detection_rate=float(len(successes)) / len(stats),\n fps=len(stats) / (end - start),\n )",
"def decode_image(file_location=\"images/encoded_sample.png\"):\n encoded_image = Image.open(file_location)\n red_channel = encoded_image.split()[0]\n\n\n x_size = encoded_image.size[0]\n y_size = encoded_image.size[1]\n\n\n decoded_image = Image.new(\"RGB\", encoded_image.size)\n pixels = decoded_image.load()\n for x in range(x_size):\n for y in range(y_size):\n red_pixel = red_channel.getpixel((x,y))\n binary = bin(red_pixel)\n\n lsb = int(binary[-1])\n if(lsb == 0):\n pixels[x,y] = (0,0,0)\n elif(lsb == 1):\n pixels[x,y] = (255,255,255)\n\n pass\n decoded_image.save(\"images/decoded_image.png\")",
"def ExtractIconReps(icon_file_name):\n with open(icon_file_name, \"r\") as icon_file:\n icon_file_contents = icon_file.readlines()\n\n current_icon_size = REFERENCE_SIZE_DIP\n icon_sizes = []\n current_icon_representation = []\n icon_representations = {}\n for line in icon_file_contents:\n # Strip comments and empty lines.\n line = line.partition(CPP_COMMENT_DELIMITER)[0].strip()\n if not line:\n continue\n # Retrieve sizes specified by CANVAS_DIMENSIONS to ensure icons are added in\n # sorted order by size descending.\n if line.startswith(CANVAS_DIMENSIONS):\n sizes = re.findall(r\"\\d+\", line)\n if len(sizes) != 1:\n Error(\"Malformed {} line in {} - it should specify exactly one size.\"\n .format(CANVAS_DIMENSIONS, icon_file_name))\n icon_sizes.append(int(sizes[0]))\n\n # All icons except the first / default icon must start with\n # \"CANVAS_DIMENSIONS\", so rely on it here as a icon delimiter.\n if current_icon_representation:\n icon_representations = AddIconToDictionary(\n icon_file_name, current_icon_representation, current_icon_size,\n icon_representations)\n current_icon_representation = []\n current_icon_size = icon_sizes[-1]\n\n current_icon_representation.append(line)\n if current_icon_representation:\n icon_representations = AddIconToDictionary(\n icon_file_name, current_icon_representation, current_icon_size,\n icon_representations)\n\n if not icon_representations:\n Error(\"Didn't find any icons in {}.\".format(icon_file_name))\n\n if len(icon_representations) != len(icon_sizes):\n icon_sizes.insert(0, REFERENCE_SIZE_DIP)\n if sorted(icon_sizes, reverse=True) != icon_sizes:\n Error(\"The icons in {} should be sorted in descending order of size.\"\n .format(icon_file_name))\n return icon_representations",
"def convert_image(img=None, image_reducer=10, fontSize=10, spacing=1.1, maxsize=None,\n chars=\" .*:+%S0#@\", logs=False, threads=4, progress_tracker=None):\n \n try:\n if logs:\n print (\"Converting image...\")\n start_time = time.time()\n rows = len(img)\n cols = len(img[0])\n\n # reducer takes image_reducer percentage, and will skip nth pixels when converting\n reducer = int(100 / image_reducer)\n # set up image scaling based on font size and line spacing\n scale = fontSize * 0.8 / reducer * spacing\n # create new image with black bacground (because white text on black looks cooler)\n output_img = Image.new(\"L\", (int(cols * scale), int(rows * scale)), color=0)\n draw = ImageDraw.Draw(output_img)\n # load ttf font\n font = ImageFont.truetype(\"./NotoMono-Regular.ttf\", fontSize, encoding=\"unic\")\n\n # defines the subsets of pixel intensities\n # Can vary depending on max pixel intensity or length of char set\n div = np.amax(img) / (len(chars) - 1)\n\n # will be used to track our overall conversion progress\n if progress_tracker is None:\n progress_tracker = Value(\"f\", 0, lock=True)\n progress_step = 100 / (rows / reducer * 2)\n\n final_results = []\n def convert_rows(start, end, progress_tracker):\n \"\"\"Small function that converts a subset of rows into characters. Used in multithreading\n Does not draw to image yet. Just calculates which rows/cols have which chars\n\n Creates the final array of characters like so\n - col = (colNumber, char)\n - col_results = [col, ...]\n - row = (rowNumber, col_results)\n - results = [row, ....]\n - final_results = [results, ...] (length = number of threads)\n - final_results = [\n [\n (rowNumber, [\n (colNumber, char),\n (colNumber, char),\n (colNumber, char),\n ...\n ]),\n (rowNumber, [\n (colNumber, char),\n (colNumber, char),\n (colNumber, char),\n ...\n ]),\n ...\n ],\n ...\n ]\n \"\"\"\n rows = end - start\n results = []\n process_start = time.process_time()\n for row in range(start, end, reducer):\n col_results = []\n currentRow = row * scale\n for col in range(0, cols, reducer):\n val = int(img[row, col] / div)\n col_results.append((col * scale, chars[val]))\n results.append((currentRow, col_results))\n with progress_tracker.get_lock():\n progress_tracker.value += progress_step\n if logs : print (\"Progress: %.4f%%\" % progress_tracker.value, end=\"\\r\")\n final_results.append(results)\n\n # split up jobs with multithreading\n batches = threads\n rows_per_batch = int(rows / batches)\n threads = []\n for batch in range(batches):\n starting = rows_per_batch * batch\n convert_thread = threading.Thread(target=convert_rows, args=(\n starting,\n starting + rows_per_batch,\n progress_tracker\n ))\n convert_thread.start()\n threads.append(convert_thread)\n for t in threads:\n t.join()\n \n # after we converted, draw onto image (single thread)\n for r in range(1, len(final_results) + 1):\n result = final_results[r - 1]\n for row in range(len(result)):\n currentRow = result[row][0]\n cols = result[row][1]\n for col in cols:\n currentCol = col[0]\n val = col[1]\n draw.text((currentCol, currentRow), val, 255, font=font)\n with progress_tracker.get_lock():\n progress_tracker.value += progress_step\n if logs : print (\"Progress: %.4f%%\" % progress_tracker.value, end=\"\\r\")\n\n # set max image\n if (maxsize is not None):\n output_img.thumbnail(maxsize)\n\n # when we are done, there might be some rounding errors when converting some stuff to integers, thus it doesn't appear to be done\n # So we just simply set it to 100\n with 
progress_tracker.get_lock():\n progress_tracker.value = 100\n \n if logs:\n print (\"Progress: %.4f%%\" % progress_tracker.value)\n print (\"Time took: %.4f secs\" % (time.time() - start_time))\n\n return output_img\n except Exception as e:\n # don't know what exceptions may pop up\n print (\"\")\n print (\"Uh oh image converting went wrong!\")\n print (e)\n exit(0)",
"def file_parser(file_name):\n h = 480\n w = 640\n out = []\n with open(file_name, 'r') as f:\n line_num = 1\n for line in f:\n if line_num < 17:\n # Read to where data starts\n line_num += 1\n continue\n elif line_num > 74:\n break\n # print(list(map(int, line.strip().split(\" \"))))\n vals = line.split()\n # print(list(\"\".join(line)))\n # print(line.split())\n assert(float(vals[2]) < 640)\n assert(float(vals[3]) < 480)\n point = [float(vals[2]) * w, float(vals[3]) * h]\n # print(point)\n out.append(point)\n line_num += 1\n\n out.append([0,0])\n out.append([w-1, 0])\n out.append([0, h-1])\n out.append([w-1, h-2])\n return out",
"def read_tiles_from_file(gfxfile, unfilled, fsplit=False):\n\n filled = []\n for tile in unfilled:\n if tile.address.upper() != 'BLANK':\n gfxfile.seek(convert_mame_addr(tile.address, tile.dimensions, fsplit))\n if tile.dimensions == 8:\n read_data = gfxfile.read(32)\n if tile.dimensions == 16:\n read_data = gfxfile.read(128)\n filled.append(Tile.from_packed_bytes(tile.address, read_data, tile.dimensions))\n\n return filled",
"def main():\n input_file = sys.argv[1]\n target_width = int(sys.argv[2]) * 2\n\n to_write = \"\"\n \n print(\"Processing: %s\" % input_file)\n\n with open(input_file,\"r\") as fh:\n for line in fh.readlines():\n slices = line[:-1]\n \n endian_buf = []\n\n while(len(slices) > 0):\n k = slices[0:target_width]\n endian_buf.insert(0,k+\"\\n\")\n slices = slices[target_width:]\n\n for b in endian_buf:\n to_write += b\n\n with open(input_file,\"w\") as fh:\n fh.write(to_write)",
"def image_generator_not_random(list_of_files, crop_size=320, scale=1):\n while True:\n text_region = []\n for jpgname in list_of_files:\n print jpgname\n # jpgname = np.random.choice(list_of_files)\n img = cv2.imread(jpgname)\n pattern = re.compile('jpg')\n txtname = pattern.sub('txt', jpgname)\n if not os.path.isfile(txtname):\n continue\n cropped_image = img\n with open(txtname, 'r') as f:\n for line in f:\n line_split = line.strip().split(',')\n print line_split\n # clockwise\n (x1, y1, x2, y2) = line_split[0:4]\n (x3, y3, x4, y4) = line_split[4:8]\n text_region.append([string.atof(x1), string.atof(y1), string.atof(x2), string.atof(y2),\n string.atof(x3), string.atof(y3), string.atof(x4), string.atof(y4)])\n if cropped_image is None or text_region is None or \\\n cropped_image.shape[0] != crop_size or cropped_image.shape[1] != crop_size:\n continue\n yield [scale * cropped_image, text_region]",
"def find_emoji_partial_multiFiles(self,bound_tuple):\n count_2_save=self.count_to_save\n save_period=count_2_save\n start=bound_tuple[0]\n limit=bound_tuple[1]\n emoji_hdf5_Info_File_address = '{}/info_{}_to_{}.hdf'.format(self.result_dir, start+1, start+limit)\n emoji_hdf5_Mat_File_address = '{}/matrix_{}_to_{}.hdf'.format(self.result_dir, start+1, start+limit)\n trace_working_file = '{}/taceWorking_{}_to_{}.txt'.format(self.result_dir, start+1, start+limit)\n \n my_col=self.get_collection()\n part_DB=my_col.find().skip(start).limit(limit)\n \n emojiList=self.emoji_list\n adjMat = np.zeros((len(emojiList), len(emojiList)), dtype = int) # The matrix containing the edges\n emojiCount=np.zeros((len(emojiList)), dtype = int) # The number of emoji in the tweet dataset\n heap_mat = np.zeros((len(emojiList), len(emojiList)), dtype = int) # The matrix containing the edges\n last_emoji_netIndex=0\n df_emoji_info = pd.DataFrame()\n df_emoji_heap = pd.DataFrame()\n count_tweet=0\n count_tweet_emoji=0\n count_total_seen_emoji=0\n count_new_emoji=0\n ####------------------------------------------------------######\n ####------------------------------------------------------######\n #### This is the part that the emoji extractor works.\n #### It reads each tweet and matches teh emoji unicodes.\n #### If the emoji unicode is in the text, it will be appended to the \"mentionedTogether\" list.\n print 'Start to extract emojis.....'\n for mytweet in part_DB:\n mentionedTogether=[] ## It stores the emojis detected from the current tweet (i.e. mytweet).\n mentionedTogether_index_in_Net=[] ## It stores the index of emojis. The indeices are defined based on the emojiList.\n mentionedTogether_position_in_Text=[] ## It stores the posision of emoji in the text for future work.\n count_tweet+=1\n if 'text' in mytweet:\n #count_tweet+=1\n for emoji in emojiList:\n emoji_str=emoji.replace('\\n','')\n match_all=re.finditer(emoji_str.decode('unicode-escape'),mytweet['text'])\n for match in match_all:\n count_total_seen_emoji+=1\n mentionedTogether.append(emoji)\n mentionedTogether_index_in_Net.append(emojiList.index(emoji))\n mentionedTogether_position_in_Text.append(int(match.start()))\n emojiCount[emojiList.index(emoji)]+=1\n\n \n if len(mentionedTogether)>0:\n ## Yoiu can uncomment the followings to see the tweets detected:\n #print 'tweet #', count_tweet, ': ', mytweet['text']\n #print mentionedTogether\n #print '-----------------------------------------------------'\n ##\n count_tweet_emoji+=1\n emoji_dict=emojiFunction.create_Emoji_info_Dictionary(mytweet,mentionedTogether, mentionedTogether_index_in_Net, \n mentionedTogether_position_in_Text)## creating the dictionary of info\n df_emoji_info = df_emoji_info.append(emoji_dict, ignore_index=True)## updating dataframe for info by emoji_info dictionary\n emoji_heap_dict=emojiFunction.create_Emoji_heap_Dictionary(count_tweet, count_tweet_emoji, count_total_seen_emoji,\n count_new_emoji, mytweet['lang'])## creating the dictionary for heap\n df_emoji_heap=df_emoji_heap.append(emoji_heap_dict, ignore_index=True)## updating dataframe for heap by heap dictionary\n \n if (len(mentionedTogether)>1):####### 2 Mentioned - If they are mentioned together they should be in this list\n #print count_tweet,': ',mentionedTogether_index_in_Net, '(NET) is/are mentioned in: ', mytweet['text']\n #print (mentionedTogether_position_in_Text, ' TEXT is/are mentioned in: ', mytweet['text'])\n adjMat=emojiFunction.update_adj_matrix(adjMat, mentionedTogether_index_in_Net, 
mentionedTogether_position_in_Text)\n if self.concat_tweet and count_tweet_emoji>1:\n mentionedTogether_index_in_Net.insert(0,last_emoji_netIndex)\n heap_mat=emojiFunction.update_heap_mat(heap_mat, mentionedTogether_index_in_Net)\n if len(mentionedTogether)>0:\n last_emoji_netIndex=mentionedTogether_index_in_Net.pop()\n \n if count_tweet>count_2_save:\n count_2_save+=save_period\n print 'total number of tweets: ',count_tweet, ' saving files .............'\n #print (mentionedTogether_index_in_Net, '(NET) is/are mentioned in: ', mytweet['text'])\n df_emoji_count= pd.DataFrame(data=emojiCount, index=emojiList)\n \n df_emoji_adjMatrix=pd.DataFrame(data=adjMat, index=np.arange(len(emojiList)), columns=np.arange(len(emojiList)))\n df_emoji_heapMatrix=pd.DataFrame(data=heap_mat, index=np.arange(len(emojiList)), columns=np.arange(len(emojiList)))\n \n #df_emoji_adjMatrix=pd.DataFrame(data=adjMat, index=np.arange(len(emojiList)), columns=np.arange(len(emojiList))) ## create data frame for adjacency matrix\n #df_emoji_heapMatrix=pd.DataFrame(data=heap_mat, index=np.arange(len(emojiList)), columns=np.arange(len(emojiList))) ## create dataframe for the heap matrix\n print 'Saving df_info .........'\n self.write_on_hdf(emoji_hdf5_Info_File_address, hdf_struct=df_emoji_info, hdf_key='df_info', my_mode='a')\n print 'Saving df_heap ..........'\n self.write_on_hdf(emoji_hdf5_Info_File_address, hdf_struct=df_emoji_heap, hdf_key='df_heap', my_mode='a')\n del df_emoji_info\n df_emoji_info = pd.DataFrame()\n del df_emoji_heap\n df_emoji_heap = pd.DataFrame()\n \n print 'Saving df_count .........'\n self.write_on_hdf(emoji_hdf5_Mat_File_address, hdf_struct=df_emoji_count, hdf_key='df_count', my_mode='w')\n print 'Saving df_adjMat ..........'\n self.write_on_hdf(emoji_hdf5_Mat_File_address, hdf_struct=df_emoji_adjMatrix, hdf_key='df_adjMat', my_mode='a')\n print 'Saving df_heapMat ..........'\n self.write_on_hdf(emoji_hdf5_Mat_File_address, hdf_struct=df_emoji_heapMatrix, hdf_key='df_heapMat', my_mode='a') \n \n with open(trace_working_file, 'a') as the_file:\n temp='\\t'+str(count_tweet)+',\\t'+str(mytweet['created_at'])+',\\t'+str(mytweet['id'])\n the_file.write(temp)\n the_file.write('\\n')\n print 'After tweet #{}, the {}_to_{} part was saved'.format(count_tweet, start+1, start+limit)\n print 'Working on the rest........'\n if self.stop:\n break\n\n print 'Saving files of the part {}_to{} for the last time...............'.format(start+1, start+limit)\n df_emoji_count= pd.DataFrame(data=emojiCount, index=emojiList)\n df_emoji_adjMatrix=pd.DataFrame(data=adjMat, index=np.arange(len(emojiList)), columns=np.arange(len(emojiList)))\n df_emoji_heapMatrix=pd.DataFrame(data=heap_mat, index=np.arange(len(emojiList)), columns=np.arange(len(emojiList)))\n \n #df_emoji_info.to_hdf(emoji_hdf5_Mat_File_address, where='df_info, df_heap, df_count, df_adjMat, df_heapMat', mode='w')\n \n self.write_on_hdf(emoji_hdf5_Info_File_address, hdf_struct=df_emoji_info, hdf_key='df_info', my_mode='a')\n self.write_on_hdf(emoji_hdf5_Info_File_address, hdf_struct=df_emoji_heap, hdf_key='df_heap', my_mode='a')\n self.write_on_hdf(emoji_hdf5_Mat_File_address, hdf_struct=df_emoji_count, hdf_key='df_count', my_mode='w')\n self.write_on_hdf(emoji_hdf5_Mat_File_address, hdf_struct=df_emoji_adjMatrix, hdf_key='df_adjMat', my_mode='a')\n self.write_on_hdf(emoji_hdf5_Mat_File_address, hdf_struct=df_emoji_heapMatrix, hdf_key='df_heapMat', my_mode='a') \n\n with open(trace_working_file, 'a') as the_file:\n 
temp='\\t'+str(count_tweet)+',\\t'+str(mytweet['created_at'])+',\\t'+str(mytweet['id'])\n the_file.write(temp)\n the_file.write('\\n')\n print \"total emoji: \", count_total_seen_emoji\n # return {'df_emoji_info':df_emoji_info, 'df_emoji_heap':df_emoji_heap, 'df_emoji_count':df_emoji_count, 'df_emoji_adjMatrix':df_emoji_adjMatrix, 'df_emoji_heapMatrix':df_emoji_heapMatrix}",
"def _create_bitmaps(\n font, format, base,\n scan_unit_bytes=1, padding_bytes=1, bit_order='little',\n ):\n byte_big = base == be\n bit_big = bit_order[:1].lower() == 'b'\n bitmaps = (\n _g.as_bytes(\n # align rows on padding_bytes boundaries\n stride=ceildiv(_g.width, padding_bytes*8) * padding_bytes*8,\n byte_swap=0 if (bool(byte_big) == bool(bit_big)) else scan_unit_bytes,\n bit_order='big' if bit_big else 'little',\n )\n for _g in font.glyphs\n )\n # align full byte sequence on scan_unit boundaries\n bitmaps = tuple(\n _bits.ljust(ceildiv(len(_bits), scan_unit_bytes) * scan_unit_bytes)\n for _bits in bitmaps\n )\n offsets = tuple(accumulate((len(_b) for _b in bitmaps), initial=0))[:-1]\n offsets = (base.int32 * len(bitmaps))(*offsets)\n bitmap_data = b''.join(bitmaps)\n # bytes # shorts # ints #?\n # apparently we do need to calculate all 4\n bitmap_sizes = [\n sum(\n # align full byte sequence on scan_unit boundaries\n ceildiv(\n # align rows on padding_bytes boundaries\n _g.pixels.get_byte_size(stride=ceildiv(_g.width, 8*2**_p) * 8*(2**_p)),\n scan_unit_bytes\n ) * scan_unit_bytes\n for _g in font.glyphs\n )\n for _p in range(4)\n ]\n assert bitmap_sizes[format&3] == len(bitmap_data), f'{bitmap_sizes[format&3]} != {len(bitmap_data)}'\n bitmap_sizes = (base.int32 * 4)(*bitmap_sizes)\n table_bytes = (\n bytes(le.uint32(format))\n + bytes(base.int32(len(offsets)))\n + bytes(offsets)\n + bytes(bitmap_sizes)\n + bitmap_data\n )\n return table_bytes, format",
"def imagehits(file):\n totalhits = len(file)\n imghits = []\n for line in file:\n r = re.compile(r'\\bgif\\b | \\bjpg\\b | \\bpng\\b', flags=re.I | re.X)\n if r.findall(line[0]):\n imghits.append(line)\n print(\"The percentage of image hits is: \"\"{:.0%}\".format(len(imghits) / totalhits))",
"def cutting_characters(character, image_2cut):\n\n preparing = []\n m = len(character)\n image_2cut = image_2cut.copy()\n\n for n in character:\n\n # The information is extracted from the the tupla n in character list.\n # For more information about this coordinates check the Bounding Rectangle function resources\n ulc_X = n[0]\n ulc_Y = n[1]\n\n width = n[2]\n height = n[3]\n\n #There is asigned new name to the above information and is constructed the rectangle.\n start_x = int(ulc_X)\n start_y = int(ulc_Y)\n\n width_new = int(width)\n height_new = int(height)\n\n\n final_x = start_x + width_new\n final_y = start_y + height_new\n\n # A width and height outter value is placed that allow a prudential margin of the principal content.\n width_outer = 25\n height_outer = 45\n\n\n #Then the rectangle is constructed with these outter width and heigt and the x and y coordinate are displaced too.\n x_outer = int(ulc_X) - 4\n y_outer = int(ulc_Y) - 6\n\n outer_xf = x_outer + width_outer\n outer_yf = y_outer + height_outer\n\n # Both rectangles are cutted by image_2cut\n\n rec_char_outer = image_2cut[y_outer:outer_yf, x_outer:outer_xf]\n\n rec_char_inter = image_2cut[start_y:final_y, start_x: final_x]\n\n # Imperfections are corrected and filling with white color by filling_white\n\n prep = filling_white(rec_char_outer, rec_char_inter)\n\n prep, _= resizing(prep, prep, 15)\n\n preparing.append(prep)\n\n return preparing",
"def decode_image(file_location=\"images/encoded_sample.png\"):\n encoded_image = Image.open(file_location)\n\n x_size = encoded_image.size[0]\n y_size = encoded_image.size[1]\n\n decoded_image = Image.new(\"RGB\", encoded_image.size)\n pixels = decoded_image.load()\n\n for x in range(x_size):\n for y in range(y_size):\n if lsb_of_red_pixel(encoded_image, x, y):\n pixels[x, y] = (255,255,255)\n else:\n pixels[x, y] = (0, 0, 0)\n\n #pixels[x, y] = [(0,0,0) if lsb_of_pixel(red_channel, x, y) else (1,1,1)]\n\n decoded_image.save(\"images/decoded_image.png\")\n decoded_image.show()",
"def Read_MapGen(self, filename, stats = 0,AllLines=0):\n with open(filename,'rt') as file_:\n data = [s.strip() for s in file_]\n\n Shorelines = []\n segment = []\n for line in data:\n if line:\n if line == \"# -b\": #New segment beginning\n if segment: Shorelines.append(N.array(segment))\n segment = []\n else:\n segment.append([float(e) for e in line.split()])\n if segment: Shorelines.append(N.array(segment))\n\n if stats:\n NumSegments = len(Shorelines)\n NumPoints = 0\n for segment in Shorelines:\n NumPoints = NumPoints + len(segment)\n AvgPoints = NumPoints / NumSegments\n print(\"Number of Segments: \", NumSegments)\n print(\"Average Number of Points per segment: \", AvgPoints)\n if AllLines:\n Lines = []\n for segment in Shorelines:\n Lines.append(segment[0])\n for point in segment[1:-1]:\n Lines.append(point)\n Lines.append(point)\n Lines.append(segment[-1])\n return Lines\n else:\n return Shorelines"
] | [
"0.5643496",
"0.5535181",
"0.5525569",
"0.54742545",
"0.5472219",
"0.5442176",
"0.53953683",
"0.5384109",
"0.5324327",
"0.53202426",
"0.528826",
"0.5234474",
"0.5232815",
"0.5206607",
"0.52048206",
"0.5201688",
"0.5193406",
"0.51914054",
"0.51873463",
"0.5156003",
"0.51466596",
"0.51442295",
"0.5143602",
"0.51426363",
"0.5133369",
"0.5126433",
"0.5123415",
"0.512304",
"0.51104605",
"0.5069892"
] | 0.57994384 | 0 |
Filter an `items` list according to a list of `queries`. Values from `items` are kept if they match at least one query. The original `items` list is untouched, but the result list uses the same data (not a deep copy). If `attribute` is None, it is assumed that `items` is a list of strings to be filtered directly. If `attribute` is provided, it is assumed that `items` is a list of dicts, and `queries` will be tested against the value under the `attribute` key in each dict. `attribute` can point into a nested dictionary; individual keys of the nested key path are separated by the '.' character. | def stringfilter(items, queries, attribute=None):
result = []
if attribute is not None:
key_path = attribute.split('.')
else:
key_path = None
for item in items:
if key_path is not None:
string = _get_nested_value(item, key_path)
if not isinstance(string, str):
raise errors.AnsibleFilterError(
f"stringfilter: value under '{attribute}' in '{pformat(item)}' is not string: {pformat(string)}"
)
else:
if not isinstance(item, str):
raise errors.AnsibleFilterError(f"stringfilter: list item is not string: {pformat(item)}")
string = item
        for query in queries:
            if isinstance(query, str):
                if query == string:
                    result.append(item)
                    break
            elif isinstance(query, dict) and query.get('regex'):
                if re.search(query['regex'], string):
                    result.append(item)
                    break
            else:
                raise errors.AnsibleFilterError(
                    f"stringfilter: unrecognized query: {pformat(query)}"
                )
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def itemFilterAttr(*args, byName: Union[AnyStr, bool]=\"\", byNameString: Union[AnyStr,\n List[AnyStr], bool]=\"\", byScript: Union[AnyStr, bool]=\"\", classification:\n Union[AnyStr, bool]=\"\", dynamic: bool=True, exists: bool=True, hasCurve:\n bool=True, hasDrivenKey: bool=True, hasExpression: bool=True, hidden:\n bool=True, intersect: Union[List[AnyStr, AnyStr], bool]=None, keyable:\n bool=True, listBuiltInFilters: bool=True, listOtherFilters: bool=True,\n listUserFilters: bool=True, negate: bool=True, parent: AnyStr=\"\", published:\n bool=True, readable: bool=True, scaleRotateTranslate: bool=True,\n secondScript: Union[AnyStr, bool]=\"\", text: Union[AnyStr, bool]=\"\", union:\n Union[List[AnyStr, AnyStr], bool]=None, writable: bool=True, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass",
"def attribute_search(self, attribute, filters):\n for i in self.response_info['results']:\n if filters in i[attribute]:\n self.output.append(i)\n self.counter += 1",
"def dgfilter(*args, attribute: AnyStr=\"\", list: bool=True, logicalAnd: List[AnyStr,\n AnyStr]=None, logicalNot: AnyStr=\"\", logicalOr: List[AnyStr, AnyStr]=None, name:\n AnyStr=\"\", node: AnyStr=\"\", nodeType: AnyStr=\"\", plug: AnyStr=\"\",\n **kwargs)->AnyStr:\n pass",
"def itemFilter(*args, byBin: Union[AnyStr, List[AnyStr], bool]=\"\", byName: Union[AnyStr,\n bool]=\"\", byScript: Union[AnyStr, bool]=\"\", byType: Union[AnyStr, List[AnyStr],\n bool]=\"\", category: Union[AnyStr, List[AnyStr], bool]=\"\", classification:\n Union[AnyStr, bool]=\"\", clearByBin: bool=True, clearByType: bool=True,\n difference: Union[List[AnyStr, AnyStr], bool]=None, exists: bool=True,\n intersect: Union[List[AnyStr, AnyStr], bool]=None, listBuiltInFilters: bool=True,\n listOtherFilters: bool=True, listUserFilters: bool=True, negate: bool=True,\n parent: Union[AnyStr, bool]=\"\", pythonModule: Union[AnyStr, bool]=\"\",\n secondScript: Union[AnyStr, bool]=\"\", text: Union[AnyStr, bool]=\"\", union:\n Union[List[AnyStr, AnyStr], bool]=None, uniqueNodeNames: bool=True, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass",
"def query_filter_builder(cls, user_attribute: str, value: Any) -> List[Q]:\n attributes = re.compile(r\"Or|And|OR|AND\").split(user_attribute)\n query_builder = []\n for attr in attributes:\n attr = attr.strip().lower()\n cond = {f\"{attr}__icontains\": value}\n if user_attribute.split(attr)[0].lower().endswith(\"or\"):\n last_query = query_builder.pop()\n query_builder.append(Q(last_query, Q(**cond), join_type=\"OR\"))\n elif attr != \"\":\n query_builder = [*query_builder, Q(**cond)]\n return query_builder",
"def do_selectattrs(li, *ks, **kwargs):\n if 'result' in kwargs:\n cmp_val = kwargs['result']\n default = None if cmp_val is not None else False\n ret = filter(lambda d: do_get(d, *ks, default=default) == cmp_val, li)\n else:\n ret = filter(lambda d: do_contains(d, *ks), li)\n return ret",
"def unique(items, attribute=None):\n\n if attribute is not None:\n return list(\n collections.OrderedDict.fromkeys(\n [getattr(item, attribute) for item in items]))\n else:\n return list(collections.OrderedDict.fromkeys(items))",
"def query_generic(attribute, value_test_func):\n def new_value_test_func(val):\n try:\n return value_test_func(val)\n except:\n return False\n def test(row):\n try:\n return new_value_test_func(row[attribute])\n except KeyError:\n return False\n relational = []\n d = logbook.get_pub_logbook_dict()\n for k, v in d.iteritems():\n v['label'] = k\n relational.append(v)\n result =\\\n [row['runs']\n for row in relational\n if np.all(test(row))]\n if not result:\n return set()\n else:\n s = set(reduce(lambda x, y: x + y, filter(lambda t: t != (None,), result)))\n return set(filter(lambda x: x is not None, s))",
"def filter(\n self, items: Iterable[Product], spec: Specification\n ) -> Generator[Product, None, None]:\n return (item for item in items if spec.is_satisfied(item))",
"def lsThroughFilter(*args, item: Union[AnyStr, List[AnyStr]]=\"\", nodeArray: bool=True, reverse:\n bool=True, selection: bool=True, sort: AnyStr=\"\", **kwargs)->List[AnyStr]:\n pass",
"def attributeQuery(*args, affectsAppearance: bool=True, affectsWorldspace: bool=True,\n attributeType: bool=True, cachedInternally: bool=True, categories: bool=True,\n channelBox: bool=True, connectable: bool=True, enum: bool=True, exists:\n bool=True, hidden: bool=True, indeterminant: bool=True, indexMatters:\n bool=True, internal: bool=True, internalGet: bool=True, internalSet:\n bool=True, keyable: bool=True, listChildren: bool=True, listDefault:\n bool=True, listEnum: bool=True, listParent: bool=True, listSiblings:\n bool=True, longName: bool=True, maxExists: bool=True, maximum: bool=True,\n message: bool=True, minExists: bool=True, minimum: bool=True, multi:\n bool=True, niceName: bool=True, node: name=None, numberOfChildren: bool=True,\n range: bool=True, rangeExists: bool=True, readable: bool=True, renderSource:\n bool=True, shortName: bool=True, softMax: bool=True, softMaxExists:\n bool=True, softMin: bool=True, softMinExists: bool=True, softRange:\n bool=True, softRangeExists: bool=True, storable: bool=True, type: AnyStr=\"\",\n typeExact: AnyStr=\"\", usedAsColor: bool=True, usedAsFilename: bool=True,\n usesMultiBuilder: bool=True, worldspace: bool=True, writable: bool=True,\n **kwargs)->List[float]:\n pass",
"def filter(self, **args ):\n query = TXLOG.select('*')\n for key, value in args.items():\n if '__' in key:\n key, op = key.split('__')\n else:\n op = 'eq'\n\n if not key in self.schema:\n raise BadArgument(\"Key %s not a valid argument\" % key )\n\n if not isinstance(value, basestring ):\n value = str(value)\n\n query = query.where({key:value}, self.operators[op])\n\n items = query.list()\n return items",
"def filter(\n self, items: Iterable[Any], spec: Specification\n ) -> Generator[Any, None, None]:",
"def array_filter(item, func):\n return filter(func, item)",
"def filter(data, query, use_nested_keys=True,\n key_separator='.', case_sensitive=True,\n raise_keyerror=False):\n ast = parser.parse(query)\n dq = DataQueryVisitor(\n ast, use_nested_keys=use_nested_keys,\n key_separator=key_separator, case_sensitive=case_sensitive,\n raise_keyerror=raise_keyerror)\n for item in data:\n if not dq.evaluate(item):\n continue\n yield item",
"def misc_search(self, kwargs):\n attr = kwargs[\"attributes\"]\n filter_ = kwargs[\"filter\"]\n\n try:\n if attr and attr != \"ALL\":\n results = self.engine.query(filter_, attr.split(\",\"))\n else:\n results = self.engine.query(filter_)\n self.display(results, True)\n except PyAsn1UnicodeDecodeError as e:\n error(f\"Decoding error with the filter: {e}\")\n except Exception as e:\n if e.__str__() == \"\":\n error(\"An exception occurred with the provided filter\")\n else:\n error(e)",
"def find_in_list_via_attribute(self, objlist, attribute, equals):\n\n for x in objlist:\n if hasattr(x, attribute):\n if getattr(x, attribute) == equals:\n return x\n\n return None",
"def attribute_filter_factory(attributes):\n if callable(attributes):\n return attributes\n\n if isinstance(attributes, dict):\n def _attr_filter(tag, attr, value):\n if tag in attributes:\n attr_val = attributes[tag]\n if callable(attr_val):\n return attr_val(tag, attr, value)\n\n if attr in attr_val:\n return True\n\n if '*' in attributes:\n attr_val = attributes['*']\n if callable(attr_val):\n return attr_val(tag, attr, value)\n\n return attr in attr_val\n\n return False\n\n return _attr_filter\n\n if isinstance(attributes, list):\n def _attr_filter(tag, attr, value):\n return attr in attributes\n\n return _attr_filter\n\n raise ValueError('attributes needs to be a callable, a list or a dict')",
"def filter(self, *args):\n from .elements import EqualClauseElement\n for a in args:\n for c in self._criterion:\n if isinstance(c, EqualClauseElement) and isinstance(a, EqualClauseElement) and \\\n c.attribute.node == a.attribute.node and c.attribute.label == a.attribute.label:\n c.value = a.value\n break\n else:\n self._criterion.append(a)\n return self",
"def filter_items(self, context, data, propname):\n\n helper_funcs = bpy.types.UI_UL_list\n\n items = getattr(data, propname)\n\n # Filtering by name\n filtered = helper_funcs.filter_items_by_name(\n self.filter_name, self.bitflag_filter_item, items, \"name\", reverse=False\n )\n\n if not filtered:\n filtered = [self.bitflag_filter_item] * len(items)\n\n d = context.active_object.data\n anim_ret = context.active_object.anim_ret\n\n for index, bone in enumerate(items):\n excluded = False\n found = False\n\n anim_ret_bone = bone.anim_ret_bone\n\n if not anim_ret_bone:\n excluded = True\n if not excluded and anim_ret_bone.source_bone_name == \"\":\n excluded = True\n if bone.name.startswith(ObjectAnimRet.prefix):\n excluded = True\n if not excluded and not anim_ret.show_def and \"DEF-\" in bone.name:\n excluded = True\n if not excluded and not anim_ret.show_mch and \"MCH-\" in bone.name:\n excluded = True\n if not excluded and not anim_ret.show_org and \"ORG-\" in bone.name:\n excluded = True\n if not excluded and not anim_ret.show_fk and \"fk\" in bone.name.lower():\n excluded = True\n if not excluded and not anim_ret.show_ik and \"ik\" in bone.name.lower():\n excluded = True\n if not excluded and anim_ret.filter_layers:\n data_bone = d.bones[bone.name]\n for layer_id, layer in enumerate(d.layers):\n if layer:\n if data_bone.layers[layer_id]:\n found = True\n break\n\n if excluded or not found:\n filtered[index] &= ~self.bitflag_filter_item\n\n ordered = []\n\n # Reorder by name or average weight.\n if self.use_filter_sort_alpha:\n sort = [(idx, getattr(it, \"name\", \"\")) for idx, it in enumerate(items)]\n\n ordered = helper_funcs.sort_items_helper(sort, lambda e: e[1].lower())\n\n return filtered, ordered",
"def attr_exists(attribute, user_input):\n\n response = table.scan(\n FilterExpression = Attr(attribute).eq(user_input)\n )\n\n if response['Items']:\n return True\n\n return False",
"def filter_items(self, filter_data: Dict[str, str] = None) -> List[WalletItem]:\n filtered_items = self.items\n for key, value in filter_data.items():\n if key == \"category\":\n filtered_items = [item for item in filtered_items\n if re.search(value, item.category, re.IGNORECASE)]\n if key == \"account\":\n filtered_items = [item for item in filtered_items\n if re.search(value, item.account, re.IGNORECASE)]\n if key == \"notes\" in filter_data:\n filtered_items = [item for item in filtered_items\n if re.search(value, item.notes, re.IGNORECASE)]\n if key == \"amt_min\":\n value = float(value)\n filtered_items = [item for item in filtered_items if item.amount >= value]\n if key == \"amt_max\":\n value = float(value)\n filtered_items = [item for item in filtered_items if item.amount <= value]\n if key == \"begin_date\":\n try:\n begin_date = datetime.strptime(value, '%d/%m/%Y')\n filtered_items = [item for item in filtered_items if begin_date <= item.date]\n except ValueError as ex:\n print(ex)\n exit(1)\n if key == \"end_date\":\n try:\n end_date = datetime.strptime(value, '%d/%m/%Y')\n filtered_items = [item for item in filtered_items if item.date <= end_date]\n except ValueError as ex:\n print(ex)\n exit(1)\n return filtered_items",
"def queryByAttributePair(table, attribute1, attribute2, values):\n if len(values) > MAX_PAIRS:\n values1 = values[:MAX_PAIRS]\n values2 = values[MAX_PAIRS:]\n records1 = queryByAttributePair(table, attribute1, attribute2, values1)\n records2 = queryByAttributePair(table, attribute1, attribute2, values2)\n records1.extend(records2)\n return records1\n\n condition = u\"1=0\"\n for val1, val2 in values:\n condition += \" OR %s = '%s' AND %s = '%s'\" % (attribute1, sqlapi.quote(val1), attribute2, sqlapi.quote(val2))\n records = sqlapi.RecordSet2(table, condition, access_persno=auth.persno)\n return [records]",
"def get_objs_with_attr_match(self, attribute_name, attribute_value, location=None, exact=False): \n from src.objects.models import ObjAttribute\n lstring = \"\"\n if location:\n lstring = \", db_obj__db_location=location\" \n attrs = eval(\"ObjAttribute.objects.filter(db_key=attribute_name%s)\" % lstring)\n if exact: \n return [attr.obj for attr in attrs if attribute_value == attr.value]\n else:\n return [attr.obj for attr in attrs if utils.to_unicode(attribute_value) in str(attr.value)]",
"def queryByAttributeIn(table, attribute, values, access=None, addtl=\"\"):\n if len(values) > MAX_IN_ELEMENTS:\n values1 = values[:MAX_IN_ELEMENTS]\n values2 = values[MAX_IN_ELEMENTS:]\n records1 = queryByAttributeIn(table, attribute, values1, access, addtl)\n records2 = queryByAttributeIn(table, attribute, values2, access, addtl)\n records1.extend(records2)\n return records1\n\n valueString = u\",\".join(u\"'\" + sqlapi.quote(val) + u\"'\" for val in values)\n condition = u\"%s IN (%s)\" % (attribute, valueString)\n records = sqlapi.RecordSet2(table, condition,\n access=access, access_persno=auth.persno,\n addtl=addtl)\n return [records]",
"def filter_queryset(self, queryset):\n\n queryset = super().filter_queryset(queryset)\n\n items = self.get_items()\n\n if len(items) > 0:\n \"\"\"At this point, we are basically forced to be inefficient:\n\n We need to compare the 'filters' string of each report template,\n and see if it matches against each of the requested items.\n\n In practice, this is not too bad.\n \"\"\"\n\n valid_report_ids = set()\n\n for report in queryset.all():\n matches = True\n\n try:\n filters = InvenTree.helpers.validateFilterString(report.filters)\n except ValidationError:\n continue\n\n for item in items:\n item_query = self.ITEM_MODEL.objects.filter(pk=item.pk)\n\n try:\n if not item_query.filter(**filters).exists():\n matches = False\n break\n except FieldError:\n matches = False\n break\n\n # Matched all items\n if matches:\n valid_report_ids.add(report.pk)\n\n # Reduce queryset to only valid matches\n queryset = queryset.filter(pk__in=list(valid_report_ids))\n\n return queryset",
"def apply_filters(filters, items):\n return scom.apply_filters(filters, items)",
"def _filter_data(myList, queryNode):\n parentCheck = False\n #checks every child\n deleteList = []\n\n #checks all items\n for index, item in enumerate(myList):\n\n #reset eventBool on each event\n checkBool = False\n #if any children of current item pass the filter\n\n if(_filter_data(item['nest'], queryNode)):\n checkBool = True\n else:\n #checks occurrence against all query requirements\n checkBool = queryNode.eval_logic(item['info'])\n \"\"\"\n #if any children pass the parent also passes the filter\n if(checkBool):\n parentCheck = True\n #delete item from current list if it or no children matched query\n else:\n deleteList.append(index)\n \"\"\"\n\n \"\"\"\n if(checkBool):\n parentCheck = True\n else:\n deleteList.append(index)\n \"\"\"\n\n if(checkBool):\n parentCheck = True\n else:\n deleteList.append(index)\n #deletes all non matching items starting with the highest index\n for i in reversed(deleteList):\n del myList[i]\n\n return parentCheck",
"def recordAttr(*args, attribute: Union[AnyStr, List[AnyStr]]=\"\", delete: bool=True, q=True,\n query=True, **kwargs)->Union[None, Any]:\n pass",
"def amh_attr_filter_query(self):\n \n attr_filter_query = \"\"\"\n WITH {final_cte_name} as (\n -- Pull list of devices that were active (has any row; don't need TVT >0) in the past 4 weeks\n SELECT DISTINCT device_id\n FROM tubidw.all_metric_hourly\n WHERE DATE_TRUNC('week',hs) >= dateadd('week',-4,DATE_TRUNC('week',GETDATE()))\n AND DATE_TRUNC('week',hs) < DATE_TRUNC('week',GETDATE())\n {attr_filter} -- attribute filters dynamically populate here\n -- TODO: currently can't get a metric/attribute combo filter, like \"devices that watched at least 50% of a specific content_id\"\n )\n \"\"\"\n return attr_filter_query"
] | [
"0.57204485",
"0.5617756",
"0.548573",
"0.54075944",
"0.5388926",
"0.52190524",
"0.5199355",
"0.5139567",
"0.50037485",
"0.48549065",
"0.4841358",
"0.47993222",
"0.47757334",
"0.47549915",
"0.47255918",
"0.47191286",
"0.47072586",
"0.4689648",
"0.46839404",
"0.46671137",
"0.46435675",
"0.46251217",
"0.4619057",
"0.46158174",
"0.4584684",
"0.45584577",
"0.45552963",
"0.45395467",
"0.45321274",
"0.45306358"
] | 0.68550235 | 0 |
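
For readability, here is a minimal usage sketch of the `stringfilter` document above. It is illustrative only: the host dicts, FQDN values, and queries are invented, and it assumes `stringfilter` (and its `_get_nested_value` helper) are importable from the filter plugin shown in this row.

```python
# Hypothetical usage of stringfilter(); the sample data below are invented.
hosts = [
    {"meta": {"fqdn": "web01.example.com"}},
    {"meta": {"fqdn": "db01.example.com"}},
]
queries = ["db01.example.com", {"regex": r"^web\d+\."}]

# Dict items: each query is tested against the value under the dotted key path "meta.fqdn".
matched = stringfilter(hosts, queries, attribute="meta.fqdn")
# Both hosts are kept: one by the exact-match query, the other by the regex query.

# Plain string items: no attribute needed.
assert stringfilter(["alpha", "beta"], ["alpha"]) == ["alpha"]
```
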
Get the value under the `key_path` key path in the `dct` dictionary. `key_path` is a list of keys to be traversed into a potentially nested `dct` dictionary. | def _get_nested_value(dct, key_path):
key = key_path[0]
if not isinstance(dct, dict):
raise errors.AnsibleFilterError(
f"stringfilter: looking for key '{key}' "
f"but list item is not dict: {pformat(dct)}"
)
if key not in dct:
raise errors.AnsibleFilterError(
f"stringfilter: key is '{key}' "
f"but it was not found in dict: {pformat(dct)}"
)
value = dct[key]
if len(key_path) > 1:
return _get_nested_value(value, key_path[1:])
else:
return value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_by_path(data: Dict[str, T], path: Sequence[str]) -> T:\n return reduce(operator.getitem, path, data)",
"def _get_by_path(dic, keys):\n assert len(keys) > 0, \"Path key can not be an empty list.\"\n\n d = dic\n for key in keys[:-1]:\n if isinstance(key, int) or key in d:\n d = d[key]\n else:\n return None\n if keys[-1] in d or (isinstance(d, list) and keys[-1] < len(d)):\n return d[keys[-1]]\n\n return None",
"def get_by_dot_path(dictionary: Dict, key_path: str) -> Any:\n return get_by_list_of_keys(dictionary, key_path.split(\".\"))",
"def _get_by_path(tree, keys):\n return reduce(getitem, keys, tree)",
"def _get_by_path(tree, keys):\n return reduce(getitem, keys, tree)",
"async def _iterate_dict(self, d, key_path, default):\n key, _iter = KeyPathsIter([key_path]).__next__()\n while _iter is not None:\n if key not in d:\n return default\n d = d[key]\n key, _iter = _iter.__next__()\n if key not in d:\n return default\n return d[key]",
"def get_nested_dict_entry_from_namespace_path(d, namespace_path):\n # Try to split off the namespace path into the first key and the rest of the keys\n split_namespace_path = namespace_path.split('.', 1)\n if len(split_namespace_path) == 1:\n # Only one key for a non-nested dict; return the result\n return d[split_namespace_path[0]]\n else:\n cur_key, path_remainder = split_namespace_path\n return get_nested_dict_entry_from_namespace_path(d[cur_key], path_remainder)",
"def get_value(dct, key):\n return dct.get(key)",
"def get(obj, path):\n right = path\n cur = obj\n while right:\n left, right = partition(right)\n if isinstance(cur, dict):\n cur = cur.get(left)\n elif isinstance(cur, (list, tuple)):\n left = int(left)\n cur = cur[left] if left < len(cur) else None\n return cur",
"def get_deep(tree, path):\n for key in path[:-1]:\n tree = tree.get(key, {})\n return tree.get(path[-1])",
"def _resolve_path(d, path):\n accum_value = d\n for node_key in path:\n accum_value = accum_value[node_key]\n return accum_value",
"def rget(dict_object, path_list):\n try:\n return reduce(lambda d, k: d[k], path_list, dict_object)\n except KeyError:\n return dict_object",
"def _get(d, *paths):\n if d is None:\n return None\n\n if paths is None:\n return None\n\n for path in paths:\n if path is None:\n return None\n\n path = path.split('.')\n for key in path:\n try:\n i = int(key)\n if i in d:\n return d[i]\n else:\n return None\n\n except BaseException:\n d = d.get(key, None)\n if d is None:\n return None\n\n return d",
"def get_element(d, path): # type: (Dict, Tuple) -> Any\n if len(path) == 0:\n raise ValueError('Path length cant be 0')\n elif len(path) == 1:\n return d.get(path[0])\n elif d.get(path[0]):\n return DictUtil.get_element(d[path[0]], path[1:])\n return None",
"def get_dictvalue_from_xpath(full_dict, path_string):\n\n\tkey_value = full_dict\n\n\tfor i in path_string.split('/')[1:] :\n\t\tkey_value = key_value[i]\n\n\treturn key_value",
"def dict_path(d, path, default=None):\n\n keys = path.split('.')\n rv = d\n\n try:\n for key in keys:\n rv = rv.get(key)\n except AttributeError:\n return default\n\n return rv",
"def test_utils_get_dict_value_from_path_should_return_given_value(path, value):\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n assert ralph_utils.get_dict_value_from_path(dictionary, path) == value",
"def access_path(data: dict or any, path: list[str]) -> any:\n if path:\n first = path[0]\n rest = path[1:]\n return access_path(data[first], rest)\n return data",
"def nested_get(\n d: t.Dict, *path: t.Tuple[str, str], raise_on_missing: bool = True\n) -> t.Optional[t.Any]:\n for name, key in path:\n d = d.get(key) # type: ignore\n if d is None:\n if raise_on_missing:\n name = \"table\" if name == \"this\" else name\n raise ValueError(f\"Unknown {name}: {key}\")\n return None\n\n return d",
"def nested_get(dictionary: dict, keys: list):\n nested_dict = dictionary\n for key in keys[:-1]:\n nested_dict = nested_dict[key]\n return nested_dict.get(keys[-1])",
"def GetKeyByPath(self, key_path):",
"def get_by_list_of_keys(dictionary: Dict, key_path: List[Any]) -> Dict:\n if len(key_path) == 1:\n return dictionary[key_path[0]]\n else:\n return get_by_list_of_keys(dictionary[key_path[0]], key_path[1:])",
"def get_by_path(root, path):\n \n sub_data = root\n for key in path:\n sub_data = sub_data[key]\n \n return sub_data",
"def dict_path(my_dict, path=None):\n if path is None:\n path = \"\"\n for k, v in my_dict.items():\n newpath = path + (\".\" if path != \"\" else \"\") + k\n if isinstance(v, dict):\n for u in dict_path(v, newpath):\n yield u\n else:\n yield newpath, v",
"def GetValueByPath(self, path_segments):\n key = self.root_key\n for path_segment in path_segments:\n if isinstance(key, dict):\n try:\n key = key[path_segment]\n except KeyError:\n return None\n\n elif isinstance(key, list):\n try:\n list_index = int(path_segment, 10)\n except ValueError:\n return None\n\n key = key[list_index]\n\n else:\n return None\n\n if not key:\n return None\n\n return key",
"def get_from_dict(d, k):\n try:\n return reduce(dict.get, k, d)\n except TypeError:\n # Value not found.\n return None",
"def _get_value(match_entry: Dict, path0: str) -> any:\n if path0 is None:\n current_el = match_entry\n else:\n path = path0.split('/')\n current_el = match_entry\n for p in path:\n if current_el is None:\n break\n current_el = current_el.get(p)\n return current_el",
"def get_value(path):\r\n return sum([d[sq] for sq in path])",
"def get_safe(dict_instance, keypath, default=None):\n try:\n obj = dict_instance\n keylist = keypath if type(keypath) is list else keypath.split('.')\n for key in keylist:\n obj = obj[key]\n return obj\n except Exception, ex:\n return default",
"def do_get(d, *ks, **kwargs):\n try:\n res = reduce (lambda acc, k: acc[k], ks, d)\n except (KeyError, TypeError):\n if \"default\" in kwargs:\n return kwargs[\"default\"]\n else:\n t, v, tb = sys.exc_info()\n if t == KeyError:\n msg = \"nested keys {} not found in {}\".format(ks, d)\n else:\n msg = \"nesting of keys {} too is too deep for {}\".format(ks, d)\n raise KeyError, msg, tb\n else:\n return res"
] | [
"0.6952644",
"0.68554395",
"0.6719718",
"0.64265496",
"0.64265496",
"0.6403472",
"0.63947314",
"0.63453716",
"0.6322947",
"0.6304281",
"0.6285504",
"0.62721723",
"0.6161064",
"0.61594445",
"0.6127265",
"0.61137265",
"0.60990524",
"0.60921264",
"0.6087193",
"0.6030112",
"0.6025811",
"0.6013587",
"0.5989113",
"0.5973356",
"0.59648323",
"0.5904218",
"0.5890434",
"0.58565205",
"0.58529544",
"0.5848173"
] | 0.7896383 | 0 |
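
Similarly, a short illustration of the `_get_nested_value` document above — a sketch with an invented dict, assuming the helper from this row is in scope:

```python
# Illustrative only: assumes _get_nested_value() from the document above is importable.
item = {"meta": {"network": {"hostname": "db01"}}}

value = _get_nested_value(item, ["meta", "network", "hostname"])
assert value == "db01"
# A missing key, or a non-dict value partway down the path, raises AnsibleFilterError.
```
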
Tests whether the trace for step methods is exactly the same as on master. Code changes that affect how random numbers are drawn may change this, and require `master_samples` to be updated, but such changes should be noted and justified in the commit. This method may also be used to benchmark step methods across commits, by running, for example ``` | def check_trace(self, step_method):
n_steps = 100
with Model():
x = Normal('x', mu=0, sd=1)
if step_method.__name__ == 'SMC':
Deterministic('like', - 0.5 * tt.log(2 * np.pi) - 0.5 * x.T.dot(x))
trace = smc.ATMIP_sample(n_steps=n_steps, step=step_method(random_seed=1),
n_jobs=1, progressbar=False, stage='0',
homepath=self.temp_dir)
else:
trace = sample(n_steps, step=step_method(), random_seed=1)
print(repr(trace.get_values('x')))
assert_array_almost_equal(
trace.get_values('x'),
self.master_samples[step_method],
decimal=select_by_precision(float64=6, float32=4)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def TestOneStep(self):\n pass",
"def test_changes(monkeypatch, local):\n monkeypatch.setenv('LANG', 'en_US.UTF-8')\n monkeypatch.setenv('TRAVIS_BUILD_ID', '12345')\n monkeypatch.setenv('TRAVIS_BRANCH', 'master')\n old_sha = pytest.run(local, ['git', 'rev-parse', 'HEAD']).strip()\n local.ensure('new', 'new.txt')\n local.join('README').write('test\\n', mode='a')\n\n actual = commit_and_push(str(local), 'origin', Versions(REMOTES))\n assert actual is True\n sha = pytest.run(local, ['git', 'rev-parse', 'HEAD']).strip()\n assert sha != old_sha\n pytest.run(local, ['git', 'diff-index', '--quiet', 'HEAD', '--']) # Exit 0 if nothing changed.\n\n # Verify commit message.\n subject, body = pytest.run(local, ['git', 'log', '-n1', '--pretty=%B']).strip().split('\\n', 2)[::2]\n assert subject == 'AUTO sphinxcontrib-versioning 20160722 0772e5ff32a'\n assert body == 'LANG: en_US.UTF-8\\nTRAVIS_BRANCH: master\\nTRAVIS_BUILD_ID: 12345'",
"def test_foo(self):\n self.ran = True\n 1 / 0",
"def test_update_splits():\n assert new_log.iloc[0]['start'] == 0",
"def test_split_feature(tree):\r\n print(\"test_split_feature()...\", end = \"\")\r\n assert (tree.process_split_feature() == True)\r\n print(\"Passed!\")",
"def test_different_workflows_and_cross_check_the_results(self):\n # Testcase 1. End-to-end report generation using coverage.py script. This is\n # the workflow of a regular user.\n cmd = [\n self.COVERAGE_SCRIPT,\n 'crypto_unittests',\n 'libpng_read_fuzzer',\n '-v',\n '-b',\n self.BUILD_DIR,\n '-o',\n self.REPORT_DIR_1,\n '-c'\n '%s/crypto_unittests' % self.BUILD_DIR,\n '-c',\n '%s/libpng_read_fuzzer -runs=0 third_party/libpng/' % self.BUILD_DIR,\n ]\n self.run_cmd(cmd)\n\n output_dir = os.path.join(self.REPORT_DIR_1, self.PLATFORM)\n self.verify_component_view(\n os.path.join(output_dir, 'component_view_index.html'))\n self.verify_directory_view(\n os.path.join(output_dir, 'directory_view_index.html'))\n self.verify_file_view(os.path.join(output_dir, 'file_view_index.html'))\n\n # Also try generating a report without components view. Useful for cross\n # checking with the report produced in the testcase #3.\n cmd = [\n self.COVERAGE_SCRIPT,\n 'crypto_unittests',\n 'libpng_read_fuzzer',\n '-v',\n '-b',\n self.BUILD_DIR,\n '-o',\n self.REPORT_DIR_1_NO_COMPONENTS,\n '-c'\n '%s/crypto_unittests' % self.BUILD_DIR,\n '-c',\n '%s/libpng_read_fuzzer -runs=0 third_party/libpng/' % self.BUILD_DIR,\n '--no-component-view',\n ]\n self.run_cmd(cmd)\n\n output_dir = os.path.join(self.REPORT_DIR_1_NO_COMPONENTS, self.PLATFORM)\n self.verify_directory_view(\n os.path.join(output_dir, 'directory_view_index.html'))\n self.verify_file_view(os.path.join(output_dir, 'file_view_index.html'))\n self.assertFalse(\n os.path.exists(os.path.join(output_dir, 'component_view_index.html')))\n\n # Testcase #2. Run the script for post processing in Chromium tree. This is\n # the workflow of the code coverage bots.\n instr_profile_path = os.path.join(self.REPORT_DIR_1, self.PLATFORM,\n 'coverage.profdata')\n\n cmd = [\n self.COVERAGE_SCRIPT,\n 'crypto_unittests',\n 'libpng_read_fuzzer',\n '-v',\n '-b',\n self.BUILD_DIR,\n '-p',\n instr_profile_path,\n '-o',\n self.REPORT_DIR_2,\n ]\n self.run_cmd(cmd)\n\n # Verify that the output dirs are the same except of the expected diff.\n report_1_listing = set(_RecursiveDirectoryListing(self.REPORT_DIR_1))\n report_2_listing = set(_RecursiveDirectoryListing(self.REPORT_DIR_2))\n logs_subdir = os.path.join(self.PLATFORM, 'logs')\n self.assertEqual(\n set([\n os.path.join(self.PLATFORM, 'coverage.profdata'),\n os.path.join(logs_subdir, 'crypto_unittests_output.log'),\n os.path.join(logs_subdir, 'libpng_read_fuzzer_output.log'),\n ]), report_1_listing - report_2_listing)\n\n output_dir = os.path.join(self.REPORT_DIR_2, self.PLATFORM)\n self.verify_component_view(\n os.path.join(output_dir, 'component_view_index.html'))\n self.verify_directory_view(\n os.path.join(output_dir, 'directory_view_index.html'))\n self.verify_file_view(os.path.join(output_dir, 'file_view_index.html'))\n\n # Verify that the file view pages are binary equal.\n report_1_file_view_data = _ReadFile(\n os.path.join(self.REPORT_DIR_1, self.PLATFORM, 'file_view_index.html'))\n report_2_file_view_data = _ReadFile(\n os.path.join(self.REPORT_DIR_2, self.PLATFORM, 'file_view_index.html'))\n self.assertEqual(report_1_file_view_data, report_2_file_view_data)\n\n # Testcase #3, run coverage_utils.py on manually produced report and summary\n # file. 
This is the workflow of OSS-Fuzz code coverage job.\n objects = [\n '-object=%s' % os.path.join(self.BUILD_DIR, 'crypto_unittests'),\n '-object=%s' % os.path.join(self.BUILD_DIR, 'libpng_read_fuzzer'),\n ]\n\n cmd = [\n self.PYTHON,\n self.COVERAGE_UTILS,\n '-v',\n 'shared_libs',\n '-build-dir=%s' % self.BUILD_DIR,\n ] + objects\n\n shared_libraries = self.run_cmd(cmd)\n objects.extend(shared_libraries.split())\n\n instr_profile_path = os.path.join(self.REPORT_DIR_1_NO_COMPONENTS,\n self.PLATFORM, 'coverage.profdata')\n cmd = [\n self.LLVM_COV,\n 'show',\n '-format=html',\n '-output-dir=%s' % self.REPORT_DIR_3,\n '-instr-profile=%s' % instr_profile_path,\n ] + objects\n if self.PLATFORM in ['linux', 'mac']:\n cmd.extend(['-Xdemangler', 'c++filt', '-Xdemangler', '-n'])\n self.run_cmd(cmd)\n\n cmd = [\n self.LLVM_COV,\n 'export',\n '-summary-only',\n '-instr-profile=%s' % instr_profile_path,\n ] + objects\n summary_output = self.run_cmd(cmd)\n\n summary_path = os.path.join(self.REPORT_DIR_3, 'summary.json')\n with open(summary_path, 'wb') as f:\n f.write(summary_output)\n\n cmd = [\n self.PYTHON,\n self.COVERAGE_UTILS,\n '-v',\n 'post_process',\n '-src-root-dir=%s' % self.CHROMIUM_SRC_DIR,\n '-summary-file=%s' % summary_path,\n '-output-dir=%s' % self.REPORT_DIR_3,\n ]\n self.run_cmd(cmd)\n\n output_dir = os.path.join(self.REPORT_DIR_3, self.PLATFORM)\n self.verify_directory_view(\n os.path.join(output_dir, 'directory_view_index.html'))\n self.verify_file_view(os.path.join(output_dir, 'file_view_index.html'))\n self.assertFalse(\n os.path.exists(os.path.join(output_dir, 'component_view_index.html')))\n\n # Verify that the file view pages are binary equal.\n report_1_file_view_data_no_component = _ReadFile(\n os.path.join(self.REPORT_DIR_1_NO_COMPONENTS, self.PLATFORM,\n 'file_view_index.html'))\n report_3_file_view_data = _ReadFile(\n os.path.join(self.REPORT_DIR_3, self.PLATFORM, 'file_view_index.html'))\n self.assertEqual(report_1_file_view_data_no_component,\n report_3_file_view_data)\n\n # Testcase 4. Export coverage data in lcov format using coverage.py script.\n cmd = [\n self.COVERAGE_SCRIPT,\n 'crypto_unittests',\n 'libpng_read_fuzzer',\n '--format',\n 'lcov',\n '-v',\n '-b',\n self.BUILD_DIR,\n '-o',\n self.REPORT_DIR_4,\n '-c'\n '%s/crypto_unittests' % self.BUILD_DIR,\n '-c',\n '%s/libpng_read_fuzzer -runs=0 third_party/libpng/' % self.BUILD_DIR,\n ]\n self.run_cmd(cmd)\n\n output_dir = os.path.join(self.REPORT_DIR_4, self.PLATFORM)\n self.verify_lcov_file(os.path.join(output_dir, 'coverage.lcov'))",
"def test1(self):\n\n log.info('This is a test')\n self.assertTrue((random.randint(0,9) % 2) == 0)#! /usr/bin/env python",
"def test_nothing_significant_to_commit(caplog, local, subdirs):\n local.ensure('sub' if subdirs else '', '.doctrees', 'file.bin').write('data')\n local.ensure('sub' if subdirs else '', 'searchindex.js').write('data')\n old_sha = pytest.run(local, ['git', 'rev-parse', 'HEAD']).strip()\n actual = commit_and_push(str(local), 'origin', Versions(REMOTES))\n assert actual is True\n sha = pytest.run(local, ['git', 'rev-parse', 'HEAD']).strip()\n assert sha != old_sha\n pytest.run(local, ['git', 'diff-index', '--quiet', 'HEAD', '--']) # Exit 0 if nothing changed.\n records = [(r.levelname, r.message) for r in caplog.records]\n assert ('INFO', 'No changes to commit.') not in records\n assert ('INFO', 'No significant changes to commit.') not in records\n\n local.ensure('sub' if subdirs else '', '.doctrees', 'file.bin').write('changed')\n local.ensure('sub' if subdirs else '', 'searchindex.js').write('changed')\n old_sha = sha\n records_seek = len(caplog.records)\n actual = commit_and_push(str(local), 'origin', Versions(REMOTES))\n assert actual is True\n sha = pytest.run(local, ['git', 'rev-parse', 'HEAD']).strip()\n assert sha == old_sha\n with pytest.raises(CalledProcessError):\n pytest.run(local, ['git', 'diff-index', '--quiet', 'HEAD', '--'])\n records = [(r.levelname, r.message) for r in caplog.records][records_seek:]\n assert ('INFO', 'No changes to commit.') not in records\n assert ('INFO', 'No significant changes to commit.') in records\n\n local.join('README').write('changed') # Should cause other two to be committed.\n old_sha = sha\n records_seek = len(caplog.records)\n actual = commit_and_push(str(local), 'origin', Versions(REMOTES))\n assert actual is True\n sha = pytest.run(local, ['git', 'rev-parse', 'HEAD']).strip()\n assert sha != old_sha\n pytest.run(local, ['git', 'diff-index', '--quiet', 'HEAD', '--']) # Exit 0 if nothing changed.\n records = [(r.levelname, r.message) for r in caplog.records][records_seek:]\n assert ('INFO', 'No changes to commit.') not in records\n assert ('INFO', 'No significant changes to commit.') not in records",
"def test_diff_trainability(self):\n self.run_subtests(\n {\n \"multi_tensor\": [False, True],\n \"sharding_strategy\": [\n ShardingStrategy.FULL_SHARD,\n ShardingStrategy.SHARD_GRAD_OP,\n ShardingStrategy.NO_SHARD,\n ],\n },\n self._test_diff_trainability,\n )",
"def test_master_versions(self):\n m = self.d.master(4242)\n r = self.d.release(79)\n v = m.versions\n\n self.assertEqual(len(v), 2)\n self.assertTrue(r in v)\n self.assertEqual(r.master, m)\n\n r2 = self.d.release(3329867)\n self.assertTrue(r2.master is None)",
"def test_updatePlayerbHist_fold_2(self):\n self.assertEqual(self.player.foldedInd, 1)",
"def test_forfatal_functions(self):\n logging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\n logging.getLogger(\"batchglm\").setLevel(logging.WARNING)\n logging.getLogger(\"diffxpy\").setLevel(logging.WARNING)\n\n num_observations = 10\n num_features = 2\n\n sim = Simulator(num_observations=num_observations, num_features=num_features)\n sim.generate_sample_description(num_batches=0, num_conditions=0)\n sim.generate()\n\n random_sample_description = pd.DataFrame({\n \"pseudotime\": np.random.random(size=sim.nobs),\n \"batch\": np.random.randint(2, size=sim.nobs)\n })\n\n test = de.test.continuous_1d(\n data=sim.X,\n continuous=\"pseudotime\",\n df=3,\n formula_loc=\"~ 1 + pseudotime + batch\",\n formula_scale=\"~ 1\",\n factor_loc_totest=\"pseudotime\",\n test=\"wald\",\n sample_description=random_sample_description,\n quick_scale=True,\n batch_size=None,\n training_strategy=\"DEFAULT\",\n dtype=\"float64\"\n )\n\n summary = test.summary()\n ids = test.gene_ids\n\n # 1. Test all additional functions which depend on model computation:\n # 1.1. Only continuous model:\n temp = test.log_fold_change(genes=ids, nonnumeric=False)\n temp = test.max(genes=ids, nonnumeric=False)\n temp = test.min(genes=ids, nonnumeric=False)\n temp = test.argmax(genes=ids, nonnumeric=False)\n temp = test.argmin(genes=ids, nonnumeric=False)\n temp = test.summary(nonnumeric=False)\n # 1.2. Full model:\n temp = test.log_fold_change(genes=ids, nonnumeric=True)\n temp = test.max(genes=ids, nonnumeric=True)\n temp = test.min(genes=ids, nonnumeric=True)\n temp = test.argmax(genes=ids, nonnumeric=True)\n temp = test.argmin(genes=ids, nonnumeric=True)\n temp = test.summary(nonnumeric=True)\n\n return True",
"def test_random_passed(self):\n jobs = self._travis.jobs(state='passed')\n job_id = jobs[0].id\n print('Random using job_id {0}'.format(job_id))\n log = self._get_job_log(job_id=job_id)\n\n assert log.body != ''\n blocks = self._parse(log)",
"def test_dataset_scenario_generation_full_outside1(self):\n params = ParameterServer()\n\n map_filename = os.path.join(os.path.dirname(__file__), \"data/DR_DEU_Merging_MT_v01_shifted.xodr\")\n track_filename = os.path.join(os.path.dirname(__file__), \"data/interaction_dataset_DEU_Merging_dummy_track_outside.csv\")\n\n params[\"Scenario\"][\"Generation\"][\"InteractionDatasetScenarioGenerationFull\"][\"MapFilename\"] = map_filename\n params[\"Scenario\"][\"Generation\"][\"InteractionDatasetScenarioGenerationFull\"][\"TrackFilenameList\"] = [track_filename]\n params[\"Scenario\"][\"Generation\"][\"InteractionDatasetScenarioGenerationFull\"][\"StartingOffsetMs\"] = 0\n\n scenario_generation = InteractionDatasetScenarioGenerationFull(\n params=params, num_scenarios=1)\n\n scenario = scenario_generation.get_scenario(0)\n self.assertAlmostEqual(scenario.eval_agent_ids, [1])\n\n world_state = scenario.GetWorldState()\n agent11 = world_state.GetAgent(1)\n agent12 = world_state.GetAgent(2)\n agent13 = world_state.GetAgent(3)\n\n self.assertAlmostEqual(agent11.first_valid_timestamp, 0.0)\n self.assertAlmostEqual(agent12.first_valid_timestamp, 0.0)\n self.assertNotEqual(agent13.first_valid_timestamp, 0.0)\n \n # agent13 should not be valid at the beginning, as he is outside of map\n world_state.time = 0\n self.assertEqual(isinstance(agent11, Agent), True)\n self.assertEqual(agent11.IsValidAtTime(world_state.time), True)\n self.assertEqual(agent11.InsideRoadCorridor(), True)\n \n self.assertEqual(isinstance(agent12, Agent), True)\n self.assertEqual(agent12.IsValidAtTime(world_state.time), True)\n self.assertEqual(agent12.InsideRoadCorridor(), True)\n\n self.assertEqual(isinstance(agent13, Agent), True)\n self.assertEqual(agent13.IsValidAtTime(world_state.time), False)\n # as we use only state once it's in map, this will be true, although the time step is not valid yet\n self.assertEqual(agent13.InsideRoadCorridor(), True)\n\n # agent13 should not be valid at the beginning, as he is outside of map\n world_state.Step(0.05)\n\n self.assertEqual(isinstance(agent11, Agent), True)\n self.assertEqual(agent11.IsValidAtTime(world_state.time), True)\n self.assertEqual(agent11.InsideRoadCorridor(), True)\n \n self.assertEqual(isinstance(agent12, Agent), True)\n self.assertEqual(agent12.IsValidAtTime(world_state.time), True)\n self.assertEqual(agent12.InsideRoadCorridor(), True)\n\n self.assertEqual(isinstance(agent13, Agent), True)\n self.assertEqual(agent13.IsValidAtTime(world_state.time), False)\n # as we use only state once it's in map, this will be true, although the time step is not valid yet\n self.assertEqual(agent13.InsideRoadCorridor(), True)\n\n self.assertEqual(list(world_state.agents_valid.keys()), [1,2])\n\n # agent13 should be valid at some point\n world_state.Step(agent13.first_valid_timestamp)\n world_state.Step(0.01) # agent13.IsValidAtTime() uses previous time stamp, therefore we increment it one more step\n\n self.assertEqual(isinstance(agent11, Agent), True)\n self.assertEqual(agent11.IsValidAtTime(world_state.time), True)\n self.assertEqual(agent11.InsideRoadCorridor(), True)\n \n self.assertEqual(isinstance(agent12, Agent), True)\n self.assertEqual(agent12.IsValidAtTime(world_state.time), True)\n self.assertEqual(agent12.InsideRoadCorridor(), True)\n\n self.assertEqual(isinstance(agent13, Agent), True)\n self.assertEqual(agent13.IsValidAtTime(world_state.time), True)\n self.assertEqual(agent13.InsideRoadCorridor(), True)\n\n self.assertEqual(list(world_state.agents_valid.keys()), [1,2,3])",
"def step_solution(self):\n import time, random\n time.sleep(1.0)\n print '(step_solution) Implement me!'\n return True if random.random() < 0.25 else False",
"def test_passed():\n pass",
"def test_master(self, tmpgitdir, branch):\n with tmpgitdir.join('file_a.txt').open('w') as handle:\n handle.write('first file')\n\n subprocess.check_call(['git', 'checkout', '-b', branch])\n subprocess.check_call(['git', 'add', '.'])\n subprocess.check_call(['git', 'commit', '-m', 'first'])\n\n assert git_head_ref_name(tmpgitdir) == branch",
"def test_example_35(self):\n\n x = self.start_request_tests(example_35)\n # Insert: whether result is what it should be according to docs\n self.assert_success(x)\n # json.dump(x.json(), sys.stdout, indent=2)\n self.assertTrue(x.json()[u'are_same_tree'])",
"def test_identical(self):\n write this test!",
"def test_run(self):\n class MockProvider(BaseCoverageProvider):\n SERVICE_NAME = \"I do nothing\"\n was_run = False\n\n def run_once_and_update_timestamp(self):\n \"\"\"Set a variable.\"\"\"\n self.was_run = True\n return None\n\n provider = MockProvider(self._db)\n result = provider.run()\n\n # run_once_and_update_timestamp() was called.\n assert True == provider.was_run\n\n # run() returned a CoverageProviderProgress with basic\n # timing information, since run_once_and_update_timestamp()\n # didn't provide anything.\n assert isinstance(result, CoverageProviderProgress)\n now = utc_now()\n assert result.start < result.finish\n for time in (result.start, result.finish):\n assert (now - time).total_seconds() < 5",
"def test_sort_git_master_and_latest(self):\n identifiers = [\"latest\", \"master\", \"1.0\", \"2.0\", \"1.1\", \"1.9\", \"1.10\"]\n self.project.repo_type = REPO_TYPE_GIT\n self.project.save()\n self.project.versions.get(slug=LATEST).delete()\n\n for identifier in identifiers:\n get(\n Version,\n project=self.project,\n type=BRANCH,\n identifier=identifier,\n verbose_name=identifier,\n slug=identifier,\n )\n\n versions = list(Version.objects.filter(project=self.project))\n self.assertEqual(\n [\"master\", \"latest\", \"2.0\", \"1.10\", \"1.9\", \"1.1\", \"1.0\"],\n [v.slug for v in sort_version_aware(versions)],\n )",
"def test_task178b_main_logic(sequence, expected_value):\r\n assert algo.Task178b.main_logic(sequence) == expected_value",
"def on_same_branch(left, right):\n left = LooseVersion(left)\n right = LooseVersion(right)\n return left.version[:2] == right.version[:2]",
"def test_golden_old(self):\n successes = 0\n failures = 0\n iterations = NUM_CALLS\n\n for _ in range(iterations):\n\n handler = self.new_handler()\n ret = check_golden(handler)\n if ret:\n successes += 1\n else:\n failures += 1\n handler.debug_wrapper()\n\n if failures != 0:\n print('\\n=================================================\\n')\n\n self.assertEqual(failures, 0,\n msg=f'{BColors.FAIL}\\n\\t[-]\\tFrom state: Failed to correctly generate golden node! ' +\n f'{failures}/{iterations} failures! {BColors.ENDC}')\n print(f\"{BColors.OKGREEN}\\t[+]\\tFrom state: Validated correct golden node generation in {successes} trees.{BColors.ENDC}\")",
"def pytest_runtest_logfinish(nodeid):\n log.debug(\"<<<<<<< END %s <<<<<<<\", nodeid)",
"def test_dataset_scenario_generation_full_late(self):\n \n params = ParameterServer()\n\n map_filename = os.path.join(os.path.dirname(__file__), \"data/DR_DEU_Merging_MT_v01_shifted.xodr\")\n track_filename = os.path.join(os.path.dirname(__file__), \"data/interaction_dataset_DEU_Merging_dummy_track_late.csv\")\n\n params[\"Scenario\"][\"Generation\"][\"InteractionDatasetScenarioGenerationFull\"][\"MapFilename\"] = map_filename\n params[\"Scenario\"][\"Generation\"][\"InteractionDatasetScenarioGenerationFull\"][\"TrackFilenameList\"] = [track_filename]\n params[\"Scenario\"][\"Generation\"][\"InteractionDatasetScenarioGenerationFull\"][\"StartingOffsetMs\"] = 0\n\n scenario_generation = InteractionDatasetScenarioGenerationFull(\n params=params, num_scenarios=1)\n\n scenario = scenario_generation.get_scenario(0)\n world_state = scenario.GetWorldState()\n agent1 = world_state.GetAgent(1)\n agent2 = world_state.GetAgent(2)\n\n self.assertAlmostEqual(agent1.first_valid_timestamp, 0.0)\n self.assertAlmostEqual(agent2.first_valid_timestamp, 0.3)\n \n self.assertEqual(isinstance(agent1, Agent), True)\n self.assertEqual(agent1.IsValidAtTime(world_state.time), True)\n \n self.assertEqual(isinstance(agent2, Agent), True)\n self.assertEqual(agent2.IsValidAtTime(world_state.time), False)",
"def test_ge_master(\n self, aiosmtpd_version: version.Version, capsys: pytest.CaptureFixture\n ):\n reference = \"master:aiosmtpd/__init__.py\"\n cmd = f\"git show {reference}\".split()\n try:\n with capsys.disabled():\n master_smtp = subprocess.check_output(cmd).decode() # nosec\n except subprocess.CalledProcessError:\n pytest.skip(\"Skipping due to git error\")\n return\n for ln in master_smtp.splitlines():\n m = RE_DUNDERVER.match(ln)\n if m:\n break\n else:\n pytest.fail(f\"Cannot find __version__ in {reference}!\")\n master_ver = version.parse(m.group(\"ver\"))\n assert aiosmtpd_version >= master_ver, \"Version number cannot be < master's\"",
"def test_set_commits_master_include(self):\n\n temp = self.Temp(self.items, conds=[MasterInclude()])\n master_include = temp.conds[0]\n master_include.set_commits(temp.df)\n\n commit = CommitGit(self.items, conds=[MasterInclude()])\n self.assertEqual(master_include.included, commit.conds[0].included)",
"def test_default_repo_branch(self):\n # network may be unavailable, but we are not interested anyway,\n # so we ignore the exitcode\n output = self.run_command(\"selfupdate --check\", exitcode=None)\n self.assertIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Target: ywangd:dev\", output)",
"def _compare(self, x,y, pr=False):\n batched = self.ex.batched(x, y)\n looped = self.ex.looped(x, y)\n #print(f'batched value {batched}')\n #print(f'looped value {looped}')\n \n self.assertTrue(\n torch.equal(batched, looped)\n )"
] | [
"0.5406668",
"0.5375",
"0.53389776",
"0.5314626",
"0.52967477",
"0.5258354",
"0.52216065",
"0.52073497",
"0.51792115",
"0.51779264",
"0.51701105",
"0.5160602",
"0.5152976",
"0.51241815",
"0.50939167",
"0.50733656",
"0.50511485",
"0.5043404",
"0.50413126",
"0.50389934",
"0.503667",
"0.50365883",
"0.5035098",
"0.5033721",
"0.5029778",
"0.5029195",
"0.50233614",
"0.50227267",
"0.50111586",
"0.49966806"
] | 0.549506 | 0 |
Test that samplers correctly create nonblocked compound steps. | def test_non_blocked(self):
_, model = simple_2model()
with model:
for sampler in self.samplers:
assert isinstance(sampler(blocked=False), CompoundStep) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_block_extra_batch(self):\n pass",
"def test_block_bad_batch(self):\n pass",
"def test_validation_correct_samplers():\n samplers = [\n {'type': 'MultiStateSampler', 'locality': 3},\n {'type': 'ReplicaExchangeSampler'},\n # MCMCMove 'single' is defined in get_template_script().\n {'type': 'SAMSSampler', 'mcmc_moves': 'single'},\n {'type': 'ReplicaExchangeSampler', 'number_of_iterations': 5, 'replica_mixing_scheme': 'swap-neighbors'},\n {'type': 'ReplicaExchangeSampler', 'number_of_iterations': 5, 'replica_mixing_scheme': None}\n ]\n exp_builder = ExperimentBuilder(get_template_script())\n for sampler in samplers:\n script = {'samplers': {'sampler1': sampler}}\n yield exp_builder._validate_samplers, script",
"def test_block_missing_batch(self):\n pass",
"def test_block_batches_order(self):\n pass",
"def test_uniform_basic():\r\n yield check_uniform_basic, False\r\n yield check_uniform_basic, False, True\r\n yield check_uniform_basic, True",
"def test_block_missing_batch_dependency(self):\n pass",
"def test_block_bad_state(self):\n pass",
"def test_normal_basic():\r\n yield check_normal_basic, False\r\n yield check_normal_basic, False, True\r\n yield check_normal_basic, True",
"def test_chunk_pauli(self, method, device):\n opts_no_chunk = {\"fusion_enable\": False}\n opts_chunk = copy.copy(opts_no_chunk)\n opts_chunk[\"blocking_enable\"] = True\n opts_chunk[\"blocking_qubits\"] = 3\n\n backend = self.backend(method=method, device=device, **opts_chunk)\n backend_no_chunk = self.backend(method=method, device=device, **opts_no_chunk)\n\n shots = 100\n\n qr = QuantumRegister(5)\n cr = ClassicalRegister(5)\n regs = (qr, cr)\n circuit = QuantumCircuit(*regs)\n circuit.h(qr[0])\n circuit.h(qr[1])\n circuit.h(qr[2])\n circuit.h(qr[3])\n circuit.h(qr[4])\n circuit.pauli(\"YXZYX\", qr)\n circuit.measure_all()\n\n result = backend.run(circuit, shots=shots, memory=True).result()\n counts = result.get_counts(circuit)\n result_no_chunk = backend_no_chunk.run(circuit, shots=shots, memory=True).result()\n counts_no_chunk = result_no_chunk.get_counts(circuit)\n\n self.assertEqual(counts_no_chunk, counts)",
"def test_sampling2 () :\n delta = 2 * np.pi / 3\n r = Reward(partial(stepFunction, \n xRange=(-delta/2, delta/2), \n yRange=(-delta/2, delta/2)), \n (-1, 0))\n states = []\n xs = np.arange(-np.pi, np.pi, delta)\n ys = np.arange(-np.pi, np.pi, delta)\n for x, y in product(xs, ys) : \n states.append(\n toExternalStateRep([x + delta / 2, y + delta / 2, 0, 0]).astype(float)\n )\n agent = findOptimalAgent(r)\n vals = estimateValueFromAgent(states, agent, r)\n for s, v in zip(states, vals) : \n print(toInternalStateRep(s)[:2], v)",
"def test_checks_population_size(self):\n with pm.Model() as model:\n n = pm.Normal(\"n\", mu=0, sigma=1)\n for stepper in TestPopulationSamplers.steppers:\n step = stepper()\n with pytest.raises(ValueError, match=\"requires at least 3 chains\"):\n pm.sample(draws=10, tune=10, chains=1, cores=1, step=step)\n # don't parallelize to make test faster\n pm.sample(\n draws=10,\n tune=10,\n chains=4,\n cores=1,\n step=step,\n compute_convergence_checks=False,\n )",
"def simulationTwoDrugsDelayedTreatment():\n\n # TODO",
"def scenario():\n LOCAL_HOST = \"http://127.0.0.1\"\n\n # import functions\n from . import genesis_block\n from . import create_block\n from . import connect_peer\n from . import stop_server\n from . import block_crosscheck\n\n total_cnt = 0\n pass_cnt = 0\n\n # 1. Check each peer's genesis block\n try:\n assert genesis_block.check(LOCAL_HOST, 3001)\n assert genesis_block.check(LOCAL_HOST, 3002)\n assert genesis_block.check(LOCAL_HOST, 3003)\n\n print(\"pass\", end=' ')\n pass_cnt += 1\n\n except:\n print(\"FAIL\", end=' ')\n\n finally:\n print(\"test1/genesis_block\")\n total_cnt += 1\n\n # 2. Generate new blocks\n # 2.1. 2 blocks on peer #1\n # 2.2. 4 blocks on peer #2\n # 2.3. 2 blocks on peer #3\n try:\n assert create_block.addBlocks(LOCAL_HOST, 3001, num=2)\n assert create_block.check(LOCAL_HOST, 3001, num=2)\n\n assert create_block.addBlocks(LOCAL_HOST, 3002, num=4)\n assert create_block.check(LOCAL_HOST, 3002, num=4)\n\n assert create_block.addBlocks(LOCAL_HOST, 3003, num=2)\n assert create_block.check(LOCAL_HOST, 3003, num=2)\n\n print(\"pass\", end=' ')\n pass_cnt += 1\n\n except:\n print(\"FAIL\", end=' ')\n\n finally:\n print(\"test1/create_block\")\n total_cnt += 1\n\n # 3. Connect peers\n # 3.1. peer #1 with #2 (1->2)\n # 3.2. peer #1 with #3 (1->(2 and 3))\n try:\n assert connect_peer.connectPeer(LOCAL_HOST, 3001, \"ws://127.0.0.1:6002\")\n assert connect_peer.connectPeer(LOCAL_HOST, 3001, \"ws://127.0.0.1:6003\")\n\n print(\"pass\", end=' ')\n pass_cnt += 1\n\n except:\n print(\"FAIL\", end=' ')\n\n finally:\n print(\"test1/connect_peer\")\n total_cnt += 1\n\n # 4. Generate new blocks\n # 4.1. 3 blocks on peer #1\n # 4.2. 5 blocks on peer #3\n try:\n isPass, newBlocks = block_crosscheck.addBlocks(LOCAL_HOST, 3001, num=3)\n assert isPass\n assert block_crosscheck.check(LOCAL_HOST, 3002, newBlocks, num=3)\n assert block_crosscheck.check(LOCAL_HOST, 3003, newBlocks, num=3)\n\n isPass, newBlocks = block_crosscheck.addBlocks(LOCAL_HOST, 3003, num=5)\n assert isPass\n assert block_crosscheck.check(LOCAL_HOST, 3001, newBlocks, num=5)\n assert block_crosscheck.check(LOCAL_HOST, 3002, newBlocks, num=5)\n\n print(\"pass\", end=' ')\n pass_cnt += 1\n\n except:\n print(\"FAIL\", end=' ')\n\n finally:\n print(\"test1/block_crosscheck\")\n total_cnt += 1\n\n # 5. Stop all peers\n try:\n assert stop_server.stopServer(LOCAL_HOST, 3001)\n assert stop_server.stopServer(LOCAL_HOST, 3002)\n assert stop_server.stopServer(LOCAL_HOST, 3003)\n\n print(\"pass\", end=' ')\n pass_cnt += 1\n\n except:\n print(\"FAIL\", end=' ')\n\n finally:\n print(\"test1/stop_server\")\n total_cnt += 1\n\n # return pass_cnt_per_test and total_cnt_per_test\n return pass_cnt, total_cnt",
"def __init__(self, tconst=2, tau1=5, tau2=10, lambda1=20, lambda2=50, lambda3=100): # only default arguments here\n gr.sync_block.__init__(\n self,\n name='Scenario classification', # will show up in GRC\n in_sig=[np.float32]*6,\n out_sig=None\n )\n # if an attribute with the same name as a parameter is found,\n # a callback is registered (properties work, too).\n self.tconst = tconst\n self.tau1 = tau1\n self.tau2 = tau2\n self.lambda1 = lambda1\n self.lambda2 = lambda2\n self.lambda3 = lambda3\n self.nchan = 4\n self.scenario_tau = np.array([self.tau1, self.tau2, 2*self.tau1, 4*self.tau2, self.tau1, self.tau1, self.tconst, self.lambda1, self.lambda2, self.lambda3, 0]) # scenario 10 is \"unsure, don't send\"\n self.scenario_send_window = self.scenario_tau.copy() # the send window for the hopping and Poisson scenarios differs from the deterministic ones\n self.scenario_send_window[2] = self.tau1\n self.scenario_send_window[3] = self.tau2\n for i in range(7, 10):\n self.scenario_send_window[i] = st.poisson.ppf(0.05, self.scenario_tau[i])\n print \"expected mean inter frame times: \", self.scenario_tau\n print \"send windows: \", self.scenario_send_window\n \n self.max_confidence = 3\n self.confidence = 0\n \n self.scenario = 10\n self.chan_occupied = [False] * self.nchan # list of true/false values\n self.tau = None\n \n self.message_port_register_out(pmt.intern(\"scenario\"))",
"def test_validation_wrong_samplers():\n # Each test case is a pair (regexp_error, sampler_description).\n samplers = [\n (\"locality must be an int\",\n {'type': 'MultiStateSampler', 'locality': 3.0}),\n (\"unallowed value unknown\",\n {'type': 'ReplicaExchangeSampler', 'mcmc_moves': 'unknown'}),\n (\"Could not find class NonExistentSampler\",\n {'type': 'NonExistentSampler'}),\n (\"found unknown parameter\",\n {'type': 'ReplicaExchangeSampler', 'unknown_kwarg': 5}),\n ]\n exp_builder = ExperimentBuilder(get_template_script())\n for regexp, sampler in samplers:\n script = {'samplers': {'sampler1': sampler}}\n yield assert_raises_regexp, YamlParseError, regexp, exp_builder._validate_samplers, script",
"def test_measure_nondeterministic_multi_qubit_with_sampling(self):\n shots = 2000\n qobj = ref_measure.measure_circuits_qobj_nondeterministic(allow_sampling=True)\n qobj.config.shots = shots\n circuits = [experiment.header.name for experiment in qobj.experiments]\n targets = ref_measure.measure_counts_qobj_nondeterministic(shots)\n job = QasmSimulator().run(qobj)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)",
"def test_block_bad_consensus(self):\n pass",
"def test_02(self):\n assert 'False' == Api.requestBlock('test-02')",
"def test_wf_st_5(plugin):\n wf = Workflow(name=\"wf_st_5\", input_spec=[\"x\", \"y\"])\n wf.add(multiply(name=\"mult\", x=wf.lzin.x, y=wf.lzin.y))\n wf.add(add2(name=\"add2\", x=wf.mult.lzout.out))\n\n wf.split([\"x\", \"y\"], x=[1, 2], y=[11, 12])\n wf.combine(\"x\")\n wf.set_output([(\"out\", wf.add2.lzout.out)])\n wf.plugin = plugin\n\n with Submitter(plugin=plugin) as sub:\n sub.run(wf)\n\n # checking the results\n while not wf.done:\n sleep(1)\n results = wf.result()\n\n assert results[0][0].output.out == 13\n assert results[0][1].output.out == 24\n assert results[1][0].output.out == 14\n assert results[1][1].output.out == 26",
"def test_sampler_building(self):\n with mmtools.utils.temporary_directory() as tmp_dir:\n template_script = self.get_implicit_template_script(tmp_dir)\n template_script['options']['resume_setup'] = True\n default_number_of_iterations = template_script['options']['default_number_of_iterations']\n\n # Add tested samplers.\n template_script['samplers'] = {\n 'my-sampler1': {\n 'type': 'ReplicaExchangeSampler',\n 'number_of_iterations': 9,\n 'replica_mixing_scheme': 'swap-neighbors',\n },\n 'my-sampler2': {\n 'type': 'MultiStateSampler',\n 'locality': 5\n }\n }\n\n def check_default_number_of_iterations(phase, sampler_description):\n if 'number_of_iterations' not in sampler_description:\n assert phase.sampler.number_of_iterations == default_number_of_iterations\n\n # Test that options are passed to the sampler correctly.\n for sampler_id, sampler_description in template_script['samplers'].items():\n template_script['experiments']['sampler'] = sampler_id\n constructor_description = template_script['samplers'][sampler_id]\n yield (self.check_constructor, template_script, constructor_description,\n 'sampler', None, check_default_number_of_iterations)",
"def test_34(self):\n assert 'False' == Api.requestBlock('test-34')",
"def test_batch(self):\n pass",
"def TestOneStep(self):\n pass",
"def test_37(self):\n assert 'False' == Api.requestBlock('test-37')",
"def test_33(self):\n assert 'False' == Api.requestBlock('test-33')",
"def test_chunker(self):\n chunker = StringChunker(Protocol.sieve_function)\n\n self.assert_chunker_sample(chunker, self.RASFL_SAMPLE_DATA1)\n self.assert_chunker_sample_with_noise(chunker, self.RASFL_SAMPLE_DATA1)\n self.assert_chunker_fragmented_sample(chunker, self.RASFL_SAMPLE_DATA1)\n self.assert_chunker_combined_sample(chunker, self.RASFL_SAMPLE_DATA1) \n \n self.assert_chunker_sample(chunker, self.RASFL_STATUS_DATA)\n self.assert_chunker_sample_with_noise(chunker, self.RASFL_STATUS_DATA)\n self.assert_chunker_fragmented_sample(chunker, self.RASFL_STATUS_DATA)\n self.assert_chunker_combined_sample(chunker, self.RASFL_STATUS_DATA)",
"def test_simulate_sampled_expectation_inputs(self):\n n_qubits = 5\n batch_size = 5\n symbol_names = ['alpha']\n qubits = cirq.GridQubit.rect(1, n_qubits)\n circuit_batch, resolver_batch = \\\n util.random_symbol_circuit_resolver_batch(\n qubits, symbol_names, batch_size)\n\n symbol_values_array = np.array(\n [[resolver[symbol]\n for symbol in symbol_names]\n for resolver in resolver_batch])\n\n pauli_sums = util.random_pauli_sums(qubits, 3, batch_size)\n num_samples = [[10]] * batch_size\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'programs must be rank 1'):\n # Circuit tensor has too many dimensions.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor([circuit_batch]), symbol_names,\n symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'symbol_names must be rank 1.'):\n # symbol_names tensor has too many dimensions.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), np.array([symbol_names]),\n symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'symbol_values must be rank 2.'):\n # symbol_values_array tensor has too many dimensions.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n np.array([symbol_values_array]),\n util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'symbol_values must be rank 2.'):\n # symbol_values_array tensor has too few dimensions.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array[0],\n util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'pauli_sums must be rank 2.'):\n # pauli_sums tensor has too few dimensions.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch),\n symbol_names, symbol_values_array,\n util.convert_to_tensor(list(pauli_sums)), num_samples)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'pauli_sums must be rank 2.'):\n # pauli_sums tensor has too many dimensions.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array,\n [util.convert_to_tensor([[x] for x in pauli_sums])],\n num_samples)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'num_samples must be rank 2'):\n # num_samples tensor has the wrong shape.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]),\n [num_samples])\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'num_samples must be rank 2'):\n # num_samples tensor has the wrong shape.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]),\n num_samples[0])\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'Unparseable proto'):\n # circuit tensor has the right type but invalid values.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n ['junk'] * batch_size, symbol_names, symbol_values_array,\n 
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'Could not find symbol in parameter map'):\n # symbol_names tensor has the right type but invalid values.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), ['junk'],\n symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'qubits not found in circuit'):\n # pauli_sums tensor has the right type but invalid values.\n new_qubits = [cirq.GridQubit(5, 5), cirq.GridQubit(9, 9)]\n new_pauli_sums = util.random_pauli_sums(new_qubits, 2, batch_size)\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array,\n util.convert_to_tensor([[x] for x in new_pauli_sums]),\n num_samples)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'Unparseable proto'):\n # pauli_sums tensor has the right type but invalid values 2.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array, [['junk']] * batch_size, num_samples)\n\n with self.assertRaisesRegex(TypeError, 'Cannot convert'):\n # circuits tensor has the wrong type.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n [1.0] * batch_size, symbol_names, symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)\n\n with self.assertRaisesRegex(TypeError, 'Cannot convert'):\n # symbol_names tensor has the wrong type.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), [0.1234],\n symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)\n\n with self.assertRaisesRegex(tf.errors.UnimplementedError, ''):\n # symbol_values tensor has the wrong type.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n [['junk']] * batch_size,\n util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)\n\n with self.assertRaisesRegex(TypeError, 'Cannot convert'):\n # pauli_sums tensor has the wrong type.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array, [[1.0]] * batch_size, num_samples)\n\n with self.assertRaisesRegex(TypeError, 'missing'):\n # we are missing an argument.\n # pylint: disable=no-value-for-parameter\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array, num_samples)\n # pylint: enable=no-value-for-parameter\n\n with self.assertRaisesRegex(TypeError, 'positional arguments'):\n # pylint: disable=too-many-function-args\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]), [],\n num_samples)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n expected_regex='do not match'):\n # wrong op size.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor([cirq.Circuit()]), symbol_names,\n symbol_values_array.astype(np.float64),\n util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'greater than 0'):\n # pylint: disable=too-many-function-args\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n 
util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]),\n [[-1]] * batch_size)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n expected_regex='do not match'):\n # wrong symbol_values size.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array[:int(batch_size * 0.5)],\n util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n expected_regex='cirq.Channel'):\n # attempting to use noisy circuit.\n noisy_circuit = cirq.Circuit(cirq.depolarize(0.3).on_each(*qubits))\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor([noisy_circuit for _ in pauli_sums]),\n symbol_names, symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)",
"def test_generate_nb_testing(self):\n pass",
"def test_wfasnd_st_2(plugin):\n wfnd = Workflow(name=\"wfnd\", input_spec=[\"x\", \"y\"])\n wfnd.add(multiply(name=\"mult\", x=wfnd.lzin.x, y=wfnd.lzin.y))\n wfnd.set_output([(\"out\", wfnd.mult.lzout.out)])\n wfnd.split((\"x\", \"y\"))\n wfnd.inputs.x = [2, 4]\n wfnd.inputs.y = [1, 10]\n\n wf = Workflow(name=\"wf_st_3\", input_spec=[\"x\", \"y\"])\n wf.add(wfnd)\n wf.add(add2(name=\"add2\", x=wf.wfnd.lzout.out))\n wf.set_output([(\"out\", wf.add2.lzout.out)])\n wf.plugin = plugin\n\n with Submitter(plugin=plugin) as sub:\n sub.run(wf)\n\n # checking the results\n while not wf.done:\n sleep(1)\n results = wf.result()\n assert results.output.out == [4, 42]"
] | [
"0.66656935",
"0.6592942",
"0.64613193",
"0.64177775",
"0.6116642",
"0.6095393",
"0.6079921",
"0.6000796",
"0.5993817",
"0.59393144",
"0.5934992",
"0.57287186",
"0.5716562",
"0.57142526",
"0.57003576",
"0.56979024",
"0.5697373",
"0.5691124",
"0.567336",
"0.5657544",
"0.5653979",
"0.56525356",
"0.5624289",
"0.5613733",
"0.56032073",
"0.5601314",
"0.5599174",
"0.5598846",
"0.5570944",
"0.55575854"
] | 0.79553413 | 0 |
Test bernoulli distribution is assigned binary gibbs metropolis method | def test_bernoulli(self):
with Model() as model:
Bernoulli('x', 0.5)
steps = assign_step_methods(model, [])
assert isinstance(steps, BinaryGibbsMetropolis) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bernoulli(p):\n bern = rn.binomial(1,p)\n return bern",
"def bernoulli(p):\r\n if np.random.random() < p:\r\n return 0\r\n else:\r\n return 1",
"def bernoulli_num(n):\n return mp.bernoulli(n)",
"def bernoulli(n):\n\n x, res, s, c = Rat(0), Rat(0), Rat(0), Rat(-1)\n for k in range(1, n+2):\n c *= 1 - Rat(n + 2)/k\n s += x**n\n x += 1\n res += c*s/k\n return res",
"def test_Bernoulli_NB_estimators():",
"def bernoulli_trial(p: float) -> int:\n return 1 if random.random() < p else 0",
"def bernoulli(gp_link=None):\r\n if gp_link is None:\r\n gp_link = noise_models.gp_transformations.Probit()\r\n #else:\r\n # assert isinstance(gp_link,noise_models.gp_transformations.GPTransformation), 'gp_link function is not valid.'\r\n\r\n if isinstance(gp_link,noise_models.gp_transformations.Probit):\r\n analytical_mean = True\r\n analytical_variance = False\r\n\r\n elif isinstance(gp_link,noise_models.gp_transformations.Heaviside):\r\n analytical_mean = True\r\n analytical_variance = True\r\n\r\n else:\r\n analytical_mean = False\r\n analytical_variance = False\r\n\r\n return noise_models.bernoulli_noise.Bernoulli(gp_link,analytical_mean,analytical_variance)",
"def gibbs_sample(self):\n # Initialize the initial state of Markov Chain.\n self.initialize()\n # Gibbs Sampling.\n for iteration_index in range(0, self.iteration_number, 1):\n for m in range(0,self.document_number,1):\n for n in range(0, len(self.documents[m]), 1):\n # Change the state of word_m_n according to it's full conditional probability.\n self.sample_by_full_condition(m=m,n=n)\n print 'iteration:', iteration_index,datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n if iteration_index > self.burn_in and iteration_index % self.update_cycle == 0:\n # Update the distribution after burn in.\n self.update_distribution()\n else:\n pass\n # calculate the final distribution.\n self.get_distribution()",
"def sample_bernoulli(params):\n assert False, 'tfp not available on cluster gpu yet'\n \"\"\"\n shape = tf.shape(params)\n bernoulli_dist = tfp.distributions.Bernoulli(logits=params, dtype=tf.float32)\n return bernoulli_dist.sample()\n \"\"\"",
"def binary_sample(x):\n return np.random.binomial(1, p=x)",
"def isBernoulli(self):\n return self._bernoulli",
"def bernoulliSample(x):\r\n g = tf.get_default_graph()\r\n\r\n with ops.name_scope(\"BernoulliSample\") as name:\r\n with g.gradient_override_map({\"Ceil\": \"Identity\", \"Sub\": \"BernoulliSample_ST\"}):\r\n\r\n if args[\"deterministic_train\"]:\r\n train_fn = lambda: tf.minimum(tf.ones(tf.shape(x)), tf.ones(tf.shape(x)) * 0.5)\r\n else:\r\n train_fn = lambda: tf.minimum(tf.ones(tf.shape(x)), tf.random_uniform(tf.shape(x)))\r\n\r\n if args[\"deterministic_eval\"]:\r\n eval_fn = lambda: tf.minimum(tf.ones(tf.shape(x)), tf.ones(tf.shape(x)) * 0.5)\r\n else:\r\n eval_fn = lambda: tf.minimum(tf.ones(tf.shape(x)), tf.random_uniform(tf.shape(x)))\r\n\r\n mus = tf.cond(is_training, train_fn, eval_fn)\r\n\r\n return tf.ceil(x - mus, name=name)",
"def binomial(n, p):\n sum_ans = 0\n for k in range(n):\n sum_ans = sum_ans + bernoulli(p)\n return sum_ans",
"def calculateBernoulli(x, mean, stdev):\n\t\t\tif x:\n\t\t\t\tprob = mean\n\t\t\telse:\n\t\t\t\tprob = 1-mean\n\t\t\treturn prob",
"def Bernoulli(p, succ=1, fail=0, symbol=None):\n\n return BernoulliPSpace(p, succ, fail, symbol).value",
"def binary_blow_wind():\n s = random.random()\n return s < 0.05",
"def rbernoulli(p):\n # The idea is to sample a random real r in the unit interval, one\n # bit (i.e. binary decimal place) at a time, until we are sure\n # that either r < p or r > p.\n hi = 1.0\n lo = 0.0\n d = -1\n while lo < p < hi:\n if random.getrandbits(1):\n lo = (hi + lo)/2\n else:\n hi = (hi + lo)/2\n print lo,hi\n if p > hi:\n return 1\n else:\n return 0",
"def test_m2b_via_uniform (self):\n nt = 5\n ns = 1\n num_giter = 100\n net = self.m2b\n\n tmr = mytime.timeit()\n\n # For this test, each sample is tested independently rather than aggregated\n for i in xrange(ns):\n arrv = net.sample (nt)\n print arrv\n obs = arrv.subset (lambda a,e: a.is_initial (e), copy_evt)\n gsmp = net.gibbs_resample (obs, 0, num_giter, sample_final=False)\n for tid in xrange(nt):\n # For each task, check that the Gibbs distribution is correctly uniform\n times = []\n for smp_id in xrange(1,len(gsmp)):\n byt = gsmp[smp_id].events_of_task (tid)\n self.assertEquals (3, len(byt))\n times.append (byt[1].d)\n \n # examine gibbs function\n e0 = arrv.events_of_task (tid)[1]\n e1 = arrv.events_of_task (tid)[2]\n L = e0.a\n U = e1.d\n cdist = net.gibbs_for_departure (obs, e0)\n xs = [ L+ i*(U-L)/10 for i in xrange(10) ]\n for x in xs: print \" x %.4f p(d = x | A) %.4f\" % (x, cdist(x))\n \n # generate true sample\n s = [ numpy.random.uniform (L, U) for i in xrange(num_giter) ] \n\n # now check the cdfs\n s.sort()\n times.sort()\n print summarize (times)\n netutils.check_quantiles (self, s, times, num_giter)\n\n elapsed = tmr.total() \n print \"Events resampled per sec = \", (nt * 2 * ns * num_giter) / elapsed",
"def AsBBit(x, B):\n assert x <= 1, \"it's a probability\"\n M = 2**B\n scaled_x = x*M\n rem = scaled_x - np.floor(scaled_x)\n\n if (x == 1):\n x_bin = M - 1\n elif (scaled_x > M - 2):\n # in this range, things are ugly\n # because we reserve 'all ones' as 'always go left'\n r = np.random.rand()\n if (2 * r < scaled_x - M - 2):\n x_bin = M - 2\n else:\n x_bin = M - 1\n \n else:\n r = np.random.rand()\n if (r < rem):\n x_bin = np.floor(scaled_x)\n else:\n x_bin = np.floor(scaled_x) + 1\n\n assert x_bin < M, \"exceeded bit width\"\n return x_bin",
"def binomial(n: int, p: float) -> int:\n return sum(bernoulli_trial(p) for _ in range(n))",
"def test_gan():\n nbr_qubits = 5\n\n # Normal law\n # N = 5*10 ** 3\n #\n # Database = np.random.normal(0, 1, N)\n # test_gan_qiskit(nbr_qubits, Database)\n\n # beta\n arr_beta = beta_proba(nbr_qubits, 2, 5)\n\n general_gantest(arr_beta, nbr_qubits)\n\n # uniform not on [0, 32]\n if nbr_qubits == 5:\n arr_unif = [1 / 24] * 24 + 8 * [0]\n general_gantest(arr_unif, nbr_qubits)",
"def test_binomial(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n rng_R = random_state_type()\r\n # Use non-default parameters, and larger dimensions because of\r\n # the integer nature of the result\r\n post_r, bin = binomial(rng_R, (7, 12), 5, 0.8)\r\n\r\n f = compile.function(\r\n [compile.In(rng_R,\r\n value=numpy.random.RandomState(utt.fetch_seed()),\r\n update=post_r, mutable=True)],\r\n [bin], accept_inplace=True)\r\n\r\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\r\n val0 = f()\r\n val1 = f()\r\n numpy_val0 = numpy_rng.binomial(5, 0.8, size=(7, 12))\r\n numpy_val1 = numpy_rng.binomial(5, 0.8, size=(7, 12))\r\n print val0\r\n print numpy_val0\r\n print val1\r\n print numpy_val1\r\n self.assertTrue(numpy.all(val0 == numpy_val0))\r\n self.assertTrue(numpy.all(val1 == numpy_val1))",
"def bernoulliSample(x):\n \n g = tf.get_default_graph()\n \n with ops.name_scope(\"BernoulliSample\") as name:\n with g.gradient_override_map({\"Ceil\": \"Identity\", \"Sub\": \"BernoulliSample_ST\"}):\n return tf.ceil(x - tf.random_uniform(tf.shape(x)),name=name)",
"def _bernoulli_sampling(self, prob):\n distribution = tf.distributions.Bernoulli(probs=prob, dtype=tf.float32)\n return tf.cast(distribution.sample(), tf.float32)",
"def conditional_gumbel_rsample(hard_sample: torch.Tensor, probs: torch.Tensor, bernoulli: bool, temperature) ->torch.Tensor:\n shape = hard_sample.shape\n probs = clamp_probs(probs.expand_as(hard_sample))\n v = clamp_probs(torch.rand(shape, dtype=probs.dtype, device=probs.device))\n if bernoulli:\n pos_probs = probs[hard_sample == 1]\n v_prime = torch.zeros_like(hard_sample)\n v_prime[hard_sample == 1] = v[hard_sample == 1] * pos_probs + (1 - pos_probs)\n v_prime[hard_sample == 0] = v[hard_sample == 0] * (1 - probs[hard_sample == 0])\n log_sample = (probs.log() + probs.log1p() + v_prime.log() + v_prime.log1p()) / temperature\n return log_sample.sigmoid()\n b = hard_sample.max(-1).indices\n log_v = v.log()\n log_v_b = torch.gather(log_v, -1, b.unsqueeze(-1))\n cond_gumbels = -(-torch.div(log_v, probs) - log_v_b).log()\n index_sample = hard_sample.bool()\n cond_gumbels[index_sample] = -(-log_v[index_sample]).log()\n scores = cond_gumbels / temperature\n return (scores - scores.logsumexp(dim=-1, keepdim=True)).exp()",
"def gibbs_ask_traffic(self, X, e, Z, bn, N):\n\n #makes copies\n X = e\n e = e\n\n #probability\n probability = [0,0]\n numerator = 0\n\n\n #True, False\n\n for x in range(N):\n # second joint\n if Z == True: # if non evidence variable\n random_choice = np.random.choice([0,1], 1, True, [0.5, 0.5])[0] #Rain or No Rain\n X[1] = bn[1][random_choice][0]\n else:\n random_choice = np.random.choice([0, 1], 1, True, [0.5, 0.5])[0] #Rain or No Rain\n X[1] = bn[1][random_choice][1]\n\n # first joint\n if X[1] == 0.8 or X[1] == 0.2: # Rain is true\n X[0] = bn[0][0]\n else: # Rain is False\n X[0] = bn[0][1]\n\n # third joint\n if X[1] == 0.8 or X[1] == 0.1: # traffic\n random_late = np.random.choice([0,1], 1, True, [0.5,0.5])[0]\n X[2] = bn[2][0][random_late]\n else: # no traffic\n random_late = np.random.choice([0, 1], 1, True, [0.5, 0.5])[0]\n X[2] = bn[2][1][random_late]\n\n # print(X)\n if X[0] == 0.1:\n probability[0] += 1\n else:\n probability[1] += 1\n\n\n probability[0] = probability[0] / N\n probability[1] = probability[1] / N\n # print(probability)\n return probability",
"def Bernstein(i, n, t):\n return special.binom(n, i) * t ** i * (1 - t) ** (n - i)",
"def prbs(m, n):\n return np.array(np.random.rand(m, n) > 0.5, dtype=np.int) - 0.5",
"def probability(prob):\n return random.random() <= prob",
"def test_gamma_basis_hon(self):\n def row_generator():\n return [random.betavariate(0.5, 0.5) for i in range(self.d)]\n\n self._test_sample_basis_hon(row_generator)"
] | [
"0.7547345",
"0.7430234",
"0.737914",
"0.7260277",
"0.7170436",
"0.70253694",
"0.6839243",
"0.67702055",
"0.653989",
"0.6529904",
"0.6523155",
"0.64852256",
"0.648092",
"0.6472643",
"0.6457794",
"0.64568967",
"0.64539707",
"0.6446213",
"0.63996285",
"0.63980657",
"0.6349709",
"0.6291071",
"0.6269517",
"0.62574685",
"0.6245582",
"0.6233084",
"0.62082386",
"0.6207911",
"0.61526394",
"0.6152623"
] | 0.77244 | 0 |
Test binomial distribution is assigned metropolis method. | def test_binomial(self):
with Model() as model:
Binomial('x', 10, 0.5)
steps = assign_step_methods(model, [])
assert isinstance(steps, Metropolis) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_bernoulli(self):\n with Model() as model:\n Bernoulli('x', 0.5)\n steps = assign_step_methods(model, [])\n assert isinstance(steps, BinaryGibbsMetropolis)",
"def test_multinomial(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n m = Module()\r\n m.random = RandomStreams(utt.fetch_seed())\r\n m.fn = Method([], m.random.multinomial((20,20), 1, [0.1]*10))\r\n\r\n made = m.make()\r\n made.random.initialize()\r\n fn_val0 = made.fn()\r\n fn_val1 = made.fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n numpy_val0 = rng.multinomial(1, [0.1]*10, size=(20,20))\r\n numpy_val1 = rng.multinomial(1, [0.1]*10, size=(20,20))\r\n\r\n assert numpy.all(fn_val0 == numpy_val0)\r\n assert numpy.all(fn_val1 == numpy_val1)",
"def test_Bernoulli_NB_estimators():",
"def bernoulli(p):\n bern = rn.binomial(1,p)\n return bern",
"def test_binomial(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n rng_R = random_state_type()\r\n # Use non-default parameters, and larger dimensions because of\r\n # the integer nature of the result\r\n post_r, bin = binomial(rng_R, (7, 12), 5, 0.8)\r\n\r\n f = compile.function(\r\n [compile.In(rng_R,\r\n value=numpy.random.RandomState(utt.fetch_seed()),\r\n update=post_r, mutable=True)],\r\n [bin], accept_inplace=True)\r\n\r\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\r\n val0 = f()\r\n val1 = f()\r\n numpy_val0 = numpy_rng.binomial(5, 0.8, size=(7, 12))\r\n numpy_val1 = numpy_rng.binomial(5, 0.8, size=(7, 12))\r\n print val0\r\n print numpy_val0\r\n print val1\r\n print numpy_val1\r\n self.assertTrue(numpy.all(val0 == numpy_val0))\r\n self.assertTrue(numpy.all(val1 == numpy_val1))",
"def binomial(n, p):\n sum_ans = 0\n for k in range(n):\n sum_ans = sum_ans + bernoulli(p)\n return sum_ans",
"def binomial(n: int, p: float) -> int:\n return sum(bernoulli_trial(p) for _ in range(n))",
"def binom_distribution(self):\n null_control = stats.binom.rvs(p = self.p_sample, n = self.n_control, size = 1000000) / self.n_control\n null_treatment = stats.binom.rvs(p = self.p_sample, n = self.n_treatment, size = 1000000) / self.n_treatment\n\n alt_control = stats.binom.rvs(p = self.p_control, n = self.n_control, size = 1000000) / self.n_control\n alt_treatment = stats.binom.rvs(p = self.p_treatment, n = self.n_treatment, size = 1000000) / self.n_treatment\n\n self.binom_null = null_treatment - null_control\n self.binom_alt = alt_treatment - alt_control\n\n self.binom_control = alt_control\n self.binom_treatment = alt_treatment",
"def test_multinomial(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n random = RandomStreams(utt.fetch_seed())\r\n fn = function([], random.multinomial((4,4), 1, [0.1]*10), updates=random.updates())\r\n\r\n fn_val0 = fn()\r\n fn_val1 = fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n numpy_val0 = rng.multinomial(1, [0.1]*10, size=(4,4))\r\n numpy_val1 = rng.multinomial(1, [0.1]*10, size=(4,4))\r\n\r\n assert numpy.all(fn_val0 == numpy_val0)\r\n assert numpy.all(fn_val1 == numpy_val1)",
"def BinomialCoefficient(n, m):\n if m == 0:\n return 1\n\n elif m == 1:\n return n\n\n else:\n ma = max(n - m, m)\n mi = min(n - m, m)\n\n enum = functools.reduce(lambda x, y: x * y, range(ma + 1, n + 1), 1)\n\n return enum / Factorial(mi)",
"def base_binom_pro(pro,n0):\n res = stats.binom.pmf(range(n0+1), n0, 1/2.0)\n a = 0\n for i in range(n0+1):\n a = a + res[i]\n if a>=pro: \n return i",
"def test_binomial_init_from_monomials(self):\n m1 = Monomial(3, 3)\n m2 = Monomial(4, 4)\n t1 = (3, 3)\n t2 = (4, 4)\n expected = Polynomial([m1, m2], from_monomials=True)\n\n b1 = Binomial(m1, m2)\n b2 = Binomial(t1, t2)\n\n self.assertEqual(expected, b1)\n self.assertEqual(expected, b2)\n self.assertEqual(b1, b2)",
"def base_binom_num(x,n0):\n res = stats.binom.pmf(range(n0+1), n0, 1/2.0) \n a = 0 \n for i in range(n0+1):\n if i <= x:\n a = a +res[i]\n return a",
"def test_logistic():\n r=np.random.normal(size=20)\n assert np.isclose( ilogistic(logistic(r)),r ).all()",
"def bernoulli_num(n):\n return mp.bernoulli(n)",
"def binom(n, k):\n if n < 0 or k < 0:\n raise Exception(\"Error: Negative argument in binomial coefficient!\")\n if n < k:\n return 0\n if n == k or k == 0:\n return 1\n if k < n - k:\n delta = n - k\n iMax = k\n else:\n delta = k\n iMax = n - k\n ans = delta + 1\n for i in range(2, iMax + 1):\n ans = (ans * (delta + i)) // i\n return ans",
"def test_probabilities_are_ok(self, seed):\n bins = defaultdict(int)\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", \"2\", \"3\", \"4\")\n categories = OrderedDict(zip(categories, probs))\n dim = Categorical(\"yolo\", categories)\n for _ in range(500):\n sample = dim.sample(seed=seed)[0]\n bins[sample] += 1\n for keys in bins.keys():\n bins[keys] /= float(500)\n for key, value in categories.items():\n assert abs(bins[key] - value) < 0.01",
"def normal_approximation_to_binomial(n, p):\n mu = p * n\n sigma = math.sqrt(p * (1 - p ) * n)\n return mu, sigma",
"def test_test_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.test.equals(atom.mnb.test)\n assert check_scaling(atom.lr.test)",
"def bernoulli(p):\r\n if np.random.random() < p:\r\n return 0\r\n else:\r\n return 1",
"def normal_approximation_to_binomial(n, p):\n mu = p * n\n sigma = math.sqrt(p * (1 - p) * n)\n return mu, sigma",
"def normal_approximation_to_binomial(n, p):\n mu = p * n\n sigma = math.sqrt(p * (1 - p) * n)\n return mu, sigma",
"def normal_approximation_to_binomial(n, p):\n mu = p * n\n sigma = math.sqrt(p * (1 - p) * n)\n return mu, sigma",
"def normal_approximation_to_binomial(n, p):\n mu = p * n\n sigma = math.sqrt(p * (1 - p) * n)\n return mu, sigma",
"def bernoulli(n):\n\n x, res, s, c = Rat(0), Rat(0), Rat(0), Rat(-1)\n for k in range(1, n+2):\n c *= 1 - Rat(n + 2)/k\n s += x**n\n x += 1\n res += c*s/k\n return res",
"def normal_approximation_to_binomial(n, p):\r\n mu = p*n\r\n sigma = math.sqrt(p * (1 - p) * n)\r\n\r\n return mu, sigma",
"def test_binomial_default_init(self):\n expected = Polynomial(1, 1, 0)\n\n b = Binomial()\n\n self.assertEqual(expected, b)",
"def maclaurin_binomial(value,m,k):\n global first_value\n first_value = 0.0\n error(value)\n\n #attempt to Approximate (1+x)^m for given values \n try:\n \n for item in xrange(1,k):\n next_value =m*(value**item)/factorial(item)\n \n for i in range(2,item+1): \n next_second_value =(m-i+1)\n next_value *= next_second_value\n first_value += next_value\n\n return first_value + 1\n \n #Raise TypeError if input is not within\n #the interval of convergence\n except TypeError,exception:\n print exception\n\n #Raise OverflowError if an over flow occur \n except OverflowError:\n print '\\n<Please enter a lower k value to avoid the Over flow\\n '",
"def __init__(self):\n GinacFunction.__init__(self, \"binomial\", nargs=2, preserved_arg=1,\n conversions=dict(maxima='binomial',\n mathematica='Binomial',\n sympy='binomial'))",
"def test_hamiltonian_boson(n_particles):\n coefficients = get_coefficients(n_particles, coef_range=(0, 1), seed=3)\n result = quadratic_op(n_particles, is_bosonic=True, dimensions=3, coefficients=coefficients)\n expected = np.array(custom_quadratic(n_particles, coefficients, is_bosonic=True)).astype(complex)\n assert result.toarray() == pytest.approx(expected, 1e-3)"
] | [
"0.69088066",
"0.6723618",
"0.66573316",
"0.6643335",
"0.6618572",
"0.6482851",
"0.6428169",
"0.6424013",
"0.6345978",
"0.62615824",
"0.6225464",
"0.61995316",
"0.61335987",
"0.6075258",
"0.60746217",
"0.6054197",
"0.6051179",
"0.60234743",
"0.60210484",
"0.60168356",
"0.5997175",
"0.5997175",
"0.5997175",
"0.5997175",
"0.5964645",
"0.59639007",
"0.596122",
"0.5960919",
"0.5953373",
"0.5951818"
] | 0.76352745 | 0 |
Ensure that, for the given service, the video_id is valid. | def clean_video_id(self):
failed = False
d = self.cleaned_data
service = d.get('service')
# Get the video id and clear whitespace on either side.
video_id = d.get('video_id', '').strip()
# Validate using YouTube's API:
if service == 'youtube':
url = ('http://gdata.youtube.com/feeds/api/videos/{}?alt=json'.
format(video_id))
data = requests.get(url)
# Ensure we can parse the JSON data.
try:
json = simplejson.loads(data.text)
# If not, mark this as a failure.
except ValueError:
failed = True
# Validate using Vimeo's API:
elif service == 'vimeo':
data = requests.get('http://vimeo.com/api/v2/video/{}.json'.
format(video_id))
# Ensure we can parse the JSON data.
try:
json = simplejson.loads(data.text)
# If not, mark this as a failure.
except ValueError:
failed = True
# Respond based on the outcome.
if failed:
message = _("Couldn't validate video id using {} API. Please "
"verify it exists and check for "
"typos.".format(service))
raise forms.ValidationError(message)
return video_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def allow_video(self, video_id):\n print(\"allow_video needs implementation\")",
"def allow_video(self, video_id):\n print(\"allow_video needs implementation\")",
"def allow_video(self, video_id):\n print(\"allow_video needs implementation\")",
"def allow_video(self, video_id):\n print(\"allow_video needs implementation\")",
"def api_isin_playlist(a_playlist_id, a_video_id, api_service):\n lst_of_videos = []\n next_page_token = None\n\n while 1:\n\n success = False\n\n while not success:\n try:\n request = api_service.playlistItems().list(playlistId=a_playlist_id,\n part=['snippet', 'contentDetails'],\n maxResults=50,\n pageToken=next_page_token).execute()\n lst_of_videos += request['items']\n next_page_token = request.get('nextPageToken')\n success = True\n\n except ConnectionResetError:\n print(\"ConnectionResetError: let me sleep for 5 seconds, just enough time to recover...\")\n sleep(5)\n\n except HttpError:\n print(\"OUT OF GOOGLE CREDIT - COME BACK LATER\")\n success = True\n\n if next_page_token is None:\n break\n\n id_set = {video['contentDetails']['videoId'] for video in lst_of_videos}\n\n # print(id_set)\n\n if a_video_id not in id_set:\n print(f\"Oops, the video \\\" https://www.youtube.com/watch?v={a_video_id} \\\" is not here yet!\")\n return False\n\n return True",
"def video_id(self, video_id):\n # type: (string_types) -> None\n\n if video_id is not None:\n if not isinstance(video_id, string_types):\n raise TypeError(\"Invalid type for `video_id`, type has to be `string_types`\")\n\n self._video_id = video_id",
"def _is_validation(video_id):\n hasher = md5()\n hasher.update(bytes(video_id, 'utf-8'))\n first = hasher.hexdigest()[0]\n return first in ['0', '1']",
"def test_get_video_id_from_url(self):\n self.assertEqual(get_video_id_from_url('https://www.youtube.com/embed/DqGwxR_0d1M'),\n 'DqGwxR_0d1M')\n self.assertEqual(get_video_id_from_url('https://youtu.be/DqGwxR_0d1M'), 'DqGwxR_0d1M')\n self.assertEqual(\n get_video_id_from_url('https://www.youtube.com/watch?v=DqGwxR_0d1M&feature=youtu.be'),\n 'DqGwxR_0d1M')\n self.assertEqual(get_video_id_from_url('https://www.youtube.com/watch?v=DqGwxR_0d1M'),\n 'DqGwxR_0d1M')",
"def assert_video_status(self, url, edx_video_id, status):\n response = self.client.get_json(url)\n self.assertEqual(response.status_code, 200)\n videos = json.loads(response.content.decode('utf-8'))[\"videos\"]\n for video in videos:\n if video['edx_video_id'] == edx_video_id:\n return self.assertEqual(video['status'], status)\n\n # Test should fail if video not found\n self.assertEqual(True, False, 'Invalid edx_video_id')",
"def allow_video(self, video_id):\n if self._video_library.get_video(video_id) is None:\n print(\"Cannot remove flag from video: Video does not exist\")\n elif not self._video_library.get_video(video_id).flagged:\n print(\"Cannot remove flag from video: Video is not flagged\")\n else:\n print(f\"Successfully removed flag from video: {self._video_library.get_video(video_id).title}\")\n self._video_library.get_video(video_id).flagged = False\n self._video_library.get_video(video_id).flag_reason = \"Not supplied\"",
"def test_api_video_create_for_nonexistent_playlist(self):\n user = factories.UserFactory()\n some_uuid = uuid.uuid4()\n\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(user.id)\n jwt_token.payload[\"user\"] = {\n \"id\": str(user.id),\n \"username\": user.username,\n }\n self.assertEqual(models.Video.objects.count(), 0)\n\n response = self.client.post(\n \"/api/videos/\",\n {\"lti_id\": \"video_one\", \"playlist\": some_uuid, \"title\": \"Some video\"},\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(models.Video.objects.count(), 0)\n self.assertEqual(response.status_code, 403)",
"def validate_youtube(fragment):\n request=urllib.urlopen('https://www.youtube.com/watch?v=' + fragment)\n return request.getcode() == 200",
"def valid_google_id(google_id: str = None):\n def fake_google(id_token: str) -> str:\n if google_id is None:\n return f\"ID_FROM_{id_token}\"\n else:\n return google_id\n return patch(\"youtube_podcast_api.controllers.user.verify_google_auth\", new=fake_google)",
"def clean_avatar_service_id(self):\n avatar_service_id = self.cleaned_data['avatar_service_id']\n\n try:\n avatar_service = self.avatar_service_registry.get(\n 'avatar_service_id', avatar_service_id)\n except AvatarServiceNotFoundError:\n avatar_service = None\n else:\n if not self.avatar_service_registry.is_enabled(avatar_service):\n avatar_service = None\n\n if avatar_service is None or avatar_service.hidden:\n raise ValidationError(_('Invalid service ID'))\n\n return avatar_service_id",
"def play_youtube_video_service(service, media_id):\n target_players = component.extract_from_service(service)\n\n if media_id:\n for player in target_players:\n player.play_youtube(media_id)",
"def _check_lidvid_field(self, doi: Doi):\n\n vid: Optional[str]\n if \"::\" in doi.pds_identifier:\n lid, vid = doi.pds_identifier.split(\"::\")\n else:\n lid = doi.pds_identifier\n vid = None\n\n lid_tokens = lid.split(\":\")\n\n try:\n # Make sure the prescribed static fields are correct\n required_prefix_elements = [\"urn\", \"nasa\", \"pds\"]\n if lid_tokens[:3] != required_prefix_elements:\n raise InvalidIdentifierException(f\"LIDVID must start with elements {required_prefix_elements}\")\n\n # Make sure we got the minimum number of fields, and that\n # the number of fields is consistent with the product type\n if not MIN_LID_FIELDS <= len(lid_tokens) <= MAX_LID_FIELDS:\n raise InvalidIdentifierException(\n f\"LIDVID must contain only between {MIN_LID_FIELDS} \"\n f\"and {MAX_LID_FIELDS} colon-delimited fields, \"\n f\"got {len(lid_tokens)} field(s)\"\n )\n\n # Now check each field for the expected set of characters\n token_regex = re.compile(r\"[a-z0-9-._]*\")\n\n for index, token in enumerate(lid_tokens):\n if not token_regex.fullmatch(token):\n raise InvalidIdentifierException(\n f\"LID field {index + 1} ({token}) is invalid. \"\n f\"Fields must only consist of lowercase letters, digits, \"\n f\"hyphens (-), underscores (_) or periods (.), per PDS SR Sec. 6D.2\"\n )\n\n # Make sure the VID conforms to a version number\n version_regex = re.compile(r\"^\\d+\\.\\d+$\")\n\n if vid and not version_regex.fullmatch(vid):\n raise InvalidIdentifierException(\n f\"Parsed VID ({vid}) does not conform to a valid version identifier. \"\n \"Version identifier must consist only of a major and minor version \"\n \"joined with a period (ex: 1.0), per PDS SR Sec. 6D.3\"\n )\n\n # Finally, ensure the whole identifier conforms to the length constraint\n identifier_max_length = 255\n if not len(doi.pds_identifier) <= identifier_max_length:\n raise InvalidIdentifierException(\n f\"LIDVID {doi.pds_identifier} does not conform to PDS identifier max length constraint \"\n f\"({identifier_max_length}), per PDS SR Sec. 6D\"\n )\n except InvalidIdentifierException as err:\n raise InvalidIdentifierException(\n f\"The record identifier {doi.pds_identifier} (DOI {doi.doi}) \"\n f\"does not conform to a valid LIDVID format.\\n\"\n f\"Reason: {str(err)}\\n\"\n \"If the identifier is not intended to be a LIDVID, use the \"\n \"--force option to bypass the results of this check.\"\n )",
"def exists_by_video_id (self, video_id):\n return self.read_by_video_id (video_id) is not None",
"def if_already_present(video_id: str) -> bool:\n return Video.objects.filter(video_id=video_id).exists()",
"def test_api_video_create_token_user_playlist_preexists(self):\n jwt_token = AccessToken()\n response = self.client.post(\n \"/api/videos/\", HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\"\n )\n self.assertEqual(response.status_code, 401)\n self.assertFalse(models.Video.objects.exists())",
"def allow_video(self, video_id):\n video = self._video_library.get_video(video_id)\n if not self._video_library.get_video(video_id):\n print(\"Cannot remove flag from video: Video does not exist\")\n return\n if not video.flag:\n print(\"Cannot remove flag from video: Video is not flagged\")\n return\n print(f\"Successfully removed flag from video: {video.title}\")\n video.set_flag(None)",
"def check_db_for_vid(self):\n with db.cursor() as cursor:\n if self.videoId in db.\n pass",
"def is_valid(video):\n return video.length != -1",
"def google_youtube_check(id):\n\tif not API_KEY:\n\t\traise ConfigException(\"Require API_KEY for googleapi. Reload after setting.\")\n\td = {\"id\" : quote(id.encode(\"utf-8\")), \"part\" : \"id,status\", \"key\" : API_KEY}\n\t\n\tf = urlopen(YOUTUBE_INFO_URL % (urlencode(d)))\n\tytdata = load(f)\n\tif not ytdata.get(\"items\"): # if there are no items for the ID search, return False\n\t\treturn False\n\treturn True",
"def test_validate_ticket_service_mismatch(self):\n service = 'http://sub.example.com/'\n st = ServiceTicketFactory()\n with self.assertRaises(InvalidService):\n ServiceTicket.objects.validate_ticket(st.ticket, service)",
"def validate_and_insert(self, video_details: dict):\n video_id = video_details.get('id', {}).get('videoId')\n\n if not video_id or self.if_already_present(video_id):\n return\n\n try:\n Video.objects.create(\n etag=video_details['etag'],\n video_id=video_id,\n thumbnails=video_details['snippet']['thumbnails'],\n title=video_details['snippet']['title'],\n description=video_details['snippet']['description'],\n published_at=video_details['snippet']['publishTime']\n )\n except (AttributeError, IntegrityError, ValueError):\n return",
"def search_video_id(broken_link):\n for param in broken_link:\n vid = regex_video_id(param)\n if vid:\n return vid",
"def check(self):\n #\n # *****************\n # *****************\n # TODO: Check really if video is valid\n # *****************\n # *****************\n return True",
"def _validate_app_id(self, app_id):\n try:\n uuid_hex = UUID(app_id)\n regex = APP_SECRET_REGEX_LIST[0]\n m = regex.search(app_id)\n if not m:\n return False\n elif uuid_hex or m:\n return True\n except ValueError:\n return False",
"def play_video(self, video_id):\n video_info = self._video_library.get_all_videos()\n for i in video_info:\n if video_id == i.video_id:\n print(f\"Playing video: {i.title}\")\n def search(list, platform):\n for i in range(len(list)):\n if list[i] == platform:\n return False\n return True\n video_id_list = []\n for i in video_info:\n video_id_list.append(i.video_id)\n if search(video_id_list, video_id):\n print(\"Cannot play video: Video does not exist\")",
"def test_clean_only_youtube_id():\n resource = models.MediaResource(youtube_id=\"dQw4w9WgXcQ\")\n\n resource.clean()"
] | [
"0.60907304",
"0.60907304",
"0.60907304",
"0.60907304",
"0.6068158",
"0.5851678",
"0.57884514",
"0.56182677",
"0.5566757",
"0.55377215",
"0.5534833",
"0.5517205",
"0.5513362",
"0.5502711",
"0.5484015",
"0.54670113",
"0.54354274",
"0.54342276",
"0.53538764",
"0.53443223",
"0.53250456",
"0.5314621",
"0.52855134",
"0.5281763",
"0.52759343",
"0.5228741",
"0.5207245",
"0.52055854",
"0.51913553",
"0.5164831"
] | 0.7674897 | 0 |
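Illustrative aside, not part of the dataset row above: `clean_video_id` follows Django's `clean_<fieldname>` convention, so it would normally live on a form such as the hypothetical one sketched below (the field names, choices, and max length are assumptions, not taken from the source) and run automatically during `is_valid()`.

from django import forms

class VideoForm(forms.Form):
    # Hypothetical host form for the clean_video_id method shown above.
    service = forms.ChoiceField(choices=[('youtube', 'YouTube'), ('vimeo', 'Vimeo')])
    video_id = forms.CharField(max_length=100)

    # clean_video_id(self) from the document above would be defined here;
    # Django calls it after the field's own validation when the form is cleaned.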
Computes coverage rate for `y_pred`. | def coverage(y_true, y_pred):
    # The last column of y_pred (index m) is treated as the "abstain" class.
    m = tf.shape(y_pred)[1] - tf.constant(1, dtype=tf.int32)
    n_samples = tf.cast(tf.shape(y_pred)[0], tf.float32)
    # Count the samples whose top-scoring class is the abstain class.
    n_abstain = tf.reduce_sum(
        tf.where(tf.argmax(y_pred, axis=1, output_type=tf.int32) == m, 1.0, 0.0)
    )
    # Coverage = fraction of samples that receive a real (non-abstain) prediction.
return tf.constant(1.0) - n_abstain / n_samples | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hit_rate(y_true, y_pred, spu=None):\n mask = y_pred.astype(bool)\n y_true_in_pred = y_true[mask]\n return y_true_in_pred.sum()/y_true.sum()",
"def recall_score(y_true, y_pred):\n return ((y_true == 1) * (y_pred == 1)).sum() / (y_true == 1).sum()",
"def ari(y_pred, y_true):\n return metrics.adjusted_rand_score(y_true, y_pred)",
"def precision_score(y_true, y_pred):\n return ((y_true == 1) * (y_pred == 1)).sum() / (y_pred == 1).sum()",
"def recall(y_true, y_pred):\n true_positives = bk.sum(bk.round(bk.clip(y_true * y_pred, 0, 1)))\n possible_positives = bk.sum(bk.round(bk.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + bk.epsilon())\n return recall",
"def score(self, y_true, y_pred):\r\n pass",
"def calc_coverage(self, y):\n\n coverage = sum(np.any(self._round(y), axis=1)) / len(y)\n\n return coverage",
"def accuaracy_score(y_true, y_pred):\n\taccuracy = np.sum(y_true == y_pred, axis=0) / len(y_true)\n\treturn accuracy",
"def recall(y_true, y_pred):\n true_positives = backend.sum(backend.round(backend.clip(y_true * y_pred, 0, 1)))\n possible_positives = backend.sum(backend.round(backend.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + backend.epsilon())\n return recall",
"def recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall",
"def recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall",
"def recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall",
"def recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall",
"def recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall",
"def recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall",
"def recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall",
"def recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall",
"def recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall",
"def recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall",
"def recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall",
"def recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall",
"def recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall",
"def recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall",
"def recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall",
"def recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall",
"def recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall",
"def recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall",
"def recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall",
"def recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall",
"def recall(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\r\n recall = true_positives / (possible_positives + K.epsilon())\r\n return recall"
] | [
"0.71039015",
"0.70715314",
"0.6947408",
"0.6926472",
"0.68919915",
"0.6836527",
"0.6757787",
"0.6753589",
"0.67308986",
"0.67180943",
"0.66951615",
"0.66951615",
"0.66951615",
"0.66951615",
"0.66951615",
"0.66951615",
"0.66951615",
"0.66951615",
"0.66951615",
"0.66951615",
"0.66951615",
"0.66951615",
"0.66951615",
"0.66951615",
"0.66951615",
"0.66951615",
"0.66951615",
"0.66951615",
"0.66951615",
"0.6690663"
] | 0.75946474 | 0 |
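Illustrative aside, not part of the dataset row above: a minimal check of the `coverage` function, assuming TensorFlow 2.x and that the last column of `y_pred` is the abstain option (which is what the `argmax == m` comparison in the document implies). The scores below are made up for the example.

import tensorflow as tf

# Hypothetical scores for a 3-class problem plus a trailing abstain column (index 3).
y_pred = tf.constant([
    [0.7, 0.1, 0.1, 0.1],   # predicts class 0
    [0.1, 0.1, 0.1, 0.7],   # abstains
    [0.2, 0.6, 0.1, 0.1],   # predicts class 1
    [0.1, 0.2, 0.6, 0.1],   # predicts class 2
])
y_true = tf.constant([0, 1, 1, 2])  # accepted but unused by coverage

print(float(coverage(y_true, y_pred)))  # expected 0.75: one of four samples abstained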
View products in Cart. | def index(self, user):
cart_products = CartProduct.index(user)
CartProductsView.index(cart_products) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def products(request):\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n products = Product.objects.all()\n template = \"auctionsmng/products.html\"\n\n context = {\n 'products': products\n }\n\n return render(request, template, context)",
"def view_cart(request):\n categories = all_categories()\n productTypes = all_productTypes()\n return render(request, \"cart.html\", {\"categories\": categories,\n \"productTypes\": productTypes})",
"def view_cart(request):\n \n return render(request, \"cart.html\" )",
"def products(request):\n\n return render(request, \"core/products.html\", {\n \"products\": Product.objects.all()\n })",
"def display(auth_context):\n\n cart = carts.get_cart(auth_context.get('uid'))\n for item in cart:\n product = product_catalog.get_product(item.item_id)\n item.info = product\n\n return render_template('cart.html',\n cart=cart,\n auth_context=auth_context,\n bucket=product_catalog.BUCKET)",
"def cart_detail(request):\n cart = Cart(request)\n return render(request, 'cart/cart.html', {'cart': cart})",
"def view_cart(request):\n\n return render(request, 'cart/cart.html')",
"def view_cart(request):\n\n return render(request, 'cart/cart.html')",
"def product_list_view(request):\n queryset = Product.objects.all()\n context = {\n \"object_list\": queryset\n }\n\n return render(request, \"products/product_list.html\", context)",
"def product_view(request, product):\n product = Products.objects.get(product=product)\n\n context = {\n \"product\": product,\n }\n\n return render(request, \"products/product_detail.html\", context)",
"def products():\n\n\treturn render_template(\"products.html\")",
"def all_products(request):\n products = Product.objects.all()\n return render(request, \"products.html\", {\"products\": products})",
"def view_cart(request):\n return render(request, \"cart.html\")",
"def view_cart(request):\n return render(request, \"cart.html\")",
"def view_cart(request):\n return render(request, \"cart.html\")",
"def view_product(cls, product_id):\n product = Product.get_by_id(product_id)\n print(f'Product ID: {product.product_id}')\n print(f'Product Name: {product.product_name}')\n print(f'Quantity: {product.product_quantity}')\n print(f'Price: ${product.product_price / 100:.2f}\\n')",
"def all_products(request):\n\n products = Product.objects.all()\n return render(request, 'products.html', {'products': products})",
"def menu(request):\n cart = cartData(request)\n cart_items = cart['cart_items']\n # order = cart['order']\n # items = cart['items']\n # Get all our object\n products = BobaProduct.objects.all()\n # Dictionary to hold our products\n context = {\"products\": products, \"cart_items\": cart_items}\n return render(request, 'store/menu.html', context)",
"def product_detail(request, product_id):\n product = get_object_or_404(Product, pk=product_id)\n print(request.path)\n template = './product_detail.html'\n context = {\n 'product': product,\n }\n\n # products = Product.objects.all()\n\n return render(request, template, context)",
"def detail(request):\n # del request.session['cart_id']\n # del request.session['total_in_cart']\n data = {}\n if (cart_id := request.session.get('cart_id', None)):\n cart = Cart.objects.get(pk=cart_id)\n data['products_in_cart'] = cart.cartitems.all()\n data['total_price'] = cart.cart_price\n\n return render(request, 'cart/details.html', data)",
"def index(request):\n\n products = Top_selling_product.objects.all()\n context = {'products':products}\n\n return render(request, 'home/index.html',context)",
"def shop_products(request):\n\n shop = Shop.objects.get(user=request.user)\n products = Products.objects.filter(shop_rel=shop)\n paginator = pagination.PageNumberPagination()\n paginator.page_size = 7\n result_page = paginator.paginate_queryset(products, request=request)\n serializer = ProductSerializer(result_page, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def api_display_cart():\r\n\tconn = sqlite3.connect('Shopify_products.db')\r\n\tconn.row_factory = dict_factory\r\n\tcur = conn.cursor()\r\n\tcart = cur.execute('SELECT * FROM cart;').fetchall()\r\n\tcart.append(cur.execute('SELECT SUM(price) from cart;').fetchone())\r\n\treturn jsonify(cart)",
"def display(auth_context):\n\n products = product_catalog.list_products()\n # Get promoted products recommended by the AutoML model.\n promos = product_catalog.get_promos()\n return render_template('product_catalog.html',\n products=products,\n promos=promos,\n auth_context=auth_context,\n bucket=product_catalog.BUCKET)",
"def product_detail(request, product_id):\n product = get_object_or_404(Product, pk=product_id)\n\n context = {\n 'product': product,\n }\n\n return render(request, 'products/product_detail.html', context)",
"def products():\n username = session['username']\n api_key = session['api_key']\n url = 'https://consumernotebook.com/api/v1/products/?username={0}&apikey={1}'.format(username, api_key)\n r = requests.get(url)\n products = []\n if r.status_code != 200:\n error = \"{0} error. Are you sure you entered a valid API key?\".format(r.status_code)\n return render_template('products.html', error=error)\n else:\n products_json = json.loads(r.content)\n for product in products_json[u'objects']:\n products.append(product[u'title'])\n return render_template('products.html', products=products)",
"def cart_detail(request):\n cart = Cart(request)\n # Allow user to change the quantity from the details page.\n for item in cart:\n # Remember that a cart is stored as a dictionary in the user's session.\n # Here, we're adding a new key/value pair to the cart.\n # Create an instance of CartAddProductForm for each item in the cart to\n # allow changing product quantities. Initialize the form with the current\n # item quantity and set the update field to True so that when we submit the\n # form to the cart_add view, the current quantity is replaced with the new\n # one.\n # I DON'T QUITE UNDERSTAND WHAT THIS CODE IS DOING.\n item['update_quantity_form'] = CartAddProductForm(\n initial={'quantity': item['quantity'],\n 'update': True})\n coupon_apply_form = CouponApplyForm()\n return render(request, 'cart/detail.html', {'cart': cart, 'coupon_apply_form': coupon_apply_form})",
"def list_products(self):\n return self._make_get_request(self._urls['products'])",
"def product_detail(request, product_id):\n # Search for product in Product Model using pk identifier obtained from project_id\n product = get_object_or_404(Product, pk=product_id)\n context = {\n 'product': product,\n }\n return render(request, 'products/product_detail.html', context)",
"def get_context_data(self, *args, object_list=None, **kwargs):\n context = super(ProductListView, self).get_context_data(*args, **kwargs)\n cart_obj, new_obj = Cart.objects.new_or_get(self.request)\n context[\"cart\"] = cart_obj\n return context"
] | [
"0.7544936",
"0.74219257",
"0.72852266",
"0.7261602",
"0.72421676",
"0.7014876",
"0.70145303",
"0.70145303",
"0.7007686",
"0.6988518",
"0.6979214",
"0.6970391",
"0.69648755",
"0.69648755",
"0.69648755",
"0.69543886",
"0.69423074",
"0.6799814",
"0.6670603",
"0.6653604",
"0.6581831",
"0.65502435",
"0.65358865",
"0.6484672",
"0.6478781",
"0.64779305",
"0.6445377",
"0.6441145",
"0.6440783",
"0.6429206"
] | 0.77032435 | 0 |
Delete Product from Cart. | def delete(self, user, product):
cart_product = CartProduct.delete(user, product)
CartProductsView.delete(cart_product) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete(self, product):\n product_id = str(product)\n if product_id in self.cart:\n del self.cart[product_id]\n self.save()",
"def cart_remove(request, product_id):\n cart = Cart(request)\n product = get_object_or_404(Product, id=product_id)\n cart.remove(product)\n return redirect('cart:cart_detail')",
"def remove(self, product):\n product_id = str(product.id)\n if product_id in self.cart:\n del self.cart[product_id]\n self.save()",
"def delete(self, product):\n product_id = str(product)\n\n if product_id in self.basket:\n del self.basket[product_id]\n #print(product_id)\n self.save()",
"def delete_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n product.delete()\n messages.success(request, 'Product deleted!')\n return redirect(reverse('products'))",
"def delete_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n product.delete()\n messages.success(request, 'Product successfully deleted')\n return redirect(reverse('products'))",
"def delete_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'This feature is for Admin only.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n product.delete()\n messages.success(request, 'Product Deleted')\n return redirect(reverse('home'))",
"def delete_product(self, product_id):\n con = dbcon()\n cur = con.cursor()\n cur.execute(\"SELECT * FROM products WHERE product_id=%(product_id)s\",\\\n {\"product_id\":product_id})\n found_id = cur.fetchall()\n if found_id:\n cur.execute(\"DELETE FROM products WHERE product_id=%(product_id)s\",\\\n {'product_id':product_id})\n con.commit()\n return jsonify({'message': 'Product deleted successfully'})\n return jsonify({\"message\":\"Couldn't find product ID\"})",
"def delete_product(self, product_id):\n return self._make_delete_request(self._urls['product'] % product_id)",
"def delete_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'Access denied!\\\n Sorry, only site owners have this permission.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n product.delete()\n messages.success(request, f'{product.name} was successfully deleted!')\n return redirect(reverse('products'))",
"def delete(self):\n if current_user:\n cart = Cart.query.current_user().first()\n else:\n cart_token = request.args.get('cart_token')\n cart = Cart.query.filter_by(token=cart_token).first()\n\n if cart:\n db.session.delete(cart)\n db.session.commit()\n return {\n \"message\":\"successfuly cleared the shopping cart.\"\n }",
"def delete(self, product_id):\n\n return product.delete_product(product_id)",
"def delete(self, product):\n product_id = str(product)\n\n\n if product_id in self.sepet:\n del self.sepet[product_id]\n print(product_id)\n self.session.modified=True",
"def delete_product(request, product_id):\n if not request.user.is_superuser:\n messages.error(request, 'Invalid Request: Only admin can delete products/services.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n product.delete()\n messages.success(request, 'Product/Service deleted!')\n return redirect(reverse('products'))",
"def delete(self, product_id):\n product = ProductModel.query.filter_by(id=product_id).first()\n if not product:\n product_api.abort(404, \"Product {} not found\".format(product_id))\n product.delete()\n return '', 204",
"def delete_product_view(request, id):\n Product.objects.get(id=id).delete()\n messages.success(request, \"Product deleted successfully.\")\n return redirect(\"products\")",
"def delete(self, product_id):\n product = ProductModel.find_by_id(product_id)\n if product:\n product.delete_from_db()\n\n return {'message': \"Product deleted\"}\n else:\n return {'message': \"Product doesn't exist\"}, 404",
"def remove_item(self, product):\n if product in self.items_in_cart:\n del self.items_in_cart[product]\n print (product + \" removed.\")\n else:\n print (product + \" is not in the cart.\")",
"def remove_item(self, product):\r\n if product in self.items_in_cart:\r\n del self.items_in_cart[product]\r\n print(product + \" removed.\")\r\n else:\r\n print(product + \" is not in the cart.\")",
"def remove_item(self, product):\n if product in self.items_in_cart:\n del self.items_in_cart[product]\n print product + \" removed.\"\n else:\n print product + \" is not in the cart.\"",
"def delete(self, _id):\n try:\n \n product_to_delete = self.productModel.query.filter_by(id=_id).first()\n \n if product_to_delete: \n self.db.session.delete(product_to_delete)\n self.db.session.commit()\n \n return Response(\n response=json.dumps({\n 'Message': 'Product deleted.'\n }),\n status=200,\n mimetype='application/json')\n else:\n return Response(\n response=json.dumps({\n 'Message': 'Product not found.'\n }),\n status=200,\n mimetype='application/json')\n \n except SQLAlchemyError as err:\n self.db.session.rollback()\n \n return Response(\n response=json.dumps({\"Error\": str(err.args[0])}),\n status=500,\n mimetype='application/json')\n \n finally:\n self.db.session.close()",
"def remove_from_cart(request):\n post_data = request.POST.copy()\n item_id = post_data['item_id']\n cart_item = get_single_item(request, item_id)\n if cart_item:\n cart_item.delete()",
"def delete_product(product_id):\n with MY_CONNECTION as connection:\n connection.execute(\"DELETE FROM Products WHERE id_product=?\", (product_id,))",
"def delete_item(request, product_id):\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, you are not permitted to do that.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n product.delete()\n messages.success(request, 'You have deleted the item!')\n return redirect(reverse('items'))",
"def remove_product(self, product_id):\n\n cur.execute(\"\"\"DELETE FROM catalogue WHERE productid = ? AND vendorname = ?\"\"\",\n (product_id, self.vendorname))",
"def remove_product(product_id: str) -> None:\n with db_session() as session:\n session.query(Product).filter_by(id=product_id).delete()\n session.commit()",
"def delete(self, Product):\n with api.commit_or_abort(\n db.session,\n default_error_message=\"Some error message\"\n ):\n db.session.delete(Product)\n return None, 204",
"def remove_product_from_cart(user_name, product_id, quantity, store_name):\n user_name = auth.get_username_from_hash(user_name)\n user_handler.remove_product(user_name, store_name, product_id, quantity)\n users.remove_from_cart(user_name, product_id, quantity, store_name)",
"def delete_product(request, id):\n\n return render(request, \"core/delete_product.html\", {\n \"object\": Product.objects.get(id=id)\n })",
"def delete_product(conn, product_id: int) -> None:\n with conn.cursor() as cursor:\n cursor.execute(f\"\"\"update products set deleted = True where id = '{product_id}'\"\"\")\n if cursor.rowcount:\n conn.commit()\n else:\n raise errors.StoreError"
] | [
"0.8534601",
"0.8041614",
"0.79457",
"0.770682",
"0.7690166",
"0.7668008",
"0.76628315",
"0.7649517",
"0.7617706",
"0.7565354",
"0.75316346",
"0.7501226",
"0.7420002",
"0.73552704",
"0.7347085",
"0.7338678",
"0.73137146",
"0.7273116",
"0.7239314",
"0.7206781",
"0.7196743",
"0.7192126",
"0.71859413",
"0.71701545",
"0.71562207",
"0.7151532",
"0.7134603",
"0.7121191",
"0.7081776",
"0.70484257"
] | 0.81086886 | 1 |
Yield the classes in module ``mod`` that inherit from ``cls`` | def get_subclasses(mod, cls):
for name, obj in inspect.getmembers(mod):
if hasattr(obj, "__bases__") and cls in obj.__bases__:
yield obj | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _classes_(cls):\n for base_cls in cls.__bases__:\n # Avoid infinite loop\n if base_cls == Sandbox:\n continue\n\n yield base_cls",
"def _classesToCheck(self, cls):\r\n yield cls\r\n yield from inspect.getmro(cls)",
"def get_all_classes_defined_in_module(module):\n for _cls in inspect.getmembers(module, inspect.isclass):\n if module.__name__ == _cls[1].__module__:\n yield _cls",
"def get_subclasses(module, clazz):\n for subclazz_name, subclazz in inspect.getmembers(module):\n if hasattr(subclazz, '__bases__') and clazz in subclazz.__bases__:\n yield (subclazz_name, subclazz)",
"def discover_classes(\n package,\n cls_match_func=trivial,\n module_match_func=trivial,\n):\n for module in discover_modules(package, module_match_func):\n # Check all the classes in that module\n for _, imported_class in inspect.getmembers(module, inspect.isclass):\n # Don't include things that are only there due to a side-effect of\n # importing\n if imported_class.__module__ != module.__name__:\n continue\n\n if cls_match_func(imported_class):\n yield imported_class",
"def iter_classes(base_class, *modules, class_filter=None):\n for root_module in modules:\n try:\n module_repo = walk_modules(root_module)\n except:\n continue\n for module in module_repo:\n for obj in vars(module).values():\n if inspect.isclass(obj) and issubclass(obj, base_class) and obj.__module__ == module.__name__:\n if not class_filter or class_filter(obj):\n yield obj",
"def find_all_classes(module_path: Union[str, ModuleType], cls: type) -> List[type]:\n if isinstance(module_path, ModuleType):\n mod = module_path\n else:\n mod = importlib.import_module(module_path)\n\n cls_list = []\n\n def _append_cls(obj):\n # Leverage the closure trick to reuse code\n if isinstance(obj, type) and issubclass(obj, cls) and cls not in cls_list:\n cls_list.append(obj)\n\n for attr in dir(mod):\n _append_cls(getattr(mod, attr))\n\n if hasattr(mod, \"__path__\"):\n # if the model is a package\n for _, modname, _ in pkgutil.iter_modules(mod.__path__):\n sub_mod = importlib.import_module(f\"{mod.__package__}.{modname}\")\n for m_cls in find_all_classes(sub_mod, cls):\n _append_cls(m_cls)\n return cls_list",
"def iter_cls(*classes, blacklist=tuple()):\n for bases in permutations(classes):\n if bases not in blacklist:\n yield type('_'.join(c.__name__ for c in bases), bases, {})",
"def all_subclasses(cls):\r\n for s in cls.__subclasses__():\r\n yield s\r\n for c in s.all_subclasses():\r\n yield c",
"def iter_spider_classes(module):\n ...",
"def find_classes(cls, cutoff_class=None):\n cutoff_class = cutoff_class or Interface\n module = sys.modules[__name__]\n for ni, vi in inspect.getmembers(module, inspect.isclass):\n if issubclass(vi, cutoff_class) and vi is not cutoff_class:\n yield vi",
"def get_classes(mod):\n return [\n key\n for key, _ in inspect.getmembers(mod, inspect.isclass)\n if key[0].isupper()\n ]",
"def get_classes_from_module(self, module):\n classes = dict([(name, cls)\n for name, cls in module.__dict__.items()\n if isinstance(cls, type)])\n self.set_latest_classes(classes)\n return self.get_latest_classes()",
"def itersubclasses(cls, _seen=None):\n\n if not isinstance(cls, type):\n raise TypeError('itersubclasses must be called with '\n 'new-style classes, not %.100r' % cls)\n if _seen is None:\n _seen = set()\n try:\n subs = cls.__subclasses__()\n except TypeError: # fails only when cls is type\n subs = cls.__subclasses__(cls)\n for sub in subs:\n if sub not in _seen:\n _seen.add(sub)\n yield sub\n for sub in itersubclasses(sub, _seen):\n yield sub",
"def itersubclasses(cls, _seen=None):\n if not isinstance(cls, type):\n raise TypeError('itersubclasses must be called with '\n 'new-style classes, not %.100r' % cls)\n if _seen is None:\n _seen = set()\n try:\n subs = cls.__subclasses__()\n except TypeError: # fails only when cls is type\n subs = cls.__subclasses__(cls)\n for sub in subs:\n if sub not in _seen:\n _seen.add(sub)\n yield sub\n for sub in itersubclasses(sub, _seen):\n yield sub",
"def GetScaffolderClasses(cls) -> Iterator[Type[interface.Scaffolder]]:\n for scaffolder_class in cls._scaffolder_classes.values():\n yield scaffolder_class",
"def class_hierarchy(clslist):\n for cls in clslist:\n subclass_list = cls.__subclasses__()\n if subclass_list:\n for subcls in class_hierarchy(subclass_list):\n yield subcls\n else:\n yield cls",
"def all_subclasses(cls):\n for subclass in cls.__subclasses__():\n yield subclass\n for subc in all_subclasses(subclass):\n yield subc",
"def subclass_iterator(cls, _seen=None):\r\n\r\n if not isinstance(cls, type):\r\n raise TypeError('_subclass_iterator must be called with '\r\n 'new-style classes, not %.100r' % cls)\r\n\r\n _seen = _seen or set()\r\n\r\n try:\r\n subs = cls.__subclasses__()\r\n except TypeError: # fails only when cls is type\r\n subs = cls.__subclasses__(cls)\r\n for sub in subs:\r\n if sub not in _seen:\r\n _seen.add(sub)\r\n yield sub\r\n for sub in subclass_iterator(sub, _seen):\r\n yield sub",
"def subclass_iterator(cls, _seen=None):\n\n if not isinstance(cls, type):\n raise TypeError('_subclass_iterator must be called with '\n 'new-style classes, not %.100r' % cls)\n\n _seen = _seen or set()\n\n try:\n subs = cls.__subclasses__()\n except TypeError: # fails only when cls is type\n subs = cls.__subclasses__(cls)\n for sub in subs:\n if sub not in _seen:\n _seen.add(sub)\n yield sub\n for sub in subclass_iterator(sub, _seen):\n yield sub",
"def visit_Module(self, node):\n self.generic_visit(node)\n return self.classes",
"def parent_class_modules(cls):\n if not issubclass(cls, spack.package_base.PackageBase) or issubclass(\n spack.package_base.PackageBase, cls\n ):\n return []\n result = []\n module = sys.modules.get(cls.__module__)\n if module:\n result = [module]\n for c in cls.__bases__:\n result.extend(parent_class_modules(c))\n return result",
"def find_all(m, cls):\n return [node for node in ast.walk(m) if isinstance(node, cls)]",
"def register_classes(\n self, module: ModuleType, base_cls: Type, override: bool = False, show_deprecation: bool = True\n ) -> None:\n for cls in self.get_members(module, base_cls):\n self(cls=cls, override=override, show_deprecation=show_deprecation)",
"def classes(self) -> Iterable[GDScriptClass]:\n for item in self._classes_by_type_id.values():\n yield item",
"def get_module_plugins(module, classobj):\n try:\n names = module.__all__\n except AttributeError:\n names = [x for x in vars(module) if not x.startswith('_')]\n for name in names:\n try:\n obj = getattr(module, name)\n except AttributeError:\n continue\n try:\n if issubclass(obj, classobj):\n yield obj\n except TypeError:\n continue",
"def subclasses(cls) -> Iterator:\n for subclass in cls.__subclasses__():\n if subclass._type_definition.description: # type: ignore\n yield subclass\n yield from subclass.subclasses()",
"def base_subclasses(cls):\n for subclass in cls.__subclasses__():\n yield from subclass.base_subclasses()\n if isinstance(subclass.base, type):\n yield subclass",
"def derived_classes(self, what: Union[GDScriptClass, str, int]):\n base_cls: Optional[GDScriptClass] = None\n if isinstance(what, GDScriptClass):\n base_cls = what\n else:\n base_cls = self.get_class(what)\n\n for cls in self._classes_by_type_id.values():\n if cls.base == base_cls:\n yield cls",
"def getProtocolClasses(self):\n for name, obj in inspect.getmembers(protocol):\n if inspect.isclass(obj):\n # We're only interested in sublasses of ProtocolElement\n pe = protocol.ProtocolElement\n if issubclass(obj, pe) and obj is not pe:\n yield obj"
] | [
"0.7661169",
"0.75540936",
"0.74157786",
"0.74068946",
"0.71664065",
"0.7122239",
"0.70386964",
"0.68738073",
"0.6851402",
"0.6728931",
"0.66658",
"0.66300493",
"0.6608214",
"0.65761477",
"0.65552634",
"0.65492725",
"0.65423816",
"0.65163565",
"0.647793",
"0.64746547",
"0.6449505",
"0.64285886",
"0.6400966",
"0.6359176",
"0.63412505",
"0.62918824",
"0.6258197",
"0.6242958",
"0.6237608",
"0.6216494"
] | 0.80770713 | 0 |
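Illustrative aside, not part of the dataset row above: a small, self-contained sketch of how `get_subclasses` behaves, assuming the function (and its `import inspect`) from the document above is in scope. Note that it only yields direct subclasses, because it checks `cls in obj.__bases__` rather than using `issubclass`.

import sys

class Base: ...
class Direct(Base): ...
class Indirect(Direct): ...

this_module = sys.modules[__name__]
print([c.__name__ for c in get_subclasses(this_module, Base)])
# expected: ['Direct'] -- Indirect is skipped because only direct bases are inspected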
A main function to run the simulation | def Main():
numberOfPopulation = 350
numberOfDays = 60
simulation = Simulation(Covid19(), numberOfPopulation, numberOfDays, "Covid 19 Simulation")
simulation.run()
simulation = Simulation(Ebola(), numberOfPopulation, numberOfDays, "Ebola Simulation")
simulation.run() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Main():\n EnigmaSim = simulation() #Creates the simulation object\n EnigmaSim.Run() #Runs the simulation",
"def main():\n run_simulation(spectral=False, ml=False, num_procs=1)\n run_simulation(spectral=True, ml=False, num_procs=1)\n run_simulation(spectral=False, ml=True, num_procs=1)\n run_simulation(spectral=True, ml=True, num_procs=1)\n run_simulation(spectral=False, ml=True, num_procs=10)\n run_simulation(spectral=True, ml=True, num_procs=10)",
"def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.5) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit",
"def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline= True ) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.5) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit",
"def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create learning agent\n # a = e.create_agent(RandomAgent) # create random agent\n e.set_primary_agent(a, enforce_deadline=True) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.01)\n # reduce update_delay to speed up simulation\n sys.stdout = open(\"./output.txt\", \"w\")\n tic = time()\n sim.run(n_trials=100) # press Esc or close pygame window to quit\n toc = time()\n sys.stdout = sys.__stdout__\n\n print \"Totoal time used: {}.\".format(toc - tic)\n parse(\"./output.txt\")",
"def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.00000001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line",
"def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.001, display=True) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line",
"def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0) # reduce update_delay to speed up simulation\n sim.run(n_trials=num_of_experiments) # press Esc or close pygame window to quit\n \n pd.Series(a.success).to_pickle('success_' + exp_id + '.pickle')\n a.Q_table.to_pickle('qtable_' + exp_id + '.pickle')\n pd.Series(a.q_delta_avg).to_pickle('convergence_' + exp_id + '.pickle')\n pd.Series(a.t_total).to_pickle('steps_' + exp_id + '.pickle')",
"def main(*args):\n #\n # Use argparse to handle parsing the command line arguments.\n # https://docs.python.org/3/library/argparse.html\n #\n parser = argparse.ArgumentParser(description='Animate an epidemic')\n parser.add_argument('--size', metavar='N', type=int, default=50,\n help='Use a N x N simulation grid')\n parser.add_argument('--duration', metavar='T', type=int, default=100,\n help='Simulate for T days')\n parser.add_argument('--recovery', metavar='P', type=float, default=0.1,\n help='Probability of recovery (per day)')\n parser.add_argument('--infection', metavar='P', type=float, default=0.1,\n help='Probability of infecting a neighbour (per day)')\n parser.add_argument('--death', metavar='P', type=float, default=0.005,\n help='Probability of dying when infected (per day)')\n parser.add_argument('--cases', metavar='N', type=int, default=2,\n help='Number of initial infected people')\n parser.add_argument('--plot', action='store_true',\n help='Generate plots instead of an animation')\n parser.add_argument('--file', metavar='N', type=str, default=None,\n help='Filename to save to instead of showing on screen')\n args = parser.parse_args(args)\n\n # Set up the simulation\n simulation = Simulation(args.size, args.size,\n args.recovery, args.infection, args.death)\n simulation.infect_randomly(args.cases)\n\n # Plot or animation?\n if args.plot:\n fig = plot_simulation(simulation, args.duration)\n\n if args.file is None:\n # python runsim.py --plot\n plt.show()\n else:\n # python runsim.py --plot --file=plot.pdf\n fig.savefig(args.file)\n else:\n animation = Animation(simulation, args.duration)\n\n if args.file is None:\n # python runsim.py\n animation.show()\n else:\n # python runsim.py --file=animation.mp4\n #\n # NOTE: this needs ffmpeg to be installed.\n animation.save(args.file)",
"def run():\n main()",
"def main():\n run_program()",
"def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0000001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n print 'alpha, gamma:', a.alpha, a.gamma\n print 'penalties:', a.total_penalties\n print 'total rewards:', a.total_rewards",
"def run():\n\tif len(sys.argv) > 1 and sys.argv[1] in {'-V', '--version'}:\n\t\tprint(\"pokesim - Pokémon Battle Simulator - Version %s\" % __version__)\n\t\texit()\n\n\trandom.seed()\n\ttry:\n\t\tmain()\n\texcept (KeyboardInterrupt, EOFError):\n\t\texit(0)",
"def startSimulation(self):\n self.saveParameters()\n self.simulation.main()",
"def main(argv):\n parser = argparse.ArgumentParser()\n \n parser.add_argument(\"sim_name\", type=str, help=\"name of simulation folder\")\n args = parser.parse_args() \n \n sim_path = '/'.join([current_dir, args.sim_name]) \n \n sim_input(sim_path) # write the fortran input files\n runmodel(sim_path) # compile and run fortran code\n sim_read(sim_path)",
"def run(): \n learning_rate = 0.42\n discount_rate = 0.15\n initial_q_hat = 4\n \n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent, learning_rate, discount_rate, initial_q_hat) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n print \"Failed trials: \"\n print a.get_failed_trials()\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line",
"def main():\n ex = Experiment(SEED)\n ex.main()",
"def main():\n character1 = generate_random_character(\"Dr. Bones\", 100, 60, 15, 5)\n character2 = generate_random_character(\"Mr. Meeseeks\", 100, 60,\n 15, 5)\n battle = BattleSimulator(character1, character2)\n battle.simulate()",
"def main():\n tester = Tester()\n # parse args, load configuration and create all required objects.\n tester.setup_experiment()\n # GO!\n tester.run_experiment()",
"def test_simulation(self):\n\t\tprint \"Simulation is being tested\"\n\n\t\tif toggles.DEBUG_FLAG:\n\t\t\tprint \"Debug Flag Set!\"\n\t\t\tprint self.getConfig()\n\n\t\tif toggles.PACKING:\n\t\t\ttoggles.OUTPUT_PATH = toggles.OUTPUT_PATH+toggles.RUN_NAME+'/'\n\t\t\tpackageMaker(toggles.OUTPUT_PATH,self.getConfig())\n\t\tif toggles.IDEAL_GRID:\n\t\t\tself.consensusGrid()\n\n\t\tif toggles.REAL_DATA:\n\t\t\tsampleData = self.load_data()\n\t\t\tif toggles.RUN_DATA_STATS:\n\t\t\t\tself.output_data_stats(sampleData)\n\t\t\t\tself.reset_database()\n\t\t\tif toggles.RUN_AVERAGE_COST:\n\t\t\t\tself.sim_average_cost(sampleData)\n\t\t\t\tself.reset_database()\n\t\t\tif toggles.RUN_SINGLE_PAIR:\n\t\t\t\tself.sim_single_pair_cost(sampleData, pending_eddy(self.pick_worker([0], [0])))\n\t\t\t\tself.reset_database()\n\t\telse:\n\t\t\tsampleData = {}\n\t\t\tsyn_load_data()\n\n\t\tif toggles.RUN_ITEM_ROUTING and not (toggles.RUN_TASKS_COUNT or toggles.RUN_MULTI_ROUTING):\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Running: item Routing\"\n\t\t\tself.run_sim(deepcopy(sampleData))\n\t\t\tself.reset_database()\n\n\t\tif PRED_SCORE_COUNT and not (RUN_TASKS_COUNT or RUN_MULTI_ROUTING):\n\t\t\tif DEBUG_FLAG:\n\t\t\t\tprint \"Running: Pred Score count\"\n\t\t\tself.run_sim(sampleData)\n\t\t\tself.reset_database()\n\n\n\n\t\tif toggles.COUNT_TICKETS and not (toggles.RUN_TASKS_COUNT or toggles.RUN_MULTI_ROUTING):\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Running: ticket counting\"\n\t\t\tself.run_sim(deepcopy(sampleData))\n\t\t\tself.reset_database()\n\n\t\tif toggles.SELECTIVITY_GRAPH and not (toggles.RUN_TASKS_COUNT or toggles.RUN_MULTI_ROUTING):\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Running: selectivity amounts over time\"\n\t\t\tself.run_sim(sampleData)\n\t\t\tself.reset_database()\n\n\t\t#____FOR LOOKING AT ACCURACY OF RUNS___#\n\t\tif toggles.TEST_ACCURACY and toggles.REAL_DATA:\n\t\t\tcorrectAnswers = self.get_correct_answers(toggles.INPUT_PATH + toggles.ITEM_TYPE + '_correct_answers.csv')\n\t\t\tpassedItems = self.get_passed_items(correctAnswers)\n\n\n\t\tif toggles.RUN_OPTIMAL_SIM:\n\t\t\tcountingArr=[]\n\t\t\tself.reset_database()\n\t\t\tfor i in range(toggles.NUM_SIM):\n\t\t\t\tprint \"running optimal_sim \" +str(i)\n\t\t\t\tself.num_tasks = self.optimal_sim(sampleData)\n\t\t\t\tcountingArr.append(self.num_tasks)\n\t\t\t\tself.reset_database()\n\t\t\tdest = toggles.OUTPUT_PATH+toggles.RUN_NAME+'_optimal_tasks'\n\t\t\tgeneric_csv_write(dest+'.csv',[countingArr])\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Wrote File: \" + dest+'.csv'\n\n\n\n\t\tif toggles.RUN_TASKS_COUNT or toggles.RUN_MULTI_ROUTING or toggles.RUN_CONSENSUS_COUNT:\n\t\t\tif toggles.RUN_TASKS_COUNT:\n\t\t\t\t#print \"Running: task_count\"\n\t\t\t\t#f = open(toggles.OUTPUT_PATH + toggles.RUN_NAME + '_tasks_count.csv', 'a')\n\t\t\t\t#f1 = open(toggles.OUTPUT_PATH + toggles.RUN_NAME + '_incorrect_count.csv', 'a')\n\n\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\toutputArray = []\n\n\t\t\trunTasksArray = []\n\t\t\tgoodArray, badArray = [], []\n\t\t\tgoodPoints, badPoints = [], []\n\t\t\taccCount = []\n\t\t\tlocArray = [[],[],[],[]]\n\n\t\t\tfor i in range(toggles.NUM_SIM):\n\t\t\t\tprint \"running simulation \" + str(i+1)\n\t\t\t\tself.run_sim(deepcopy(sampleData))\n\t\t\t\trunTasksArray.append(self.num_tasks)\n\n\t\t\t\t#____FOR LOOKING AT ACCURACY OF RUNS___#\n\t\t\t\tif toggles.TEST_ACCURACY and toggles.REAL_DATA:\n\t\t\t\t\tnum_incorrect = 
self.final_item_mismatch(passedItems)\n\t\t\t\t\taccCount.append(num_incorrect)\n\t\t\t\tif toggles.RUN_CONSENSUS_COUNT or toggles.VOTE_GRID:\n\t\t\t\t\tdonePairs = IP_Pair.objects.filter(Q(num_no__gt=0)|Q(num_yes__gt=0))\n\t\t\t\t\tif toggles.TEST_ACCURACY:\n\t\t\t\t\t\tgoodPairs, badPairs = [], []\n\t\t\t\t\t\tfor pair in donePairs:\n\t\t\t\t\t\t\tval = bool((pair.num_yes-pair.num_no)>0)\n\t\t\t\t\t\t\tif toggles.REAL_DATA:\n\t\t\t\t\t\t\t\tcorrect = ((correctAnswers[(pair.item,pair.predicate)]) == val)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tcorrect = (pair.true_answer == val)\n\t\t\t\t\t\t\tif correct:\n\t\t\t\t\t\t\t\tgoodArray.append(pair.num_no+pair.num_yes)\n\t\t\t\t\t\t\t\tgoodPoints.append((pair.num_no,pair.num_yes))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tbadArray.append(pair.num_no+pair.num_yes)\n\t\t\t\t\t\t\t\tbadPoints.append((pair.num_no,pair.num_yes))\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor pair in donePairs:\n\t\t\t\t\t\t\tgoodArray.append(pair.num_no + pair.num_yes)\n\t\t\t\t\t\t\tgoodPoints.append((pair.num_no,pair.num_yes))\n\n\t\t\t\t\t#print \"This is number of incorrect items: \", num_incorrect\n\n\t\t\t\tself.reset_database()\n\n\t\t\tif toggles.RUN_TASKS_COUNT:\n\t\t\t\tgeneric_csv_write(toggles.OUTPUT_PATH+toggles.RUN_NAME+'_tasks_count.csv',[runTasksArray])\n\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\tprint \"Wrote File: \" + toggles.OUTPUT_PATH + toggles.RUN_NAME + '_tasks_count.csv'\n\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\tif len(runTasksArray)>1:\n\t\t\t\t\t\tdest = toggles.OUTPUT_PATH + toggles.RUN_NAME + '_tasks_count.png'\n\t\t\t\t\t\ttitle = toggles.RUN_NAME + ' Cost distribution'\n\t\t\t\t\t\thist_gen(runTasksArray, dest, labels = ('Cost','Frequency'), title = title)\n\t\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\t\tprint \"Wrote File: \" + dest\n\t\t\t\t\telif toggles.DEBUG_FLAG:\n\t\t\t\t\t\tprint \"only ran one sim, not running hist_gen\"\n\n\t\t\tif toggles.RUN_MULTI_ROUTING:\n\t\t\t\t\tdest = toggles.OUTPUT_PATH + toggles.RUN_NAME + '_Eddy_sys_' + str(toggles.EDDY_SYS) + '_multi_routing.png'\n\t\t\t\t\ttitle = toggles.RUN_NAME + ' Average Predicate Routing'\n\t\t\t\t\tquestions = toggles.CHOSEN_PREDS\n\t\t\t\t\tarrayData = []\n\t\t\t\t\tfor i in range(len(questions)):\n\t\t\t\t\t\tarrayData.append([])\n\t\t\t\t\tfor routingL in ROUTING_ARRAY:\n\t\t\t\t\t\tfor i in range(len(questions)):\n\t\t\t\t\t\t\tarrayData[i].append(routingL[i])\n\t\t\t\t\tmrsavefile = open(toggles.OUTPUT_PATH+toggles.RUN_NAME+'_multi_routing.csv','w')\n\t\t\t\t\tmrwriter = csv.writer(mrsavefile)\n\t\t\t\t\tmrwriter.writerow(questions)\n\t\t\t\t\tfor row in arrayData:\n\t\t\t\t\t\tmrwriter.writerow(row)\n\t\t\t\t\tmrsavefile.close()\n\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\tprint \"Wrote File: \"+toggles.OUTPUT_PATH+toggles.RUN_NAME+'_multi_routing.csv'\n\t\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\t\tstats_bar_graph_gen(arrayData, questions, dest, labels = ('Predicate','# of Items Routed'), title = title)\n\t\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\t\tprint \"Wrote File: \" + toggles.OUTPUT_PATH+toggles.RUN_NAME+'_multi_routing.png'\n\t\t\tif toggles.ACCURACY_COUNT:\n\t\t\t\tdest = toggles.OUTPUT_PATH+toggles.RUN_NAME+'_acc_count'\n\t\t\t\tgeneric_csv_write(dest+'.csv',[accCount])\n\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\thist_gen(accCount, dest+'.png')\n\n\t\t\tif toggles.RUN_CONSENSUS_COUNT:\n\t\t\t\tdest = toggles.OUTPUT_PATH + toggles.RUN_NAME+'_consensus_count'\n\t\t\t\tif len(goodArray)>1:\n\t\t\t\t\tif len(badArray) == 
0:\n\t\t\t\t\t\tgeneric_csv_write(dest+'.csv',[goodArray])\n\t\t\t\t\t\t#print goodArray\n\t\t\t\t\telse:\n\t\t\t\t\t\tgeneric_csv_write(dest+'.csv',[goodArray,badArray])\n\t\t\t\t\t\t#print goodArray,badArray\n\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\tprint \"Wrote File: \" + dest + '.csv'\n\t\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\t\ttitle = 'Normalized Distribution of Tasks before Consensus'\n\t\t\t\t\t\tlabels = ('Number of Tasks', 'Frequency')\n\t\t\t\t\t\tif len(badArray) < 2:\n\t\t\t\t\t\t\thist_gen(goodArray, dest+'.png',labels=labels,title=title)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tleg = ('Correctly Evaluated IP pairs','Incorrectly Evaluated IP pairs')\n\t\t\t\t\t\t\tmulti_hist_gen([goodArray,badArray],leg,dest+'.png',labels=labels,title=title)\n\t\t\t\telif toggles.DEBUG_FLAG:\n\t\t\t\t\tprint \"only ran one sim, ignoring results\"\n\t\t\tif toggles.VOTE_GRID:\n\t\t\t\tdest = toggles.OUTPUT_PATH + toggles.RUN_NAME+'_vote_grid'\n\t\t\t\tif len(goodPoints)>1:\n\t\t\t\t\tif len(badPoints)==0:\n\t\t\t\t\t\tgeneric_csv_write(dest+'.csv',goodPoints)\n\t\t\t\t\telse:\n\t\t\t\t\t\tgeneric_csv_write(dest+'_good.csv',goodPoints)\n\t\t\t\t\t\tgeneric_csv_write(dest+'_bad.csv',badPoints)\n\t\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\t\ttitle = \"Vote Grid Graph\"\n\t\t\t\t\t\tlabels = (\"Number of No Votes\",\"Number of Yes Votes\")\n\t\t\t\t\t\tif len(badPoints)==0:\n\t\t\t\t\t\t\txL,yL=zip(*goodPoints)\n\t\t\t\t\t\t\tline_graph_gen(xL,yL,dest+'.png',title=title,labels=labels,scatter=True,square=True)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tgX,gY = zip(*goodPoints)\n\t\t\t\t\t\t\tbX,bY = zip(*badPoints)\n\t\t\t\t\t\t\tmulti_line_graph_gen((gX,bX),(gY,bY),('Correct','Incorrect'),dest+'_both.png',title=title,labels=labels,scatter=True,square=True)\n\t\t\t\t\t\t\tline_graph_gen(gX,gY,dest+'_good.png',title=title+\" goodPoints\",labels=labels,scatter=True,square=True)\n\t\t\t\t\t\t\tline_graph_gen(bX,bY,dest+'_bad.png',title=title+\" badPoints\",labels=labels,scatter=True,square=True)\n\t\tif toggles.TIME_SIMS:\n\t\t\tself.timeRun(sampleData)\n\n\t\tif toggles.RUN_ABSTRACT_SIM:\n\t\t\tself.abstract_sim(sampleData, toggles.ABSTRACT_VARIABLE, toggles.ABSTRACT_VALUES)",
"def main():\n greetings()\n run_jarvis()",
"def run(sim_attr_generator):\n#TODO: clean\n#TODO: integrate analyses\n def analyze_and_save(simulation,simulation_attributes):\n#? Ugly conf file analyses integration.\n if simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving analyses for {0}.\".format(simulation_attributes.id_name),2)\n results = analyze_datas(\n simulation.result,\n simulation_attributes.analyses\n )\n plotables = ana_results_to_plotables(\n results,\n simulation_attributes.analyses\n )\n#TODO error handling for save\n analysis_save_dm(\n results,\n plotables,\n simulation_attributes.analyses,\n simulation_attributes.id_name\n )\n\n def save_simulation(simulation,simulation_attributes):\n if not simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving simulation datas of {0}.\".format(\n simulation_attributes.id_name\n ),2) \n try:\n np.save(\n simulation_attributes.id_name,\n simulation.result\n )\n except:\n raise EnvironmentError(\"Can't save data to {}.\".format(\n simulation_attributes.id_name\n ))\n\n verbose_print(\"Starting simulation run.\",1)\n for i,simulation_attributes in enumerate(sim_attr_generator):\n verbose_print(\"Starting simulation number {0}: {1}\".format(\n i,\n simulation_attributes.id_name\n ),2)\n simulation = Simulation(\n SimulationVariables(simulation_attributes)\n )\n simulation.start()\n save_simulation(simulation,simulation_attributes)\n analyze_and_save(simulation,simulation_attributes)",
"def run_simulator(self):\n\n self.update_settings()\n\n # Pass in the progress bar and the master so that the simulator can\n # update the progress bar and then refresh the screen when the progress\n # checkpoints are hit\n\n self.sim_results = self.sim.run(self.progress_bar, self.master)\n self.graph_results()",
"def test_run_sim():\n rnd = rand.Arrivals(31, 40)\n sim.run_sim(2, 1, 3, 4, 24, rnd)",
"def run():\n import hmmmAssembler ; reload(hmmmAssembler) # import helpers\n hmmmAssembler.main(Random) # this runs the code!",
"def main():\n\tif len(sys.argv) < 12 or len(sys.argv) > 13:\n\t\tprint(\"Input parameters must be: 'filename lambda mu C c0 Q theta L H simulation_time is_debug repeats(optionally)'\")\n\telse:\n\t\tstart_time = time.time()\n\n\t\tfile_name = sys.argv[1]\n\t\tlambd = float(sys.argv[2])\n\t\tmu = float(sys.argv[3])\n\t\tC = int(sys.argv[4])\n\t\tc0 = int(sys.argv[5])\n\t\tQ = int(sys.argv[6])\n\t\ttheta = float(sys.argv[7])\n\t\tL = int(sys.argv[8])\n\t\tH = int(sys.argv[9])\n\t\tsimulation_time = float(sys.argv[10]);\n\t\tis_debug = True if sys.argv[11] == \"True\" else False;\n\t\trepeats = int(sys.argv[12]) if len(sys.argv) == 13 else 1;\n\n\t\tprint(\"Simulation started for params: lambda =\", lambd,\n\t\t\t \", mu =\", mu,\n\t\t\t \", C =\", C,\n\t\t\t \", c0 =\", c0,\n\t\t\t \", Q =\", Q,\n\t\t\t \", theta =\", theta,\n\t\t\t \", L =\", L,\n\t\t\t \", H =\", H,\n\t\t\t \", repeats =\", repeats)\n\n\t\tblocked = 0\n\t\tserved = 0\n\t\tgenerated = 0\n\t\tB = 0\n\t\tN = 0\n\n\t\tsimulation = Simulation(\"m/m/c[c0]/r[l,h]\", lambd, mu, theta, C, c0, L, H, simulation_time, Q, is_debug)\n\t\tfor i in range(0, repeats):\n\t\t\tsimulation = Simulation(\"m/m/c[c0]/r[l,h]\", lambd, mu, theta, C, c0, L, H, simulation_time, Q, is_debug)\n\t\t\tsimulation.start()\n\t\t\tblocked += simulation.queue.blocked\n\t\t\tserved += simulation.served_count\n\t\t\tgenerated += simulation.flow.generated_count\n\t\t\tB += simulation.queue.blocked/(simulation.served_count+simulation.queue.blocked)\n\t\t\tN += simulation.served_count/simulation_time\n\t\tend_time = time.time()\n\n\t\tblocked = blocked/repeats\n\t\tserved = served/repeats\n\t\tgenerated = generated/repeats\n\t\tB = B/repeats\n\t\tN = N/repeats\n\n\t\tprint( \"\")\n\t\tprint( \"Summary results:\")\n\t\tprint( \"blocked=\", blocked, \" served=\", served, \", generated=\", generated)\n\t\tprint(\"B = \", B)\n\t\tprint(\"N = \", N)\n\t\tprint(\"Execution time = %s seconds\" % (end_time - start_time))\n\t\tprint( \"... to be implemented more summary ...\")\n\n\t\t# write stats to file\n\t\tabs_path = os.path.abspath(__file__)\n\t\tpath = os.path.relpath('stats', abs_path)\n\t\tpath = os.path.join(path, file_name + '-(%s,%s,%s,%s,%s,%s,%s,%s).csv' % (lambd,mu,theta,C,c0,L,H,simulation_time))\n\n\t\toutfile=open(path,'w')\n\t\toutput = csv.writer(outfile, delimiter=';')\n\t\toutput.writerow(['Request ID','Queue', 'Arrival_Time','Queue_Arrival_time','Server_Arrival_time','alpha','beta'])\n\n\t\ti=0\n\t\tfor request in simulation.served_requests:\n\t\t\ti=i+1\n\t\t\toutrow=[]\n\t\t\toutrow.append(request.ID)\n\t\t\toutrow.append(request.queue_size_at_serving)\n\t\t\toutrow.append(request.arrival_time)\n\t\t\toutrow.append(request.queue_arrival_time)\n\t\t\toutrow.append(request.server_arrival_time)\n\t\t\toutrow.append(request.alpha)\n\t\t\toutrow.append(request.beta)\n\t\t\toutput.writerow(outrow)\n\t\toutfile.close()\n\n\t\treturn simulation",
"def main():\n return",
"def make_simulation(self):\n pass",
"def main():\n driver = Driver()\n driver.start()",
"def _exe_(self):\n print(\"\\n Start simulation (using Pharlap) ...\")\n dic = \"data/sim/{dn}/{rad}/\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), rad=self.rad)\n self._copy_ne_()\n [self._compute_(case) for case in [\"bgc\", \"flare\"]]\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"bgc\")\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"flare\")\n self._compute_doppler_()\n rec = self._compute_velocity_()\n return rec"
] | [
"0.82430094",
"0.80094373",
"0.770448",
"0.7694861",
"0.7663943",
"0.7658844",
"0.7653654",
"0.76189035",
"0.76126456",
"0.74813896",
"0.74432015",
"0.73796666",
"0.73725206",
"0.7330815",
"0.7320483",
"0.73116446",
"0.72484505",
"0.7242806",
"0.7231775",
"0.7191046",
"0.71183807",
"0.71162075",
"0.710941",
"0.71014994",
"0.7093545",
"0.7093206",
"0.70831364",
"0.704329",
"0.70379514",
"0.70076656"
] | 0.8399111 | 0 |
Uses the readings from all nodes to report the network-wide mean and standard deviation | def get_network_reading(self):
# update the readings for all nodes
self.update_all_readings()
# get the current readings from all nodes
node_readings = []
for node_name in self.nodes:
node_readings.append(self.nodes[node_name].stable_reading)
node_readings = np.array(node_readings)
# get the average
network_avg = scipy.average(node_readings)
# get the standard deviation
network_std = scipy.std(node_readings)
return network_avg, network_std | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_training_stats():\n means, stds = [], []\n data = SUNRGBDTrainDataset(True)\n for i in range(len(data)):\n print(i)\n img, _ = data[i]\n std, mean = t.std_mean(input=img, dim=(1, 2))\n means.append(mean)\n stds.append(std)\n means = t.sum(t.vstack(means), dim=0) / len(means)\n stds = t.sum(t.vstack(stds), dim=0) / len(stds)\n print(means, stds)",
"def compute_analysis(self):\r\n def get_mean(self):\r\n \"\"\"\r\n Compute mean in all sensors\r\n \"\"\"\r\n for i in range(1,len(self.data[0])):\r\n self.prom.append(np.mean(self.data[:,i])) \r\n\r\n \r\n def get_stddev(self):\r\n \"\"\"\r\n Compute mean in all sensors\r\n \"\"\"\r\n for i in range(1,len(self.data[0])):\r\n self.stddev.append(np.std(self.data[:,i])) \r\n \r\n # Get the values\r\n get_mean(self)\r\n get_stddev(self)\r\n \r\n # Check condition\r\n [(self.out_of_3stddev.append(i)) \r\n for (i) in (self.data[:,0:4]) \r\n if (any(\r\n (i[1:4] > 3*np.array(self.stddev)+np.array(self.prom))|\r\n (i[1:4] < -3*np.array(self.stddev)+np.array(self.prom))\r\n ))]",
"def Means_Stds(self):\n self.means=[] # list taking care for the means of ll experiments\n self.stds=[] # list taking care fro the Stds of all experiments\n for replica in self.exper(): # remember self.exper, from above returns ListExperiments\n mean, Std = self._ReplicaStats(replica.T) # here calculates the means and Stds. WE have to transpose the matrix. .T stands for transpose\n self.means.append(mean) # the calculted data for each experiment is gethered in one place\n self.stds.append(Std)\n #print(self.means, self.stds)\n return self.means, self.stds",
"def mean_std_calc(dataloader):\n mean = 0\n std = 0\n samples = 0\n for data, _, _ in dataloader:\n batch_samples = data.size(0)\n data = data.view(batch_samples, data.size(1), -1)\n mean += data.mean(2).sum(0)\n std += data.std(2).sum(0)\n samples += batch_samples\n\n return (mean / samples),(std / samples)",
"def meanstd(self):\n\t\tmean = [125.3, 123.0, 113.9] # R,G,B\n\t\tstddev = [63.0, 62.1, 66.7] # R,G,B\n\t\treturn [mean, stddev]",
"def get_mean_stddev(self):\n return self.get_mean(), self.get_std_dev()",
"def store_std_dev_of_means(src_file: H5File) -> None:\n mean_perp_mean = src_file.attrs['perp_mean']\n mean_par_mean = src_file.attrs['par_mean']\n mean_ref_mean = src_file.attrs['ref_mean']\n perp_sum = 0\n par_sum = 0\n ref_sum = 0\n counts = 0\n for path in rawnav.pump_group_paths(src_file):\n perp_path = path + '/perp'\n par_path = path + '/par'\n ref_path = path + '/ref'\n perp_mean = src_file[perp_path].attrs['mean']\n par_mean = src_file[par_path].attrs['mean']\n ref_mean = src_file[ref_path].attrs['mean']\n perp_sum += (perp_mean - mean_perp_mean) ** 2\n par_sum += (par_mean - mean_par_mean) ** 2\n ref_sum += (ref_mean - mean_ref_mean) ** 2\n counts += 1\n src_file.attrs['perp_mean_std_dev'] = np.sqrt(perp_sum / (counts - 1))\n src_file.attrs['par_mean_std_dev'] = np.sqrt(par_sum / (counts - 1))\n src_file.attrs['ref_mean_std_dev'] = np.sqrt(ref_sum / (counts - 1))\n return",
"def get_mean_and_std(dataloader):\n mean = torch.zeros(3)\n std = torch.zeros(3)\n len_dataset = 0\n print('==> Computing mean and std..')\n for inputs, targets in dataloader:\n len_dataset += 1\n for i in range(len(inputs[0])):\n mean[i] += inputs[:,i,:,:].mean()\n std[i] += inputs[:,i,:,:].std()\n mean.div_(len_dataset)\n std.div_(len_dataset)\n return mean, std",
"def get_mean_and_std(dataset):\n dataloader = torch.utils.data.DataLoader(\n dataset, batch_size=1, shuffle=True, num_workers=2\n )\n mean = torch.zeros(3)\n std = torch.zeros(3)\n print(\"==> Computing mean and std..\")\n for inputs, targets in dataloader:\n for i in range(3):\n mean[i] += inputs[:, i, :, :].mean()\n std[i] += inputs[:, i, :, :].std()\n mean.div_(len(dataset))\n std.div_(len(dataset))\n return mean, std",
"def _get_normalisation_stats(self):\n p_net_datasets = [self.pdf_dataset] + [self.PDE_dataset] + [self.BC_dataset]\n p_net_means, p_net_stds = get_mean_std_from_datasets(p_net_datasets)\n\n D_net_datasets = [self.PDE_dataset]\n D_net_means, D_net_stds = get_mean_std_from_datasets(D_net_datasets)\n\n U_net_datasets = [self.PDE_dataset]\n U_net_means, U_net_stds = get_mean_std_from_datasets(U_net_datasets)\n\n return p_net_means, p_net_stds, D_net_means, D_net_stds, U_net_means, U_net_stds",
"def mean_std_dev_tfrecords2(tfrecord_files):\n num_examples = 0\n n = 0\n S = 0.0\n m = 0.0\n \n for tfrecord_file in tqdm(tfrecord_files):\n for example in tf.python_io.tf_record_iterator(tfrecord_file):\n num_examples += 1\n eg = tf.train.Example.FromString(example)\n x = eg.features.feature[\"spectrogram\"].float_list.value\n for x_i in x:\n n = n + 1\n m_prev = m\n m = m + (x_i - m) / n\n S = S + (x_i - m) * (x_i - m_prev)\n print('Finished processing %i examples' % num_examples)\n return {'mean': m, 'std': np.sqrt(S/n)}",
"def run(self, data):\n for idx, block in enumerate(data):\n current = float(np.mean(block['current']['value']))\n print(f'{idx}: {current}')",
"def _get_mean_and_log_std(self, *inputs):\n return self._shared_mean_log_std_network(*inputs)",
"def get_mean_and_std(dataset):\n dataloader = torch.utils.data.DataLoader(\n dataset, batch_size=1, shuffle=True, num_workers=2\n )\n mean = torch.zeros(3)\n std = torch.zeros(3)\n for inputs, targets in dataloader:\n for i in range(3):\n mean[i] += inputs[:, i, :, :].mean()\n std[i] += inputs[:, i, :, :].std()\n mean.div_(len(dataset))\n std.div_(len(dataset))\n return mean, std",
"def __init__(self):\n self.mean = 0.0\n self.std = 1.0",
"def aggregate_stats(self):\n if self.split_bn.track_running_stats:\n (\n self.bn.running_mean.data,\n self.bn.running_var.data,\n ) = self._get_aggregated_mean_std(\n self.split_bn.running_mean,\n self.split_bn.running_var,\n self.num_splits,\n )",
"def mean_stdev_numeric_attribute(nodes: typ.Iterable[vtna.graph.TemporalNode], attribute_name: str) \\\n -> typ.Tuple[float, float]:\n values = [node.get_global_attribute(attribute_name) for node in nodes]\n return float(np.mean(values)), float(np.std(values))",
"def mean_stddev_network_parameters(net):\n all_params = []\n for p in net.parameters():\n all_params.append(p.data.cpu().numpy().flatten())\n all_params = np.concatenate(all_params)\n mean, stddev = all_params.mean(), all_params.std()\n # print(mean, stddev)\n return mean, stddev",
"def _loss_std_mean(self, iterations):\n\n loss_array = np.array(self._loss_list[-iterations:])\n return loss_array.mean(), loss_array.std()",
"def get_stddev(self):\r\n for i in range(1,len(self.data[0])):\r\n self.stddev.append(np.std(self.data[:,i]))",
"def compute_statistics(self):",
"def update_all_readings(self):\n\n # update the reading of all nodes\n for node_name in self.nodes:\n\n # update the readings of all nodes\n self.nodes[node_name].reading()\n\n # once all nodes have updated, they can be stabilized\n for node_name in self.nodes:\n\n self.nodes[node_name].stabilize()",
"def get_dataset_parameters(dataloader):\n mean = 0.0\n meansq = 0.0\n count = 0\n \n for index, (data, targets) in enumerate(dataloader):\n mean = data.sum()\n meansq = meansq + (data**2).sum()\n count += np.prod(data.shape)\n \n total_mean = mean/count\n total_var = (meansq/count) - (total_mean**2)\n total_std = torch.sqrt(total_var)\n print(\"mean: \" + str(total_mean))\n print(\"std: \" + str(total_std))",
"def MeanAndStandardDeviation(data):\n n = len(data)\n if n == 0:\n return 0.0, 0.0\n mean = float(sum(data)) / n\n variance = sum([(element - mean)**2 for element in data]) / n\n return mean, math.sqrt(variance)",
"def test_device_readings_mean(self):\n request = self.client().get('/devices/{}/readings/mean/'.format(self.device_uuid))\n\n self.assertEqual(request.json.get('value', None), 61)",
"def find_mean_std(self, data):\n if self._data_mean is None:\n self._data_mean = np.mean(data)\n if self._data_std is None:\n self._data_std = np.std(data)",
"def mean_STD(self,counter):\n \n \n pass",
"def dataStats(self):\n print (\"Performing statistical analysis of the data\")\n # stuff to do",
"def print_avg():",
"def get_data_set_mean_and_std(self):\n cnt = 0\n fst_moment = torch.empty(3)\n snd_moment = torch.empty(3)\n\n for idx in range(self.__len__()):\n outputs = self.__getitem__(idx)\n\n # Outputs = img, label (BIPED Dataset)\n # Outputs = img_with_end_dots, classification_label, single_contour_with_end_dots\n img = outputs[0]\n\n c, h, w = img.shape\n nb_pixels = h * w\n sum_ = torch.sum(img, dim=[1, 2])\n sum_of_square = torch.sum(img ** 2, dim=[1, 2])\n fst_moment = (cnt * fst_moment + sum_) / (cnt + nb_pixels)\n snd_moment = (cnt * snd_moment + sum_of_square) / (cnt + nb_pixels)\n\n cnt += nb_pixels\n\n return fst_moment, torch.sqrt(snd_moment - fst_moment ** 2)"
] | [
"0.6639394",
"0.6492293",
"0.6476488",
"0.6289496",
"0.6249173",
"0.61678153",
"0.61512434",
"0.6148738",
"0.61152846",
"0.6097314",
"0.60312355",
"0.60029006",
"0.5964977",
"0.5934712",
"0.5916757",
"0.59007215",
"0.5894467",
"0.58875763",
"0.5869987",
"0.58500725",
"0.58438903",
"0.58391947",
"0.58048654",
"0.57800597",
"0.5778893",
"0.5761997",
"0.57576275",
"0.5744655",
"0.57295215",
"0.57275265"
] | 0.6637554 | 1 |
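The get_network_reading record above gathers each node's stable_reading and then calls the legacy scipy.average/scipy.std helpers (older SciPy releases re-exported these from NumPy). A minimal sketch of the same reduction in plain NumPy — the stable_reading values and the (mean, std) return follow the record, everything else here is illustrative:

```python
import numpy as np

def network_reading(stable_readings):
    """Return (mean, std) over an iterable of per-node stable readings (sketch only)."""
    readings = np.asarray(list(stable_readings), dtype=float)
    return readings.mean(), readings.std()

# usage sketch
avg, std = network_reading([0.93, 1.07, 1.01, 0.99])
```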
Generator that yields the names of all nodes in the network | def node_names(self):
for node_name in self.nodes.keys():
yield node_name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def all_nodes_as_iterable(self, include_metadata: bool = False) -> Generator:\n if include_metadata:\n return [\n (self._names.get_name(i), self._meta.get_node(self._names.get_name(i)))\n for i in self._nk_graph.iterNodes()\n ]\n return [self._names.get_name(i) for i in self._nk_graph.iterNodes()]",
"def get_node_names(self) -> List[str]:\n\t\t# Variables\n\t\tnames: List[str] = []\n\n\t\t# Iterate over nodes\n\t\tfor node in self.nodes:\n\t\t\tnames.append(node.name)\n\t\t# Return Names\n\t\treturn sorted(names, key=str.lower)",
"def nodeNames(self):\n return self.backend.configuration.getNodeNames()",
"def nodes_iter(self) -> Generator:\n for n in self.graph.nodes(data=True):\n yield n",
"def get_node_names(self):\n return set({node.get_name() for node in self.get_nodeset()}) # return the set of names",
"def print_nodes(graph):\n print([n.name for n in graph.node])",
"def node_name_list(self):\n return list(self._node_reg.keys())",
"def getNodes(self):\n return self.graph.keys()",
"def nodes(self):\n return list(self.__graph.keys())",
"def iter_nodes(self):",
"def sorted_nodes_names(self):\n return [nd.name for nd in self._sorted_nodes]",
"def nodes(self): \n return [n for n in self.iternodes()]",
"def get_output_node_names(self, node_name):\n # (str) -> list\n node = self.get_node(node_name)\n return node.tops",
"def nodes_iter(topology):\n return topology.nodes_iter()",
"def nodes_names_map(self):\n return {nd.name: nd for nd in self.nodes}",
"def get_nodes(self):\n try:\n return list(self._adjacency_list.keys())\n except Exception as error:\n print(f'An error occurred: {error}')",
"def nodes(self):\n return list(self._g.keys())",
"def get_nodes(self):\n return_set = set()\n for value in self._name:\n return_set.add(value)\n return return_set",
"def nodes(self):\n return self.source_net.nodes()",
"def nodes(self):\n for node_set in self.itervalues():\n for node in node_set:\n yield node",
"def nodes(self):\n return list(self.node_dict.keys())",
"def nodes(self):\n\n return list(set(self._graph.keys() + [x for x in itertools.chain.from_iterable(self._graph.values())]))",
"def getNodes(self):\n nodes = [{\"address\": \"http://0.0.0.0:100\"}\n ,{\"address\": \"http://0.0.0.0:200\"}\n ,{\"address\": \"http://0.0.0.0:300\"}\n ,{\"address\": \"http://0.0.0.0:400\"}\n ,{\"address\": \"http://0.0.0.0:500\"}]\n return nodes",
"def nodes(topology):\n return topology.nodes()",
"def list_nodes(self):\n return self.datanodes.keys()",
"def getNodes(self):\n return self.__allNodes",
"def get_nodes(self):\n self.map_graph_id()\n self.nodes_list = [\n self.NX_GRAPHS[self.graph_id].nodes[idx]['label'] \n for idx in range(len(self.NX_GRAPHS[self.graph_id].nodes))]",
"def getNodeNames(self, includeDisabled=False):",
"def get_node_names(self, label_selector=None):\n return [node.metadata.name for node in self.get_nodes(label_selector).items]",
"def nodes(self):\n return list(self.keys())"
] | [
"0.72962666",
"0.71726984",
"0.7021836",
"0.6995421",
"0.6957169",
"0.68337035",
"0.67822737",
"0.6635629",
"0.66200304",
"0.6613424",
"0.66128385",
"0.6590175",
"0.6584443",
"0.6574914",
"0.6530787",
"0.6516793",
"0.6490995",
"0.64839625",
"0.6473152",
"0.6454208",
"0.6428152",
"0.63950336",
"0.63901085",
"0.6382366",
"0.6365829",
"0.6350574",
"0.6350103",
"0.6337588",
"0.63242614",
"0.63237613"
] | 0.80899006 | 0 |
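Since iterating a dict already walks its keys, the node_names generator in the record above reduces to a single yield from — a sketch that assumes only the self.nodes attribute named in the record:

```python
def node_names(self):
    # equivalent to `for name in self.nodes.keys(): yield name`
    yield from self.nodes
```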
Gets the average position of all nodes in the network | def get_network_average_position(self):
# the total number of nodes in the network
num_nodes = self.total_nodes()
# get the location of all nodes
all_nodes = np.empty((num_nodes, R_space))
for index, item in enumerate(self.nodes.values()):
all_nodes[index] = item.get_position()
# get the sum of all of the positions along space dim and divide by the number of nodes
average_position = np.sum(all_nodes, axis=0) / num_nodes
return average_position | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mean_average_position():\n pass",
"def streets_per_node_avg(G):\n spn_vals = streets_per_node(G).values()\n return sum(spn_vals) / len(G.nodes)",
"def average_distance(self):\r\n total = 0\r\n edges = 0\r\n for code, _list in self.edges.items():\r\n for edge in _list:\r\n total += edge.distance\r\n edges += 1\r\n return total / edges",
"def compute_node_positions(self):\n pass",
"def net_position(self):\n average_price = 0\n sum = 0\n for transaction in self.transactions:\n average_price += abs(transaction[0]/transaction[1])\n sum += transaction[1]\n\n average_price /= len(self.transactions) \n average_price *= sum\n \n return average_price",
"def get_average_neighbors(self,radius):\n return np.mean([agent.n_neighbors(radius) for agent in self.agents])",
"def get_network_score(self):\n for key in self.nodes:\n node = self.nodes[key]\n if node.is_dc:\n neighbors_total = 0\n for neighbor in node.neighbors:\n weight = (neighbor.weight / node.neighbors_magnitude)\n neighbors_total += weight * neighbor.individual_score\n\n neighbor_score = neighbors_total / len(node.neighbors)\n relative_score = (node.individual_score + neighbor_score) / 2\n node.relative_score = relative_score\n\n total = 0\n for key in self.nodes:\n node = self.nodes[key]\n total += node.relative_score\n score = total / len(self.nodes)\n\n return score",
"def aver_score(datalist):\n scores_per_position = []\n \n for tupl in datalist:\n count = 0\n sum_of_position = 0\n for element in tupl[3]:\n sum_of_position += element\n count +=1\n aver_pos = sum_of_position/ count\n scores_per_position += [aver_pos]\n \n return scores_per_position",
"def average_coords_nt(all_profile_nt: namedtuple) -> tuple:\n \"\"\"Param: all_profile_nt: Named tuple containing all profiles\"\"\"\n x, y = sum(map(lambda t: t[0], map(lambda v: v[4], all_profile_nt)))/len(all_profile_nt), sum(\n map(lambda t: t[1], map(lambda v: v[4], all_profile_nt)))/len(all_profile_nt)\n return x, y",
"def get_avg_points(self):\n pass",
"def get_mean_coord(self):\n # load dataset in a dummy manner\n dataset = torchvision.datasets.MNIST('../../data/MNIST_data/', train=True, download=False)\n mean = (dataset.data.float().mean(0) / 255).unsqueeze(0) # [1,28,28]\n return mean",
"def calc_mean_dists(Z, node_index, out_mean_dists):\n\n N = Z.shape[0] + 1 # number of leaves\n\n left_child = int(Z[node_index, 0] - N)\n right_child = int(Z[node_index, 1] - N)\n\n if left_child < 0:\n left_average = 0\n left_merges = 0\n else:\n left_average, left_merges = calc_mean_dists(\n Z, left_child, out_mean_dists\n )\n\n if right_child < 0:\n right_average = 0\n right_merges = 0\n else:\n right_average, right_merges = calc_mean_dists(\n Z, right_child, out_mean_dists\n )\n\n this_height = Z[node_index, 2]\n this_merges = left_merges + right_merges + 1\n this_average = (\n left_average * left_merges + right_average * right_merges + this_height\n ) / this_merges\n\n out_mean_dists[node_index] = this_average\n\n return this_average, this_merges",
"def average_city_size(self):\r\n average = 0\r\n total = 0\r\n for code, node in self.vertices.items():\r\n average += node.population\r\n total += 1\r\n return average // total",
"def _mean(self,gp):\r\n return self.gp_link.transf(gp)",
"def _mean(self,gp):\r\n return self.gp_link.transf(gp)",
"def _mean(self,gp):\r\n return self.gp_link.transf(gp)",
"def _mean(self,gp):\r\n return self.gp_link.transf(gp)",
"def _mean(self,gp):\r\n return self.gp_link.transf(gp)",
"def data_flow_positive_node_count_avg(self) -> Optional[int]:\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_positive_node_count_avg or 0)",
"def assign_to_current_mean(img: np.ndarray, clustermask: np.ndarray) -> float:\n\n rows, cols = img.shape[:2]\n distances = np.zeros((numclusters, 1))\n overall_dist = 0\n\n for i in range(rows):\n for j in range(cols):\n distances = distance(img[i, j, :]) # returned shape: (numclusters, 1)\n \n k = np.argmin(distances) # closest cluster\n clustermask.itemset((i, j), k) # update cluster mask\n overall_dist += distances[k, 0] # sum distance\n\n return overall_dist",
"def avg_net(self) -> float:\n return torch.mean(self.units.net)",
"def average_impurity(self):\n children = tf.squeeze(tf.slice(self.variables.tree, [0, 0], [-1, 1]),\n squeeze_dims=[1])\n is_leaf = tf.equal(LEAF_NODE, children)\n leaves = tf.to_int32(tf.squeeze(tf.where(is_leaf), squeeze_dims=[1]))\n counts = tf.gather(self.variables.node_sums, leaves)\n impurity = self._weighted_gini(counts)\n return tf.reduce_sum(impurity) / tf.reduce_sum(counts + 1.0)",
"def center(coords):\n for c in coords:\n if 'avg' not in locals():\n avg = c\n else:\n avg += c\n return avg / len(coords)",
"def average_degree(self):\n return (self.L.size() - 1) - self.num_loop_vertices() / self.n",
"def get_mean_coord(self):\n # load dataset in a dummy manner\n dataset = torchvision.datasets.CIFAR10('../../data/CIFAR_data/', train=True, download=False)\n data = torch.FloatTensor(dataset.data).permute(0, 3, 1, 2) # shape [num_img, 3, 32, 32]\n mean = data.mean(0) / 255 # [3,32,32]\n return mean",
"def get_node_coordinates(nodes) :\r\n\r\n coords = [] #The list of coordinates\r\n\r\n for node in nodes :\r\n coords.append(node.coords)\r\n\r\n return coords",
"def mean(self):\n return np.average(self.particles, weights=self.weights, axis=0)",
"def get_mean_degree(self):\n\n return np.mean(self.graph.degree())",
"def nodalSum(val,elems,work,avg):\n nodes = unique1d(elems)\n for i in nodes:\n wi = where(elems==i)\n vi = val[wi]\n if avg:\n vi = vi.sum(axis=0)/vi.shape[0]\n else:\n vi = vi.sum(axis=0)\n val[wi] = vi",
"def calc_centroid(self):\n num = 0\n centroid = numpy.zeros(3, float)\n for atm in self:\n if atm.position is not None:\n centroid += atm.position\n num += 1\n return centroid / num"
] | [
"0.71411693",
"0.6822021",
"0.6471728",
"0.6465491",
"0.63243866",
"0.6256085",
"0.62065727",
"0.592237",
"0.58896816",
"0.5823813",
"0.5820218",
"0.57679445",
"0.5765885",
"0.57562655",
"0.57562655",
"0.57562655",
"0.57562655",
"0.57562655",
"0.57481986",
"0.5742098",
"0.5738348",
"0.5721083",
"0.56751126",
"0.5664723",
"0.56561947",
"0.56458855",
"0.5640217",
"0.5615892",
"0.5595603",
"0.55866015"
] | 0.84714663 | 0 |
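The get_network_average_position record fills an (N, R_space) array one row at a time and then divides a manual sum by the node count; NumPy's axis-wise mean performs the same reduction in one call. A sketch assuming, as in the record, that each node exposes get_position() returning a length-R_space vector (the function name and argument here are otherwise illustrative):

```python
import numpy as np

def network_average_position(nodes):
    """Mean position over a name -> node mapping (sketch only)."""
    positions = np.stack([node.get_position() for node in nodes.values()])
    return positions.mean(axis=0)  # equivalent to summing over axis 0 and dividing by N
```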
get the nodes with the lowest and highest number of neighbors | def get_interest_nodes(self):
# go through each node in the network to find the min and max degrees
max_value = 0
min_value = len(self.nodes)
for name in self.nodes:
# check for new max
if self.nodes[name].get_degree() >= max_value:
max_value = self.nodes[name].get_degree()
self.max_node = name
# check for new min
elif self.nodes[name].get_degree() <= min_value:
min_value = self.nodes[name].get_degree()
self.min_node = name
return self.max_node, self.min_node | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_neighbours(self):\n return []",
"def neighbours(self):\n return [x.node for x in self.edges]",
"def neighbors((min_i, min_j), (max_i, max_j), (i, j)):\n if j + 1 <= max_j:\n yield (i, j + 1)\n if j - 1 >= min_j:\n yield (i, j - 1)\n if i + 1 <= max_i:\n yield (i + 1, j)\n if i - 1 >= min_i:\n yield (i - 1, j)",
"def get_neighbours(self):\n return self.neighbours",
"def get_nodes(self, latlon=False):\n ids = np.where(np.isnan(self.data[:,:,:]))\n i_nan = ids[0][0] ; j_nan = ids[1][0]\n \n def area_neighbours(Area, i_nan, j_nan):\n rows = np.array(Area)[:,0]\n cols = np.array(Area)[:,1]\n rows_m = rows-1\n cols_m = cols-1\n rows_p = rows+1\n cols_p = cols+1\n \n p1 = np.array([rows_m,cols]).ravel().reshape(len(rows),2,order='F')\n p2 = np.array([rows_p,cols]).ravel().reshape(len(rows),2,order='F')\n p3 = np.array([rows,cols_m]).ravel().reshape(len(rows),2,order='F')\n p4 = np.array([rows,cols_p]).ravel().reshape(len(rows),2,order='F')\n cond1 = p1[:,0]<0\n cond2 = p2[:,0]>self.dimX-1\n cond3 = p3[:,1]<0\n cond4 = p4[:,1]>self.dimY-1\n if latlon:\n p3[:,1][cond3] = self.dimY-1\n p4[:,1][cond4] = 0\n else:\n p3[:,0][cond3] = i_nan\n p3[:,1][cond3] = j_nan\n p4[:,0][cond4] = i_nan\n p4[:,1][cond4] = j_nan\n p1[:,0][cond1] = i_nan\n p1[:,1][cond1] = j_nan\n p2[:,0][cond2] = i_nan\n p2[:,1][cond2] = j_nan\n p = np.concatenate((p1,p2,p3,p4)).tolist()\n return [i for i in p if i not in self.unavail]\n\n def area_max_correlation(Area, neighbours):\n Rmean = [] ; X = []\n for cell in neighbours:\n R = []\n new_cell = cell[0]*self.dimY + cell[1]\n if new_cell in self.gridcells:\n X.append(cell)\n IDm = np.where(self.gridcells==new_cell)\n Rmean.append(np.nanmean(self.corrs[cells_in_k,IDm]))\n try:\n Rmax = np.nanmax(Rmean)\n except ValueError:\n Rmax = np.nan\n return np.array(X), Rmean, Rmax\n \n def diag_indices(a, k):\n rows, cols = np.diag_indices_from(a)\n if k < 0:\n return rows[-k:], cols[:k]\n elif k > 0:\n return rows[:-k], cols[k:]\n else:\n return rows, cols\n\n #S T E P 1 (C R E A T E N O D E S)\n\n self.nodes = {}\n self.unavail = []\n if latlon:\n neighbour_corrs1 = self.corrs.diagonal(offset=1)\n neighbour_corrs2 = self.corrs.diagonal(offset=self.dimY-1)\n subset = np.arange(0,len(neighbour_corrs2),self.dimY)\n neighbour_corrs2 = neighbour_corrs2[subset]\n neighbour_corrs = np.concatenate((neighbour_corrs1,neighbour_corrs2))\n\n cellIDs1 = diag_indices(self.corrs,1)\n cellIDs2 = diag_indices(self.corrs,self.dimY-1)\n\n cellIDs = (np.concatenate((cellIDs1[0],cellIDs2[0][subset])),\\\n np.concatenate((cellIDs1[1],cellIDs2[1][subset])))\n else:\n neighbour_corrs = self.corrs.diagonal(offset=1)\n cellIDs = diag_indices(self.corrs,1)\n \n cellIDs = (self.gridcells[cellIDs[0]],self.gridcells[cellIDs[1]])\n k = 0\n neighbour_corrs,cellIDs1,cellIDs2 = list(zip(*sorted(zip(neighbour_corrs,cellIDs[0],cellIDs[1]),reverse=True)))\n cell_IDs = (cellIDs1,cellIDs2)\n np.random.seed(2)\n for it in range(len(neighbour_corrs)):\n cells_in_k = []\n i = cell_IDs[0][it]\n j = cell_IDs[1][it]\n r = neighbour_corrs[it]\n \n row_i = int(np.floor(i/self.dimY)) ; col_i = int(i % self.dimY)\n row_j = int(np.floor(j/self.dimY)) ; col_j = int(j % self.dimY)\n \n if ([row_i,col_i] not in self.unavail) & ([row_j,col_j] not in self.unavail):\n if r>self.tau:\n self.nodes.setdefault(k, []).append([row_i,col_i])\n self.nodes.setdefault(k, []).append([row_j,col_j])\n self.unavail.append([row_i,col_i])\n self.unavail.append([row_j,col_j])\n cells_in_k.extend(np.where(self.gridcells==i)[0])\n cells_in_k.extend(np.where(self.gridcells==j)[0])\n\n while True: #expand\n neighbours = area_neighbours(self.nodes[k], i_nan, j_nan)\n X, Rmean, Rmax = area_max_correlation(Area=self.nodes[k], neighbours=neighbours)\n if Rmax > self.tau:\n m = X[Rmean==Rmax].tolist()\n if len(m)>1:\n m = m[np.random.randint(low=0,high=len(m))]\n else:\n m = m[0]\n self.nodes.setdefault(k, []).append(m)\n 
self.unavail.append(m)\n cells_in_k.extend(np.where(self.gridcells==m[0]*self.dimY+m[1])[0])\n else:\n break\n if len(self.nodes[k]) <= 2:\n del self.nodes[k]\n k += 1\n else:\n break\n \n #S T E P 2 (M E R G E N O D E S)\n \n self.unavail = []\n while True:\n Rs = {}\n unavail_neighbours = {}\n num_cells = dict([(area,len(self.nodes[area])) if self.nodes[area] not in self.unavail else (area,np.inf) for area in self.nodes.keys()])\n maxID = min(num_cells.items(), key=operator.itemgetter(1))[0]\n if num_cells[maxID] > 175: #arbitrary choice?\n break\n else:\n cells_in_k = [np.where(self.gridcells==cell[0]*self.dimY+cell[1])[0] for cell in self.nodes[maxID]]\n neighbours = area_neighbours(self.nodes[maxID], i_nan, j_nan)\n for cell in neighbours:\n gcell = cell[0]*self.dimY + cell[1]\n Rmean = []\n cond1 = gcell in self.gridcells\n cond2 = cell not in self.nodes[maxID]\n cond3 = cell not in [k for k, g in itertools.groupby(sorted(itertools.chain(*unavail_neighbours.values())))]\n cond4 = len([area for area, cells in self.nodes.items() if cell in cells]) > 0\n if (cond1) & (cond2) & (cond3) & (cond4):\n nID = [area for area, cells in self.nodes.items() if cell in cells][0]\n unavail_neighbours[nID] = self.nodes[nID]\n X, Rmean, Rmax = area_max_correlation(Area=self.nodes[nID]+self.nodes[maxID], neighbours=self.nodes[nID]+self.nodes[maxID])\n if nID not in Rs: \n Rs[nID] = np.nanmean(Rmean)\n try:\n Rs_maxID = max(Rs.items(), key=operator.itemgetter(1))[0]\n if Rs[Rs_maxID] > self.tau:\n for cell in self.nodes.pop(Rs_maxID, None):\n self.nodes.setdefault(maxID, []).append([cell[0],cell[1]])\n else:\n self.unavail.append(self.nodes[maxID])\n except ValueError:\n self.unavail.append(self.nodes[maxID])",
"def get_neighbors(n):\n if n < 3:\n return ValueError(\"Integer must be greater than 3.\")\n p = generate()\n q = []\n l = 0\n g = 0\n while g <= n:\n q = next(p)\n g = q[-1]\n if q[-1] == n:\n l = q[0][-2]\n q = next(p)\n g = q[-1]\n elif q[-1] > n:\n l = q[0][-3]\n return l, g",
"def __getNodeNeighbors(self, node, numAbove, numBelow):\n parents = self.__getNodeParents(node, numAbove)\n children = self.__getNodeChildren(node, numBelow)\n return parents + children + [node]",
"def get_neighbours(self):\n return self.points_to.keys()",
"def neighbours(self):\n\n neighbours = []\n root = self.root\n if self == root:\n return neighbours\n\n ########################\n # IMMEDIATELY ADJACENT #\n sizes = [self.maxs[0] - self.mins[0], self.maxs[1] - self.mins[1]]\n coords = [(self.mins[0] + sizes[0] / 2, self.maxs[1] + sizes[1] / 2,),\n (self.maxs[0] + sizes[0] / 2, self.mins[1] + sizes[1] / 2,),\n (self.mins[0] + sizes[0] / 2, self.mins[1] - sizes[1] / 2,),\n (self.maxs[0] - sizes[0] / 2, self.mins[1] + sizes[1] / 2,),]\n # loop through top, right, bottom, left\n for i in range(4):\n x, y = coords[i]\n query_quad = root.query_xy(x, y)\n if query_quad is not None:\n same_size_idx = query_quad.location[: self.tree_depth]\n same_size_quad = root[same_size_idx]\n neighbours += list(self._get_border_children(same_size_quad, i))\n\n #############\n # DIAGONALS #\n root_sizes = [root.maxs[0] - root.mins[0], root.maxs[1] - root.mins[1]]\n xs, ys = (root_sizes / 2 ** root.max_tree_depth) / 2\n neighbours += [\n root.query_xy(self.mins[0] - xs, self.mins[1] - ys), # TL\n root.query_xy(self.maxs[0] + xs, self.mins[1] - ys), # TR\n root.query_xy(self.mins[0] - xs, self.maxs[1] + ys), # BL\n root.query_xy(self.maxs[0] + xs, self.maxs[1] + ys), # BR\n ]\n\n unique_neighbours = list(set(neighbours))\n try:\n unique_neighbours.remove(self)\n except ValueError:\n pass\n\n return unique_neighbours",
"def neighbors(self, max_dist=3):\n # TODO: this may have problems because the set doesn't\n # compare object id but uses user defined comparison methods\n # TODO: outgoing edges are no longer saved\n found = set()\n found.add(self)\n queue = [(self, 0)]\n while queue:\n node, d = queue.pop(0)\n if d < max_dist:\n for edge in node.outgoing:\n if edge.head not in found:\n found.add(edge.head)\n queue.append((edge.head, d+1))\n for edge in node.incoming:\n for tailnode in edge.tail:\n if tailnode not in found:\n found.add(tailnode)\n queue.append((tailnode, d+1))\n return found",
"def neighbors(node, topology):\n return [n for n in topology[node]]",
"def get_neighbours(self):\n return self._neighbours",
"def get_neighbours_8(x, y):\n return [(x - 1, y - 1), (x, y - 1), (x + 1, y - 1), \\\n (x - 1, y), (x + 1, y), \\\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1)]",
"def neighbours(x, y):\n n = []\n for c in ((y-1, x-1), (y-1, x), (y-1, x+1), (y, x-1), (y, x+1), (y+1, x-1), (y+1, x), (y+1, x+1)):\n n.append(c)\n return n",
"def nodes_with_m_nbrs(G,m):\n nodes = set()\n \n # Iterate over all nodes in G\n for n in G.nodes():\n \n # Check if the number of neighbors of n matches m\n if len(G.neighbors(n)) == m:\n \n # Add the node n to the set\n nodes.add(n)\n \n # Return the nodes with m neighbors\n return nodes",
"def neighbours((u,v)):\r\n return ((u,v+1), (u+1,v), (u,v-1), (u-1,v))",
"def get_max_and_min(self):\n max_x = float('-inf')\n min_x = float('inf')\n max_y = float('-inf')\n min_y = float('inf')\n max_z = float('-inf')\n min_z = float('inf')\n ans = max_x, max_y, max_z, min_x, min_y, min_z\n counter = 0\n for src, node in self._graph.get_all_v().items():\n if node.location is not None:\n x = node.location.x\n y = node.location.y\n z = node.location.z\n counter += 1\n max_x = x if x > max_x else max_x\n min_x = x if x < min_x else min_x\n max_y = y if y > max_y else max_y\n min_y = y if y < min_y else min_y\n max_z = z if z > max_z else max_z\n min_z = z if z < min_z else min_z\n if counter > 4:\n ans = max_x, max_y, max_z, min_x, min_y, min_z\n return ans",
"def get_neighbors(self):\n return self.neighbors",
"def get_neighbors(self):\n return self.neighbors",
"def get_neighbors(self):\n return self.neighbors",
"def get_neighbors(self):\n return self.neighbors",
"def getNeighborNodes(self, signature):\n x, y, z = signature[0], signature[1], signature[2]\n return [(x+1, y+1, z+1), (x+1, y, z+1), (x+1, y-1, z+1),\n (x, y+1, z+1), (x, y, z+1), (x, y-1, z+1),\n (x-1, y+1, z+1), (x-1, y, z+1), (x-1, y-1, z+1),\n (x+1, y+1, z-1), (x+1, y, z-1), (x+1, y-1, z-1),\n (x, y+1, z-1), (x, y, z-1), (x, y-1, z-1),\n (x-1, y+1, z-1), (x-1, y, z-1), (x-1, y-1, z-1),\n (x+1, y+1, z), (x+1, y, z), (x+1, y-1, z),\n (x, y+1, z), (x, y, z), (x, y-1, z),\n (x-1, y+1, z), (x-1, y, z), (x-1, y-1, z)]",
"def get_neighbouring_nodes(node) :\r\n\r\n connected_nodes = [] #A list of the connected nodes\r\n\r\n #Checking if the node belongs to the 1st row\r\n if(node.coords[0] != 0) :\r\n connected_node = Node((node.coords[0] - 1, node.coords[1]), goal_pos, node.gn_value - 1)\r\n #Checking if the node is an obstacle\r\n if(not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n #Checking if the node belongs to the last row\r\n if(node.coords[0] != grid_dims[0] - 1) :\r\n connected_node = Node((node.coords[0] + 1, node.coords[1]), goal_pos, node.gn_value - 1)\r\n #Checking if the node is an obstacle\r\n if(not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n #Checking if the node belongs to the 1st column\r\n if(node.coords[1] != 0) :\r\n connected_node = Node((node.coords[0], node.coords[1] - 1), goal_pos, node.gn_value - 1)\r\n #Checking if the node is an obstacle\r\n if(not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n #Checking if the node belongs to the 1st column\r\n if(node.coords[1] != grid_dims[1] - 1) :\r\n connected_node = Node((node.coords[0], node.coords[1] + 1), goal_pos, node.gn_value - 1)\r\n #Checking if the node is an obstacle\r\n if(not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n return connected_nodes",
"def getNeighbors(self):\n targets = set()\n for arc in self._arcsFrom:\n targets.add(arc.getFinish())\n return [ node for node in sorted(targets) ]",
"def branches(self):\n unique_nodes, unique_counts = np.unique(self.edges, return_counts=True)\n return unique_nodes[ unique_counts >= 3 ]",
"def guess_num_nodes_from(edgelist):\n return np.max(edgelist) + 1",
"def nodes_with_m_nbrs(G, m):\n nodes = set()\n\n # Iterate over all nodes in G\n for n in G.nodes():\n\n # Check if the number of neighbors of n matches m\n if len(G.neighbors(n)) == m:\n\n # Add the node n to the set\n nodes.add(n)\n\n # Return the nodes with m neighbors\n return nodes",
"def compact_neighb(self):\n order = np.argsort(self.edges[:, 0] * float(self.V) + self.edges[:, 1])\n neighb = self.edges[order, 1].astype(np.int_)\n weights = self.weights[order]\n degree, _ = self.degrees()\n idx = np.hstack((0, np.cumsum(degree))).astype(np.int_)\n return idx, neighb, weights",
"def neighbours(number: int, number_sectors: int) -> [int, int, int, int]:\n col = number % number_sectors\n row = number // number_sectors\n\n nieg = [number - number_sectors, number + number_sectors, number - 1, number + 1]\n\n if row == 0:\n nieg[0] = -1\n if row == number_sectors - 1:\n nieg[1] = -1\n if col == 0:\n nieg[2] = -1\n if col == number_sectors - 1:\n nieg[3] = -1\n return nieg",
"def neighbor_nodes(self,node):\n\n neighbors = []\n if node > self.cols:\n neighbors.append(node-self.cols)\n if node <= self.cols*(self.rows-1):\n neighbors.append(node+self.cols)\n if node % self.cols != 1:\n neighbors.append(node-1)\n if node % self.cols != 0:\n neighbors.append(node+1)\n\n return neighbors"
] | [
"0.7004768",
"0.6752797",
"0.67516655",
"0.6725832",
"0.6695274",
"0.66674364",
"0.66209084",
"0.65983164",
"0.6586327",
"0.6554205",
"0.6553236",
"0.6543602",
"0.6524486",
"0.64795715",
"0.64779377",
"0.6466439",
"0.6460438",
"0.6440627",
"0.6440627",
"0.6440627",
"0.6440627",
"0.64268225",
"0.64206976",
"0.6415797",
"0.6413385",
"0.6397233",
"0.6394802",
"0.63690215",
"0.6349489",
"0.6328354"
] | 0.6979103 | 1 |
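One caveat worth noting about the get_interest_nodes record above: because the minimum check sits in an elif, each node is compared against at most one extreme, so the true minimum can be missed entirely (with degrees visited in the order 2 then 5, min_node is never assigned). A hedged alternative that sidesteps the issue with the built-ins, assuming the same get_degree() accessor and a name -> node mapping:

```python
def interest_nodes(nodes):
    """Return (max_degree_name, min_degree_name) for a name -> node mapping (sketch only)."""
    def degree_of(name):
        return nodes[name].get_degree()
    return max(nodes, key=degree_of), min(nodes, key=degree_of)
```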
Receives the number of segments and the value of W. This is the summation function for the odd numbers. Returns the total of the summation | def sumaImpar(self,numSeg,w):
total=0
for i in range(1,numSeg,2):
total+=4*self.F(i*w)
return total | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sumaPar(self,numSeg,w):\n total=0\n for i in range(2,numSeg-1,2):\n total+=2*self.F(i*w)\n return total",
"def som(getallenlijst):\r\n total = sum(getallenlijst)\r\n return total",
"def patrimony_total(self):\n pass",
"def P(self,numSeg):\n w=self.x/numSeg\n return (w/3)*(self.F(0)+self.sumaImpar(numSeg,w)+self.sumaPar(numSeg,w)+self.F(self.x))",
"def n_suma(a1,nr_wyrazu,r):\n return (2*a1+(nr_wyrazu-1))*nr_wyrazu/2",
"def calculate(self):\n\n gt = self.ground_truth.flatten()\n seg = self.segmentation.flatten()\n\n n = gt.size\n mean_gt = gt.mean()\n mean_seg = seg.mean()\n mean = (mean_gt + mean_seg) / 2\n\n m = (gt + seg) / 2\n ssw = np.power(gt - m, 2).sum() + np.power(seg - m, 2).sum()\n ssb = np.power(m - mean, 2).sum()\n\n ssw /= n\n ssb = ssb / (n - 1) * 2\n\n return (ssb - ssw) / (ssb + ssw)",
"def segment_sum(self, left, right):\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater",
"def summation(self):\n return sum(self.read_ints())",
"def _ss_tot(self):\n squares = np.square(self.y - np.expand_dims(self._ybar, axis=-2))\n if self.w is None:\n return np.sum(squares, axis=-2)\n else:\n return np.sum(np.matmul(self.w_diag, squares), axis=-2)",
"def whc_tot(mukey, layers=''):\n #read appropriate soils.in content to a python list\n mukey = str(mukey)\n soil_path = \"/data/paustian/ernie/SSURGO_master_script/soil_test2/\"\n soil_fpath = soil_path+mukey[:-3]+\"/\"+mukey+\".in\"\n cont = [[]]\n data_input = open(soil_fpath, 'r')\n for line in data_input:\n cont.append(line.split())\n del cont[0]\n\n #convert all entries in the 2D list to float format where possible, or zero in the case\n #of very small numbers recorded in scientific notation\n for k in range(len(cont)):\n for l in range(len(cont[k])):\n cont[k][l] = float(cont[k][l])\n\n #loop through list and compute the water holding capacity increment represented in \n #each line\n min_h2o_evap = 0\n min_h2o = 0\n max_h2o = 0\n whc = 0\n for i in range(len(cont)):\n if not layers:\n depth = cont[i][1] - cont[i][0]\n FC = cont[i][3]\n WP = cont[i][4]\n WHC = FC - WP\n if i != 0:\n min_h2o_evap += depth*WP\n min_h2o += depth*WP\n max_h2o += depth*FC\n whc += depth*WHC\n else:\n if 1+i <= layers:\n depth = cont[i][1] - cont[i][0]\n FC = cont[i][3]\n WP = cont[i][4]\n WHC = FC - WP\n if i != 0:\n min_h2o_evap += depth*WP\n min_h2o += depth*WP\n max_h2o += depth*FC\n whc += depth*WHC\n if layers:\n if layers > len(cont):\n print \"NOTE: specified layer limit exceeds number of layers found in soils.in file\"\n\n return whc, min_h2o, max_h2o",
"def _compute_cuantia_subtotal(self):\n for line in self:\n line.gasto = line.unidades * line.pvp",
"def calculate(self):\n avg = self.sum / self.n if self.n != 0 else 0\n self.running_avg.append(avg)\n return avg",
"def DW_cal(data, data_sm):\n n = len(data)\n numerator = 0\n denominator = 0\n for i in range(n):\n if i == 0:\n numerator = numerator + 0\n else:\n numerator = numerator + ((data[i] - data_sm[i]) - (data[i-1] - data_sm[i-1]))**2\n denominator = denominator + (data[i] - data_sm[i])**2\n return numerator/denominator*n/(n - 1)",
"def nb_murs_total(self):\r\n murs_pleins=0\r\n for x in range(0,self.largeur):\r\n for y in range(0,self.hauteur):\r\n murs_pleins+=self.matrice_cases[x][y].nb_murs_pleins()\r\n \r\n return int((murs_pleins-self.hauteur*2-self.largeur*2)/2)",
"def _compute_gasto_subtotal(self):\n beneficio = ingreso_subtotal - gasto_subtotal_comercial",
"def calcula(self, is_deterministico):\n # criando header da tabela\n tabela = PrettyTable([\"Rodadas\",\n \"E[T1]\",\n \"E[W1]\",\n \"E[X1]\",\n \"E[N1]\",\n \"E[Nq1]\",\n \"E[Ns1]\",\n \"E[T2]\",\n \"E[W2]\",\n \"E[X2]\",\n \"E[N2]\",\n \"E[Nq2]\",\n \"E[Ns2]\",\n \"Var[W1]\",\n \"Var[W2]\"])\n \n\n for index in range(1, self.n_rodadas+1):\n # calculando a esperanca das metricas da fila 1\n # print(\"n fregueses por rodada: \", self.fregueses_por_rodada, \". E len w1: \", len(self.w1[index]))\n if len(self.w1[index]) > 0:\n self.x1_med_rodada[index] = sum(self.x1[index])/len(self.w1[index])\n self.w1_med_rodada[index] = sum(self.w1[index])/len(self.w1[index])\n self.nq1_med_rodada[index] = sum(self.nq1[index])/len(self.w1[index])\n self.ns1_med_rodada[index] = sum(self.ns1[index])/len(self.w1[index])\n self.n1_med_rodada[index] = sum(self.n1[index])/len(self.w1[index])\n self.t1_med_rodada[index] = sum(self.t1[index])/len(self.w1[index])\n\n # calculando a esperanca das metricas da fila 2\n # print(\"n fregueses por rodada: \", self.fregueses_por_rodada, \". E len w2: \", len(self.w2[index]))\n if len(self.w2[index]) > 0:\n self.x2_med_rodada[index] = sum(self.x2[index])/len(self.w2[index])\n self.w2_med_rodada[index] = sum(self.w2[index])/len(self.w2[index])\n self.nq2_med_rodada[index] = sum(self.nq2[index])/len(self.w2[index])\n self.ns2_med_rodada[index] = sum(self.ns2[index])/len(self.w2[index])\n self.n2_med_rodada[index] = sum(self.n2[index])/len(self.w2[index])\n self.t2_med_rodada[index] = sum(self.t2[index])/len(self.w2[index])\n\n # calculo de Var[W1] e Var[W2] para exibir na tabela\n if len(self.w1[index]) == 1:\n self.var_w1_med_rodada[index] = 0\n else:\n for amostra in range(len(self.w1[index])):\n self.var_w1_med_rodada[index] += (self.w1[index][amostra] - self.w1_med_rodada[index]) ** 2\n self.var_w1_med_rodada[index] /= (len(self.w1[index]) - 1)\n\n if len(self.w2[index]) == 1:\n self.var_w2_med_rodada[index] = 0\n else:\n for amostra2 in range(len(self.w2[index])):\n self.var_w2_med_rodada[index] += (self.w2[index][amostra2] - self.w2_med_rodada[index]) ** 2\n self.var_w2_med_rodada[index] /= (len(self.w2[index]) - 1)\n\n tabela.add_row([\"rodada_\" + str(index),\n round(self.t1_med_rodada[index], 6),\n round(self.w1_med_rodada[index], 6),\n round(self.x1_med_rodada[index], 6),\n round(self.n1_med_rodada[index], 6),\n round(self.nq1_med_rodada[index], 6),\n round(self.ns1_med_rodada[index], 6),\n round(self.t2_med_rodada[index], 6),\n round(self.w2_med_rodada[index], 6),\n round(self.x2_med_rodada[index], 6),\n round(self.n2_med_rodada[index], 6),\n round(self.nq2_med_rodada[index], 6),\n round(self.ns2_med_rodada[index], 6),\n round(self.var_w1_med_rodada[index], 6),\n round(self.var_w2_med_rodada[index], 6)])\n\n # acumulando medias totais\n self.x1_med_total += self.x1_med_rodada[index]\n self.w1_med_total += self.w1_med_rodada[index]\n self.nq1_med_total += self.nq1_med_rodada[index]\n self.ns1_med_total += self.ns1_med_rodada[index]\n self.n1_med_total += self.n1_med_rodada[index]\n self.t1_med_total += self.t1_med_rodada[index]\n self.x2_med_total += self.x2_med_rodada[index]\n self.w2_med_total += self.w2_med_rodada[index]\n self.nq2_med_total += self.nq2_med_rodada[index]\n self.ns2_med_total += self.ns2_med_rodada[index]\n self.n2_med_total += self.n2_med_rodada[index]\n self.t2_med_total += self.t2_med_rodada[index]\n self.var_w1_med_total += self.var_w1_med_rodada[index]\n self.var_w2_med_total += self.var_w2_med_rodada[index]\n\n # dividindo medias 
acumuladas pelo total de rodadas e enfim, calculando a media total de cada metrica\n self.x1_med_total /= self.n_rodadas\n self.w1_med_total /= self.n_rodadas\n self.nq1_med_total /= self.n_rodadas\n self.ns1_med_total /= self.n_rodadas\n self.n1_med_total /= self.n_rodadas\n self.t1_med_total /= self.n_rodadas\n self.x2_med_total /= self.n_rodadas\n self.w2_med_total /= self.n_rodadas\n self.nq2_med_total /= self.n_rodadas\n self.ns2_med_total /= self.n_rodadas\n self.n2_med_total /= self.n_rodadas\n self.t2_med_total /= self.n_rodadas\n self.var_w1_med_total /= self.n_rodadas\n self.var_w2_med_total /= self.n_rodadas\n\n tabela.add_row([\"Media\",\n round(self.t1_med_total, 6),\n round(self.w1_med_total, 6),\n round(self.x1_med_total, 6),\n round(self.n1_med_total, 6),\n round(self.nq1_med_total, 6),\n round(self.ns1_med_total, 6),\n round(self.t2_med_total, 6),\n round(self.w2_med_total, 6),\n round(self.x2_med_total, 6),\n round(self.n2_med_total, 6),\n round(self.nq2_med_total, 6),\n round(self.ns2_med_total, 6),\n round(self.var_w1_med_total, 6),\n round(self.var_w2_med_total, 6)\n ])\n\n print(tabela, \"\\n\")\n\n if not is_deterministico:\n self.calcula_ic()",
"def calculate(self):",
"def subtotal(self):\n return self.cantidad * self.precio",
"def overall_reduction(self):\n return 84",
"def wsum(self):\n return reduce(operator.add, self.wvalues, 0.0)",
"def calculo(self):\n return self.peso / (self.altura * self.altura)",
"def addition_homework(data: Iterator[str]) -> int:\n n = final_sum(data)\n return n.magnitude",
"def calcularTotal(self):\n subtotales=[]\n for row in range(0,self.tableFactura.rowCount()):\n subtotales.append(float(self.tableFactura.item(row,2).text()))\n importeTotal=sum(subtotales)\n return importeTotal",
"def resultado(self):\n return self.__numerador/self.__denominador",
"def sumar(self):\n self.resultado = self.valor_1 + self.valor_2",
"def somme(self) -> Numeric:\n return query_sum(self.offres(), \"prix\", output_field=models.DecimalField())",
"def get_salario_total(self):\n s = 0\n for e in self.empleados:\n s += e.get_salario()\n return s",
"def subtotal(self):\n return self.precio_unitario * self.cantidad",
"def calculate(self) -> float:",
"def number_total(self):\n return sum(self.grid[pos][1] for pos in [\"n1\", \"n2\", \"n3\", \"n4\", \"n5\", \"n6\"] if self.grid[pos][0])"
] | [
"0.6964878",
"0.6134664",
"0.594122",
"0.5869036",
"0.5832696",
"0.58005255",
"0.5768656",
"0.57542235",
"0.57383496",
"0.5733486",
"0.5723789",
"0.5633767",
"0.5631011",
"0.5616186",
"0.5609557",
"0.5607594",
"0.56038254",
"0.5596513",
"0.5594047",
"0.5584726",
"0.55708295",
"0.55494",
"0.55375826",
"0.5518914",
"0.5515521",
"0.55130935",
"0.5504907",
"0.55022407",
"0.5490591",
"0.5490098"
] | 0.6899784 | 1 |
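Taken together with the sumaPar and P(numSeg) documents shown alongside it, the sumaImpar record computes the odd-index block of the composite Simpson's rule, with segment width w = x/numSeg and an even number of segments n. Reading the w/3 weighting as a quadrature of F over [0, x] (that integral interpretation is an assumption; only the sums themselves appear in the records), the rule being assembled is:

$$\int_0^x F(t)\,dt \;\approx\; \frac{w}{3}\left(F(0) + 4\sum_{i=1,3,\dots}^{n-1} F(iw) + 2\sum_{i=2,4,\dots}^{n-2} F(iw) + F(x)\right),\qquad w=\frac{x}{n}.$$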
Receives the number of segments and the value of W. This is the summation function for the even numbers. Returns the total of the summation | def sumaPar(self,numSeg,w):
total=0
for i in range(2,numSeg-1,2):
total+=2*self.F(i*w)
return total | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def som(getallenlijst):\r\n total = sum(getallenlijst)\r\n return total",
"def patrimony_total(self):\n pass",
"def sumaImpar(self,numSeg,w):\n total=0\n for i in range(1,numSeg,2):\n total+=4*self.F(i*w)\n return total",
"def n_suma(a1,nr_wyrazu,r):\n return (2*a1+(nr_wyrazu-1))*nr_wyrazu/2",
"def segment_sum(self, left, right):\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater",
"def _compute_cuantia_subtotal(self):\n for line in self:\n line.gasto = line.unidades * line.pvp",
"def number_total(self):\n return sum(self.grid[pos][1] for pos in [\"n1\", \"n2\", \"n3\", \"n4\", \"n5\", \"n6\"] if self.grid[pos][0])",
"def P(self,numSeg):\n w=self.x/numSeg\n return (w/3)*(self.F(0)+self.sumaImpar(numSeg,w)+self.sumaPar(numSeg,w)+self.F(self.x))",
"def sumar(self):\n self.resultado = self.valor_1 + self.valor_2",
"def calculo(self):\n return self.peso / (self.altura * self.altura)",
"def calcular_promedio(lista):\r\n suma = 0\r\n promedio = 0\r\n \r\n for marca in lista:\r\n suma += marca[1]\r\n \r\n promedio = suma//len(lista)\r\n \r\n return promedio",
"def nb_murs_total(self):\r\n murs_pleins=0\r\n for x in range(0,self.largeur):\r\n for y in range(0,self.hauteur):\r\n murs_pleins+=self.matrice_cases[x][y].nb_murs_pleins()\r\n \r\n return int((murs_pleins-self.hauteur*2-self.largeur*2)/2)",
"def summation(self):\n return sum(self.read_ints())",
"def subtotal(self):\n return self.cantidad * self.precio",
"def somme(self) -> Numeric:\n return query_sum(self.offres(), \"prix\", output_field=models.DecimalField())",
"def calcularTotal(self):\n subtotales=[]\n for row in range(0,self.tableNC.rowCount()):\n subtotales.append(float(self.tableNC.item(row,2).text()))\n return sum(subtotales)",
"def calcularTotal(self):\n subtotales=[]\n for row in range(0,self.tableFactura.rowCount()):\n subtotales.append(float(self.tableFactura.item(row,2).text()))\n importeTotal=sum(subtotales)\n return importeTotal",
"def get_total(self):\n\n # Total sum\n self.sum = 0.00\n\n # Determine which Check buttons are selected\n # and add the charges to find the total\n if self.check_1.get() == 1:\n self.sum += 30.00\n if self.check_2.get() == 1:\n self.sum += 20.00\n if self.check_3.get() == 1:\n self.sum += 40.00\n if self.check_4.get() == 1:\n self.sum += 100.00\n if self.check_5.get() == 1:\n self.sum += 35.00\n if self.check_6.get() == 1:\n self.sum += 200.00\n if self.check_7.get() == 1:\n self.sum += 20.00\n\n # Convert the sum to string\n # and store in StringVar object\n # to automatically update the total_val label\n self.sum_str.set(self.sum)",
"def subtotal(self):\n return self.precio_unitario * self.cantidad",
"def _ss_tot(self):\n squares = np.square(self.y - np.expand_dims(self._ybar, axis=-2))\n if self.w is None:\n return np.sum(squares, axis=-2)\n else:\n return np.sum(np.matmul(self.w_diag, squares), axis=-2)",
"def get_salario_total(self):\n s = 0\n for e in self.empleados:\n s += e.get_salario()\n return s",
"def SumaryPresupuesto(vj):\n\n sumaUSD = sumaCUC = totalUSD = totalCUC = 0.0\n\n for row in vj.tbPresupesto.rows.values():\n cambio = row.cambio\n moneda = row.moneda\n value = row.value\n\n if moneda == MD.Usd:\n sumaUSD += value\n totalUSD += value\n totalCUC += ( value * cambio )\n else:\n sumaCUC += value\n totalCUC += value\n totalUSD += ( value / cambio )\n\n vj.PresupCuc = totalCUC\n if totalUSD>0 and totalCUC>0: \n vj.MD.SetChange( totalCUC/totalUSD, MD.Usd, MD.Cuc )",
"def totalEnergy(self, controlpoints):\n # spacing is positive and unbound, but smaller than n-1 in pratice\n # curvature is within [0, 2*(n-2)]\n internal = self.spacingEnergy(controlpoints) + self.curvatureEnergy(controlpoints)\n n = len(self.controlpoints)\n internal_max = n-1 + 2*(n-2) \n \n # external is within [0, self.ExternalEnergy.max]\n external = self.externalEnergy(controlpoints)\n \n # return the sum of the scaled internal and the external energy\n return self.ExternalEnergy.max*(internal/internal_max)*self.inner_weight + external*self.outer_weight",
"def calcula(self, is_deterministico):\n # criando header da tabela\n tabela = PrettyTable([\"Rodadas\",\n \"E[T1]\",\n \"E[W1]\",\n \"E[X1]\",\n \"E[N1]\",\n \"E[Nq1]\",\n \"E[Ns1]\",\n \"E[T2]\",\n \"E[W2]\",\n \"E[X2]\",\n \"E[N2]\",\n \"E[Nq2]\",\n \"E[Ns2]\",\n \"Var[W1]\",\n \"Var[W2]\"])\n \n\n for index in range(1, self.n_rodadas+1):\n # calculando a esperanca das metricas da fila 1\n # print(\"n fregueses por rodada: \", self.fregueses_por_rodada, \". E len w1: \", len(self.w1[index]))\n if len(self.w1[index]) > 0:\n self.x1_med_rodada[index] = sum(self.x1[index])/len(self.w1[index])\n self.w1_med_rodada[index] = sum(self.w1[index])/len(self.w1[index])\n self.nq1_med_rodada[index] = sum(self.nq1[index])/len(self.w1[index])\n self.ns1_med_rodada[index] = sum(self.ns1[index])/len(self.w1[index])\n self.n1_med_rodada[index] = sum(self.n1[index])/len(self.w1[index])\n self.t1_med_rodada[index] = sum(self.t1[index])/len(self.w1[index])\n\n # calculando a esperanca das metricas da fila 2\n # print(\"n fregueses por rodada: \", self.fregueses_por_rodada, \". E len w2: \", len(self.w2[index]))\n if len(self.w2[index]) > 0:\n self.x2_med_rodada[index] = sum(self.x2[index])/len(self.w2[index])\n self.w2_med_rodada[index] = sum(self.w2[index])/len(self.w2[index])\n self.nq2_med_rodada[index] = sum(self.nq2[index])/len(self.w2[index])\n self.ns2_med_rodada[index] = sum(self.ns2[index])/len(self.w2[index])\n self.n2_med_rodada[index] = sum(self.n2[index])/len(self.w2[index])\n self.t2_med_rodada[index] = sum(self.t2[index])/len(self.w2[index])\n\n # calculo de Var[W1] e Var[W2] para exibir na tabela\n if len(self.w1[index]) == 1:\n self.var_w1_med_rodada[index] = 0\n else:\n for amostra in range(len(self.w1[index])):\n self.var_w1_med_rodada[index] += (self.w1[index][amostra] - self.w1_med_rodada[index]) ** 2\n self.var_w1_med_rodada[index] /= (len(self.w1[index]) - 1)\n\n if len(self.w2[index]) == 1:\n self.var_w2_med_rodada[index] = 0\n else:\n for amostra2 in range(len(self.w2[index])):\n self.var_w2_med_rodada[index] += (self.w2[index][amostra2] - self.w2_med_rodada[index]) ** 2\n self.var_w2_med_rodada[index] /= (len(self.w2[index]) - 1)\n\n tabela.add_row([\"rodada_\" + str(index),\n round(self.t1_med_rodada[index], 6),\n round(self.w1_med_rodada[index], 6),\n round(self.x1_med_rodada[index], 6),\n round(self.n1_med_rodada[index], 6),\n round(self.nq1_med_rodada[index], 6),\n round(self.ns1_med_rodada[index], 6),\n round(self.t2_med_rodada[index], 6),\n round(self.w2_med_rodada[index], 6),\n round(self.x2_med_rodada[index], 6),\n round(self.n2_med_rodada[index], 6),\n round(self.nq2_med_rodada[index], 6),\n round(self.ns2_med_rodada[index], 6),\n round(self.var_w1_med_rodada[index], 6),\n round(self.var_w2_med_rodada[index], 6)])\n\n # acumulando medias totais\n self.x1_med_total += self.x1_med_rodada[index]\n self.w1_med_total += self.w1_med_rodada[index]\n self.nq1_med_total += self.nq1_med_rodada[index]\n self.ns1_med_total += self.ns1_med_rodada[index]\n self.n1_med_total += self.n1_med_rodada[index]\n self.t1_med_total += self.t1_med_rodada[index]\n self.x2_med_total += self.x2_med_rodada[index]\n self.w2_med_total += self.w2_med_rodada[index]\n self.nq2_med_total += self.nq2_med_rodada[index]\n self.ns2_med_total += self.ns2_med_rodada[index]\n self.n2_med_total += self.n2_med_rodada[index]\n self.t2_med_total += self.t2_med_rodada[index]\n self.var_w1_med_total += self.var_w1_med_rodada[index]\n self.var_w2_med_total += self.var_w2_med_rodada[index]\n\n # dividindo medias 
acumuladas pelo total de rodadas e enfim, calculando a media total de cada metrica\n self.x1_med_total /= self.n_rodadas\n self.w1_med_total /= self.n_rodadas\n self.nq1_med_total /= self.n_rodadas\n self.ns1_med_total /= self.n_rodadas\n self.n1_med_total /= self.n_rodadas\n self.t1_med_total /= self.n_rodadas\n self.x2_med_total /= self.n_rodadas\n self.w2_med_total /= self.n_rodadas\n self.nq2_med_total /= self.n_rodadas\n self.ns2_med_total /= self.n_rodadas\n self.n2_med_total /= self.n_rodadas\n self.t2_med_total /= self.n_rodadas\n self.var_w1_med_total /= self.n_rodadas\n self.var_w2_med_total /= self.n_rodadas\n\n tabela.add_row([\"Media\",\n round(self.t1_med_total, 6),\n round(self.w1_med_total, 6),\n round(self.x1_med_total, 6),\n round(self.n1_med_total, 6),\n round(self.nq1_med_total, 6),\n round(self.ns1_med_total, 6),\n round(self.t2_med_total, 6),\n round(self.w2_med_total, 6),\n round(self.x2_med_total, 6),\n round(self.n2_med_total, 6),\n round(self.nq2_med_total, 6),\n round(self.ns2_med_total, 6),\n round(self.var_w1_med_total, 6),\n round(self.var_w2_med_total, 6)\n ])\n\n print(tabela, \"\\n\")\n\n if not is_deterministico:\n self.calcula_ic()",
"def calcula_variancias(self):\n for index in range(1, self.n_rodadas+1):\n self.var_x1 += (self.x1_med_rodada[index] - self.x1_med_total) ** 2\n self.var_w1 += (self.w1_med_rodada[index] - self.w1_med_total) ** 2\n self.var_nq1 += (self.nq1_med_rodada[index] - self.nq1_med_total) ** 2\n self.var_ns1 += (self.ns1_med_rodada[index] - self.ns1_med_total) ** 2\n self.var_n1 += (self.n1_med_rodada[index] - self.n1_med_total) ** 2\n self.var_t1 += (self.t1_med_rodada[index] - self.t1_med_total) ** 2\n self.var_w1_med += (self.var_w1_med_rodada[index] - self.var_w1_med_total) ** 2\n\n self.var_x2 += (self.x2_med_rodada[index] - self.x2_med_total) ** 2\n self.var_w2 += (self.w2_med_rodada[index] - self.w2_med_total) ** 2\n self.var_nq2 += (self.nq2_med_rodada[index] - self.nq2_med_total) ** 2\n self.var_ns2 += (self.ns2_med_rodada[index] - self.ns2_med_total) ** 2\n self.var_n2 += (self.n2_med_rodada[index] - self.n2_med_total) ** 2\n self.var_t2 += (self.t2_med_rodada[index] - self.t2_med_total) ** 2\n self.var_w2_med += (self.var_w2_med_rodada[index] - self.var_w2_med_total) ** 2\n\n self.var_x1 /= (self.n_rodadas - 1)\n self.var_w1 /= (self.n_rodadas - 1)\n self.var_nq1 /= (self.n_rodadas - 1)\n self.var_ns1 /= (self.n_rodadas - 1)\n self.var_n1 /= (self.n_rodadas - 1)\n self.var_t1 /= (self.n_rodadas - 1)\n self.var_w1_med /= (self.n_rodadas - 1)\n\n self.var_x2 /= (self.n_rodadas - 1)\n self.var_w2 /= (self.n_rodadas - 1)\n self.var_nq2 /= (self.n_rodadas - 1)\n self.var_ns2 /= (self.n_rodadas - 1)\n self.var_n2 /= (self.n_rodadas - 1)\n self.var_t2 /= (self.n_rodadas - 1)\n self.var_w2_med /= (self.n_rodadas - 1)",
"def resultado(self):\n return self.__numerador/self.__denominador",
"def get_total_haberes(self):\n return float(self.input.get_text(liquidaciones_historicas_catalog.TOTAL_HABERES).replace(\".\", \"\"))",
"def countPoints(self,sumation):\n if sumation == 21:\n points = 7\n elif sumation == 20:\n points = 5\n elif sumation == 19:\n points = 4\n elif sumation == 18:\n points = 3\n elif sumation == 17:\n points = 2\n elif sumation <=16:\n points = 1\n else:\n points = 0\n return points",
"def __puntuacion_total(self):\n disparos = []\n for disparo in self.__disparos:\n total = 0\n for puntaje in disparo['disparos']:\n total += puntaje\n disparo['puntaje_total'] = total\n disparos.append(disparo)\n return disparos",
"def calculate(self):\n\n gt = self.ground_truth.flatten()\n seg = self.segmentation.flatten()\n\n n = gt.size\n mean_gt = gt.mean()\n mean_seg = seg.mean()\n mean = (mean_gt + mean_seg) / 2\n\n m = (gt + seg) / 2\n ssw = np.power(gt - m, 2).sum() + np.power(seg - m, 2).sum()\n ssb = np.power(m - mean, 2).sum()\n\n ssw /= n\n ssb = ssb / (n - 1) * 2\n\n return (ssb - ssw) / (ssb + ssw)"
] | [
"0.63764817",
"0.6370845",
"0.6358655",
"0.6194959",
"0.6053004",
"0.5966047",
"0.59162605",
"0.5848194",
"0.5827601",
"0.57799345",
"0.5768629",
"0.5754656",
"0.5723134",
"0.57056737",
"0.5684519",
"0.56820756",
"0.56553745",
"0.56527156",
"0.5647538",
"0.56249356",
"0.5621911",
"0.56203496",
"0.5617958",
"0.56100816",
"0.5589113",
"0.5582694",
"0.55693114",
"0.5562495",
"0.555947",
"0.5549849"
] | 0.72560215 | 0 |
Return a decorator which will parse a gerber file before running the test. | def use_file(filename):
def decorator(test_method):
""" Add params to decorator function. """
@wraps(test_method)
def wrapper(self):
""" Parse file then run test. """
parser = Gerber(ignore_unknown=False)
self.design = parser.parse(path.join(DIR, filename))
test_method(self)
return wrapper
return decorator | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def decorator(test_method):\n\n @wraps(test_method)\n def wrapper(self):\n \"\"\" Parse file then run test. \"\"\"\n parser = Gerber(ignore_unknown=False)\n self.design = parser.parse(path.join(DIR, filename))\n test_method(self)\n\n return wrapper",
"def wrapper(self):\n parser = Gerber(ignore_unknown=False)\n self.design = parser.parse(path.join(DIR, filename))\n test_method(self)",
"def test_gen_parser(self):\n pass",
"def mock_parser_fcn(s):",
"def requiresParsing(function):\n\[email protected](function)\n\tdef parsedFile(self, *args, **kwargs):\n\t\t\"\"\"\n\t\tI'm hidden in a function. Do not parse me.\n\t\t\"\"\"\n\t\tif not self._parsed:\n\t\t\tself.parse()\n\n\t\treturn function(self, *args, **kwargs)\n\n\treturn parsedFile",
"def test_basic_parsers():",
"def test_create_new_gerber_parser(self):\n parser = Gerber()\n assert parser != None",
"def test_parser():\n return parser(\"Testing\", \"Use this from a test\", \"\")",
"def testgen(self):\n self.parse()\n self.generate()",
"def parsedFile(self, *args, **kwargs):\n\t\tif not self._parsed:\n\t\t\tself.parse()\n\n\t\treturn function(self, *args, **kwargs)",
"def importer():\n pass",
"def mock_parser_fcn(s):\n return s",
"def process(self, filename: str, contents: str) -> None:\n self._current_file_decorators = set()\n self._current_file = filename\n try:\n parsed = ast.parse(contents, filename=filename)\n except Exception as e: # pylint: disable=broad-exception-caught\n # logging errors when parsing file\n logging.exception('Error parsing %s: %s', filename, e)\n else:\n self.visit(parsed)\n finally:\n self._current_file = None\n self._current_file_decorators = set()",
"def test_parser_patch_______(parser):\n pass",
"def test_basic_parser_trace():",
"def parser_for(self, name):\n\n def decorator(func):\n self.add_parser(name, func)\n return func\n\n return decorator",
"def test_function_runs(self):\n\t\tanalyse_text(self.filename)",
"def __call__( self, file_contents, regression_var ):\n return super()._process( self.__load( file_contents ), regression_var )",
"def __call__( self, file_contents, regression_var ):\n return super()._process( self.__load( file_contents ), regression_var )",
"def buildTestCase(xmlfile, xmlBase, description, method, exc, params):\n func = lambda self, xmlfile=xmlfile, exc=exc, params=params: \\\n method(self, exc, params, feedvalidator.validateString(open(xmlfile).read(), fallback='US-ASCII', base=xmlBase)['loggedEvents'])\n func.__doc__ = description\n return func",
"def main():\n parse_file(sys.argv[1])",
"def test_simple_parse(self):\n pass",
"def from_config(func):\n\t\n\tdef decorator(filename):\n\t\twith open(filename, 'r') as file_in:\n\t\t\tconfig = json.load(file_in)\n\n\t\t#'**' takes a dict and extracts its contents and passes them as parameters to a function.\n\t\t#returns the intial function with new arguments????\n\t\treturn func(**config)\n\t\n\t## return the decorated input function\n\treturn decorator",
"def create_parser():\n pass",
"def parse_input_mocked_metadata(monkeypatch):\n\n def _parse_input(text, cwd=None):\n \"\"\"The parser fixture accepts a blackbird string to parse\"\"\"\n text = \"name mockname\\nversion 1.0\\n\" + text\n lexer = blackbirdLexer(antlr4.InputStream(text))\n stream = antlr4.CommonTokenStream(lexer)\n parser = blackbirdParser(stream)\n\n tree = parser.start()\n\n bb = BlackbirdListener(cwd=cwd)\n walker = antlr4.ParseTreeWalker()\n walker.walk(bb, tree)\n\n return bb.program\n\n return _parse_input",
"def __init__(self, file_pattern, validate=True, **nucleus_kwargs):\n\n super(ReadGenomicsFile, self).__init__()\n self._source = self._source_class(\n file_pattern, validate=validate, **nucleus_kwargs)",
"def reader(self):\n @contextmanager\n def generator(data):\n \"\"\"\n Args:\n data (str): could be a filename or the text to tokenize.\n Returns:\n a context manager that can be used in a `with` contruct,\n yielding each line of the tokenized `data`.\n \"\"\"\n if not os.path.exists(data):\n yield self.format(self.predict(data))\n else:\n with open(data) as f:\n yield self.format(self.predict(f.read()))\n return generator",
"def test_regexp_chunk_parser():",
"def create_test_function(source, output, lang):\n with open(source) as f:\n snippet = f.read()\n with open(output) as f:\n res = f.read()\n\n def tst_func(slf):\n slf.do(snippet, res, lang=lang)\n\n return tst_func",
"def test_probabilistic_parsers():"
] | [
"0.7448634",
"0.71167976",
"0.59627867",
"0.59301746",
"0.5918239",
"0.59080213",
"0.58810973",
"0.57334006",
"0.56384057",
"0.5625038",
"0.55969214",
"0.55764806",
"0.5508951",
"0.550502",
"0.5491165",
"0.5372833",
"0.53668374",
"0.53651786",
"0.53651786",
"0.52833706",
"0.5275567",
"0.5267529",
"0.5259192",
"0.52264065",
"0.5209716",
"0.52039385",
"0.51788163",
"0.5124714",
"0.5123727",
"0.5103501"
] | 0.7256232 | 1 |
Create an empty gerber parser. | def test_create_new_gerber_parser(self):
parser = Gerber()
assert parser != None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_parser():\n pass",
"def _make_parser(self):\n return DefusedExpatParser()",
"def __init__(self, parser=None):",
"def __init__(self):\n print \"You asked for a Parser!\"",
"def __parser__(self):\n return self",
"def __init__(self, parser: Any = None):",
"def test_gen_parser(self):\n pass",
"def __init__(self, *args, **kw):\n self.parser = Parser(*args, **kw)",
"def setup_parser(self, parser):",
"def CreateParser(skip_meta: bool = False):\n return ParserWithLines(skip_meta)",
"def _construct_full_parser(self):\r\n return self._construct_partial_parser().groups(self._global_options.values())",
"def parserFactory(intLanguageName, debugMode):\r\n #if text.getDebug() != debugMode:\r\n # text.setDebugRecurs(debugMode)\r\n\r\n return THE_PARSER",
"def make_parser(language):\n parser = Parser()\n parser.onto_mode = True\n mappings = {'en': 'ENGLISH', 'de': \"GERMAN\", 'cn': \"CHINESE\" }\n parser.language = mappings[language]\n return parser",
"def _create_parser(self):\n default_options = self._create_defaults()\n\n all_categories = ['build', 'whitespace']\n\n mock_stderr = self._MockStdErr()\n\n return ArgumentParser(\n all_categories=all_categories,\n base_filter_rules=[],\n default_options=default_options,\n mock_stderr=mock_stderr,\n usage='test usage')",
"def init_parser():\n parser = argparse.ArgumentParser(\n description='Backup application code and data.')\n parser.add_argument('-a', '--app-id', required=True,\n help='the application ID to run the backup for')\n parser.add_argument('--source-code', action='store_true',\n default=False, help='backup the source code too. Disabled by default.')\n parser.add_argument('-d', '--debug', required=False, action=\"store_true\",\n default=False, help='display debug messages')\n parser.add_argument('--skip', required=False, nargs=\"+\",\n help='skip the following kinds, separated by spaces')\n\n return parser",
"def create_parser():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n add_help=False)\n parser.add_argument(\n '--help', '-h',\n action='store_true',\n dest='help',\n help=\"\"\"show this help message and exit\"\"\")\n parser.add_argument(\n '--verbose', '-v',\n action='count',\n default=0,\n help=\"\"\"Enable verbose output from '%(prog)s'. A second and third\n '-v' increases verbosity.\"\"\")\n parser.add_argument(\n '--sequential',\n action='store_true',\n help=\"\"\"Execute analyzer sequentialy.\"\"\")\n parser.add_argument(\n '--cdb',\n metavar='<file>',\n default=\"compile_commands.json\",\n help=\"\"\"The JSON compilation database.\"\"\")\n return parser",
"def create_parser():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('manga_name',\n type = str,\n help = \"Input the name of the manga.\"\n )\n parser.add_argument('-b','--begin',\n type = int,\n help = 'Input the starting chapter.Defaults to first chapter.'\n )\n parser.add_argument('-e','--end',\n type = int,\n help = 'Input the ending chapter.Defaults to the last possible chapter.'\n )\n parser.add_argument('-c','--chapter',\n type = int,\n help = 'Provide if you want to download only one chapter.'\n )\n parser.add_argument('-t','--target',\n type = str,\n help = 'The location where manga has to be downloaded.Defaults to the current directory.',\n default = '.'\n )\n parser.add_argument('-s','--site',\n type = str,\n help = 'The site through which the manga has to be downloaded. Defaults to MangaPanda.',\n default = 'mangapanda'\n )\n\n return parser",
"def __init__(self):\n\n self.prim_parser = parser.Parser()",
"def make_minilang_parser():\n gramm = Grammar.from_string(GRAMMAR)\n return parser_from_grammar(gramm, 'program')",
"def setup_parser():\r\n parser = argparse.ArgumentParser(description='Freeseer Recording Utility',\r\n formatter_class=argparse.RawTextHelpFormatter)\r\n parser.add_argument(\"-v\", \"--version\", action='version',\r\n version=textwrap.dedent('''\\\r\n Freeseer {version} ({platform})\r\n Python {pymajor}.{pyminor}.{pymicro}\r\n PyGst {pygst_version}\r\n PyQt {pyqt_version}\r\n Qt {qt_version}\r\n Yapsy {yapsy_version}\r\n '''.format(version=__version__,\r\n platform=sys.platform,\r\n pymajor=sys.version_info.major,\r\n pyminor=sys.version_info.minor,\r\n pymicro=sys.version_info.micro,\r\n pygst_version=pygst._pygst_version,\r\n pyqt_version=QtCore.PYQT_VERSION_STR,\r\n qt_version=QtCore.QT_VERSION_STR,\r\n yapsy_version=yapsy.__version__)))\r\n\r\n # Configure Subparsers\r\n subparsers = parser.add_subparsers(dest='app', help='Command List')\r\n setup_parser_record(subparsers)\r\n setup_parser_config(subparsers)\r\n setup_parser_talk(subparsers)\r\n setup_parser_report(subparsers)\r\n setup_parser_upload(subparsers)\r\n return parser",
"def initialize_parser():\n ftypes = [\n \"png\",\n \"jpg\",\n \"jpeg\",\n \"pdf\",\n \"ps\",\n \"eps\",\n \"rgba\",\n \"svg\",\n \"tiff\",\n \"tif\",\n \"pgf\",\n \"svgz\",\n \"raw\",\n ]\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-s\",\n \"--savefig\",\n action=\"store\",\n default=False,\n choices=ftypes,\n help=\"Save figure to a file\",\n )\n return parser",
"def finalize(self):\n return Parser(self)",
"def make_parser():\n\n parser = ArgumentParser(description=\"Create dummy sensor stream esque data\")\n parser.add_argument('--tuples-per-emit', '-t', type=int, default=1,\n help='number of tuples to emit at once')\n parser.add_argument('--sensors', '-s', type=int, default=1,\n help='number of sensors to generate')\n\n return parser",
"def create_parser():\n desc_str = (\n \"\"\"Look at the results of inference with cbayes scripts.\"\"\"\n )\n\n parser = argparse.ArgumentParser(description=desc_str)\n \n parser.add_argument('-dir', '--directory',\n help = 'name of the cbayes ouput directory',\n type = str,\n required = True\n )\n \n # do the parsing\n args = parser.parse_args()\n\n return args",
"def create_parser():\n p = NewParser()\n\n p.add_argument('reference', type=str,\n help = \"Fasta reference file that reads were mapped to.\")\n\n p.add_argument('gff', type=str,\n help = \"GFF file containing reference genome annotations.\")\n\n p.add_argument('vcf', type=str,\n help = \"VCF file to parse.\")\n\n args = p.parse_args(sys.argv[1:])\n return args",
"def _init_parser(self):\n # outputParser = (pyparsing.Literal('>>') | (pyparsing.WordStart() + '>') | pyparsing.Regex('[^=]>'))('output')\n outputParser = (pyparsing.Literal(self.redirector * 2) |\n (pyparsing.WordStart() + self.redirector) |\n pyparsing.Regex('[^=]' + self.redirector))('output')\n\n terminatorParser = pyparsing.Or(\n [(hasattr(t, 'parseString') and t) or pyparsing.Literal(t) for t in self.terminators])('terminator')\n stringEnd = pyparsing.stringEnd ^ '\\nEOF'\n self.multilineCommand = pyparsing.Or(\n [pyparsing.Keyword(c, caseless=self.case_insensitive) for c in self.multilineCommands])('multilineCommand')\n oneLineCommand = (~self.multilineCommand + pyparsing.Word(self.legalChars))('command')\n pipe = pyparsing.Keyword('|', identChars='|')\n self.commentGrammars.ignore(pyparsing.quotedString).setParseAction(lambda x: '')\n doNotParse = self.commentGrammars | self.commentInProgress | pyparsing.quotedString\n afterElements = \\\n pyparsing.Optional(pipe + pyparsing.SkipTo(outputParser ^ stringEnd, ignore=doNotParse)('pipeTo')) + \\\n pyparsing.Optional(\n outputParser + pyparsing.SkipTo(stringEnd, ignore=doNotParse).setParseAction(lambda x: x[0].strip())(\n 'outputTo'))\n if self.case_insensitive:\n self.multilineCommand.setParseAction(lambda x: x[0].lower())\n oneLineCommand.setParseAction(lambda x: x[0].lower())\n if self.blankLinesAllowed:\n self.blankLineTerminationParser = pyparsing.NoMatch\n else:\n self.blankLineTerminator = (pyparsing.lineEnd + pyparsing.lineEnd)('terminator')\n self.blankLineTerminator.setResultsName('terminator')\n self.blankLineTerminationParser = ((self.multilineCommand ^ oneLineCommand) +\n pyparsing.SkipTo(self.blankLineTerminator, ignore=doNotParse).setParseAction(\n lambda x: x[0].strip())('args') + self.blankLineTerminator)('statement')\n self.multilineParser = (((self.multilineCommand ^ oneLineCommand) + pyparsing.SkipTo(terminatorParser,\n ignore=doNotParse).setParseAction(\n lambda x: x[0].strip())('args') + terminatorParser)('statement') +\n pyparsing.SkipTo(outputParser ^ pipe ^ stringEnd, ignore=doNotParse).setParseAction(\n lambda x: x[0].strip())('suffix') + afterElements)\n self.multilineParser.ignore(self.commentInProgress)\n self.singleLineParser = ((oneLineCommand + pyparsing.SkipTo(terminatorParser ^ stringEnd ^ pipe ^ outputParser,\n ignore=doNotParse).setParseAction(\n lambda x: x[0].strip())('args'))('statement') +\n pyparsing.Optional(terminatorParser) + afterElements)\n # self.multilineParser = self.multilineParser.setResultsName('multilineParser')\n # self.singleLineParser = self.singleLineParser.setResultsName('singleLineParser')\n self.blankLineTerminationParser = self.blankLineTerminationParser.setResultsName('statement')\n self.parser = self.prefixParser + (\n stringEnd |\n self.multilineParser |\n self.singleLineParser |\n self.blankLineTerminationParser |\n self.multilineCommand + pyparsing.SkipTo(stringEnd, ignore=doNotParse)\n )\n self.parser.ignore(self.commentGrammars)\n\n inputMark = pyparsing.Literal('<')\n inputMark.setParseAction(lambda x: '')\n fileName = pyparsing.Word(self.legalChars + '/\\\\')\n inputFrom = fileName('inputFrom')\n inputFrom.setParseAction(replace_with_file_contents)\n # a not-entirely-satisfactory way of distinguishing < as in \"import from\" from <\n # as in \"lesser than\"\n self.inputParser = inputMark + pyparsing.Optional(inputFrom) + pyparsing.Optional('>') + \\\n pyparsing.Optional(fileName) + (pyparsing.stringEnd | '|')\n self.inputParser.ignore(self.commentInProgress)",
"def create_parser():\n parser = argparse.ArgumentParser(\n description=\"First example\",\n epilog=\"Batch 2017\")\n\n # script\n parser.add_argument('--script',\n required=True,\n action='store',\n dest='script',\n help=\"A script to execute\")\n\n parser.add_argument('--dataset',\n required=True,\n action='store',\n dest='dataset',\n help=\"A dataset to use\")\n#\n# parser.add_argument('--features',\n# required=True,\n# action='store',\n# dest='features',\n# help=\"Number of features\")\n return parser",
"def __init__(self):\n\n self.parser = self.define_parser()\n self.pen = Pen()",
"def build_parser(self, parser: ArgumentParser) -> None:",
"def parser(cls, *args, **kwargs):\n\n parser = ArgumentParser(*args, **kwargs)\n parser.add_argument('-a', \"--address\",\n help=\"Force entry point address\", default=None)\n parser.add_argument('-b', \"--dumpblocs\", action=\"store_true\",\n help=\"Log disasm blocks\")\n parser.add_argument('-z', \"--singlestep\", action=\"store_true\",\n help=\"Log single step\")\n parser.add_argument('-d', \"--debugging\", action=\"store_true\",\n help=\"Debug shell\")\n parser.add_argument('-g', \"--gdbserver\", type=int,\n help=\"Listen on port @port\")\n parser.add_argument(\"-j\", \"--jitter\",\n help=\"Jitter engine. Possible values are: gcc (default), tcc, llvm, python\",\n default=\"gcc\")\n parser.add_argument(\n '-q', \"--quiet-function-calls\", action=\"store_true\",\n help=\"Don't log function calls\")\n parser.add_argument('-i', \"--dependencies\", action=\"store_true\",\n help=\"Load PE and its dependencies\")\n\n for base_cls in cls._classes_():\n base_cls.update_parser(parser)\n return parser"
] | [
"0.70855314",
"0.686953",
"0.63831055",
"0.6263777",
"0.61320263",
"0.6107546",
"0.60626626",
"0.6037953",
"0.59399813",
"0.5923526",
"0.5910576",
"0.5903135",
"0.5893762",
"0.58550376",
"0.57761353",
"0.5761796",
"0.5757238",
"0.5713794",
"0.5700876",
"0.56808746",
"0.56754124",
"0.5663162",
"0.5649211",
"0.56291926",
"0.56153965",
"0.56103075",
"0.56098986",
"0.55955297",
"0.5595365",
"0.55866075"
] | 0.7622803 | 0 |
The Modifier can evaluate expressions correctly. | def test_modifier(self):
modif = Modifier('1.2')
self.assertEqual(modif.evaluate({}), 1.2)
modif = Modifier('$1')
self.assertEqual(modif.evaluate({1:3.2}), 3.2)
modif = Modifier('1+1')
self.assertEqual(modif.evaluate({}), 2)
modif = Modifier('3-1.5')
self.assertEqual(modif.evaluate({}), 1.5)
modif = Modifier('2.2X3')
self.assertAlmostEqual(modif.evaluate({}), 6.6, 3)
modif = Modifier('4.4/2.2')
self.assertAlmostEqual(modif.evaluate({}), 2, 2)
modif = Modifier('1+4.4/2.2')
self.assertAlmostEqual(modif.evaluate({}), 3, 2)
modif = Modifier('$1+$2')
self.assertAlmostEqual(modif.evaluate({1:1, 2:2.2}), 3.2, 2)
modif = Modifier('$3=$1+$2')
values = {1:1, 2:2}
self.assertEqual(modif.evaluate(values), 3)
self.assertEqual(values, {1:1, 2:2, 3:3.0}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _should_eval(self):\n return False",
"def evaluate(compiled_expression):",
"def evaluate(self):\n pass",
"def evaluate(self):\n pass",
"def eval(self):\n pass",
"def eval(self):\n pass",
"def eval(self):\n pass",
"def evaluate(self) :\n pass",
"def test(self):\n self.eval()",
"def test_expression_sanitizer(self):\n\n self.assertFalse(_is_math_expr_safe('INSERT INTO students VALUES (?,?)'))\n self.assertFalse(_is_math_expr_safe('import math'))\n self.assertFalse(_is_math_expr_safe('complex'))\n self.assertFalse(_is_math_expr_safe('__import__(\"os\").system(\"clear\")'))\n self.assertFalse(_is_math_expr_safe('eval(\"()._\" + \"_class_\" + \"_._\" +'\n ' \"_bases_\" + \"_[0]\")'))\n self.assertFalse(_is_math_expr_safe('2***2'))\n self.assertFalse(_is_math_expr_safe('avdfd*3'))\n self.assertFalse(_is_math_expr_safe('Cos(1+2)'))\n self.assertFalse(_is_math_expr_safe('hello'))\n self.assertFalse(_is_math_expr_safe('hello_world'))\n self.assertFalse(_is_math_expr_safe('1_2'))\n self.assertFalse(_is_math_expr_safe('2+-2'))\n self.assertFalse(_is_math_expr_safe('print(1.0)'))\n self.assertFalse(_is_math_expr_safe('1.1.1.1'))\n self.assertFalse(_is_math_expr_safe('abc.1'))\n\n self.assertTrue(_is_math_expr_safe('1+1*2*3.2+8*cos(1)**2'))\n self.assertTrue(_is_math_expr_safe('pi*2'))\n self.assertTrue(_is_math_expr_safe('-P1*cos(P2)'))\n self.assertTrue(_is_math_expr_safe('-P1*P2*P3'))\n self.assertTrue(_is_math_expr_safe('-P1'))\n self.assertTrue(_is_math_expr_safe('-1.*P1'))\n self.assertTrue(_is_math_expr_safe('-1.*P1*P2'))\n self.assertTrue(_is_math_expr_safe('-(P1)'))",
"def eval(self) -> typing.Any:\n return self.expr()",
"def evaluate(self):\r\n raise Exception(\"Not implemented.\")",
"def evaluator_side_effect(_, __, math_string):\r\n if math_string != '4':\r\n raise err",
"def eval(self):\n raise NotImplemented()",
"def expression(self) -> Expression:\n ...",
"def evaluate(self):\n raise Exception(\"Not implemented.\")",
"def eval(self):\n raise NotImplementedError",
"def evaluateBoolean(compiled_expression):",
"def expression(self):\n assert not self._handle_used\n self._expression_used = True\n return self._expression",
"def evaluate(self):\n raise NotImplementedError()",
"def evaluateValue(compiled_expression):",
"def test_expr(self):\n self.common_test_expr(True)",
"def evaluator_side_effect(_, __, math_string):\r\n return mapping[math_string]",
"def evaluate(self, tree):\n\t\tpass",
"def evaluate_raw(self):\n raise NotImplementedError",
"def _evaluate(self, x):\n raise NotImplementedError()",
"def _evalAndDer(self, x):\n raise NotImplementedError()",
"def test_RestrictingNodeTransformer__visit_LtE__1():\n assert restricted_eval('1 <= 3') is True",
"def is_equation(self): \n return False",
"def test_RestrictingNodeTransformer__visit_GtE__1():\n assert restricted_eval('1 >= 3') is False"
] | [
"0.6572281",
"0.6568479",
"0.65574527",
"0.65574527",
"0.64303815",
"0.64303815",
"0.64303815",
"0.6407245",
"0.6345367",
"0.62577146",
"0.6183484",
"0.61799407",
"0.61565596",
"0.6149802",
"0.61396885",
"0.6117585",
"0.6077628",
"0.60701233",
"0.6039489",
"0.60292995",
"0.60206705",
"0.60191786",
"0.59915483",
"0.59470314",
"0.5879327",
"0.585465",
"0.5849977",
"0.584972",
"0.58419895",
"0.5833385"
] | 0.71497124 | 0 |
Trap coord preceding gerber format spec. | def test_coord_preceding_fs(self): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def trace(self, coord01: np.ndarray) -> np.ndarray:\n rect = self.clip_rect()\n return (rect.position + coord01 * rect.size).astype(np.int)",
"def frac11(self,lx,ly,lz):\n return str(self.coord[0]/lx*2)+'\\t'+str(self.coord[1]/ly*2)+'\\t'+str(self.coord[2]/lz*2)",
"def _format_point(self, point):\n return (point + self.draw_offset).intify()",
"def frac01(self,lx,ly,lz):\n return str(self.coord[0]/lx)+'\\t'+str(self.coord[1]/ly)+'\\t'+str(self.coord[2]/lz)",
"def xy(self):\n ...",
"def extra_coords(self) -> ExtraCoordsABC:",
"def convertdirection(self, frame):\n return _coordsys.coordsys_convertdirection(self, frame)",
"def setup(self):\r\n m = re.match(r'\\[([0-9]+),([0-9]+)]',\r\n self.value.strip().replace(' ', ''))\r\n if m:\r\n # Note: we subtract 15 to compensate for the size of the dot on the screen.\r\n # (is a 30x30 image--lms/static/green-pointer.png).\r\n (self.gx, self.gy) = [int(x) - 15 for x in m.groups()]\r\n else:\r\n (self.gx, self.gy) = (0, 0)",
"def coordinates(self):",
"def calcPos(self,newpol):\n\n\t\tdetlatoff=(self.offset9()-self.offset10())*cosd(newpol)+self.offset10()\n\t\tnewoffcry = (self.offset2()-self.offset3())*cosd(newpol)+self.offset3()\n\t\tnewdetoff = (self.offset4()-self.offset8())*cosd(newpol)+self.offset8() +self.offset5()\n\n\t\twl = BLi.getWavelength()\n\t\tself.thbragg = 180/pi*asin(wl/(2*self.dspace))\n\t\tnewthp=self.sign()*self.thbragg+newoffcry\n\t\tnewtthp=2*self.sign()*self.thbragg+newdetoff\n\t\tprint \"stokes=%1.2f thp=%1.2f tthp=%1.2f detlatoff=%1.2f\"%(newpol,newthp,newtthp,detlatoff)",
"def coords_format(format):\n if format == 'galactic':\n return 'galactic'\n elif format in ['fk5','icrs']:\n return 'celestial'",
"def parse_telemetry(self):\n telemetry = self.buffer\n self.buffer = \"\"\n if telemetry[0] == '<':\n if telemetry[len(telemetry) - 1] == '>':\n values = telemetry[1:len(telemetry)-1].split(',')\n coord = Coordinate(int(values[0]), int(values[1]))\n return coord\n return None",
"def set_coord(self, l, sign, b):\n if l is not None:\n self.l = float(l)\n if b is not None:\n self.b = float(b)\n if sign == '-':\n self.b *= -1.",
"def get_stig_xy(self):\n raise NotImplementedError",
"def _fixupPosition(self, position):\n if \"latitudeI\" in position:\n position[\"latitude\"] = position[\"latitudeI\"] * 1e-7\n if \"longitudeI\" in position:\n position[\"longitude\"] = position[\"longitudeI\"] * 1e-7",
"def deg_locations_writer(cell,width,(x,y)):\n if np.any(np.isnan(cell)):\n sys.stdout.write(' ' * width)\n else:\n sys.stdout.write(\n \"{0:^{width}s}\".format(\n \"{0:.2f},{1:.2f}\".format(cell[0],cell[1]),\n width=width\n )\n )",
"def __coding_coordinate(self):\n region1 = self.long_side_len\n region2 = self.short_side_len\n length = len(self.seq)\n if self.direction == '+':\n a_s = 0\n a_e = region2\n b_s = self.length - region1\n b_e = self.length - 1\n elif self.direction == '-':\n a_s = 0\n a_e = region1\n b_s = self.length - region2\n b_e = self.length - 1\n return (a_s, a_e, b_s, b_e)",
"def setGxLocation(self):\n if self.xyz is None:\n gxobs = self.obs.get(None, {}).get(\"GX\")\n if gxobs is not None:\n gxyz = np.array((0.0, 0.0, 0.0))\n for gx in gxobs:\n gxyz += gx.obsvalue.value\n self.xyz = gxyz / len(gxobs)\n self.locator.write(\n \"Fixing station {0} using GNSS coordinate observations\\n\".format(\n self.code\n )\n )",
"def update_coords(self, l, b):\n self.l = l\n self.b = b\n self.ra, self.dec = astLib.astCoords.convertCoords(\n \"GALACTIC\", \"J2000\", self.l, self.b, epoch=2000.)",
"def hex_from_oddr(newsystem, coord):\n x = coord.x + (coord.y + (coord.y&1)) // 2\n z = -coord.y\n y = -x-z\n newcoord = newsystem.coord(x=x, y=y, z=z)\n #logger.debug(\"OldCoord:%s NewCoord:%s\", coord, newcoord)\n return newcoord",
"def _cte_postformat(self):\n# if type(self.idxs) == list:\n# self.idxs = np.array(self.idxs)\n if self.sp_relative_pos is not None:\n if type(self.sp_relative_pos) == list:\n self.sp_relative_pos = np.array(self.sp_relative_pos)",
"def set_robot_pos(self):\n\t\tx,y,z = self.geo2desiredENU(self.curr_lat, self.curr_lon, self.gpsAlt)\n\t\tself.robot_msg.point.x = x\n\t\tself.robot_msg.point.y = y\n\t\tself.robot_msg.point.z = z",
"def pfs_format_coord(x, y, detectorMap=detectorMap):\n if detectorMap is None:\n return \"\"\n else:\n fid = detectorMap.findFiberId(geom.PointD(x, y))\n fidStr = f\"{fid:3}\"\n if pfsConfig:\n try:\n mtpInfo = fiberIds.fiberIdToMTP([fid], pfsConfig)[0]\n fidStr += f\" {', '.join([str(i) for i, l in zip(mtpInfo, mtpDetails) if l])}\"\n except RuntimeError:\n pass # fiber isn't in pfsConfig\n\n return f\"FiberId {fidStr} {detectorMap.findWavelength(fid, y):8.3f}nm\"",
"def aligne_grille(self,x,y,t):\r\n [xmin,ymin,xmax,ymax] = self.can.coords(self.hitbox[t])\r\n tx,ty=xmax-xmin,ymax-ymin\r\n a,b=23,23\r\n if tx==92 or ty==92 or tx==184 or ty==184:\r\n if tx==92 or tx==184:a,b=0,23\r\n if ty==92 or ty==184:a,b=23,0\r\n if 142<y<602 and 66<x<528:\r\n x=(x-66)//46*46+66+a\r\n y=(y-142)//46*46+142+b\r\n return x,y",
"def return_zeropoint():\n return 22.5",
"def _coord(self, x, y):\n gridEdge = 7 # originally 5\n y = gridEdge - y\n cx = 100 * (x - 1) + 50\n cy = 100 * (y - 1) + 50\n r = 20\n return (cx - r, cy - r, cx + r, cy + r)",
"def coord(self, coord):\n\n self._coord = coord",
"def configure_location(self):\n # Set floor correctly\n self.floor.set(\"pos\", array_to_string(self.bottom_pos))",
"def __getxy(x1, y1, x2, y2):\n\t\treturn x1*27+y1*9+x2*3+y2",
"def GetLoCorner(self):\n ..."
] | [
"0.52641153",
"0.5259147",
"0.5174216",
"0.5166887",
"0.5154606",
"0.514098",
"0.51310825",
"0.51202244",
"0.51102793",
"0.5103446",
"0.50844723",
"0.5079553",
"0.507823",
"0.50599504",
"0.5053271",
"0.5025921",
"0.5018617",
"0.4984303",
"0.49542806",
"0.4949716",
"0.49197924",
"0.49083477",
"0.49048135",
"0.4904781",
"0.48888385",
"0.48834026",
"0.48808634",
"0.48483774",
"0.48334146",
"0.4830994"
] | 0.61945975 | 0 |
Unsubscribe events for a callback. | def unsubscribe(callback):
if callback in _subscribers:
del _subscribers[callback] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unsubscribe(self, callback: Callable) -> None:\n self.callbacks.discard(callback)",
"def unsubscribe(self, event_type: typing.Type[typing.Any], callback: CallbackT[typing.Any]) -> None:",
"def unsubscribe_callback(self, callback, sensor):\n if sensor in self._callbacks:\n self._callbacks[sensor].remove(callback)",
"def unregister(self, callback):\n\t\tcallbacks = []\n\t\tfor i in range(0, len(self.callbacks)):\n\t\t\tif self.callbacks[i][0] != callback:\n\t\t\t\tcallbacks.append(self.callbacks[i])\n\t\t\t\t\n\t\tself.callbacks = callbacks\n\t\tself.events[str(callback)] = []",
"def unsubscribe(self, event_handler):\n pass # pragma: no cover",
"def unsubscribe(self, event, callback, args = None):\n if {\"event\": event, \"callback\": callback, \"args\": args, }\\\n in self.events:\n self.events.remove({\"event\": event, \"callback\": callback,\\\n \"args\": args, })\n\n return True",
"def unsubscribe(receiver):",
"def unsubscribe(receiver):",
"def unsubscribe(receiver):",
"def unsubscribe(receiver):",
"def unsubscribe(receiver):",
"def unsubscribe(self, name, callback_function):\n # Remove the callback from _callbacks.\n if self._callbacks.has_key(name):\n if callback_function in self._callbacks[name]:\n self._callbacks[name].remove(callback_function)\n if len(self._callbacks[name]) == 0:\n self._callbacks.pop(name)\n else:\n raise PresenceException('This function is not registered to receive callbacks.')\n else:\n raise PresenceException('Unknown service name. No callback handler exists.')",
"def unsubscribeFromEvent(eventName,subscriber):",
"def unregister(self, event, callback):\n if self._events_tree[event].isListed(callback):\n self._events_tree[event].remove(callback)\n self._events_cbs[event][0] = self._events_tree.getCallbacksSequence()\n else:\n self._events_cbs[event][1].remove(callback)",
"def remove_callback(event_name, callback):\n _callbacks.get(event_name, set()).discard(callback)",
"def unsubscribe(self, meta_type, callback):\n try:\n self.subscribers.get(meta_type, []).remove(callback)\n except ValueError:\n pass\n try:\n self.nackables.get(meta_type, []).remove(callback)\n except ValueError:\n pass",
"def unsubscribe_all_known(self):\n for key, value in self.__callbacks.items():\n self.__logger.debug(f'unsubscribe from event {key}')\n succ = self.__twitch.delete_eventsub_subscription(key)\n if not succ:\n self.__logger.warning(f'failed to unsubscribe from event {key}')\n self.__callbacks.clear()",
"def unregister(self, event_name, callback_func=None, identifier=None):\n if callback_func:\n self.events[event_name].remove(callback_func)\n else:\n del self.events[event_name]",
"def unsubscribe(self, event_type, func):\n if func in self.event_subscribers[event_type]:\n kwargs = {event_type: func}\n self.unbind(**kwargs)\n self.event_subscribers[event_type].remove(func)",
"def unsubscribe(observer):",
"def unsubscribe(observer):",
"def unregister_handler(self, event_type, callback):\n\n if event_type not in self._event_handlers:\n return\n if callback not in self._event_handlers[event_type]:\n return\n\n self._event_handlers[event_type].remove(callback)\n\n if not self._event_handlers[event_type]:\n del self._event_handlers[event_type]",
"def subscribe_off(self, callback: callable):\n topic = f\"{self._subscriber_topic}_off\"\n subscribe_topic(callback, topic)",
"def deregister(self):\n self.callback = None",
"def remove_callback(self, callback):\n if callback in self._async_callbacks:\n self._async_callbacks.remove(callback)",
"def unsubscribe(self):\r\n self._unregister()",
"def unsubscribe(self):\n pass # pragma: no cover",
"def unregister_signals(self):\n for _, callback in self.signal_callbacks:\n Signal.unsubscribe(self, callback)",
"def unregister(self, signal_id, callback):\n # if the callback was registered the remove it \n if signal_id in self.registered_callbacks:\n self.registered_callbacks[signal_id].remove(callback)",
"def subscribe_off(self, callback: callable):\n subscribe_topic(callback, self._off_subscriber_topic)"
] | [
"0.84749424",
"0.8190403",
"0.79367375",
"0.7856983",
"0.7775709",
"0.7710769",
"0.7495644",
"0.7495644",
"0.7495644",
"0.7495644",
"0.7495644",
"0.74490726",
"0.74136734",
"0.73761433",
"0.72853684",
"0.72737384",
"0.72261065",
"0.72020453",
"0.7193984",
"0.71859956",
"0.71859956",
"0.7172428",
"0.7135467",
"0.71341306",
"0.7131223",
"0.70963967",
"0.70826983",
"0.70612276",
"0.7038875",
"0.70177114"
] | 0.85581386 | 0 |
Calculate mean of role/token embeddings for a node. | def _mean_vec(self, node) -> Tuple[np.array, int]:
tokens = [t for t in chain(node.token, ("RoleId_%d" % role for role in node.roles))
if t in self.emb]
if not tokens:
return None, 0
return np.mean([self.emb[t] for t in tokens], axis=0), len(tokens) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_mean(self):\n # load_in_all_parameters(self.save_directory, self.auto_encoder)\n for i, data_row in enumerate(self.X_train_naive):\n input_nn = data_row\n if torch.cuda.is_available():\n input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)).cuda())\n else:\n input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)))\n\n prediction_embedding = self.model.forward_only_encoding(input_nn)\n print(prediction_embedding)\n if i == 0:\n self.embedding_np = prediction_embedding.data.clone().cpu().numpy()[0]\n else:\n self.embedding_np = np.vstack((self.embedding_np, prediction_embedding.data.clone().cpu().numpy()[0]))\n self.mean_embedding = np.average(self.embedding_np, axis=0)\n print('mean embedding is ', self.mean_embedding)",
"def get_mean_emb(self, text):\n return np.mean([self.emb.get(w.lower(), self.emb.get(\"_UNK\")) for w in text.split()], axis=0)",
"def embed_token(self, token):\r\n embs, words = [], token.split()\r\n for word in words:\r\n emb_list=[]\r\n for element in word.split('_'):\r\n # If we have a trailing _ we don't want to embed an empty string\r\n if element:\r\n emb,_ = self(element, mean_sequence=True)\r\n emb_list.append(emb)\r\n embs.append(torch.mean(torch.stack(emb_list), dim=0))\r\n\r\n return torch.mean(torch.stack(embs), dim=0)",
"def convert_mean(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mx_axis = attrs.get(\"axis\", None)\n axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None\n\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n\n if axes is not None:\n node = onnx.helper.make_node(\n 'ReduceMean',\n inputs=input_nodes,\n outputs=[name],\n axes=axes,\n keepdims=keepdims,\n name=name\n )\n\n return [node]\n else:\n node = onnx.helper.make_node(\n 'ReduceMean',\n inputs=input_nodes,\n outputs=[name],\n keepdims=keepdims,\n name=name\n )\n\n return [node]",
"def mean(self):\n return np.average(self.particles, weights=self.weights, axis=0)",
"def _get_mean_embedding(self, words):\n\n # ensure the size still matches if it's loaded from pretrained word vectors\n size = self.size\n if self.w2v is not None:\n size = next(iter(self.w2v_.values())).size\n\n zero = np.zeros(size)\n if self.tfidf:\n embedding = np.mean([self.w2v_[w] * self.w2idf_[w]\n if w in self.w2v_ else zero for w in words], axis = 0)\n else:\n embedding = np.mean([self.w2v_.get(w, zero) for w in words], axis = 0)\n\n return embedding",
"def get_review_embedding(review):\n review_sentences = nltk_tokenize.sent_tokenize(review)\n sentence_embeddings = list(map(get_sentence_embedding, review_sentences))\n if len(sentence_embeddings) == 0:\n print(\"Sentence_embeddings are empty!\")\n print(review)\n return torch.zeros(1,128)\n if review_embedding_type == \"avg\":\n # avg over all pairs [pairs, 1, 128] => [1, 128]\n mean = torch.mean(torch.stack(sentence_embeddings), axis=0)\n return mean",
"def generate_avg_vector(self, data):\r\n doc=nlp(data)\r\n data_vector = [token.vector for token in doc]\r\n mean_vector = np.mean(data_vector, axis=0)\r\n return mean_vector",
"def mean(tensor, axis=None):\n raise NotImplementedError",
"def _get_u_mean(self, nodelist: List[Tuple[int, int]]) -> Optional[float]:\n meanlist = [self.u_matrix[u_node] for u_node in nodelist]\n u_mean = None\n if self.u_mean_mode_ == \"mean\":\n u_mean = np.mean(meanlist)\n elif self.u_mean_mode_ == \"median\":\n u_mean = np.median(meanlist)\n elif self.u_mean_mode_ == \"min\":\n u_mean = np.min(meanlist)\n elif self.u_mean_mode_ == \"max\":\n u_mean = np.max(meanlist)\n return u_mean",
"def ensemble_mean(self):\n return self.mean(dim='mem')",
"def mean(self):\n return self.sum / self.sum_weights",
"def global_mean(self):\n return self.interaction_data.label.mean()",
"def get_global_mean(self, ratings):\n total_ratings = []\n for user, movie, rating in ratings:\n total_ratings.append(rating)\n return sum(total_ratings) / len(total_ratings)",
"def mean(self):\n return self._lift(\"mean\")",
"def word_average(self, sent):\n mean = []\n for word in sent:\n if word in self.word_model.wv.vocab:\n mean.append(self.word_model.wv.get_vector(word))\n\n if not mean: # empty words\n # If a text is empty, return a vector of zeros.\n logging.warning(\n \"cannot compute average owing to no vector for {}\".format(sent))\n return np.zeros(self.vector_size)\n else:\n mean = np.array(mean).mean(axis=0)\n return mean",
"def mean(self):\n return self._summarize(lambda c: c.mean)",
"def word_average(self, sent):\n\n mean = []\n for word in sent:\n if word in self.word_model.wv.vocab:\n mean.append(self.word_model.wv.get_vector(word) *\n self.word_idf_weight[word]) # idf weighted\n\n if not mean: # empty words\n # If a text is empty, return a vector of zeros.\n logging.warning(\n \"cannot compute average owing to no vector for {}\".format(sent))\n return np.zeros(self.vector_size)\n else:\n mean = np.array(mean).mean(axis=0)\n return mean",
"def matrix_mean(matrix):\n return sum(map(mean,matrix))",
"def reduce_mean(tensor):\n if not (dist.is_available() and dist.is_initialized()):\n return tensor\n tensor = tensor.clone()\n dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)\n return tensor",
"def reduce_mean(tensor):\n if not (dist.is_available() and dist.is_initialized()):\n return tensor\n tensor = tensor.clone()\n dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)\n return tensor",
"def _get_mean(self):\n return [layer._get_mean() for layer in self.layers]",
"def mean(self):\n return self.mu",
"def calculate_mean(self) -> float:\n\n if self.data:\n return np.mean(self.data)\n else:\n return self.mu",
"def parade_mean(p):\n return [noneneg(mj.get('mean')) for mj in p['moments']]",
"def mean_flat(tensor):\n return tensor.mean(dim=list(range(1, len(tensor.shape))))",
"def average_impurity(self):\n children = tf.squeeze(tf.slice(self.variables.tree, [0, 0], [-1, 1]),\n squeeze_dims=[1])\n is_leaf = tf.equal(LEAF_NODE, children)\n leaves = tf.to_int32(tf.squeeze(tf.where(is_leaf), squeeze_dims=[1]))\n counts = tf.gather(self.variables.node_sums, leaves)\n impurity = self._weighted_gini(counts)\n return tf.reduce_sum(impurity) / tf.reduce_sum(counts + 1.0)",
"def avg_e_score(self, entity):\n return float(entity['es']) / float(entity['count'])",
"def get_means(self):\n if self.metadata is None:\n self.get_metadata()\n\n # we want only the numerical features\n df = self.metadata.select_dtypes(include=['int64', 'float64'])\n return df.mean()",
"def mean(self):\n\n return self._reduce_for_stat_function(F.mean, only_numeric=True)"
] | [
"0.6682953",
"0.6198933",
"0.6104026",
"0.61020845",
"0.606138",
"0.59226686",
"0.59124076",
"0.5786445",
"0.5759134",
"0.57552254",
"0.57463694",
"0.5732782",
"0.572047",
"0.57162076",
"0.5711725",
"0.5707686",
"0.5694124",
"0.56877744",
"0.5685827",
"0.567916",
"0.567916",
"0.5677996",
"0.5658731",
"0.56570256",
"0.5644531",
"0.5627499",
"0.5621408",
"0.5618254",
"0.5615184",
"0.56138414"
] | 0.7650001 | 0 |
Convert UAST into feature and label arrays. Had to be defined outside of RolesMLP so that we don't supply `self` twice. | def _process_uast(self, filename: str) -> Tuple[np.array, np.array]:
X, y = [], []
uast_model = UASTModel().load(filename)
for uast in uast_model.uasts:
child_vecs, parent_vecs = self._mean_vecs(uast)
for node, node_idx in node_iterator(uast):
child_vec = child_vecs[node_idx]
parent_vec = parent_vecs[node_idx]
if child_vec is not None and parent_vec is not None:
labels = np.zeros(len(self.roles), dtype=np.int8)
labels[[self.roles["RoleId_%d" % role] for role in node.roles]] = 1
X.append(np.concatenate((child_vec, parent_vec)))
y.append(labels)
return np.array(X), np.array(y) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _convert_to_features(self, img: np.ndarray) -> np.ndarray:",
"def convert_examples_to_features(tokens_set, labels_set, max_seq_length, tokenizer):\r\n\r\n #label_map = {label: i for i, label in enumerate(label_list, 1)}\r\n\r\n input_ids, input_masks, segment_ids, labels = [], [], [], []\r\n for index in tqdm_notebook(range(len(tokens_set)),desc=\"Converting examples to features\"):\r\n textlist = tokens_set[index] #example.text_a.split(' ')\r\n labellist = labels_set[index]\r\n input_id, input_mask, segment_id,label = convert_single_example(\r\n textlist, labellist,max_seq_length,tokenizer\r\n )\r\n input_ids.append(input_id)\r\n input_masks.append(input_mask)\r\n segment_ids.append(segment_id)\r\n labels.append(label)\r\n return (\r\n np.array(input_ids),\r\n np.array(input_masks),\r\n np.array(segment_ids),\r\n np.array(labels)\r\n )",
"def build_label_transform():\n\n return NALabelEncoder()",
"def dataConvertToNumpy( self ):\n self.featureNumpy = np.asarray( self.feature )\n self.ClassNumpy = np.asarray( self.Class )",
"def array2(self):\n print \"array2\"\n msgbox(whoami())\n #research\n inputLabelID = self.__needleLabelSelector.currentNode().GetID()\n labelnode=slicer.mrmlScene.GetNodeByID(inputLabelID)\n i = labelnode.GetImageData()\n shape = list(i.GetDimensions())\n shape.reverse()\n a = vtk.util.numpy_support.vtk_to_numpy(i.GetPointData().GetScalars()).reshape(shape)\n labels=[]\n val=[[0,0,0] for i in range(a.max()+1)]\n for i in xrange(2,a.max()+1):\n w =numpy.transpose(numpy.where(a==i))\n # labels.append(w.mean(axis=0))\n val[i]=[0,0,0]\n val[i][0]=w[int(round(w.shape[0]/2))][2]\n val[i][1]=w[int(round(w.shape[0]/2))][1]\n val[i][2]=w[int(round(w.shape[0]/2))][0]\n if val[i] not in self.previousValues:\n labels.append(val[i])\n self.previousValues.append(val[i])\n return labels",
"def _transformEx(self,example):\n x = []\n x.extend(example) \n x.append(1)\n return x,example.label",
"def load_as_one_hot(self):\n\n labels = [] \n examples = [] \n\n # document number -> label mapping\n doc2label = n2b2.map_patients_to_labels(\n self.xml_dir,\n self.category)\n\n # load examples and labels\n for f in os.listdir(self.cui_dir):\n doc_id = f.split('.')[0]\n file_path = os.path.join(self.cui_dir, f)\n file_feat_list = read_cuis(file_path)\n examples.append(' '.join(file_feat_list))\n \n string_label = doc2label[doc_id]\n int_label = LABEL2INT[string_label]\n labels.append(int_label)\n\n examples = self.token2int.texts_to_matrix(examples, mode='binary')\n\n return examples, labels",
"def _handle_feature(fea):\n if len(fea.shape) == 1:\n fea = np.array([fea]).T\n\n return fea",
"def type_uc(x):\r\n return Feature(metamer(x)[0], 'observation')",
"def convert_example(example,\n tokenizer,\n label_list,\n max_seq_length=512,\n is_test=False):\n if not is_test:\n # `label_list == None` is for regression task\n label_dtype = \"int64\" if label_list else \"float32\"\n # Get the label\n label = example['labels']\n label = np.array([label], dtype=label_dtype)\n # Convert raw text to feature\n if (int(is_test) + len(example)) == 2:\n example = tokenizer(example['sentence'], max_seq_len=max_seq_length)\n else:\n example = tokenizer(\n example['sentence1'],\n text_pair=example['sentence2'],\n max_seq_len=max_seq_length)\n\n if not is_test:\n return example['input_ids'], example['token_type_ids'], label\n else:\n return example['input_ids'], example['token_type_ids']",
"def feature_label(features):\n f=[]\n l=[]\n for item in features:\n f.append(item[0])\n l.append(item[1])\n return f,l",
"def output_rule_feature_arrays(self, prediction, label):\n # Subclasses must implement\n raise NotImplementedError()",
"def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n label_map = {} # label\n for (i, label) in enumerate(label_list): # ['0', '1']\n label_map[label] = i\n\n features = [] # feature\n for (ex_index, example) in enumerate(examples):\n text_a_id = int(example.text_a_id)\n text_b_id = int(example.text_b_id)\n\n text_a_fields = example.text_a.split(\" _eop_ \")\n \n tokens_a = []\n text_a_subtype = []\n for text_a_field_idx, text_a_field in enumerate(text_a_fields):\n text_a_field_token = tokenizer.tokenize(text_a_field)\n tokens_a.extend(text_a_field_token)\n text_a_subtype.extend([text_a_field_idx]*len(text_a_field_token))\n assert len(tokens_a) == len(text_a_subtype)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b) # text_b tokenize\n\n if tokens_b: # if has b\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) # truncate\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because # (?)\n # the entire model is fine-tuned.\n tokens = []\n segment_ids = []\n subtype_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n subtype_ids.append(0)\n for token_idx, token in enumerate(tokens_a):\n tokens.append(token)\n segment_ids.append(0)\n subtype_ids.append(text_a_subtype[token_idx])\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n subtype_ids.append(1)\n\n if tokens_b:\n for token_idx, token in enumerate(tokens_b):\n tokens.append(token)\n segment_ids.append(1)\n subtype_ids.append(2)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n subtype_ids.append(2)\n\n input_sents = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_sents) # mask\n\n # Zero-pad up to the sequence length.\n while len(input_sents) < max_seq_length:\n input_sents.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n subtype_ids.append(0)\n\n assert len(input_sents) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n assert len(subtype_ids) == max_seq_length\n\n label_id = label_map[example.label]\n\n if ex_index%2000 == 0:\n print('convert_{}_examples_to_features'.format(ex_index))\n\n features.append(\n InputFeatures( # object\n text_a_id=text_a_id,\n text_b_id=text_b_id,\n input_sents=input_sents,\n input_mask=input_mask,\n segment_ids=segment_ids,\n subtype_ids=subtype_ids,\n label_id=label_id))\n\n return features",
"def createFeatureArray(self, lyrFeats): \n featIdlist = []\n fullFeatureList= []\n #add features to the attribute list\n for feat in lyrFeats:\n if feat == NULL:\n feat = None\n featIdlist.append(feat.id())\n featAttributes = feat.attributes()\n fullFeatureList.extend(featAttributes)\n \n #get size of attribute table\n rows = len(featIdlist)\n cols = len(featAttributes)\n \n #create an array af attributes and return it\n featArray = np.array([fullFeatureList])\n featArray2 = np.reshape(featArray, (rows, cols))\n return featArray2",
"def array2(self):\r\n profbox(whoami())\r\n # research\r\n inputLabelID = self.__needleLabelSelector.currentNode().GetID()\r\n labelnode = slicer.mrmlScene.GetNodeByID(inputLabelID)\r\n i = labelnode.GetImageData()\r\n shape = list(i.GetDimensions())\r\n shape.reverse()\r\n a = vtk.util.numpy_support.vtk_to_numpy(i.GetPointData().GetScalars()).reshape(shape)\r\n labels = []\r\n val = [[0, 0, 0] for i in range(a.max() + 1)]\r\n for i in xrange(2, a.max() + 1):\r\n w = numpy.transpose(numpy.where(a == i))\r\n # labels.append(w.mean(axis=0))\r\n val[i] = [0, 0, 0]\r\n val[i][0] = w[int(round(w.shape[0] / 2))][2]\r\n val[i][1] = w[int(round(w.shape[0] / 2))][1]\r\n val[i][2] = w[int(round(w.shape[0] / 2))][0]\r\n if val[i] not in self.previousValues:\r\n labels.append(val[i])\r\n self.previousValues.append(val[i])\r\n return labels",
"def convert_example(example, tokenizer):\n\n feature = tokenizer(\n text=example['question'],\n text_pair=example['answer'],\n max_seq_len=args.max_seq_length)\n feature['labels'] = example['labels']\n feature['id'] = example['id']\n\n return feature",
"def labeledTensors(self):\n return self.__normalizeData__(self.__tensors__)",
"def __convert_labeled_featuresets(self, labeled_featuresets, output):\n\n\t\tif isinstance(output, str):\n\t\t\toutput = open(output,'w')\n\t\telif not isinstance(output, file):\n\t\t\traise TypeError('output is a str or a file.')\n\n\t\tfor featureset, label in labeled_featuresets:\n\t\t\tfeat, label = self.__text_converter.toSVM(\" \".join(featureset), label)\n\t\t\tfeat = ''.join(' {0}:{1}'.format(f,feat[f]) for f in sorted(feat))\n\t\t\tif label == None:\n\t\t\t\tlabel = -1\n\t\t\toutput.write(str(label) + ' ' + feat + '\\n')\n\t\toutput.close()",
"def mapper(line): \n feats = line.strip().split(\",\") \n # labels must be at the beginning for LRSGD\n label = feats[len(feats) - 1] \n feats = feats[: len(feats) - 1]\n feats.insert(0,label)\n features = [ float(feature) for feature in feats ] # need floats\n return np.array(features)",
"def features_to_array(features_table, scaler):\n\n # Check arguments\n X = features_to_unscaled_matrix(features_table)\n return scaler.transform(X)",
"def load_features_labels(self):\n MFCCs = torch.from_numpy(np.load(self.feature_file))\n labels = torch.from_numpy(np.load(self.label_file))\n 'Loading from files finished!'\n return MFCCs.view(-1,1,128,128), labels.long()",
"def convert_features(self,b_x):\n # reshape\n b,_=b_x.shape\n default_xy=self.default_xy[None].expand(b,-1,-1,-1,-1).type_as(b_x)\n # here shape to be [b,side,side,(num*(coords+rescore)+classes)]\n b_x=b_x.view(b,self.side,self.side,(self.num*(self.coords+self.rescore)+self.classes))\n \n \n\n # 2. some transformations on net output\n b_out_locoff_conf=b_x[...,:self.num*(self.coords+self.rescore)]\n \n # [b,side,side,num,(coords+rescore)]\n b_out_locoff_conf=b_out_locoff_conf.view(b,self.side,self.side,self.num,self.coords+self.rescore) \n \n b_out_locoff=b_out_locoff_conf[...,:self.coords] # [b,side,side,num,coords]\n b_out_conf=b_out_locoff_conf[...,self.coords] # [b,side,side,num]\n\n b_out_cls=b_x[...,self.num*(self.coords+self.rescore):] # [b,side,side,classes]\n \n # 3. from output to predict box...\n # out_loc,out_conf,out_cls\n # I think should add sigmoid on location.\n b_out_loc=b_out_locoff.sigmoid() # [b,side,side,num,4]\n b_out_conf=b_out_conf.sigmoid() # [b,side,side,num]\n\n if self.softmax:\n b_out_cls=b_out_cls.softmax(dim=3) # [b,side,side,20]\n else:\n b_out_cls=b_out_cls.sigmoid() # [b,side,side,20]\n \n b_pred_loc=b_out_loc.clone() # [b,side,side,num,4]\n # NOTE: add default_xy, from the offset to the relative coords...\n b_pred_loc[...,:2]=b_pred_loc[...,:2].clone()+default_xy\n\n # normalize the x,y in pred_loc to [0,1]\n b_pred_loc[...,:2]=b_pred_loc[...,:2].clone()/self.side\n\n # NOTE: no normalization, since offset laies on [0,1]\n # normalize to [0.,1.]\n # default_xy[...,0]/=1.*self.side\n # default_xy[...,1]/=1.*self.side \n\n return b_out_loc,b_out_conf,b_out_cls,b_pred_loc,default_xy",
"def convert_data(self,i,j,words,tags,di,b_table):\n testing_features = TestingFeatures(self.training_features)\n self._add_features_to(testing_features,i,j,words,tags,di,b_table)\n return testing_features.to_vector()",
"def extractFeatures(self, datum):\n abstract",
"def alspostprocess(data, prediction, features, user_features, movie_features, n_features=10):\r\n \r\n\r\n data['ALS'] = prediction[data.loc[:, 'userID']-1, data.loc[:, 'movieID']-1]\r\n features.append('ALS')\r\n \r\n total_features = len(movie_features)\r\n if n_features>total_features:\r\n n_features = total_features\r\n \r\n for i in range(n_features):\r\n data[\"UserFeature{}\".format(i)] = user_features[data.loc[:, 'userID']-1, i]\r\n features.append(\"UserFeature{}\".format(i))\r\n data[\"MovieFeature{}\".format(i)] = movie_features[i, data.loc[:, 'movieID']-1]\r\n features.append(\"MovieFeature{}\".format(i))\r\n return data, features",
"def convert_examples_to_features(self):\n features = []\n max_label_len = 0\n # find ou the max label length\n labels_list = []\n for ex_index, example in enumerate(self.examples):\n processor = example.processor\n label_ids = self.tokenizer.text_to_ids(processor.label2string(example.label)) + [self.tokenizer.eos_id]\n max_label_len = max(len(label_ids), max_label_len)\n labels_list.append(label_ids)\n if self.max_seq_length_decoder is None:\n self.max_seq_length_decoder = max_label_len\n else:\n self.max_seq_length_decoder = max(\n self.max_seq_length_decoder, max_label_len\n ) # take the max of the two to be conservative\n for ex_index, example in enumerate(self.examples):\n taskname = example.taskname\n taskname_ids = self.tokenizer.text_to_ids(taskname)\n processor = example.processor\n if ex_index % 10000 == 0:\n logging.info(f\"Writing example {ex_index} of {len(self.examples)}\")\n label_ids = labels_list[ex_index]\n enc_query = processor.get_ptune_query(\n example.content,\n self.pseudo_token_id,\n self.max_seq_length - self.max_seq_length_decoder + 1,\n self.templates,\n self.tokenizer,\n )\n input_ids = enc_query + label_ids[:-1]\n labels = [SMALL_NUM for i in range(len(enc_query) - 1)] + label_ids\n features.append([input_ids, labels, enc_query, taskname_ids])\n return features",
"def lblencoder(self):\n for i in self.data.columns:\n if self.data[i].dtype=='object':\n lbl = preprocessing.LabelEncoder()\n lbl.fit(list(self.data[i].values))\n self.data[i] = lbl.transform(list(self.data[i].values))\n \n self.X = self.data.drop(self.target, axis =1)\n self.y = self.data[self.target]",
"def compute_feature(cls, HL : Headline) -> np.ndarray:\n TokenizerContainer.init()\n tokens, input_ids = cls.get_ids(f'[CLS] {HL.GetSentWithoutEdit()} [SEP]')\n segments = cls.get_segments(tokens)\n masks = cls.get_masks(tokens)\n input_ids.extend(segments)\n input_ids.extend(masks)\n return np.array(input_ids)",
"def transform(self, X):\n featurizers = [self.featurizer1, self.featurizer2, self.featurizer3, self.featurizer4, self.featurizer5,\n self.featurizer6, self.featurizer7, self.featurizer8, self.featurizer9, self.featurizer10]\n fvs = []\n for datum in X:\n [fv] = [f(datum) for f in featurizers if f is not None]\n fvs.append(fv)\n return np.array(fvs).astype(float)",
"def transform_sequences(self,tokens_labels):\n X_train = []\n y_train = []\n for seq in tokens_labels:\n features_seq = []\n labels_seq = []\n for i in range(0, len(seq)):\n features_seq.append(self.word2features(seq, i))\n labels_seq.append(self.word2labels(seq[i]))\n X_train.append(features_seq)\n y_train.append(labels_seq)\n return X_train,y_train"
] | [
"0.5990265",
"0.568541",
"0.55585814",
"0.5532058",
"0.54721767",
"0.54471904",
"0.54443514",
"0.5410015",
"0.5367583",
"0.5366209",
"0.53495264",
"0.5344927",
"0.5325683",
"0.53237617",
"0.53112566",
"0.5272182",
"0.52613175",
"0.5252806",
"0.52363384",
"0.5218285",
"0.5210587",
"0.52097076",
"0.5207369",
"0.5174944",
"0.5158936",
"0.5147939",
"0.5143509",
"0.51341075",
"0.51157564",
"0.5113944"
] | 0.6829204 | 0 |
on_load is called when an object is instantiated from the database | def on_load(self):
self.__init__() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def on_load(self):\n pass",
"def on_load(self):\n pass",
"def postLoad(self):\n pass",
"def on_load(self):",
"def __init_on_load__(self):",
"def _post_load(self):\n pass",
"def on_loaded(self, func):\n self._on_loaded_funcs.append(func)",
"def onInit(self):\n pass",
"def afterLoadSceneObject(self):\n\t\tpass",
"def post_init(self):\n\t\tpass",
"def afterInit(self):",
"def on_initialize(self) -> None:\n pass",
"def on_load(self, bot):\n self.bot = bot\n self.connection = bot.get_connection()\n self.plugin_manager = bot.get_plugin_manager()\n self.config = bot.get_config_manager()\n self.data_manager = bot.get_data_manager()",
"def __init__(self):\n self.load()",
"def initialized(self, identifier):",
"def init(self):\n # IMPORTANT: create a new gob database model entry for this object\n self.gobify()",
"def on_init(self):\n self.write_log(\"策略初始化\")\n self.load_bar(1) # 具体加载多少天的数据, 1表示1天的数据,如果是2表示过去2天的数据",
"def load(self):\n self._really_load()",
"def _post_init(self):\n pass",
"def before_dataobj_create(self, dataobj):",
"def on_init(self):\n self.write_log(\"策略初始化\")\n\n self.load_bar(10)",
"def on_init(self):\n self.write_log(\"策略初始化\")\n self.exchange_load_bar(self.exchange)",
"def on_create(self):",
"def on_init(self):\n self.write_log(\"策略初始化\")\n self.load_bar(1)",
"def on_start(self):\n self.init()",
"def on_init(self):\n self.write_log(\"策略初始化\")\n self.load_bar(10)",
"def on_init(self):\n self.write_log(\"策略初始化\")\n self.load_bar(10)",
"def load(self, *args, **kwargs):\n pass",
"def init_classes():\r\n\r\n\tglobal data\r\n\r\n\tif data is None:\r\n\t\twith app.app_context():\r\n\t\t\tprint \"initializing db\"\r\n\t\r\n\t\t\tdata = status.DataManager( db_path=dbPath, ext=pycklerext )\r\n\t\r\n\t\t\tprint \"db loaded\"\r\n\r\n\telse:\r\n\t\twith app.app_context():\r\n\t\t\tprint \"updating db\"\r\n\t\t\tdata.loadlast()\r\n\t\t\tprint \"db updated\"\r\n\r\n\twith app.app_context():\r\n\t\tg.modules = {\r\n\t\t\t'memall': get_mem_all,\r\n\t\t\t'memone': get_mem_one\r\n\t\t}\r\n\t\tg.glanularity = 60",
"def __init__(self, loader):\n self.loader = loader\n self.models = []"
] | [
"0.77701694",
"0.77701694",
"0.74983925",
"0.7490084",
"0.7307622",
"0.71171945",
"0.6513983",
"0.6450546",
"0.64347255",
"0.6360762",
"0.63267064",
"0.6259976",
"0.6241833",
"0.6180361",
"0.61745167",
"0.6063532",
"0.6045345",
"0.6027878",
"0.6021052",
"0.60068715",
"0.59551114",
"0.5947084",
"0.59404707",
"0.5939246",
"0.59375536",
"0.5933213",
"0.5933213",
"0.59125423",
"0.5895492",
"0.5868246"
] | 0.7820616 | 0 |
Return column number of first zombie in row. | def first_zombie_col(self, row_num):
row = self.board[row_num]
for col_num, square in enumerate(row):
if any(self.is_zombie([row_num, col_num])):
return col_num | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_colnumber(self, header):\n for i in range(0, len(self.data)):\n if self.data[i][0] == header:\n return i\n return None",
"def row(self):\n\t\tif self._parent != None:\n\t\t\treturn self._parent._children.index(self)\n\t\telse:\n\t\t\treturn 0",
"def get_rownumber(self, first_col_val):\n\n try:\n (col_name, col_contents) = self.data[0]\n col_data = [col_name] + col_contents\n return col_data.index(first_col_val)\n except ValueError:\n return None",
"def get_next_empty_cell(self):\n for row in range(len(self.grid)):\n for col in range(len(self.grid[0])):\n if self.grid[row][col] == 0:\n return (row, col)\n return None",
"def firstEmptyCell(board):\r\n for i in range(9):\r\n for j in range(9):\r\n if board[i][j] == 0:\r\n return (i, j) # row, col\r\n return None",
"def find_first_free_cell(board, picked_column):\n for row in reversed(range(len(board))):\n if board[row][picked_column] == 0:\n return row",
"def get_cellcount(self):\n self.cellcount += 1\n return self.cellcount - 1",
"def get_nearest_row(self):\n return (self.rect.top - (self.screen.get_height() // 12)) // self.maze.block_size",
"def get_nearest_col(self):\n return (self.rect.left - (self.screen.get_width() // 5)) // self.maze.block_size",
"def get_drop_row(self, x):\n for y in range(self.size_y):\n if self.get_piece_at_opening(x, y) == Piece.NONE:\n return y\n return -1",
"def num_cells(self):\n cbi = self.cbi\n if cbi is None:\n return None\n return cbi[-1] # pylint: disable=E1136",
"def num_cells(self):\n cbi = self.cbi\n if cbi is None:\n return None\n return cbi[-1] # pylint: disable=E1136",
"def num_cells(self):\n cbi = self.cbi\n if cbi is None:\n return None\n return cbi[-1] # pylint: disable=E1136",
"def num_cells(self):\n cbi = self.cbi\n if cbi is None:\n return None\n return cbi[-1] # pylint: disable=E1136",
"def position(self):\n # (this to be able to let the model know my 'row')\n if self.parent and self in self.parent.children:\n return self.parent.children.index(self)\n return 0",
"def get_pos_index(self):\n return [self.row-1, self.col-1]",
"def getRowColumn(N):\n N += 1\n y = int((np.sqrt(1 + 8 * N) - 1) / 2)\n b = int(N - (y**2 + y) / 2)\n if b == 0:\n return (y - 1, y - 1)\n else:\n return (y, b - 1)",
"def getHeaderRowPosition(sheetData):\n for index, row in enumerate(sheetData):\n if row[1] != '':\n return index\n return 0",
"def _get_header_position(header_row: List[str], column_title: str) -> int:\n for pos, column in enumerate(header_row):\n if column_title.lower() in column.lower():\n return pos\n\n raise Exception(\"Expected column header not found for {}\".format(column_title))",
"def _next_unlabelled_col(x):\n for i in range(self.n_cols):\n idx = (x + i) % self.n_cols\n x_current = self._x_positions[idx]\n if self._cols[x_current].label is None:\n return idx",
"def find_next_empty_cell(grid):\n for i, row in enumerate(grid):\n for j, col in enumerate(row):\n if col == 0:\n return (i, j)\n return None",
"def test_get_date_column_index_first_col(self, one_row_worksheet):\n\n actual_result = one_row_worksheet.get_date_column_index()\n assert actual_result == 0",
"def nrows(self):\n if self.ncolumns() == 0:\n return 0\n nrows = self.table_column(0).nrows()\n for i in range(1, self.ncolumns()):\n nrows = min(self.table_column(i).nrows(), nrows)\n return nrows",
"def column(self) -> int:\n return self._column",
"def __get_cell_index(self, x, y) -> int:\n # \"The map data, in row-major order, starting with (0,0)\"\n return x + y * self.occupancy_map.info.width",
"def _find_empty_cell(self):\n\n for r, row in enumerate(self._board):\n for c, cell in enumerate(row):\n if cell is None:\n return r, c",
"def rank(self):\n\n if self._rank >= 0:\n return self._rank\n\n reduced, operations = self.to_row_echelon()\n non_leading_rows = 0\n for i in range(self.rows, 0, -1):\n if not reduce(lambda x,y: x or y, reduced.row(i)):\n non_leading_rows += 1\n else:\n break\n\n self._rank = self.rows - non_leading_rows\n return self._rank",
"def which_cell(loc_x, loc_y):\n column = int(math.ceil((loc_x - LEFT_MARGIN) / CELL_SIZE))\n row = int(math.ceil((loc_y - TOP_MARGIN) / CELL_SIZE))\n cell_id = (row - 1) * CELL_COLUMN + column\n return cell_id",
"def find_blank_cell(self, board: list):\n cells = {}\n for i in range(9): # Iterate over rows\n for j in range(9): # Iterate over columns\n if board[i][j] == 0:\n cells[str(i) + ' ' + str(j)] = self.count_numbers(board, j, i)\n m = max(cells.values())\n for k in cells:\n if cells[k] == m:\n s = k.split()\n x, y = int(s[1]), int(s[0])\n return x, y",
"def _get_row_index(self, row: Row) -> int:\n row_index = -1\n for index, table_row in enumerate(self.table_data):\n if table_row.values == row.values:\n row_index = index\n break\n return row_index"
] | [
"0.6535265",
"0.6504764",
"0.6361898",
"0.6265344",
"0.6231301",
"0.62140507",
"0.61831784",
"0.61463916",
"0.61094284",
"0.6089981",
"0.60148174",
"0.60148174",
"0.60148174",
"0.60148174",
"0.5994073",
"0.5961097",
"0.59416634",
"0.59152573",
"0.5907639",
"0.5887093",
"0.5885397",
"0.58745635",
"0.5856625",
"0.5856272",
"0.58361137",
"0.5833648",
"0.58267105",
"0.58106667",
"0.58019084",
"0.5796834"
] | 0.7976616 | 0 |
Removes an item from its 2D location on the board. | def del_item(self, item):
index = self.board[item.pos[0]][item.pos[1]].index(item)
del self.board[item.pos[0]][item.pos[1]][index] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delItem(self,row,column):\n data = self.data\n if row in data and column in data[row]:\n del data[row][column]\n self.hasChanged = True",
"def remove_item(self, idx_of_item):\n del self.items[idx_of_item]",
"def remove(self, item) -> None:\n entry = self.entry_finder.pop(item)\n entry[-1][0] = None",
"def remove(self, item: T) -> None:\n index = self.index(item)\n self.delete_at_index(index)",
"def remove(self, item: T) -> None:\n index = self.index(item)\n self.delete_at_index(index)",
"def remove(self, item):\n index = self.get_all().index(item)\n removed_item = self._items.pop(item)\n previous_item, next_item = removed_item[1]\n if item == self._first:\n self._first = next_item\n if next_item:\n self._items[next_item][1][0] = None\n elif item == self._last:\n self._last = previous_item\n if previous_item:\n self._items[previous_item][1][1] = None\n else:\n if previous_item:\n self._items[previous_item][1][1] = next_item\n if next_item:\n self._items[next_item][1][0] = previous_item\n return index",
"def removeItem(self, item):\n # remove this item from our list\n if item in self.sceneItems:\n self.sceneItems.remove(item)\n\n # remove it from the scene\n self.scene.removeItem(item)\n\n # update the viewport\n self.viewport().update()",
"def remove(self, item):\n\n if item in self:\n item_index = self._index_map[item]\n last_item = self._list[-1]\n\n # Swap in the item from the end of the list\n self._list[item_index] = last_item\n self._list.pop()\n\n self._index_map[last_item] = item_index",
"def remove(self, item):\n try:\n entry = self.set.pop(item)\n entry[-1] = self.REMOVED\n except KeyError:\n print(\"Can't remove a non-existing item\")",
"def drop(self, item: Item):\n self.items.remove(item)\n item.place(self.parent.x, self.parent.y, self.gamemap)\n\n self.engine.message_log.add_message(f'You yeeted the {item.name}.')",
"def remove_item(self, item: tuple) -> None:\n self._antecedent.remove(item)\n self._is_updated = False",
"def remove(self, item: Item) -> None:\n raise NotImplementedError(\"remove\")",
"def remove_from_basket(self, item):\n self._products.pop(item)",
"def troop_remove(self, pos):\n x, y = pos\n # tile_id = AIV_SIZE * y + x\n \n troop = self.tmap[y, x]\n if (troop == 0):\n return\n \n # update tmap\n self.tmap[y, x] = 0\n\n # first remove thing from tarr, then find something new in tmap\n\n\n # for slot in range(0, len(self.tarr)):\n # if (self.tarr[slot] == tile_id):\n # self.tmap[y, x] = slot//10\n \n # # update tarr\n # for slot in range(10*troop, 11*troop):\n # if (self.tarr[slot] == tile_id):\n # for slot_slot in range(slot, 11*troop-1):\n # self.tarr[slot_slot] = self.tarr[slot_slot+1]",
"def remove(self, item):\n # type: (Any) -> None\n return list.remove(self, self.ref(item))",
"def removeItem(self, item):\n if item.type not in self.__inventory__:\n return\n for i in range(0, len(self.__inventory__[item.type])):\n if self.__inventory__[item.type][i].id == item.id:\n self.__inventory__[item.type].pop(i)\n return",
"def remove(self, item):\n\t\tif self.len == 0:\n\t\t\traise ValueError(\"Lista vacia\")\n\t\tif self.prim.dato == item:\n\t\t\tself.borrar_primero()\n\t\t\treturn\n\t\tanterior = self.prim\n\t\tactual = anterior.prox\n\t\twhile actual and actual.dato != item:\n\t\t\tanterior = anterior.prox\n\t\t\tactual = actual.prox\n\t\tif not actual:\n\t\t\traise ValueError(\"Elemento no encontrado\")\n\t\tanterior.prox = actual.prox\n\t\tself.len -= 1",
"def remove_item(self, item_id):\n self.items.pop(item_id)",
"def _remove(self, cell_coord, o):\n cell = self.d[cell_coord]\n cell.remove(o)\n\n # Delete the cell from the hash if it is empty.\n if not cell:\n del(self.d[cell_coord])",
"def _itemRemoved(self, item):\n group = self.item()\n if group is None:\n return\n\n # Find item\n for row in self.children():\n if isinstance(row, Item3DRow) and row.item() is item:\n self.removeRow(row)\n break # Got it\n else:\n raise RuntimeError(\"Model does not correspond to scene content\")",
"def remove_poss(self, row, col):\n if self.poss_tiles[row][col] is not None:\n self.poss_tiles[row][col].remove()\n self.poss_tiles[row][col] = None",
"def remove_item(self, key, item):\n self[key].remove(item)\n self._remove_reverse_mapping(item, key)",
"def remove(self, item):\n item_found = False\n\n try:\n # Traverse through the array to look for the 'item'\n for i in range(len(self)):\n if self.the_array[i] == item:\n # Move every item after the 'item' found to left in order\n # to remove the 'item'\n for j in range(i, self.count - 1):\n self.the_array[j] = self.the_array[j + 1]\n self.count -= 1\n item_found = True\n\n if (self.capacity // 2 >= self.BASE_SIZE) and (self.count < self.capacity / 8):\n self._resize(self.capacity // 2)\n break\n\n if not item_found:\n raise ValueError\n\n except ValueError:\n print(\"Item not found in list.\")\n\n return item_found",
"def remove_obstacle(self, x, y):\n self.BOARD[y][x].traversable = True\n self.board_array[y][x] = 0",
"def item_remove(self, item):\n\t\treturn self._modify_object(item=item, new_item=\"\")",
"def remove(self, loc):\n j = loc._index\n if not (0 <= j < len(self) and self._data[j] is loc):\n raise ValueError('Invalid locator')\n if j == len(self) - 1: # item at last position\n self._data.pop() # just remove it\n else:\n self._swap(j, len(self)-1) # swap item to the last position\n self._data.pop() # remove it from the list\n self._bubble(j) # fix item displaced by the swap\n return (loc._key, loc._value)",
"def remove_piece(self) -> None:\r\n if self.has_piece():\r\n self.piece.square = None\r\n self.piece = None",
"def delete_row(self, pos):\n del self._grid[pos]",
"def drop(self, pitem):\n\n #if the item is not inside the item list, can't drop it \n if pitem not in self.items:\n print('The player does not carry the item')\n\n #if not, remove the item \n else:\n self.items.remove(pitem)",
"def remove_from_inventory(self, item):\n\t\tif item in self.inventory:\n\t\t\tself.inventory[item] -= 1\n\t\t\tif self.inventory[item] == 0:\n\t\t\t\tdel self.inventory[item]"
] | [
"0.7109872",
"0.7100431",
"0.6982199",
"0.6915672",
"0.6915672",
"0.689267",
"0.68600845",
"0.6831838",
"0.67709464",
"0.67571646",
"0.67114854",
"0.66505504",
"0.6625568",
"0.6608895",
"0.6597562",
"0.65652883",
"0.6518909",
"0.6506016",
"0.6490994",
"0.6490032",
"0.64801955",
"0.6476876",
"0.6465768",
"0.6443875",
"0.6417258",
"0.63979775",
"0.63932043",
"0.63884395",
"0.6386593",
"0.63777953"
] | 0.834085 | 0 |
Randomly add new Zombie to board | def spawn(self):
new_zombie_lvl = random.randint(0, min(self.level, 3))
_ = Zombie(new_zombie_lvl, [random.randint(0, 4), 99], self.board)
self.zombie_spawn_delay = random.randint(*self.zombie_spawn_delay_range) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def new_tile(self):\r\n rand_x = random.randrange(self.width)\r\n rand_y = random.randrange(self.height)\r\n while self.get_tile(rand_y, rand_x) != 0:\r\n rand_x = random.randrange(self.width)\r\n rand_y = random.randrange(self.height)\r\n value = random.choice([2,2,2,2,2,2,2,2,2,4])\r\n del self.board[rand_y][rand_x]\r\n self.board[rand_y].insert(rand_x,value)\r\n return self.board",
"def add_zombie(self, row, col):\r\n self._zombie_list.append((row, col))",
"def add_zombie(self, row, col):\n self._zombie_list.append((row,col))",
"def add_zombie(self, row, col):\n self._zombie_list.append((row, col))",
"def add_zombie(self, entity):\n if entity not in self._entities['all']:\n self._entities['all'].append(entity)\n self._entities['zombies'].append(entity)\n self._entities[entity._team]['zombies'].append(entity)\n entity.set_id(self._entities[entity._team]['id'])\n self._entities[entity._team]['id'] += 1",
"def create_enemy():\n if randint(0, 20) == 5:\n try:\n check.check_life(common.COLS-1, common.MIDS_R, \"Enemy\")\n eitem = person.Enemy(common.COLS-1, common.MIDS_R)\n config.E_LIST.append(eitem)\n except (config.EnemyHere, config.GapHere):\n pass\n\n for i in config.E_LIST:\n try:\n i.move(i.x_pos-2, i.y_pos)\n except config.WallHere:\n pass\n except config.EnemyHere:\n config.E_LIST.remove(i)",
"def new_tile(self):\r\n # check if is zero or not\r\n new_tile_added = False\r\n # a list to 2 90% of the time and 4 10% of the time\r\n new_tile_list = [2,2,2,2,2,2,2,2,2,4]\r\n counter = 0\r\n while not new_tile_added:\r\n row_position = random.randrange(0,self.grid_height)\r\n col_position = random.randrange(0,self.grid_width)\r\n if self.grid[row_position][col_position] == 0:\r\n self.grid[row_position][col_position] = random.choice(new_tile_list)\r\n new_tile_added = True\r\n if counter > self.grid_width * self.grid_height:\r\n print 'you failed'\r\n break\r\n\r\n counter +=1",
"def new_tile(self):\n\n # creating a random float variable that will roll a random value\n # if randomvalue > .90\n #\n\n tile_added = False\n while not tile_added:\n row = random.randint(0,self.grid_height - 1)\n col = random.randint(0,self.grid_width - 1)\n if self.board[row][col] == 0:\n tile_added = True\n random_tile = random.random()\n if random_tile < .90:\n self.board[row][col] = 2\n else:\n self.board[row][col] = 4",
"def new_tile(self):\r\n # replace with your code\r\n empty_square_lists = []\r\n for row in range(self._grid_height):\r\n for col in range(self._grid_width):\r\n if(self.get_tile(row, col) == 0):\r\n empty_square_lists.append((row, col))\r\n \r\n if len(empty_square_lists) == 0:\r\n return \"game over!\"\r\n \r\n random_cell = random.choice(empty_square_lists)\r\n random_cell_row = random_cell[0]\r\n random_cell_col = random_cell[1]\r\n \r\n values = [2] * 90 + [4] * 10\r\n value = random.choice(values)\r\n \r\n self.set_tile(random_cell_row, random_cell_col, value)",
"def new_tile(self):\n\n if len(self._available_new_tiles) == 0:\n # Refill the _available_new_tiles after 10 moves\n self._available_new_tiles = TOTAL_AVAILABLE_MOVES[:]\n\n while True:\n # Checks for 0 in a random row and column\n row = random.randrange(self._grid_height)\n col = random.randrange(self._grid_width)\n if self._grid[row][col] == 0:\n break\n\n new_tile = random.choice(self._available_new_tiles)\n # Remove the selected tile from _available_new_tiles\n self._available_new_tiles.remove(new_tile)\n self._grid[row][col] = new_tile",
"def new_tile(self):\n while True:\n random_row = random.randrange(self._grid_height)\n random_column = random.randrange(self._grid_width)\n if self._grid[random_row][random_column] == 0:\n self._grid[random_row][random_column] = random.choice([2] * 9 + [4])\n break",
"def make_board(self):\n generate = lambda: random.randint(1, 100) in range(1, self.p_pit+1)\n some_number = self.some_number\n agent = Agent(some_number)\n agent.program = Oozeplorer_Percept(agent)\n self.add_agent(agent)\n gold = Gold()\n self.add_thing(gold, None)\n for row in range(1, some_number + 1):\n for col in range(1, some_number + 1):\n valid_spot = (row, col) != gold.location and (row, col) != (1, 1)\n if valid_spot and generate():\n t_pt = Pit()\n t_pt.location = (row, col)\n self.things.append(t_pt)",
"def new_tile(self):\r\n count = 0\r\n tot_count = self.get_grid_width() * self.get_grid_height()\r\n\r\n while count < 2 and tot_count > 0:\r\n # my_list = 4 10% of the time and a 2 90%\r\n my_list = [4] * 10 + [2] * 90\r\n new_tile = random.choice(my_list)\r\n\r\n # Selects a random number from 0 to width * height -1\r\n\r\n spot = random.randint(0, self._grid_height * self._grid_width - 1)\r\n\r\n # sets location to random selection from spot\r\n loc = [spot / self._grid_width, spot % self._grid_width]\r\n # if loc is empty ( == 0 ) sets number, else repeats process.\r\n\r\n if self._board[loc[0]][loc[1]] == 0:\r\n # sets radom selected board tile to new_tile number\r\n self._board[loc[0]][loc[1]] = new_tile\r\n count += 1\r\n tot_count -= 1",
"def new_tile(self):\n \n # get random corordinates for new tile\n row = random.randint(0,self._grid_width)\n col = random.randint(0,self._grid_height)\n # keeps generating random tile corordinates for non-empty tile\n while self.get_tile(row,col) != 0:\n row = random.randint(0,self._grid_width)\n col = random.randint(0,self._grid_height)\n \n # get random index of new tile value\n freq = random.randint(0,9)\n if freq == 9:\n self.set_tile(row, col, 4)\n else:\n self.set_tile(row, col, 2)",
"def generatePiece(self):\n\n empty_tiles = []\n for y in range(BOARD_SIZE):\n for x in range(BOARD_SIZE):\n if self.grid[x][y].isEmpty():\n empty_tiles.append(self.grid[x][y])\n\n two_or_four = random.choice([2, 4])\n random.choice(empty_tiles).set(two_or_four)",
"def new_tile(self):\n # replace with your code\n empty_list = []\n counter_1 = 0\n for _ in self._grid:\n counter_2 = 0\n line = _\n for blank in line:\n if blank == 0:\n blank_tile = (counter_1, counter_2)\n empty_list.append(blank_tile)\n counter_2 += 1\n else:\n counter_2 += 1\n counter_1 += 1\n #print empty_list\n \n self._tile = empty_list[random.randrange(len(empty_list))]\n \n value = [2,2,2,2,2,2,2,2,2,4]\n tile_value = value[random.randint(0,9)]\n \n self.set_tile(self._tile[0], self._tile[1], tile_value)",
"def create_some_random_pos(actor_cls, n, actor_type, actor_list, game,\r\n probability_each=100):\r\n ITERATIONS_MAX = 12\r\n cell_size = lib_jp.Size(w=actor_cls.size.w, h=actor_cls.size.h)\r\n cell_size_with_border = lib_jp.Size(w=cell_size.w + Actor.CELL_SCREEN_SECURITY_SIZE,\r\n h=cell_size.h + Actor.CELL_SCREEN_SECURITY_SIZE)\r\n cell_total_security_border = lib_jp.Size(w=actor_cls.cell_added_size.w\r\n + Actor.CELL_SCREEN_SECURITY_SIZE,\r\n h=actor_cls.cell_added_size.h\r\n + Actor.CELL_SCREEN_SECURITY_SIZE)\r\n if len(actor_list) >= actor_cls.max_qty_on_board:\r\n return\r\n elif n + len(actor_list) >= actor_cls.max_qty_on_board:\r\n n = actor_cls.max_qty_on_board - len(actor_list)\r\n iterations = 0\r\n for _ in range(n):\r\n if probability_each < 100 and randint(1, 100) > probability_each:\r\n continue\r\n actor_added = False\r\n iterations = 0\r\n actor_obj = None\r\n while not actor_added and (iterations <= ITERATIONS_MAX):\r\n iterations += 1\r\n x = randint(cell_total_security_border.w,\r\n Settings.screen_width - cell_size_with_border.w)\r\n y = randint(Settings.screen_near_top + cell_total_security_border.h,\r\n Settings.screen_height - cell_size_with_border.h)\r\n # Check if there is some sprite in this position\r\n position_not_taken = True\r\n rect1 = pg.Rect(x, y, cell_size.w, cell_size.h)\r\n if actor_cls.actor_type != ActorType.BAT:\r\n # Apples and mines cannot collide with any kind of sprite\r\n for sprite in game.active_sprites:\r\n if rect1.colliderect(sprite.rect):\r\n position_not_taken = False\r\n break\r\n else:\r\n # Bats cannot collide with snakes and other bats\r\n for sprite in game.snakes:\r\n if rect1.colliderect(sprite.rect):\r\n position_not_taken = False\r\n break\r\n if position_not_taken:\r\n for sprite in game.bats:\r\n if rect1.colliderect(sprite.rect):\r\n position_not_taken = False\r\n break\r\n if position_not_taken:\r\n actor_obj = actor_cls(x, y, actor_type, game=game)\r\n if actor_obj.actor_type == ActorType.BAT:\r\n actor_obj.change_x = randint(3, 5)\r\n actor_obj.change_y = randint(3, 5)\r\n actor_obj.initialize_boundaries()\r\n actor_added = True",
"def get_red():\n # return name of actor, movement speed\n zombies = ['Zombie-1','Zombie-2','Zombie-3']\n return choice(zombies), randint(1,4)",
"def new_tile(self):\n zero_list = []\n zero_cell = ()\n # self._cells = [[0 for col in range(self._grid_width)] for row in range(self._grid_height)]\n for row in range(self._grid_height):\n for col in range(self._grid_width):\n if self._cells[row][col] == 0:\n zero_cell = (row, col)\n zero_list.append(zero_cell)\n if len(zero_list) > 0:\n chance = random.randrange(0,10)\n cell_idx = random.randrange(len(zero_list))\n if chance == 9:\n self._cells[zero_list[cell_idx][0]][zero_list[cell_idx][1]] = 4\n else:\n self._cells[zero_list[cell_idx][0]][zero_list[cell_idx][1]] = 2\n else:\n print(\"You lost! Better luck next time!\")",
"def new_tile(self):\n col = random.choice(range(self.grid_width))\n row = random.choice(range(self.grid_height))\n if self.grid[row][col] == 0:\n if random.random() >= 0.9:\n self.grid[row][col] = 4\n else:\n self.grid[row][col] = 2\n else:\n self.new_tile()",
"def move_random(self, board: Board) -> None:\n rnd_move_idx = randint(0,4)\n # moves: stay, up, left, right, down\n moves = [[0,0], [0,-1], [-1,0], [1,0], [0,1]]\n\n if board.can_position_at(self.x + moves[rnd_move_idx][0], self.y + moves[rnd_move_idx][1]):\n board.set_element_at_position(0, self.x, self.y)\n self.x += moves[rnd_move_idx][0]\n self.y += moves[rnd_move_idx][1]\n board.set_element_at_position(3, self.x, self.y)\n print(\"Bomberman moved to [\", self.x, \",\", self.y, \"]\")",
"def new_tile(self):\r\n random_row = random.randrange(0, self._grid_height)\r\n random_col = random.randrange(0, self._grid_width)\r\n random_choice = random.choice([2]*90 + [4] * 10)\r\n \r\n if 0 in [num for elem in self._cells for num in elem]: \r\n if self._cells[random_row][random_col] == 0:\r\n self._cells[random_row][random_col] = random_choice \r\n else:\r\n self.new_tile()\r\n else:\r\n pass",
"def generate_board(self):\n random.seed(self.seed)\n for row in self.grid:\n for column in row:\n probability = random.random()\n if self.live_probability > probability:\n column.set_alive()",
"def TrySpawnEnemy(self):\n if ((pygame.time.get_ticks() / self.clock.get_time()) % \n ENEMY_SPAWN_FREQUENCY == 0):\n lineNumber = 0 #here may be some random if there is more than one line\n type = 0 #here may be random also\n newEnemy = self.MakeZombie(0, self.fieldTop + \n lineNumber * VTAB_SIZE * self.height)\n self.enemies.add(newEnemy)\n return True\n return False",
"def new_tile(self):\n \n empty_items = []\n for row in range(self.get_grid_height()):\n for col in range(self.get_grid_width()):\n if self.get_tile(row, col) == 0:\n empty_items.append((row, col))\n \n random_row = 0\n random_col = 0\n if len(empty_items) != 0:\n random_empty_tile = random.randrange(0, len(empty_items))\n (random_row, random_col) = empty_items[random_empty_tile]\n else:\n return\n # the % of getting \"4\" from 0~9 is 10%\n random_time = random.randrange(0, 10)\n \n if random_time == 4:\n self._cells[random_row][random_col] = 4\n else:\n self._cells[random_row][random_col] = 2",
"def create_zombie(self, team, contagion):\n self._type = Genre.ZOMBIE\n self._team = team\n self._contagion = contagion",
"def create_enemies(self, count):\n self.enemies = []\n\n while count > 0:\n # Randomly select a cell\n x = int(random() * self.map_size[0])\n y = int(random() * self.map_size[1])\n\n # If it has been filled with something, choose another cell\n if self.is_filled(x, y):\n continue\n\n # Randomly select a type of enemy to generate\n r = int(random() * 10)\n if 4 < r and r < 8:\n enemy = self.create_enemy_bombeater_at(x, y)\n elif r == 8:\n enemy = self.create_enemy_flying_at(x, y)\n elif r == 9:\n enemy = self.create_enemy_bomber_at(x, y)\n else:\n enemy = self.create_enemy_normal_at(x, y)\n\n # Create dummy objects to prevent enemies \n # from concentrating at one place\n self.create_dummy_obj_at(x - 1, y)\n self.create_dummy_obj_at(x + 1, y)\n self.create_dummy_obj_at(x, y - 1)\n self.create_dummy_obj_at(x, y + 1)\n\n self.enemies.append(enemy)\n count -= 1",
"def add_bombs(self, protected_tile):\n bomb_list =[]\n count = self.num_of_bombs\n\n while count > 0:\n index = random.randint(0, len(self.stack)-1)\n if not(self.stack[index]['value'] == 'bomb' or index == protected_tile):\n self.stack[index]['value'] = 'bomb'\n count -= 1\n bomb_list.append(index)\n self.add_bomb_proximities(bomb_list)",
"def Spawn(self):\n if len(Ant.antArray) < Ant.antLimit:\n Ant.antArray.append(self)\n self.facing = random.randint(0,3)\n self.display.set_at((self.x,self.y), Colors.A_Zombie)\n pygame.display.update(pygame.Rect(self.x,self.y,1,1))",
"def move_humans(self, zombie_distance_field):\r\n blocked = self.get_grid_height() * self.get_grid_width() #getting the distance value of obstacles\r\n new_positions = []\r\n for human in self.humans(): #calculate move for each human\r\n moves = self.eight_neighbors(human[0], human[1]) #getting list of up to 8 possible moves\r\n moves.append((human[0], human[1]))\r\n potential_moves = []\r\n distance = zombie_distance_field[human[0]][human[1]]\r\n for move in moves: #storing potential move if the distance is the max but not that of an obstacle\r\n if zombie_distance_field[move[0]][move[1]] < blocked:\r\n if zombie_distance_field[move[0]][move[1]] > distance:\r\n potential_moves = [move]\r\n distance = zombie_distance_field[move[0]][move[1]]\r\n elif zombie_distance_field[move[0]][move[1]] == distance: #getting multiple moves if valid\r\n potential_moves.append(move) \r\n \r\n new_positions.append(random.choice(potential_moves))\r\n self._human_list = new_positions"
] | [
"0.6850253",
"0.6792333",
"0.67741394",
"0.6765152",
"0.67399603",
"0.6598132",
"0.6563493",
"0.6486989",
"0.6478938",
"0.6375682",
"0.6322192",
"0.62981117",
"0.62190646",
"0.6154359",
"0.6128631",
"0.61138046",
"0.6106024",
"0.60815513",
"0.60446703",
"0.6044434",
"0.60030496",
"0.600085",
"0.59687096",
"0.59541047",
"0.59333295",
"0.59307486",
"0.5921244",
"0.5920678",
"0.590263",
"0.58999276"
] | 0.73000026 | 0 |
If there is a Sun at a position, convert it to player gold. | def try_collecting(self, event):
sun_list = [i for i in self.board[event.pos] if isinstance(i, Sun)]
if sun_list:
sun_list[0].collected = True
self.player.gold += Sun.gold
self.ev_manager.post(events.SunCollected(self.player.gold)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def checkSun(ontology_sun):\n elevation = ontology_sun.has_elevation[0] #gets the elevation value of the Sun in the ontology. \n azimuth = ontology_sun.has_azimuth[0] #gets the azimuth value of the Sun in the ontology. \n intensity = ontology_sun.has_intensity[0] #gets the intensity value of the Sun in the ontology.\n return xosc.Sun(intensity,azimuth,elevation)",
"def doGreatSaintEffect(self, unit):\n\t\t\n\t\tiPlayer = unit.getOwner()\n\t\tpPlayer = gc.getPlayer(iPlayer)\n\t\tiReligion = pPlayer.getStateReligion()\n\t\t\n\t\t# loop through all cities and save city+distance tuples for those that don't have the religion\n\t\tcityValueList = []\n\t\tfor iLoopPlayer in range(iNumTotalPlayers+1): # include barbarians\n\t\t\tapCityList = PyPlayer(iLoopPlayer).getCityList()\n\t\t\tfor pCity in apCityList:\n\t\t\t\tcity = pCity.GetCy()\n\t\t\t\tif city.isHasReligion(iReligion) or city.isNone():\n\t\t\t\t\tcontinue\n\t\t\t\tiDistance = self.calculateDistance(unit.getX(), unit.getY(), city.getX(), city.getY())\n\t\t\t\tcityValueList.append((city, iDistance))\n\t\t\n\t\t# sort cities from closest to farthest to the unit\n\t\tcityValueList.sort(key=itemgetter(1))\n\t\t\n\t\t# loop through the new city list and spread the religion 3-5 times\n\t\tiCount = 0\n\t\tfor tCity in cityValueList:\n\t\t\tcity = tCity[0]\n\t\t\tiDistance = tCity[1]\n\t\t\t#if gc.getPlayer(city.getOwner()).getCivics(4) != con.iPersecutionCivic:\n\t\t\tcity.setHasReligion(iReligion, True, True, True)\n\t\t\tiCount += 1\n\t\t\tif iCount == 6 or (iCount == 5 and iDistance > 30) or (iCount == 4 and iDistance > 55) or (iCount == 3 and iDistance > 70):\n\t\t\t\tbreak\n\t\t\n\t\t# interface message\n\t\tif iCount > 0:\n\t\t\tszText = localText.getText(\"TXT_KEY_MINOR_EVENT_GREAT_SAINT_SUCCESS\", (unit.getName(), iCount))\n\t\t\tCyInterface().addMessage(iPlayer, False, con.iDuration, szText, \"AS2D_RELIGION_CONVERT\", InterfaceMessageTypes.MESSAGE_TYPE_MINOR_EVENT, \"\", ColorTypes(con.iGreen), -1, -1, False, False)\n\t\telse:\n\t\t\tszText = localText.getText(\"TXT_KEY_MINOR_EVENT_GREAT_SAINT_FAILURE\", (unit.getName(), ))\n\t\t\tCyInterface().addMessage(iPlayer, False, con.iDuration, szText, \"AS2D_RELIGION_CONVERT\", InterfaceMessageTypes.MESSAGE_TYPE_MINOR_EVENT, \"\", ColorTypes(con.iRed), -1, -1, False, False)\n\t\t\n\t\t# consume the Great Saint\n\t\tunit.kill(0, -1)",
"def find_gold(self, player):\n gold_found = random.randint(0, 100)\n player.gold += gold_found\n print(\"{} you found {} gold. You have {} gold.\".format(player.name, gold_found, player.gold))",
"def check_evolve(self):\n if self.team == 'white':\n if self.position[0] == 0:\n self.evolve()\n \n else:\n if self.position[0] == 7:\n self.evolve()",
"def SunPosition(time):\n # Correct for light travel time from the Sun.\n # Otherwise season calculations (equinox, solstice) will all be early by about 8 minutes!\n adjusted_time = time.AddDays(-1.0 / C_AUDAY)\n earth2000 = _CalcEarth(adjusted_time)\n sun2000 = [-earth2000.x, -earth2000.y, -earth2000.z]\n\n # Convert to equatorial Cartesian coordinates of date.\n stemp = _precession(sun2000, adjusted_time, _PrecessDir.From2000)\n sun_ofdate = _nutation(stemp, adjusted_time, _PrecessDir.From2000)\n\n # Convert equatorial coordinates to ecliptic coordinates.\n true_obliq = math.radians(adjusted_time._etilt().tobl)\n return _RotateEquatorialToEcliptic(sun_ofdate, true_obliq, time)",
"def player_place_units(self):\n # If player is \"losing\" to someone next to them, funnel units towards that square\n places_needing_units = []\n left_to_place = self.max_units\n for node in self.nodes:\n old_units = self.board.nodes[node]['old_units']\n num_needed_here = 0\n for neighbor in self.board[node]:\n neighbor = self.board.nodes[neighbor]\n if(neighbor['owner'] != self.player_num):\n num_needed_here += neighbor['old_units']\n self.front_line.append(node)\n places_needing_units.append((node, num_needed_here, self.find_dist_to_enemy_node(src_node=node)))\n places_needing_units = sorted(filter(lambda x: x[1] > 0,places_needing_units), key = lambda x: x[1])\n\n places_needing_units = sorted(places_needing_units, key=lambda x: x[2])\n\n for (node,amount) in places_needing_units:\n amount_to_place_here = min(left_to_place,amount+1)\n self.place_unit(node, amount_to_place_here)\n left_to_place -= amount_to_place_here\n if(left_to_place <= 0):\n break\n\n nodes_added_to = [a[0] for a in places_needing_units]\n\n # add one to front_line nodes\n if(left_to_place > 0):\n for node in self.front_line:\n self.place_unit(node,1)\n left_to_place -= 1\n if(left_to_place <= 0):\n break\n\n return self.dict_moves #Returns moves built up over the phase. Do not modify!",
"def handle_auction_end() -> None:\n auction_suns = game_state.get_auction_suns()\n max_sun = None\n if sum(1 for el in auction_suns if el is not None) > 0:\n max_sun = max(el for el in auction_suns if el is not None)\n\n # if no suns were bid and the auction tiles are full, clear\n # the tiles\n if max_sun is None:\n if game_state.get_num_auction_tiles() == game_state.get_max_auction_tiles():\n game_state.clear_auction_tiles()\n\n # if a sun was bid, give auction tiles to the winner\n else:\n winning_player = auction_suns.index(max_sun)\n\n # swap out winning player's auctioned sun with the center sun\n game_state.exchange_sun(\n winning_player, max_sun, game_state.get_center_sun()\n )\n game_state.set_center_sun(max_sun)\n\n # give auction tiles to the winner\n auction_tiles = game_state.get_auction_tiles()\n game_state.clear_auction_tiles()\n game_state.give_tiles_to_player(\n winning_player,\n (tile for tile in auction_tiles if gi.index_is_collectible(tile)),\n )\n\n winning_player_collection = game_state.get_player_collection(winning_player)\n\n # resolve pharoah disasters\n num_phars_to_discard = gi.NUM_DISCARDS_PER_DISASTER * sum(\n 1 for tile in auction_tiles if tile == gi.INDEX_OF_DIS_PHAR\n )\n if num_phars_to_discard > 0:\n num_phars_owned = winning_player_collection[gi.INDEX_OF_PHAR]\n num_phars_to_discard = min(num_phars_to_discard, num_phars_owned)\n game_state.remove_single_tiles_from_player(\n [gi.INDEX_OF_PHAR] * num_phars_to_discard, winning_player\n )\n\n # resolve nile disasters\n num_niles_to_discard = gi.NUM_DISCARDS_PER_DISASTER * sum(\n 1 for tile in auction_tiles if tile == gi.INDEX_OF_DIS_NILE\n )\n if num_niles_to_discard > 0:\n num_floods_owned = winning_player_collection[gi.INDEX_OF_FLOOD]\n num_niles_owned = winning_player_collection[gi.INDEX_OF_NILE]\n\n num_floods_to_discard = min(num_floods_owned, num_niles_to_discard)\n num_niles_to_discard = min(\n num_niles_to_discard - num_floods_to_discard, num_niles_owned\n )\n\n game_state.remove_single_tiles_from_player(\n [gi.INDEX_OF_FLOOD] * num_floods_to_discard\n + [gi.INDEX_OF_NILE] * num_niles_to_discard,\n winning_player,\n )\n\n # resolve civ disasters\n num_civs_to_discard = gi.NUM_DISCARDS_PER_DISASTER * sum(\n 1 for tile in auction_tiles if tile == gi.INDEX_OF_DIS_CIV\n )\n if num_civs_to_discard > 0:\n num_civs_owned = sum(\n gi.get_civs_from_collection(winning_player_collection)\n )\n if num_civs_owned <= num_civs_to_discard:\n game_state.remove_all_tiles_by_index_from_player(\n range(\n gi.STARTING_INDEX_OF_CIVS,\n gi.STARTING_INDEX_OF_CIVS + gi.NUM_CIVS,\n ),\n winning_player,\n )\n else:\n game_state.set_num_civs_to_discard(num_civs_to_discard)\n game_state.set_auction_winning_player(winning_player)\n\n # resolve monument disasters\n num_mons_to_discard = gi.NUM_DISCARDS_PER_DISASTER * sum(\n 1 for tile in auction_tiles if tile == gi.INDEX_OF_DIS_MON\n )\n if num_mons_to_discard > 0:\n num_mons_owned = sum(\n gi.get_monuments_from_collection(winning_player_collection)\n )\n if num_mons_owned <= num_mons_to_discard:\n game_state.remove_all_tiles_by_index_from_player(\n range(\n gi.STARTING_INDEX_OF_MONUMENTS,\n gi.STARTING_INDEX_OF_MONUMENTS + gi.NUM_MONUMENTS,\n ),\n winning_player,\n )\n else:\n game_state.set_num_mons_to_discard(num_mons_to_discard)\n game_state.set_auction_winning_player(winning_player)\n\n mark_player_passed_if_no_disasters(winning_player)\n\n # clear auction suns and mark auction as over\n game_state.end_auction()\n\n # if it's the final round and all playesr are passed\n if 
game_state.is_final_round() and game_state.are_all_players_passed():\n end_round(game_state)\n # else if no disasters to be resolved, advance current player\n elif not game_state.disasters_must_be_resolved():\n game_state.advance_current_player()\n # else, that means there IS a disaster to be resolved, so set current\n # player to auction winner to resolve\n else:\n game_state.set_current_player(game_state.get_auction_winning_player())",
"def evaulate_monster(generikmon):\r\n score = generikmon['headA']\r\n #score = generikmon['chinA']\r\n return score",
"def sense_earthquake(self, earthquake):\n if earthquake == True:\n self.location = \"gone dark\"\n return self.name + \" has gone dark!\"\n else:\n return self.location",
"def step(self):\n self.world.slosh_oceans()\n self.world.transfer_energy_vertically()\n self.world.transfer_energy_horizontally()\n self.world.absorb_energy_from_core()\n self.world.absorb_energy_from_sun(self.sun)",
"def ayanamsha(tee):\n return Solar.solar_longitude(tee) - sidereal_solar_longitude(tee)",
"def sky(seed=425, th=150, old=False):\n \n # impact parameters\n M = 3e7*u.Msun\n B = 19.95*u.kpc\n #B = 20.08*u.kpc\n V = 190*u.km/u.s\n phi = coord.Angle(0*u.deg)\n th = 150\n theta = coord.Angle(th*u.deg)\n Tenc = 0.01*u.Gyr\n T = 0.5*u.Gyr\n dt = 0.05*u.Myr\n rs = 0*u.pc\n \n old_label = ''\n np.random.seed(seed)\n observer = {'z_sun': 27.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 60*u.deg, 'galcen_coord': coord.SkyCoord(ra=300*u.deg, dec=-90*u.deg, frame='icrs')}\n vobs = {'vcirc': 220*u.km/u.s, 'vlsr': [0, 0, 0]*u.km/u.s}\n wangle = 180*u.deg\n \n if old:\n old_label = '_old_up'\n observer = {'z_sun': -2000.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 50*u.deg, 'galcen_coord': coord.SkyCoord(ra=300*u.deg, dec=-90*u.deg, frame='icrs')}\n vobs = {'vcirc': 220*u.km/u.s, 'vlsr': [0,0,0]*u.km/u.s}\n \n # impact parameters\n M = 3e7*u.Msun\n B = 20.06*u.kpc\n V = 190*u.km/u.s\n phi = coord.Angle(0*u.deg)\n th = 155\n theta = coord.Angle(th*u.deg)\n Tenc = 0.01*u.Gyr\n T = 0.55*u.Gyr\n dt = 0.05*u.Myr\n #dt = 1*u.Myr\n rs = 0*u.pc\n \n # potential parameters\n potential = 3\n Vh = 220*u.km/u.s\n q = 1*u.Unit(1)\n rhalo = 20*u.pc\n par_pot = np.array([Vh.si.value, q.value, rhalo.si.value])\n \n # setup tube\n Nstar = 1400\n wx = 30*u.kpc\n wy = 0*u.pc\n wz = 0*u.pc\n sx = 0*u.km/u.s\n \n xphi = np.linspace(-0.3*np.pi,0.3*np.pi, Nstar)\n xphi0 = np.linspace(-0.1*np.pi, 0.1*np.pi, 1000)\n xphi1 = np.linspace(-0.28*np.pi, -0.1*np.pi, 200)\n xphi2 = np.linspace(0.1*np.pi, 0.32*np.pi, 200)\n xphi = np.concatenate([xphi1, xphi0, xphi2])\n \n xr = 20*u.kpc + np.random.randn(Nstar)*0.0*u.kpc\n x = np.sin(xphi) * xr\n y = np.cos(xphi) * xr\n z = x * 0\n vx = -np.cos(xphi) * Vh# * 0.94\n vy = np.sin(xphi) * Vh #* 0.97\n vz = vx * 0\n # closest to impact\n ienc = np.argmin(np.abs(x))\n \n # generate stream model\n potential_perturb = 1\n par_perturb = np.array([M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal = coord.Galactocentric(stream['x'], **observer)\n xeq = xgal.transform_to(coord.ICRS)\n veq_ = gc.vgal_to_hel(xeq, stream['v'], **vobs)\n veq = [None] * 3\n veq[0] = veq_[0].to(u.mas/u.yr)\n veq[1] = veq_[1].to(u.mas/u.yr)\n veq[2] = veq_[2].to(u.km/u.s)\n \n # unperturbed stream\n par_perturb = np.array([0*M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream0 = {}\n stream0['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream0['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal0 = coord.Galactocentric(stream0['x'], **observer)\n xeq0 = xgal0.transform_to(coord.ICRS)\n veq0_ = gc.vgal_to_hel(xeq0, stream0['v'], **vobs)\n veq0 = [None] * 3\n veq0[0] = veq0_[0].to(u.mas/u.yr)\n veq0[1] = veq0_[1].to(u.mas/u.yr)\n veq0[2] = veq0_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n R = find_greatcircle(xeq0.ra.deg[::10], xeq0.dec.deg[::10])\n xi0, eta0 = myutils.rotate_angles(xeq0.ra, xeq0.dec, R)\n xi0 = coord.Angle(xi0*u.deg)\n \n # place 
gap at xi~0\n xioff = xi0[ienc]\n xi0 -= xioff\n \n xi, eta = myutils.rotate_angles(xeq.ra, xeq.dec, R)\n xi = coord.Angle(xi*u.deg)\n xi -= xioff\n \n vlabel = ['$\\mu_{\\\\alpha_\\star}$ [mas yr$^{-1}$]','$\\mu_{\\delta}$ [mas yr$^{-1}$]', '$V_r$ [km s$^{-1}$]']\n ylims = [[-0.5, 0.5], [-0.5, 0.5], [-25,25]]\n color = '0.35'\n ms = 4\n \n # plotting\n plt.close()\n fig, ax = plt.subplots(5,1,figsize=(12,12), sharex=True)\n \n plt.sca(ax[0])\n g = Table(fits.getdata('/home/ana/projects/GD1-DR2/output/gd1_members.fits'))\n plt.scatter(g['phi1']+40, g['phi2'], s=g['pmem']*2, c=g['pmem'], cmap=mpl.cm.binary, vmin=0.5, vmax=1.1)\n \n plt.xlim(-45,45)\n plt.ylim(-10,10)\n plt.gca().set_aspect('equal')\n plt.ylabel('$\\phi_1$ [deg]')\n \n plt.sca(ax[1])\n plt.plot(xi.wrap_at(wangle), eta, 'o', mec='none', color=color, ms=ms)\n \n plt.ylabel('$\\phi_1$ [deg]')\n plt.ylim(-10,10)\n plt.gca().set_aspect('equal')\n \n xeqs = [xeq.ra, xeq.dec, xeq.distance.to(u.kpc)]\n for i in range(3):\n plt.sca(ax[i+2])\n \n # interpolate expected kinematics from an unperturbed stream\n vexp = np.interp(xi.wrap_at(wangle), xi0.wrap_at(wangle), veq0[i].value) * veq0[i].unit\n plt.plot(xi.wrap_at(wangle), veq[i]-vexp, 'o', mec='none', color=color, ms=ms)\n \n plt.ylabel('$\\Delta$ {}'.format(vlabel[i]))\n plt.ylim(*ylims[i])\n\n plt.xlabel('$\\phi_2$ [deg]')\n \n plt.tight_layout()\n plt.savefig('../plots/spur_morphology_sky{}.png'.format(old_label))",
"def on_update(self, delta_time):\n for i in self.water_list:\n i.x += i.change_x\n i.y += i.change_y\n\n if i.x < i.size:\n i.change_x *= -1\n\n if i.y < i.size:\n i.change_y *= -1\n\n if i.x > SCREEN_WIDTH - i.size:\n i.change_x *= -1\n\n if i.y > SCREEN_HEIGHT - i.size:\n i.change_y *= -1\n\n if i.y - i.size/2 - self.player.size/2 < self.player.y and self.player.y < i.y + i.size/2 + self.player.size/2 :\n if i.x - i.size/2 - self.player.size/2 < self.player.x and self.player.x < i.x + i.size/2 + self.player.size/2:\n self.player.size += i.size\n self.water_list.remove(i)\n\n for i in self.soda_list:\n i.x += i.change_x\n i.y += i.change_y\n\n if i.x < i.size:\n i.change_x *= -1\n\n if i.y < i.size:\n i.change_y *= -1\n\n if i.x > SCREEN_WIDTH - i.size:\n i.change_x *= -1\n\n if i.y > SCREEN_HEIGHT - i.size:\n i.change_y *= -1\n\n if i.y - i.size/2 - self.player.size/2 < self.player.y and self.player.y < i.y + i.size/2 + self.player.size/2 :\n if i.x - i.size/2 - self.player.size/2 < self.player.x and self.player.x < i.x + i.size/2 + self.player.size/2:\n self.player.size -= i.size\n self.soda_list.remove(i)\n\n self.player.x += self.player.change_x\n self.player.y += self.player.change_y\n\n if self.player.x < self.player.size:\n self.player.change_x *= -1\n\n if self.player.y < self.player.size:\n self.player.change_y *= -1\n\n if self.player.x > SCREEN_WIDTH - self.player.size:\n self.player.change_x *= -1\n\n if self.player.y > SCREEN_HEIGHT - self.player.size:\n self.player.change_y *= -1\n\n if self.player.size < 5:\n self.player.size = 0\n del self.player",
"def MoonPhase(time):\n return PairLongitude(Body.Moon, Body.Sun, time)",
"def goto_sun(self, seconds_ahead = 0, blocking = True):\n assert self.is_initialized\n solar_ephemeris = self.devices['solar_ephemeris']\n tracking_mirror_positioner = self.controllers['tracking_mirror_positioner']\n #self.set_windings('on')\n #start tracking time\n t0 = time.time()\n #get current sun location\n jd_now, el_now, az_now = solar_ephemeris.update()\n #predict where sun will be at next control point\n jd_future, el_future, az_future = solar_ephemeris.predict(seconds_ahead, jd_now)\n #send start event\n info = OrderedDict()\n info['timestamp'] = t0\n info['seconds_ahead'] = seconds_ahead\n info['jd_now'] = jd_now\n info['az_now'] = az_now\n info['el_now'] = el_now\n info['jd_future'] = jd_future\n info['az_future'] = az_future\n info['el_future'] = el_future\n \n self._send_event(\"SOLAR_TRACKER_GOTO_SUN_STARTED\", info)\n if blocking:\n tracking_mirror_positioner.goto(az_target = az_future,\n el_target = el_future,\n blocking = blocking,\n )\n t1 = time.time()\n used_t = t1-t0\n #send end event\n info = OrderedDict()\n info['timestamp'] = t1\n info['az_pos'] = self.az_pos\n info['el_pos'] = self.el_pos\n info['used_time'] = used_t\n self._send_event(\"SOLAR_TRACKER_GOTO_SUN_COMPLETED\", info)\n return used_t\n else:\n tracking_mirror_positioner.goto(az_target = az_future,\n el_target = el_future,\n blocking = blocking,\n )",
"def satPos(ephi_dict, t_eval):\n \n c_rs = ephi_dict['crs']\n delta_n = ephi_dict['deltan']\n M_0 = ephi_dict['M0']\n c_uc = ephi_dict['cuc']\n ecc = ephi_dict['ecc']\n c_us = ephi_dict['cus']\n roota = ephi_dict['roota']\n t_oe = ephi_dict['toe']\n Omega0 = ephi_dict['Omega0']\n c_is = ephi_dict['cis']\n i0 = ephi_dict['i0']\n c_rc = ephi_dict['crc']\n c_ic = ephi_dict['cic']\n omega = ephi_dict['omega']\n Omega_dot = ephi_dict['Omegadot']\n i_dot = ephi_dict['idot']\n #codes = ephi_dict['codes']\n #weekno = ephi_dict['weekno']\n #L2flag = ephi_dict['L2flag']\n #svaccur = ephi_dict['svaccur']\n #svhealth = ephi_dict['svhealth']\n #tgd = ephi_dict['tgd']\n #iodc = ephi_dict['iodc']\n #tom = ephi_dict['tom']\n #datetime = ephi_dict['datetime']\n \n mu = 3.986005e14 # WGS 84 value of the earth's gravitational constant for GPS user\n Omega_e_dot = 7.2921151467e-5 # WGS 84 value of the earth's rotation rate\n \n a = roota**2 # Semi-major axis\n n0 = math.sqrt(mu/(a**3)) # Computed mean motion (rad/sec)\n \n tgps_sec = Time(t_eval).gps % 604800\n t_k = tgps_sec - t_oe # Time from ephemeris reference epoch\n \n # Account for beginning or end of week crossovers\n if t_k > 302400:\n t_k = t_k - 604800\n elif t_k < -302400:\n t_k = t_k + 604800\n \n n = n0 + delta_n # Corrected mean motion\n M_k = M_0 + n * t_k # Mean anomaly\n M_k = (M_k + 2*np.pi) % (2*np.pi)\n \n # Kepler's Equation for Eccentric Anomaly (may be solved by iteration) (radians)\n E_k = M_k # First guess for E_k\n M_k_delta = 1 # difference between two iterations\n \n #print('----------------------------')\n #print('t_k = %d' % t_k)\n #print('M_k = %12.6f' % M_k)\n for i in range(6):\n M_k_temp = E_k + ecc*math.sin(E_k)\n M_k_delta = M_k - M_k_temp\n # print('i = %d, M_k_temp = %12.6f, M_k_delta = %3.3e' % (i, M_k_temp, M_k_delta))\n E_k = E_k + M_k_delta\n if abs(M_k_delta) < 1e-12:\n break\n \n E_k = (E_k + 2*np.pi) % (2*np.pi)\n \n # True Anomaly\n sinv_k = math.sqrt(1 - ecc**2)*math.sin(E_k) #/ (1 - ecc*math.cos(E_k))\n cosv_k = (math.cos(E_k) - ecc) #/ (1 - ecc*math.cos(E_k))\n v_k = math.atan2(sinv_k, cosv_k)\n \n #E_k = math.acos((ecc+cosv_k)/(1+ecc*cosv_k)) # Eccentric Anomaly\n \n Phi_k = (v_k + omega) % (2*np.pi) # Argument of Latitude\n \n # Second Harmonic Perturbations\n delta_u_k = c_us * math.sin(2*Phi_k) + c_uc * math.cos(2*Phi_k) # Argument of Latitude Correction\n delta_r_k = c_rs * math.sin(2*Phi_k) + c_rc * math.cos(2*Phi_k) # Radius Correction\n delta_i_k = c_is * math.sin(2*Phi_k) + c_ic * math.cos(2*Phi_k) # Inclination Correction\n \n u_k = Phi_k + delta_u_k # Corrected Argument of Latitude\n r_k = a*(1-ecc*math.cos(E_k)) + delta_r_k # Corrected Radius\n i_k = i0 + delta_i_k + i_dot * t_k # Corrected Inclination\n \n # Positions in orbital plane\n x_k_dash = r_k * math.cos(u_k)\n y_k_dash = r_k * math.sin(u_k)\n \n # Corrected longitude of ascending node\n Omega_k = Omega0 + (Omega_dot - Omega_e_dot) * t_k - Omega_e_dot * t_oe\n Omega_k = uwr(Omega_k)\n \n # Earth-fixed coordinates\n x_k = x_k_dash * math.cos(Omega_k) - y_k_dash * math.cos(i_k) * math.sin(Omega_k)\n y_k = x_k_dash * math.sin(Omega_k) + y_k_dash * math.cos(i_k) * math.cos(Omega_k)\n z_k = y_k_dash * math.sin(i_k)\n \n return np.array([x_k, y_k, z_k])",
"def _get_grounding_from_name(self):\n grounding_name = remove_article(self.grounding)\n\n for area_name, area in self.map.areas.iteritems():\n if grounding_name == area_name:\n grounding = area\n\n for object_name, object_ in self.map.objects.iteritems():\n if grounding_name == object_name:\n grounding = object_\n\n for cop_name, cop in self.map.cops.iteritems():\n if grounding_name == cop_name:\n grounding = cop\n break\n else:\n if grounding_name == 'Deckard':\n logging.debug(\"No grounding available for Deckard yet.\")\n return None\n\n try:\n grounding\n except NameError:\n logging.error(\"No grounding available for {}\".format(grounding_name))\n return None\n\n return grounding",
"def get_penguin_placement(self, state: FishGameState):\n return self.strategy.place_penguin(state=state)",
"def punch(self, a_fighter):\n points = int(uniform(0.7,1.0)*10*self.get_strength()/a_fighter.get_agility())\n a_fighter.__health_points = a_fighter.get_health_points() - points\n return a_fighter.__health_points",
"def day_round(g):\n # day\n if is_seer_alive(g):\n if game_state['s_found_w_prev_night']:\n wolf_id = game_state['s_prev_night_id']\n game_state['s_found_w_prev_night'] = False\n g = kill_player(g, wolf_id)\n else:\n d = random_pick(g.keys())\n g = kill_player(g, d)\n else:\n # kill one random player\n d = random_pick(g.keys())\n # if guard is alive\n g = kill_player(g, d)\n\n return g",
"def teleportation(personnage,largeur_terrain,longeur_terrain):\n\n if 0 >personnage[\"x\"]:\n personnage[\"x\"]= largeur_terrain\n \n elif personnage[\"x\"] > largeur_terrain:\n personnage[\"x\"] = 0\n \n elif 0 > personnage[\"y\"]:\n personnage[\"y\"] = longeur_terrain\n \n elif personnage[\"y\"] > longeur_terrain:\n personnage[\"y\"] = 0",
"def update_players_locations(self):\n self.loc = self.find_value(1)\n self.opponent_loc = self.find_value(2)",
"def big_psi(sun_pos, sat_3d_pos):\n return np.arccos(np.dot(sun_pos.T, sat_3d_pos) / (vector_magnitude(sun_pos[0], sun_pos[1], sun_pos[2]) * vector_magnitude(sat_3d_pos[0], sat_3d_pos[1], sat_3d_pos[2])))",
"def eval(self, state):\n valueOfPlayers = 0\n valueOfRebelAdvancments = 0\n valueOfLocations = 0\n\n\n\n for coordinate in state.gameState:\n if state.gameState[coordinate]==state.blank:\n continue\n elif state.gameState[coordinate]==state.rebel:\n valueOfRebelAdvancments = -coordinate[0]\n elif state.gameState[coordinate]==state.jedi:\n continue\n elif state.gameState[coordinate]==state.sith:\n continue\n \n valueOfLocations += valueOfRebelAdvancments\n\n \n valueOfPlayers = state.numRebels + 4*state.numJedi - 4*state.numSith\n \n return valueOfPlayers*4 + valueOfLocations",
"def draw_sun():\n lisandro.penup()\n lisandro.goto(40, 90)\n lisandro.begin_fill()\n lisandro.circle(150) # draws out a circle with a radius of 150 for the sun.\n lisandro.end_fill()\n lisandro.hideturtle()",
"def swiss_to_gts(v):\n return v - np.array([667400, 158800, 1700])",
"def update(self):\n self.player.eaten_cheese = False\n # Checa se o jogador ou agente chegaram no objetivo\n if self.grid[self.player.x][self.player.y] == 2:\n self.player.score += self.player.reward_amount\n self.done = True\n\n # Checa se o jogador ou agente comeram o queijo\n elif self.grid[self.player.x][self.player.y] == 4:\n self.player.score += 0.2\n self.player.eaten_cheese = True\n self.clear_position(self.player.x, self.player.y)\n\n # Popule a atual posicao do jogador com 1 e a do agente com 10\n if self.player.name == \"Player\":\n self.grid[self.player.x][self.player.y] = 1\n elif self.player.name == \"Agent\":\n self.grid[self.player.x][self.player.y] = 10",
"def uncleScrooge(pos):\n bulldozer(pos)\n #print mc.postToChat(\"We made some free place. done !\")\n mc.postToChat(\"We made some free place. done !\")\n \n ground(pos, mainColor= wWhite, secondColor=wBlack)\n mc.setBlock(pos.x, pos.y, pos.z, 40)\n mc.setBlock(pos.x-1, pos.y, pos.z, 40)\n mc.postToChat(\"Ground done !\")\n\n pos.z += 5\n makeTheHouse(pos, blockTypeMain = wExtraWhite, blockTypeSecond = wGold, mainColor=1 , secondColor=0, myDoor= wDoorIron)\n mc.postToChat(\"House done !\")\n \n theRoof(pos, blockTypeMain = wDiamond_Block, mainColor=wBlack)\n mc.postToChat(\"The roof is done !\")\n\n makeTheDeco(pos, flowers = wFlower_Yellow)\n mc.postToChat(\"ALL Work done !\")\n\n # Ends Uncle Scrooge House",
"def unit_sun_r(sun_pos):\n return sun_pos / vector_magnitude(sun_pos[0], sun_pos[1], sun_pos[2])",
"def shoot(self, a_fighter):\n if self.get_ammos()>0:\n lostPoints = int(self.get_damage() / a_fighter.get_agility())\n lostPoints = int(lostPoints * uniform(0.5,1)) # some random added\n a_fighter.__health_points = a_fighter.get_health_points() - lostPoints\n self.__ammos -= 1 # remove one ammo\n return a_fighter.get_health_points()"
] | [
"0.5345387",
"0.5232973",
"0.51683986",
"0.5097628",
"0.5020059",
"0.48194417",
"0.47753277",
"0.47593406",
"0.46944553",
"0.46863693",
"0.4672161",
"0.46698081",
"0.4665255",
"0.46307704",
"0.46187636",
"0.45967177",
"0.45949432",
"0.45921183",
"0.45918754",
"0.45915216",
"0.45882726",
"0.45877257",
"0.45669726",
"0.45657036",
"0.45518273",
"0.45426005",
"0.45419896",
"0.45283952",
"0.45276877",
"0.45253745"
] | 0.5606547 | 0 |
Initialize the Salesforce location strategies 'text' and 'title' plus any strategies registered by other keyword libraries | def initialize_location_strategies(self):
locator_manager.register_locators("sf", lex_locators)
locator_manager.register_locators("text", "Salesforce.Locate Element by Text")
locator_manager.register_locators("title", "Salesforce.Locate Element by Title")
# This does the work of actually adding all of the above-registered
# location strategies, plus any that were registered by keyword
# libraries.
locator_manager.add_location_strategies() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def InitStrategy(self, sname, strategy):\n\n self._string = sname\n\n self.strategy = strategy\n self.postracker = position.PositionTracker(self.strategy)",
"def __init__(self):\r\n\t\tself.label = \"Linked Data Spatial Query\"\r\n\t\tself.description = \"Get geographic features from wikidata by mouse clicking. The Place type can be specified.\"\r\n\t\tself.canRunInBackground = False\r\n\t\tself.entityTypeURLList = []\r\n\t\tself.entityTypeLabel = []\r\n\t\tself.enterTypeText = \"\"",
"def setup(cls):\n cls.location = {\"longitude\": 0.1270, \"latitude\": 51.5194}\n cls.search_query = {\n \"search_key\": \"intro_service\",\n \"search_value\": \"intro_alice\",\n \"constraint_type\": \"==\",\n }\n cls.search_radius = 5.0\n cls.admin_host = \"127.0.0.1\"\n cls.admin_port = 8021\n cls.ledger_url = \"http://127.0.0.1:9000\"\n config_overrides = {\n \"models\": {\n \"strategy\": {\n \"args\": {\n \"location\": cls.location,\n \"search_query\": cls.search_query,\n \"search_radius\": cls.search_radius,\n \"admin_host\": cls.admin_host,\n \"admin_port\": cls.admin_port,\n \"ledger_url\": cls.ledger_url,\n }\n }\n },\n }\n\n super().setup(config_overrides=config_overrides)\n\n # behaviours\n cls.faber_behaviour = cast(\n FaberBehaviour,\n cls._skill.skill_context.behaviours.faber,\n )\n\n # dialogues\n cls.default_dialogues = cast(\n DefaultDialogues, cls._skill.skill_context.default_dialogues\n )\n cls.http_dialogues = cast(\n HttpDialogues, cls._skill.skill_context.http_dialogues\n )\n cls.oef_search_dialogues = cast(\n OefSearchDialogues, cls._skill.skill_context.oef_search_dialogues\n )\n\n # handlers\n cls.http_handler = cast(HttpHandler, cls._skill.skill_context.handlers.http)\n cls.oef_search_handler = cast(\n OefSearchHandler, cls._skill.skill_context.handlers.oef_search\n )\n\n # models\n cls.strategy = cast(Strategy, cls._skill.skill_context.strategy)\n\n cls.logger = cls._skill.skill_context.logger\n\n # mocked objects\n cls.mocked_method = \"SOME_METHOD\"\n cls.mocked_url = \"www.some-url.com\"\n cls.mocked_version = \"some_version\"\n cls.mocked_headers = \"some_headers\"\n cls.body_dict = {\"some_key\": \"some_value\"}\n cls.body_str = \"some_body\"\n cls.body_bytes = b\"some_body\"\n cls.mocked_body_bytes = json.dumps(cls.body_str).encode(\"utf-8\")\n cls.mocked_query = Query(\n [Constraint(\"some_attribute_name\", ConstraintType(\"==\", \"some_value\"))],\n DataModel(\n \"some_data_model_name\",\n [\n Attribute(\n \"some_attribute_name\",\n str,\n False,\n \"Some attribute descriptions.\",\n )\n ],\n ),\n )\n cls.mocked_proposal = Description(\n {\n \"contract_address\": \"some_contract_address\",\n \"token_id\": \"123456\",\n \"trade_nonce\": \"876438756348568\",\n \"from_supply\": \"543\",\n \"to_supply\": \"432\",\n \"value\": \"67\",\n }\n )\n\n # list of messages\n cls.list_of_http_messages = (\n DialogueMessage(\n HttpMessage.Performative.REQUEST,\n {\n \"method\": cls.mocked_method,\n \"url\": cls.mocked_url,\n \"headers\": cls.mocked_headers,\n \"version\": cls.mocked_version,\n \"body\": cls.mocked_body_bytes,\n },\n is_incoming=False,\n ),\n )\n\n cls.list_of_oef_search_messages = (\n DialogueMessage(\n OefSearchMessage.Performative.SEARCH_SERVICES,\n {\"query\": cls.mocked_query},\n ),\n )",
"def __init__(self, **keywords):\n\t\tfrom pymodule import ProcessOptions\n\t\tProcessOptions.process_function_arguments(keywords, self.option_default_dict, error_doc=self.__doc__, class_to_have_attr=self)\n\t\tself.setup_engine(metadata=__metadata__, session=__session__, entities=entities)",
"def __init__(self, engine: str = \"sfdp\"):\n self.engine = engine",
"def initialize_survey(self, **kwargs):",
"def setup(cls):\n super().setup()\n cls.search_behaviour = cast(\n GenericSearchBehaviour, cls._skill.skill_context.behaviours.search\n )\n cls.tx_behaviour = cast(\n GenericTransactionBehaviour, cls._skill.skill_context.behaviours.transaction\n )\n cls.strategy = cast(GenericStrategy, cls._skill.skill_context.strategy)\n\n cls.logger = cls._skill.skill_context.logger",
"def initialise(self, **kwargs):\n pass",
"def __init__(self, **kwargs):\n\n # call base class constructor registering that this tool performs everything.\n Algorithm.__init__(\n self,\n performs_projection = True,\n use_projected_features_for_enrollment = True,\n requires_enroller_training = True\n )",
"def initiate(self):\n\n for item in config.WEATHER_PROVIDERS[self.title]:\n self.__setattr__(item, config.WEATHER_PROVIDERS[self.title][item])\n\n # RP5 and Sinoptik have same URLs for hourly and next day weather info\n if self.title in ('RP5', 'Sinoptik'):\n self.URL_hourly = self.URL\n self.URL_next_day = self.URL\n\n self.logger = self._get_logger(self.title, self.app.args.verbosity)",
"def __init__(self):\r\n\t\tself.label = \"Linked Data Location Linkage Exploration\"\r\n\t\tself.description = \"\"\"This Tool enables the users to explore the linkages between locations in wikidata. \r\n\t\tGiven an input feature class, this tool gets all properties whose objects are also locations. \r\n\t\tThe output is another feature class which contains the locations which are linked to the locations of input feature class.\"\"\"\r\n\t\tself.canRunInBackground = False",
"def SetupKeywords(self):\n kwlist = u\" \".join(self._keywords)\n self.SetKeyWords(0, kwlist)",
"def _init_locators(self):\n try:\n version = int(float(self.get_latest_api_version()))\n self.builtin.set_suite_metadata(\"Salesforce API Version\", version)\n locator_module_name = \"locators_{}\".format(version)\n\n except RobotNotRunningError:\n # We aren't part of a running test, likely because we are\n # generating keyword documentation. If that's the case we'll\n # use the latest supported version\n here = os.path.dirname(__file__)\n files = sorted(glob.glob(os.path.join(here, \"locators_*.py\")))\n locator_module_name = os.path.basename(files[-1])[:-3]\n\n self.locators_module = importlib.import_module(\n \"cumulusci.robotframework.\" + locator_module_name\n )\n lex_locators.update(self.locators_module.lex_locators)",
"def initialize(self, **kwargs):",
"def initialize(context):",
"def initialize(context):",
"def initialize(context):",
"def initialize(context):",
"def initialize(context):",
"def initialize(context):",
"def initialize(context):",
"def initialize(context):",
"def initialize(context):",
"def initialize(context):",
"def initialize(context):",
"def initialize(context):",
"def initialize(context):",
"def initialize(context):",
"def initialize(context):",
"def initialize(context):"
] | [
"0.6045109",
"0.57539535",
"0.5687557",
"0.5604759",
"0.5474559",
"0.54511374",
"0.5398299",
"0.5344921",
"0.5298376",
"0.5295734",
"0.5262278",
"0.52585125",
"0.52350414",
"0.5218184",
"0.5204428",
"0.5204428",
"0.5204428",
"0.5204428",
"0.5204428",
"0.5204428",
"0.5204428",
"0.5204428",
"0.5204428",
"0.5204428",
"0.5204428",
"0.5204428",
"0.5204428",
"0.5204428",
"0.5204428",
"0.5204428"
] | 0.77248496 | 0 |
Set the locale for fake data. This sets the locale for all calls to the ``Faker`` keyword and ``${faker}`` variable. The default is en_US. For a list of supported locales see | def set_faker_locale(self, locale):
try:
self._faker = faker.Faker(locale)
except AttributeError:
raise Exception(f"Unknown locale for fake data: '{locale}'") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setLocale(self, value):\n return self._set(locale=value)",
"def set_locale_de():\n try:\n if platform.system() == \"Windows\":\n locale.setlocale(locale.LC_ALL, \"German\")\n else:\n locale.setlocale(locale.LC_ALL, \"de_DE.utf8\")\n except locale.Error:\n pass",
"def setUp(self):\n super().setUp()\n translation.activate(\"en-us\")",
"def with_locale(self, locale):\n self.__locale = locale\n return self",
"def _initializeLocale():\n \n if sys.platform == constants.WIN32:\n locale.setlocale(locale.LC_ALL, \"\")\n else:\n if constants.LC_ALL in os.environ:\n try:\n locale.setlocale(locale.LC_ALL, os.environ[constants.LC_ALL])\n return\n except locale.Error:\n # First try did not work, encoding must be set first then set locale.\n pass\n languageCode, encoding = locale.getdefaultlocale()\n if languageCode is None:\n languageCode = \"en_US\"\n # Set the encoding of the Python environment if no encoding is set.\n if encoding is None:\n encoding = constants.UTF8\n if encoding.lower() == \"utf\":\n encoding = constants.UTF8\n try:\n locale.setlocale(locale.LC_ALL, \"%s.%s\" % (languageCode, encoding))\n except locale.Error:\n try:\n locale.setlocale(locale.LC_ALL, \"en_US.UTF-8\")\n except locale.Error:\n locale.setlocale(locale.LC_ALL, \"C\")",
"def setPortalLocale( self ):\n info = getLanguageInfo( self )\n\n # find default and effective locale settings\n def_locale = info.get( sys.platform + '_locale' ) or info.get( os.name + '_locale' )\n cur_locale = getlocale()\n cur_locale = None not in cur_locale and '.'.join( cur_locale ) or ''\n\n # check whether locale is already ok\n if def_locale is None or cur_locale.lower() == def_locale.lower():\n return\n\n # change effective locale\n try:\n setlocale( LC_ALL, def_locale )\n except Exceptions.LocaleError:\n pass",
"def use_en(self):\n pass",
"def __init__(self, locale: Optional[str] = None,\n seed: Optional[Seed] = None) -> None:\n super().__init__(seed=seed)\n self.locale = setup_locale(locale)",
"def set_locale(cls, force=None):\n # disable i18n if config.locales array is empty or None\n if not config.locales:\n return None\n # 1. force locale if provided\n locale = force\n if locale not in config.locales:\n # 2. retrieve locale from url query string\n locale = cls.request.get(\"hl\", None)\n if locale not in config.locales:\n # 3. retrieve locale from cookie\n locale = cls.request.cookies.get('hl', None)\n if locale not in config.locales:\n # 4. retrieve locale from accept language header\n locale = get_locale_from_accept_header(cls.request)\n if locale not in config.locales:\n # 5. detect locale from IP address location\n territory = get_territory_from_ip(cls) or 'ZZ'\n locale = str(Locale.negotiate(territory, config.locales))\n if locale not in config.locales:\n # 6. use default locale\n locale = i18n.get_store().default_locale\n i18n.get_i18n().set_locale(locale)\n # save locale in cookie with 26 weeks expiration (in seconds)\n cls.response.set_cookie('hl', locale, max_age = 15724800)\n return locale",
"def get_locale_for_user(self):\n return 'en_US' # TODO(psimakov): choose proper locale from profile",
"def test_momentjs_locale(self):\n with translation.override('no-no'):\n self.assertEqual(\n context_processors.momentjs_locale(True), {\n 'MOMENTJS_LOCALE_URL': None,\n }\n )\n\n with translation.override('en-us'):\n self.assertEqual(\n context_processors.momentjs_locale(True), {\n 'MOMENTJS_LOCALE_URL': None,\n }\n )\n\n with translation.override('de'):\n self.assertEqual(\n context_processors.momentjs_locale(True), {\n 'MOMENTJS_LOCALE_URL': 'misago/momentjs/de.js',\n }\n )\n\n with translation.override('pl-de'):\n self.assertEqual(\n context_processors.momentjs_locale(True), {\n 'MOMENTJS_LOCALE_URL': 'misago/momentjs/pl.js',\n }\n )",
"def get_locale():\n return \"he\"",
"def InitLocale(self):\n self.ResetLocale()\n if 'wxMSW' in wx.PlatformInfo:\n import locale\n try:\n lang, enc = locale.getdefaultlocale()\n self._initial_locale = wx.Locale(lang, lang[:2], lang)\n # locale.setlocale(locale.LC_ALL, lang)\n # locale.setlocale(locale.LC_ALL, 'C')\n with open('./launch.log', 'a') as fp:\n fp.write(f'wxApp_LocaleFix.InitLocale: lang = {lang}\\n')\n print(lang)\n except (ValueError, locale.Error) as ex:\n target = wx.LogStderr()\n orig = wx.Log.SetActiveTarget(target)\n with open('./launch.log', 'a') as fp:\n fp.write(f'wxApp_LocaleFix.InitLocale:except-0 Unable to set default locale: \\'{ex}\\'\\n')\n print(\"Unable to set default locale: '{}'\".format(ex))\n wx.LogError(\"Unable to set default locale: '{}'\".format(ex))\n wx.Log.SetActiveTarget(orig)\n try:\n locale.setlocale(locale.LC_ALL, lang.replace('_', '-'))\n except (ValueError, locale.Error) as ex:\n locale.setlocale(locale.LC_ALL, lang.replace('-', '_'))\n target = wx.LogStderr()\n orig = wx.Log.SetActiveTarget(target)\n with open('./launch.log', 'a') as fp:\n fp.write(f'wxApp_LocaleFix.InitLocale:except-1 Unable to set default locale: \\'{ex}\\'\\n')\n print(\"Unable to set default locale: '{}'\".format(ex))\n wx.LogError(\"Unable to set default locale: '{}'\".format(ex))\n wx.Log.SetActiveTarget(orig)",
"def set_default_language(language_code):\n thread_locals.DEFAULT_LANGUAGE = language_code",
"def locale(self):\n return self.__locale",
"def locale_factory(factory):\n global _get_locale\n _get_locale = factory\n return factory",
"def use_locale(self, collation):\n loc = locale.getlocale(locale.LC_COLLATE)\n if collation == UNICODE_CODEPOINT_COLLATION:\n collation = 'en_US.UTF-8'\n\n try:\n locale.setlocale(locale.LC_COLLATE, collation)\n except locale.Error:\n raise self.error('FOCH0002', 'Unsupported collation %r' % collation)\n else:\n yield\n finally:\n locale.setlocale(locale.LC_COLLATE, loc)",
"def __init__(self):\n locale_dir = resource_filename(__name__, 'locale')\n add_domain(self.env.path, locale_dir)",
"def get_locale(self):\n return self.locale",
"def _apply_localization(self, request):\n request.culture = self._get_culture_for_request(request)\n return self",
"def test_set_custom_language(self):\r\n \r\n self._invoice_manager.create(\r\n client_id = self._test_client.key().id(), \r\n invoice_no = '2011/26', \r\n invoice_date = date.today(), \r\n sale_date = date.today(),\r\n language_id = self._test_language.key().id()\r\n )\r\n \r\n self._add_invoice_item(description = 'sample item1', quantity = 1.0, unit_price = 10.0)\r\n self._add_invoice_item(description = 'sample item2', quantity = 2.0, unit_price = 15.0)\r\n \r\n # Save the invoice\r\n self._invoice_manager.save()\r\n \r\n invoice = self._invoice_manager.find_invoice_by_id(self._invoice_manager._invoice.key().id())\r\n self.assertEqual(self._test_language.key(), invoice.language.key(), 'Language does not match')",
"def set_i18n(lang, language=None):\n import gettext\n import locale\n import warnings\n import os\n\n try:\n locale.setlocale(locale.LC_ALL, lang)\n locale.setlocale(locale.LC_MESSAGES, language or lang)\n os.environ[\"LANG\"] = lang\n os.environ[\"LANGUAGE\"] = language or lang.split(\".\")[0]\n except locale.Error:\n warnings.warn(f\"locale is not supported: {lang}\")\n gettext.bindtextdomain(\"messages\", localedir=LOCALEDIR)",
"def get_locale(self):\n\n return to_locale(settings.LANGUAGE_CODE).replace(\"_\", \"-\")",
"def set_utf8_locale():\n lang, encoding = locale.getlocale()\n if encoding != 'UTF-8':\n locale.setlocale(locale.LC_CTYPE, (lang, 'UTF-8'))",
"def default_locale(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"default_locale\")",
"def default_locale(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"default_locale\")",
"def test_use_default_language_and_currency(self):\r\n self._invoice_manager.create(\r\n client_id = self._test_client.key().id(), \r\n invoice_no = '2011/44', \r\n invoice_date = date.today(), \r\n sale_date = date.today(),\r\n )\r\n \r\n self._add_invoice_item(description = 'sample item1', quantity = 1.0, unit_price = 10.0)\r\n self._add_invoice_item(description = 'sample item2', quantity = 2.0, unit_price = 15.0)\r\n \r\n # Save the invoice\r\n self._invoice_manager.save()\r\n\r\n invoice = self._invoice_manager.find_invoice_by_id(self._invoice_manager._invoice.key().id())\r\n self.assertEqual(self._test_client.default_language.key(), invoice.language.key(), 'Language does not match')\r\n self.assertEqual(self._test_client.default_currency.key(), invoice.currency.key(), 'Currency does not match')",
"def set_default_language(lang):\n\tif frappe.db.get_default(\"lang\") != lang:\n\t\tfrappe.db.set_default(\"lang\", lang)\n\tfrappe.local.lang = lang",
"def set_default_language(lang):\n\tif frappe.db.get_default(\"lang\") != lang:\n\t\tfrappe.db.set_default(\"lang\", lang)\n\tfrappe.local.lang = lang",
"def locale(self) -> \"Locale\":\n raise NotImplementedError"
] | [
"0.6244255",
"0.623005",
"0.62105423",
"0.62015533",
"0.6130801",
"0.60855186",
"0.6064039",
"0.587886",
"0.5830364",
"0.5727264",
"0.5697302",
"0.5605824",
"0.5514231",
"0.543558",
"0.5397802",
"0.5365363",
"0.5353766",
"0.5340136",
"0.52725",
"0.52648044",
"0.52474874",
"0.52052456",
"0.51660645",
"0.5141552",
"0.5121084",
"0.5121084",
"0.5104653",
"0.5091607",
"0.5091607",
"0.50854135"
] | 0.7908526 | 0 |
Call the Create Webdriver keyword. Retry on connection resets, which can happen if custom domain propagation is slow. | def create_webdriver_with_retry(self, *args, **kwargs):
# Get selenium without referencing selenium.driver which doesn't exist yet
selenium = self.builtin.get_library_instance("SeleniumLibrary")
for _ in range(12):
try:
return selenium.create_webdriver(*args, **kwargs)
except ProtocolError:
# Give browser some more time to start up
time.sleep(5)
raise Exception("Could not connect to remote webdriver after 1 minute") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_driver(self, config):\n raise NotImplementedError(\"Must override WebAccess::_create_driver.\")",
"def _instantiate_driver(self) -> webdriver:\n\n if self.driver is None: return Browser.run_chromedriver()\n\n return self.driver",
"def create_driver(self, random_proxy, login):\n\n # proxyArgsList = read_proxies_file('proxies.txt')\n # proxy = random.choice(proxyArgsList)\n chrome_options = webdriver.ChromeOptions()\n\n # chrome_options.add_argument(\"--headless\")\n chrome_options.add_argument(\"--proxy-auto-detect\")\n chrome_options.add_argument(\"--no-sandbox\")\n chrome_options.add_argument(\"--window-size=1920x1080\")\n chrome_options.add_argument('--disable-gpu')\n\n chrome_options.add_argument('--ignore-certificate-errors')\n # chrome_options.add_extension('lib/extension_4_1_0_0.crx')\n chrome_options.add_extension(self.getPlugin(proxy_host=random_proxy['proxy_host'], proxy_port=random_proxy['proxy_port'], proxy_user='kimnt93',\n proxy_pass='147828'))\n\n\n web_driver = webdriver.Chrome(executable_path=\"lib/chromedriver\",\n options=chrome_options)\n\n if login == True:\n account = [i.split(\"\\t\") for i in open('account.txt', 'r').readlines()]\n # LOGIN by temp-mail\n web_driver.get('https://auth2.glosbe.com/login')\n while 1:\n acc = random.choice(account)\n try:\n web_driver.find_element_by_css_selector('#username').send_keys(str(acc[0]))\n web_driver.find_element_by_css_selector('#password').send_keys(str(acc[1]))\n web_driver.find_element_by_name('submit').click()\n break\n except NoSuchElementException as a:\n web_driver.get('https://auth2.glosbe.com/login')\n\n return web_driver",
"def __setup_driver(driver_type: str) -> webdriver:\n if driver_type == \"chrome\":\n return __setup_chrome()\n if driver_type == \"edge\":\n return __setup_edge()\n if driver_type == \"safari\":\n return __setup_safari()\n if driver_type == \"firefox\":\n return __setup_firefox()",
"def init_webdriver(driver_name=None, headless=False):\n\n if driver_name == \"chrome\":\n try:\n #try to connect with chrome driver\n driver = connect_chrome_driver(headless)\n except:\n print_pretty(\"Sorry, you can't use chrome driver, please try another driver!\")\n sys.exit(0)\n elif driver_name == \"ie\":\n try:\n #try to connect with ie driver\n driver = connect_ie_driver(headless)\n except:\n print_pretty(\"Sorry, you can't use internet explorer driver, please try another driver!\")\n sys.exit(0)\n elif driver_name == \"firefox\":\n try:\n #try to connect with firefox driver\n driver = connect_firefox_driver(headless)\n except:\n print_pretty(\"sorry, you can't use firefox driver, please try another driver!\")\n sys.exit(0)\n else:\n print_pretty(\"No browser selected, please choose 'chrome', 'ie' or 'firefox'\")\n return None\n\n print_pretty(\"Selenium driver\", driver_name, \"sucessfully initialised\")\n return driver",
"def test_from_crawler_method_should_initialize_the_driver(self):\n\n crawler = Crawler(\n spidercls=self.spider_klass,\n settings=self.settings\n )\n selenium_middleware = SeleniumMiddleware.from_crawler(crawler)\n\n # The driver must be initialized\n self.assertIsNotNone(selenium_middleware.driver)\n\n # We can now use the driver\n selenium_middleware.driver.get('http://www.python.org')\n self.assertIn('Python', selenium_middleware.driver.title)\n\n selenium_middleware.driver.close()",
"def setup(request):\n web_driver = __setup_driver(request.param)\n request.cls.driver = web_driver\n failed_before = request.session.testsfailed\n yield\n if request.session.testsfailed != failed_before:\n test_name = request.node.name\n __take_screenshot(web_driver, test_name)\n __clean_up_server_details_file()\n web_driver.close()\n web_driver.quit()",
"def init_driver(self):\n # Set UserAgent to prevent issues with blocking bot\n self.chrome_options.add_argument(\n \"user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36\")\n # Set headless\n if self.headless:\n self.chrome_options.add_argument('headless')\n # Initiate driver\n driver = webdriver.Chrome(\n self.chromedriver_path + \"/chromedriver\", options=self.chrome_options)\n self.driver = driver\n self.wait = WebDriverWait(self.driver, 20)\n return self.driver",
"def set_up_driver(self):\r\n\t\ttry:\r\n\t\t\tself.driver = webdriver.Firefox()\r\n\t\texcept Exception:\r\n\t\t\tself.driver = False",
"def setUp(self):\r\n self.verificationErrors = []\r\n self.selenium = (selenium(selvars.set_localhost(), selvars.set_port(), selvars.set_browser(self.id(),self.shortDescription()), selvars.set_site()))\r\n self.selenium.start()\r\n self.session = self.selenium.sessionId\r\n self.selenium.set_timeout(testvars.timeout)",
"def __init__(self, base_url=\"\",\n driver=None,\n driver_kwargs=None,\n enable_highlighting=False,\n enable_screen_shot_highlight=False,\n implicit_wait=0,\n think_time=0,\n download_folder='',\n screen_shots_folder='',\n highlighting_color='',\n highlighting_size=0,\n highlighting_time=0,\n remote_ip=None,\n remote_port=None,\n desired_capability=None,\n headless=None,\n display_width=None,\n display_height=None\n ):\n\n if isinstance(driver, str):\n driver = get_driver_from_name(driver)\n\n if driver is None:\n driver = get_driver_from_name(settings.DEFAULT_DRIVER_EXECUTABLE)\n\n self.web_driver = driver\n self._driver_kwargs = driver_kwargs or {}\n self.driver = None # type: WebDriver\n\n self.url = base_url if base_url.startswith('http') else (\"http://\" + base_url) if base_url else ''\n self.accept_next_alert = True\n\n self.download_folder = download_folder or settings.DOWNLOAD_FOLDER\n self.implicit_wait = implicit_wait or settings.IMPLICIT_WAIT\n screen_shots_folder = screen_shots_folder or os.path.join(self.download_folder, settings.SCREEN_SHOTS_FOLDER)\n self.screen_shot_folder = os.path.realpath(screen_shots_folder)\n self.think_time = think_time\n\n self._is_driver_open = False\n self._default_find_element_method = self._default_find_elements_method = None\n\n self._enable_highlighted_find_elements = enable_highlighting or settings.ENABLE_HIGHTLIGHTING\n self._screen_shot_highlights = enable_screen_shot_highlight or settings.ENABLE_SCREENSHOT_HIGHTLIGHTING\n self._highlighting_color = highlighting_color or settings.HIGHLIGHTING_COLOR\n self._highlighting_size = highlighting_size or settings.HIGHLIGHTING_SIZE\n self._highlighting_time = highlighting_time or settings.HIGHLIGHTING_TIME\n\n\n # headless vars\n self._headless = headless or settings.HEADLESS\n self._display_height = display_height\n self._display_width = display_width\n\n self._create_download_shots_folders()\n\n self._remote_ip = remote_ip\n self._remote_port = remote_port or settings.REMOTE_PORT\n self._desired_capability = desired_capability\n\n self._jquery_available = self._jquery_checked = False",
"def start(self):\n # iPhone\n #driver = webdriver.Remote(browser_name=\"iphone\", command_executor='http://172.24.101.36:3001/hub')\n # Android\n #driver = webdriver.Remote(browser_name=\"android\", command_executor='http://127.0.0.1:8080/hub')\n # Google Chrome \n #driver = webdriver.Chrome()\n # Firefox \n #FirefoxProfile fp = new FirefoxProfile();\n #fp.setPreference(\"webdriver.load.strategy\", \"unstable\");\n #WebDriver driver = new FirefoxDriver(fp);\n \n #driver = webdriver.Firefox(firefox_profile=self.disableImages())\n driver = webdriver.Firefox()\n \n self.driver = driver",
"def start(self):\n # iPhone\n #driver = webdriver.Remote(browser_name=\"iphone\", command_executor='http://172.24.101.36:3001/hub')\n # Android\n #driver = webdriver.Remote(browser_name=\"android\", command_executor='http://127.0.0.1:8080/hub')\n # Google Chrome \n #driver = webdriver.Chrome()\n # Firefox \n #FirefoxProfile fp = new FirefoxProfile();\n #fp.setPreference(\"webdriver.load.strategy\", \"unstable\");\n #WebDriver driver = new FirefoxDriver(fp);\n \n #driver = webdriver.Firefox(firefox_profile=self.disableImages())\n driver = webdriver.Firefox()\n \n self.driver = driver",
"def setUpClass(cls):\n super().setUpClass()\n options = Options()\n options.headless = True\n cls.selenium = WebDriver(options=options)\n cls.selenium.implicitly_wait(10)",
"def restart_crawler(self, delay_time=0):\n self.close()\n time.sleep(delay_time) \n self.driver = webdriver.Chrome(chrome_options=self.chrome_option)",
"def prepare_work(self):\n self.driver.get(self.BaseUrl)\n self.driver.add_cookie(cookie)\n self.driver.refresh()\n self.base_handle = self.driver.current_window_handle",
"def navegador():\n try:\n return webdriver.Chrome()\n except SessionNotCreatedException as e:\n print(\"Actualizando Chromedriver\")\n chromedriver_autoinstaller.install()\n return webdriver.Chrome()\n except WebDriverException as e:\n print(\"Instalando Chromedriver\")\n chromedriver_autoinstaller.install()\n return webdriver.Chrome()",
"def navegador():\n try:\n return webdriver.Chrome()\n except SessionNotCreatedException:\n print(\"Actualizando Chromedriver\")\n chromedriver_autoinstaller.install()\n return webdriver.Chrome()\n except WebDriverException:\n print(\"Instalando Chromedriver\")\n chromedriver_autoinstaller.install()\n return webdriver.Chrome()",
"def new_driver(name=\"chrome\"):\n if not name in DRIVERS:\n raise Exception(\"No driver support for '%s'\" % name)\n return DRIVERS[name]()",
"def run(self):\n try:\n\n # set the arguments and options\n chromeOptions = Options()\n prefs = {\"profile.managed_default_content_settings.images\": 2}\n chromeOptions.add_experimental_option(\"prefs\", prefs)\n chromeOptions.add_experimental_option(\"excludeSwitches\", [\"enable-logging\"])\n chromeOptions.add_argument(\"--headless\")\n chromeOptions.add_argument(\"--blink-settings=imagesEnabled=false\")\n chromeOptions.add_argument(\"--disable-popup-blocking\")\n chromeOptions.add_argument(\"--ignore-certificate-errors\")\n chromeOptions.add_argument(\"--allow-insecure-localhost\")\n chromeOptions.add_argument(\"--allow-running-insecure-content\")\n chromeOptions.accept_untrusted_certs = True\n chromeOptions.assume_untrusted_cert_issuer = True\n service_args = [\"hide_console\"]\n currentPath = (\n os.path.dirname(os.path.abspath(__file__))\n + \"\\\\ChromeDriver\\\\\"\n + chrome_browser_version\n + \"\\\\chromedriver.exe\"\n )\n\n while True:\n try:\n print(\"\\nCalling Driver\")\n\n # Creating an instance of chrome\n driver = Chrome(\n executable_path=currentPath,\n options=chromeOptions,\n service_args=service_args,\n )\n print(\"Driver Called\")\n driver.set_page_load_timeout(10)\n driver.delete_all_cookies()\n\n # open a page\n driver.get(\"Enter Checking Site Here\")\n print(\"Getting Site\")\n try:\n \"\"\"\n \n remove this try except if the your wifi doesn't block websites\n \n \"\"\"\n\n # xpath if the website is blocked\n element = driver.find_element_by_xpath(\n \"Enter xpath to an element in the blocked page\"\n )\n print(\"Site Blocked\\n\")\n\n except:\n try:\n # xpath to any thing in the website to make sure you are connected to the internet\n element = driver.find_element_by_xpath(\n \"/Enter xpath to an element in the page\"\n )\n print(\"Site Opening\\n\")\n\n except:\n try:\n \"\"\"\n \n if your portal doesn't have auto redirect, uncomment the following line and type in your login url\n \n \"\"\"\n\n # driver.get(\"Paste Login Webiste URL Here\")\n\n # change the ids to those in your login website\n # you can use developer mode to find the id of fields (use ctrl + shift + i)\n # change the username and password to the required one\n print(\"Trying To Login\")\n # select usnername field\n element = driver.find_element_by_id(\n \"Ending id of user input field\"\n )\n print(\"User Found\")\n element.send_keys(\"Enter username\")\n print(\"User Inputted\")\n # select password field\n element = driver.find_element_by_id(\n \"Ending id of password input field\"\n )\n print(\"Passwprd Found\")\n element.send_keys(\"Enter password\")\n print(\"Password Inputted\")\n # select submit button\n element = driver.find_element_by_id(\n \"Enter id of submit button\"\n )\n print(\"Button Found\")\n element.click()\n print(\"Logged In\\n\")\n # except NoSuchElementException as ex:\n # print(\"Can't Login\")\n # event.wait(120)\n except Exception as ex:\n print(\n \"Can't login:\\t\\tAn exception of type {0} occurred. Arguments:\\n{1}\".format(\n type(ex).__name__, ex.args\n )\n )\n event.wait(60)\n continue\n\n except Exception as ex:\n print(\n \"Error in loop:\\t\\tAn exception of type {0} occurred. Arguments:\\n{1}\".format(\n type(ex).__name__, ex.args\n )\n )\n try:\n driver.quit()\n except Exception as ex:\n print(\n \"Error in Quitting:\\t\\tAn exception of type {0} occurred. 
Arguments:\\n{1}\".format(\n type(ex).__name__, ex.args\n )\n )\n\n event.wait(60)\n continue\n\n try:\n driver.quit()\n except Exception as ex:\n print(\n \"Error in Quitting in loop:\\t\\tAn exception of type {0} occurred. Arguments:\\n{1}\".format(\n type(ex).__name__, ex.args\n )\n )\n event.wait(300)\n continue\n\n except Exception as ex:\n print(\n \"Error outside loop:\\t\\tAn exception of type {0} occurred. Arguments:\\n{1}\".format(\n type(ex).__name__, ex.args\n )\n )\n\n finally:\n try:\n driver.quit()\n except Exception as ex:\n print(\n \"Error in Quitting in final:\\t\\tAn exception of type {0} occurred. Arguments:\\n{1}\".format(\n type(ex).__name__, ex.args\n )\n )\n finally:\n print(\"Login Thread Exited\")",
"def _NewConnection(self, *args, **kwargs):\n kwargs.setdefault('dhcp_method', Connection.DHCP_DHCLIENT)\n kwargs.setdefault('dhclient_script_path', self._DHCLIENT_SCRIPT_PATH)\n\n # Disables the wpasupplicant service, which seems to interfere with\n # the device during connection. We make the assumption that wpasupplicant\n # will not be used by other parts of the factory test flow.\n # We add a sleep because it seems that if we continue bringing up the\n # WLAN interface directly afterwards, it has a change of being brought\n # right back down (either by wpasupplicant or something else).\n # TODO(kitching): Figure out a better way of either (a) disabling these\n # services temporarily, or (b) using Chrome OS's Shill to make the\n # connection.\n service = 'wpasupplicant'\n return_code = self._device.Call(['stop', service])\n if return_code == 0:\n logging.warning('Service %s does not stop before NewConnection. Add '\n '\"exclusive_resources\": [\"NETWORK\"] to testlist if you '\n 'want to revive %s after test.', service, service)\n time.sleep(0.5)\n return Connection(*args, **kwargs)",
"def __init__(self, username, password):\n self.username = username\n self.password = password\n self.base_url = 'https://live.remo.co'\n self.guests = []\n\n self.random_string = ''\n for _ in range (randint(12, 24)): self.random_string += 'abcdefghijklmnopqrstuvwxyz'[randint(0, 25)]\n\n\n\n if OS == 'nt': \n # op = webdriver.ChromeOptions()\n # op.add_argument('headless')\n # self.driver = webdriver.Chrome('chromedriver.exe',options=op)\n self.driver = webdriver.Chrome('chromedriver.exe')\n else: \n self.driver = webdriver.Chrome()\n self.login()",
"def connect():\n\n driver = webdriver.Chrome(driver_exe) # Run the simulated chrome driver\n driver.get(url) # go to the whatsapp web page\n driver.implicitly_wait(10) # wait a little to make sure the page loads\n return driver",
"def __init__(self,\n geckodriver_path = None,\n headless_browser = False,\n disable_image_load = False,\n page_delay = 10,\n logfolder = 'logs/',\n user_agent = None,\n proxy_address = None,\n proxy_port = None,\n proxy_username = None,\n proxy_password = None,\n limit_scropes = None,\n exclude_hosts = None,\n browser_profile_path = None,\n browser_executable_path = None,\n geckodriver_log_level = 'info'\n ):\n firefox_options = Firefox_Options()\n seleniumwire_options = {}\n\n if headless_browser:\n firefox_options.add_argument(\"-headless\")\n\n if browser_profile_path is not None:\n firefox_profile = webdriver.FirefoxProfile(browser_profile_path)\n else:\n firefox_profile = webdriver.FirefoxProfile()\n\n if browser_executable_path is not None:\n firefox_options.binary = browser_executable_path\n\n\n # set \"info\" by default\n # set \"trace\" for debubging, Development only\n firefox_options.log.level = geckodriver_log_level\n\n # set English language\n firefox_profile.set_preference(\"intl.accept_languages\", \"en-US\")\n\n # set User-Agent\n if user_agent is not None:\n firefox_profile.set_preference(\"general.useragent.override\", user_agent)\n\n if disable_image_load:\n # permissions.default.image = 2: Disable images load,\n # this setting can improve pageload & save bandwidth\n firefox_profile.set_preference(\"permissions.default.image\", 2)\n \n # mute audio while watching stories\n firefox_profile.set_preference(\"media.volume_scale\", \"0.0\")\n\n # prevent Hide Selenium Extension: error\n firefox_profile.set_preference(\"dom.webdriver.enabled\", False)\n firefox_profile.set_preference(\"useAutomationExtension\", False)\n firefox_profile.set_preference(\"general.platform.override\", \"iPhone\")\n firefox_profile.update_preferences()\n\n # geckodriver log in specific user logfolder\n geckodriver_log = \"{}geckodriver.log\".format(logfolder)\n check_and_create_file(geckodriver_log)\n\n\n # The list exclude hosts for capturing\n if exclude_hosts:\n seleniumwire_options['exclude_hosts'] = exclude_hosts\n\n # Add proxy with username and password authentication\n if proxy_address and proxy_port:\n if proxy_username and proxy_password:\n seleniumwire_options['proxy'] = {\n 'http': f'http://{proxy_username}:{proxy_password}@{proxy_address}:{proxy_port}',\n 'https': f'https://{proxy_username}:{proxy_password}@{proxy_address}:{proxy_port}',\n 'no_proxy': 'localhost,127.0.0.1'\n }\n else:\n seleniumwire_options['proxy'] = {\n 'http': f'http://{proxy_address}:{proxy_port}',\n 'https': f'https://{proxy_address}:{proxy_port}',\n 'no_proxy': 'localhost,127.0.0.1'\n }\n\n\n\n self.driver = webdriver.Firefox(\n firefox_profile=firefox_profile,\n executable_path=geckodriver_path,\n log_path=geckodriver_log,\n options=firefox_options,\n seleniumwire_options=seleniumwire_options\n )\n # Limit capture urls with regulater expression\n if limit_scropes:\n self.driver.scopes = limit_scropes\n\n # Set implicitly wait\n self.driver.implicitly_wait(page_delay)\n\n # Set maximum windows\n self.driver.maximize_window()",
"def setUp(self):\n\n self.driver = WebDriver(\n \"http://{0}:{1}/wd/hub\".format(swt.config.ADDRESS, swt.config.SELENIUM_SERVER_PORT),\n self._browser_capabilities,\n proxy=self.proxy.selenium_proxy()\n )\n swt.active_driver = self.driver",
"def setUpClass(cls) -> None:\n desired_caps: dict[str, Any] = {}\n desired_caps[\"app\"] = \"plasmawindowed -p org.kde.plasma.nano org.kde.plasma.systemtray\"\n desired_caps[\"timeouts\"] = {'implicit': 10000}\n cls.driver = webdriver.Remote(command_executor='http://127.0.0.1:4723', desired_capabilities=desired_caps)\n cls.driver.implicitly_wait = 10",
"def getWebDriverInstance(self):\n #baseURL = \"https://qa-test.avenuecode.com/\"\n if self.device == \"ios_mobile\":\n self.driver = appiumdriver.Remote('http://localhost:4723/wd/hub', self.capabilities)\n self.driver.implicitly_wait(10)\n #return driver\n #driver.get(baseURL)\n\n else:\n print(\"Hello\")\n # Setting Driver Implicit Time out for An Element\n self.driver = appiumdriver.Remote('http://localhost:4723/wd/hub', self.capabilities)\n self.driver.implicitly_wait(10)\n return self.driver\n # Maximize the window\n #driver.maximize_window()\n # Loading browser with App URL\n #driver.get(baseURL)\n #return driver",
"def setup_webdriver(self):\n try:\n options = webdriver.ChromeOptions()\n options.add_argument(\"user-data-dir=\" + constants.PROFILE_PATH)\n driver = webdriver.Chrome(executable_path=constants.DRIVER_PATH,\n options=options)\n return driver\n except common.exceptions.WebDriverException:\n error_message = \"ERROR: Failed to load Chrome Driver. Check \"\\\n \"path in constants.py and make sure there are no open windows with the desired profile.\\n\"\n self.logger.log(error_message)\n return None\n except Exception:\n return None",
"def setUp(self):\n logging.info(\"## SETUP METHOD ##\")\n logging.info(\"# Initializing the webdriver.\")\n self.chprofile = self.create_chprofile()\n self.driver = webdriver.Chrome(self.chprofile)\n self.driver.maximize_window()\n self.driver.implicitly_wait(5)\n self.driver.get(\"http://the-internet.herokuapp.com/\")",
"def setUp(self):\n options = webdriver.ChromeOptions()\n\n # Define a custom User Agent\n user_agent = '--user-agent=\"' + CFG['user_agent'] + '\"'\n options.add_argument(user_agent)\n\n # Setup the full screen kiosk\n if CFG['kiosk']:\n options.add_argument('--kiosk')\n\n # Alternatively launch the webdriver Firefox browser\n # test whether Firefox loads the videos better\n\n # Launch a Chrome instance with the appropriate options\n chrome_paths = ('c:\\Program Files\\chromedriver.exe',\n 'c:\\Program Files (x86)\\chromedriver.exe')\n # Try to launch the Chrome driver without any path details\n try:\n self.driver = webdriver.Chrome(chrome_options=options)\n # If it raises an exception try looping through the path options\n except webdriver.chrome.webdriver.WebDriverException:\n for chrome_path in chrome_paths:\n try:\n self.driver = webdriver.Chrome(chrome_path,\n chrome_options=options)\n except webdriver.chrome.webdriver.WebDriverException:\n pass\n else:\n break"
] | [
"0.6135514",
"0.5874731",
"0.5788318",
"0.57601655",
"0.55990976",
"0.55637485",
"0.55528617",
"0.55183816",
"0.5475551",
"0.54655373",
"0.5433355",
"0.53960615",
"0.53960615",
"0.53876483",
"0.535044",
"0.53414094",
"0.53316253",
"0.53288877",
"0.529636",
"0.52818716",
"0.52788365",
"0.52691954",
"0.526027",
"0.52597874",
"0.52420723",
"0.52097917",
"0.518126",
"0.5178128",
"0.5154037",
"0.5148971"
] | 0.73880607 | 0 |
Scrolls down until the specified related list loads. | def load_related_list(self, heading):
locator = lex_locators["record"]["related"]["card"].format(heading)
el = None
i = 0
while el is None:
i += 1
if i > 50:
raise AssertionError(
"Timed out waiting for {} related list to load.".format(heading)
)
self.selenium.execute_javascript("window.scrollBy(0, 100)")
self.wait_for_aura()
try:
self.selenium.get_webelement(locator)
break
except ElementNotFound:
time.sleep(0.2)
continue | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scroll_to_end_by_class_name(driver, class_name, number_requested):\r\n eles = driver.find_elements_by_class_name(class_name)\r\n count = 0\r\n new_count = len(eles)\r\n\r\n while new_count != count:\r\n try:\r\n utils.update_progress(new_count / number_requested, f' - Scrolling down to load more items {new_count}/{number_requested}:')\r\n the_last_in_list = eles[-1]\r\n the_last_in_list.location_once_scrolled_into_view \r\n time.sleep(random.randint(15, 20) / 10) \r\n try:\r\n WebDriverWait(driver, timeout = 60).until(EC.visibility_of(the_last_in_list))\r\n except TimeoutException:\r\n pass \r\n count = new_count\r\n eles = driver.find_elements_by_class_name(class_name)\r\n new_count = len(eles)\r\n except TimeoutException :\r\n printR(f' Time out while scrolling down. Please retry.')\r\n except NoSuchElementException:\r\n pass\r\n if new_count < number_requested:\r\n utils.update_progress(1, f' - Scrolling down to load more items:{new_count}/{number_requested}')",
"def loadMoreItems(self, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):\r\n self.lastLoadOk = False\r\n self.lastLoadLength = 0\r\n if not continuation and not self.continuation:\r\n return\r\n self._itemsLoadedDone(self._getContent(excludeRead, continuation or self.continuation, loadLimit, since, until))",
"def fetch_more(self):\n results = self.find.execute(bookmark = self.bookmark)\n self.bookmark = results.bookmark\n self.queue.extend(results.docs)\n self.has_next_page = results.has_next_page\n return self",
"def scroll_to_end_by_class_or_tag_name(driver, number_requested, class_name= '', tag_name=''):\r\n if class_name:\r\n eles = driver.find_elements_by_class_name(class_name)\r\n elif tag_name:\r\n eles = driver.find_elements_by_tag_name(tag_name)\r\n\r\n count = 0\r\n new_count = len(eles)\r\n\r\n while new_count != count:\r\n try:\r\n utils.update_progress(new_count / number_requested, f' - Scrolling down to load more items {new_count}/{number_requested}:')\r\n the_last_in_list = eles[-1]\r\n the_last_in_list.location_once_scrolled_into_view \r\n time.sleep(random.randint(15, 20) / 10) \r\n try:\r\n WebDriverWait(driver, timeout = 60).until(EC.visibility_of(the_last_in_list))\r\n except TimeoutException:\r\n pass \r\n\r\n count = new_count\r\n if class_name:\r\n eles = driver.find_elements_by_class_name(class_name)\r\n elif tag_name:\r\n eles = driver.find_elements_by_tag_name(tag_name)\r\n new_count = len(eles)\r\n except TimeoutException :\r\n printR(f' Time out while scrolling down. Please retry.')\r\n except NoSuchElementException:\r\n pass\r\n if new_count >= number_requested:\r\n utils.update_progress(1, f' - Scrolling down to load more items:{number_requested}/{number_requested}')\r\n else:\r\n print(f' - Available items: {new_count}')\r\n return eles",
"def scroll_to_end_by_tag_name_within_element(driver, element, tag_name, number_requested, time_out = 20):\r\n eles = check_and_get_all_elements_by_tag_name(element, tag_name)\r\n count = 0\r\n new_count = len(eles)\r\n count_down_timer = time_out\r\n while new_count != count:\r\n try:\r\n utils.update_progress(new_count / number_requested, f' - Scrolling down to load more items {new_count}/{number_requested}:')\r\n the_last_in_list = eles[-1]\r\n the_last_in_list.location_once_scrolled_into_view \r\n time.sleep(1)\r\n try:\r\n WebDriverWait(driver, time_out).until(EC.visibility_of(the_last_in_list))\r\n except TimeoutException:\r\n pass \r\n\r\n count = new_count\r\n eles = check_and_get_all_elements_by_tag_name(element, tag_name)\r\n new_count = len(eles)\r\n\r\n # give the slow server a chance to load the new items \r\n while new_count == count and count_down_timer >= 0 and new_count < number_requested:\r\n utils.update_progress(count_down_timer / time_out, f' - Slow response from server. Counting down {count_down_timer}:')\r\n count_down_timer -= 1\r\n\r\n eles = check_and_get_all_elements_by_tag_name(element, tag_name)\r\n new_count = len(eles)\r\n the_last_in_list = eles[-1]\r\n the_last_in_list.location_once_scrolled_into_view \r\n time.sleep(1)\r\n\r\n except TimeoutException :\r\n printR(f' Time out ({time_out}s) while scrolling down. Please retry.')\r\n except NoSuchElementException:\r\n pass\r\n if new_count >= number_requested:\r\n utils.update_progress(1, f' - Scrolling down to load more items:{number_requested} / {number_requested}')\r\n else:\r\n # scroll down has finished, but the items obtained are less than requested. Show it\r\n utils.update_progress(1, f' - Scrolling down to load more items:{new_count} / {number_requested}')\r\n\r\n return eles",
"def scroll_down_till_limit(driver, platform):\n # Scroll page to load whole content\n last_height = 0\n while True:\n new_height = scroll_down_page(driver)\n # if no more scrolling possible\n if new_height == last_height:\n break\n # if specified point in past reached\n if is_date_reached(driver, platform):\n break\n\n last_height = new_height\n click_button_xpath(driver, platform)\n\n return driver",
"def scroll_down(fBody, driver):\n\toverflow = 0\n\textracted = 0\n\tdetection = 0\n\twhile True:\n\t\tdetection = extracted\n\t\tdriver.execute_script('arguments[0].scrollTop = arguments[0].scrollTop + arguments[0].offsetHeight;', fBody)\n\t\ttime.sleep(0.3)\n\t\textracted = len(driver.find_elements_by_xpath(\"//div[@class='isgrP']//li\"))\n\t\tif extracted == detection:\n\t\t\toverflow += 1\n\t\t\tif overflow >= 10: # break\n\t\t\t\tbreak\n\t\telse:\n\t\t\toverflow = 0\n\treturn extracted",
"def loadItems(self, excludeRead=False, loadLimit=20, since=None, until=None):\r\n self.clearItems()\r\n self.loadtLoadOk = False\r\n self.lastLoadLength = 0\r\n self._itemsLoadedDone(self._getContent(excludeRead, None, loadLimit, since, until))",
"def prefetch(self, oids):\n self.timeline.reset()\n self.timeline.start(\"prefetch\")\n fetch(oids)\n self.timeline.end(\"prefetch\")",
"def trigger_fetch_more(self):\n self.get_selected()\n self.manage_loading(loading=True)\n self.current_feed.fetch_more_content(unread_only=self.show_unread_only)\n self.manage_actions()",
"def fetch(self):\r\n if self.wp_op is None: # If we were already doing a list or save, just restart the fetch without changing the operation\r\n self.wp_op = \"fetch\"\r\n self.master.waypoint_request_list_send()",
"def scroll_down(self):\r\n self.driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')\r\n sleep(self.wait)",
"def scroll_to_bottom(self):\n while self.history.position < self.history.size:\n self.next_page()",
"def _populate(self):\n if not self._populated:\n logging.debug(\"Populating lazy list %d (%s)\" % (id(self), self.__class__.__name__))\n self.populate()\n self._populated = True",
"def scroll_to_bottom(self):\n expandable_button_selectors = [\n 'button[aria-expanded=\"false\"].pv-skills-section__additional-skills',\n 'button[aria-expanded=\"false\"].pv-profile-section__see-more-inline',\n 'button[aria-expanded=\"false\"].pv-top-card-section__summary-toggle-button',\n 'button[data-control-name=\"contact_see_more\"]'\n ]\n\n current_height = 0\n while True:\n for name in expandable_button_selectors:\n try:\n self.driver.find_element_by_css_selector(name).click()\n except:\n pass\n\n # Use JQuery to click on invisible expandable 'see more...' elements\n self.driver.execute_script(\n 'document.querySelectorAll(\".lt-line-clamp__ellipsis:not(.lt-line-clamp__ellipsis--dummy) .lt-line-clamp__more\").forEach(el => el.click())')\n\n # Scroll down to bottom\n new_height = self.driver.execute_script(\n \"return Math.min({}, document.body.scrollHeight)\".format(current_height + self.scroll_increment))\n if (new_height == current_height):\n break\n self.driver.execute_script(\n \"window.scrollTo(0, Math.min({}, document.body.scrollHeight));\".format(new_height))\n current_height = new_height\n # Wait to load page\n time.sleep(self.scroll_pause)",
"def _scroll (self):\n if self.policy is not None:\n self._scroll_fn(self, *self._policy_args)",
"def scroll_down(driver, scroll_pause_time = 0.5, number_of_scrolls = 10, estimate_scrolls_needed = 3, message = ''):\r\n if number_of_scrolls == 0 :\r\n return\r\n\r\n # Get scroll height\r\n last_height = driver.execute_script(\"return document.body.scrollHeight\")\r\n iteration_count = 0\r\n scrolls_count_for_stimulated_progressbar = 0\r\n while True:\r\n if number_of_scrolls == -1:\r\n # if we were able to give an estimate of number of scrolls needed (ex. number of photos, followers, friends are known)\r\n if estimate_scrolls_needed != -1: \r\n utils.update_progress(scrolls_count_for_stimulated_progressbar / estimate_scrolls_needed, message)\r\n # here, we dont know when it ends (for example, we ask for all notifications, but we don't know how many the 500px server will provide) \r\n else:\r\n notifications_loaded_so_far = scrolls_count_for_stimulated_progressbar * config.NOTIFICATION_PER_LOAD\r\n text = f'\\r{message} {str(notifications_loaded_so_far)}'\r\n sys.stdout.write(text)\r\n sys.stdout.flush()\r\n elif iteration_count > 0:\r\n utils.update_progress(iteration_count / number_of_scrolls, message)\r\n\r\n scrolls_count_for_stimulated_progressbar += 1\r\n\r\n # Scroll down to bottom\r\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\r\n\r\n # Wait for page to load\r\n time.sleep(scroll_pause_time)\r\n innerHTML = driver.execute_script(\"return document.body.innerHTML\") #make sure document javascript is executed\r\n\r\n # exit point #1 : when number of scrolls requested has been reached\r\n if number_of_scrolls != -1:\r\n iteration_count = iteration_count + 1\r\n if iteration_count >= number_of_scrolls:\r\n break\r\n\r\n # exit point #2: when all items are loaded (by calculating new scroll height and compare with last scroll height)\r\n # or when the server stop responding after the given sleep time (scroll_pause_time)\r\n new_height = driver.execute_script(\"return document.body.scrollHeight\")\r\n if new_height == last_height:\r\n break\r\n last_height = new_height\r\n\r\n # mark the end of the progress bar update \r\n if number_of_scrolls == -1 and estimate_scrolls_needed == -1: # indeterminate number of scrolls\r\n sys.stdout.write('\\r\\n') # end the progress update with a line-feed\r\n sys.stdout.flush()\r\n else:\r\n utils.update_progress(1, message) # force the display of \"100% Done\" \r\n \r\n time.sleep(scroll_pause_time)",
"def scroll(self, relative):\n if self.ui.browser and self.ui.browser.main_column:\n self.ui.browser.main_column.scroll(relative)\n self.thisfile = self.thisdir.pointed_obj",
"def __navigate_scroll(self):\n try:\n _title = self.browser.title\n _body = self.browser.find_element_by_tag_name('body')\n\n i = 0\n while i < 3:\n _html = str(self.browser.page_source)\n _content = Content(_html, _title)\n _attrs = _content.last_divs\n\n scroll_items = []\n for _attr in _attrs:\n xpath_string = '//div'\n\n for k, v in _attr.items():\n if not v:\n xpath_string = xpath_string + \"[@\" + str(k) + \"]\"\n else:\n if isinstance(v, list):\n _vstring = [\"contains(@\" + str(k) + \", '\" + str(_v) + \"')\" for _v in v]\n vstring = \" and \".join(_vstring)\n\n xpath_string = xpath_string + \"[\" + vstring + \"]\"\n\n div = self.browser.find_elements_by_xpath(xpath_string)\n\n for d in div: scroll_items.append(d)\n\n if len(scroll_items) > 10:\n j = 0\n while j < 10:\n try:\n self.browser.execute_script(\"arguments[0].scrollIntoView(true)\", scroll_items[j])\n self.browser.execute_script(\"arguments[0].scrollIntoView(true)\", scroll_items[0])\n time.sleep(1)\n j += 1\n except Exception as e:\n print(e)\n j += 1\n continue\n \n else:\n for item in scroll_items:\n try:\n self.browser.execute_script(\"arguments[0].scrollIntoView(true)\", item)\n self.browser.execute_script(\"arguments[0].scrollIntoView(true)\", scroll_items[0])\n _body.send_keys(Keys.HOME)\n time.sleep(1)\n except Exception as e:\n print(e)\n continue\n\n self.browser.execute_script(\"arguments[0].scrollIntoView(true)\", scroll_items[0])\n new_html = str(self.driver.page_source)\n new_content = Content(new_html, _title)\n new_attrs = new_content.last_divs\n\n i += 1\n if new_attrs == _attrs:\n break\n else:\n continue\n\n return self.browser.page_source\n\n except:\n return None",
"def scrollDown(self):\n if self.__firstShownLine < len(self.__data) - 1:\n self.__firstShownLine += 1\n self.__refreshContent()\n self.__printRow(self.__firstShownLine + self.height - 2)\n else:\n curses.beep()",
"def related(num):\n if g.browse_mode != \"normal\":\n g.message = \"Related items must refer to a specific video item\"\n g.message = c.y + g.message + c.w\n g.content = generate_songlist_display()\n return\n\n g.current_page = 0\n item = g.model.songs[int(num) - 1]\n related_search(item)",
"def scroll(self):\r\n SCROLL_PAUSE_TIME = 2\r\n current_scrolls = 0\r\n\r\n last_height = driver.execute_script(\"return document.body.scrollHeight\")\r\n while True:\r\n try:\r\n if current_scrolls == total_scroll:\r\n return\r\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\r\n time.sleep(SCROLL_PAUSE_TIME)\r\n\r\n new_height = driver.execute_script(\"return document.body.scrollHeight\")\r\n if new_height == last_height:\r\n break\r\n last_height = new_height\r\n except TimeoutException:\r\n break\r\n return",
"def page_down(self):\n counter = self.get_entry_count_per_screen()\n while counter != 0 and self.pointer < (len(self.contents)-1):\n logging.debug(\"moved down\")\n self.pointer += 1\n counter -= 1\n self.refresh()\n self.reset_scrolling()\n return True",
"def scrollY(self,yrel):\n # get the display size\n dispw, disph = c_int(), c_int()\n SDL_GetRendererOutputSize(self.rend,dispw,disph)\n\n # scroll vertically\n self.scroll += yrel\n\n # limit scrolling\n if self.scroll <= 0:\n self.scroll = 0\n if self.scroll+disph.value >= (len(self.itemList.items)+1)*150+178:\n self.scroll = (len(self.itemList.items)+1)*150+178-disph.value",
"def scroll_down():\r\n \r\n # Get scroll height.\r\n last_height = browser.execute_script(\"return document.body.scrollHeight\")\r\n \r\n while True:\r\n \r\n # Scroll down to the bottom.\r\n browser.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\r\n \r\n # Wait to load the page.\r\n time.sleep(2)\r\n \r\n # Calculate new scroll height and compare with last scroll height.\r\n new_height = browser.execute_script(\"return document.body.scrollHeight\")\r\n \r\n if new_height == last_height:\r\n break\r\n \r\n last_height = new_height",
"def scroll(*args):",
"def _scrolling_request(self, path, method='GET', body=None, headers=None):\n assert 'pagination' in body\n paginated_view = body\n url = '{}{}'.format(self._url_base, path)\n headers = self._headers() if headers is None else headers\n\n scrolling = True\n while scrolling:\n response, content = super(DSBaseService, self)._request(url,\n method,\n body=str(paginated_view).replace(\"'\", '\"'),\n headers=headers)\n\n if int(response['status']) == 200:\n data = json.loads(content)\n offset = data['currentPage']['offset']\n size = data['currentPage']['size']\n total = data['total']\n if offset + size < total:\n paginated_view['pagination']['offset'] = offset + size\n else:\n scrolling = False\n yield data\n elif int(response['status']) == 429:\n # rate limited, wait before resuming scroll requests\n time.sleep(1)\n else:\n scrolling = False",
"def scroll_to_bottom(self):\n # NOTE: this starts scrolling from the current scroll position, not the top of the page.\n current_height = self.driver.execute_script(\n \"return document.documentElement.scrollTop\")\n while True:\n self.click_expandable_buttons()\n # Scroll down to bottom in increments of self.scroll_increment\n new_height = self.driver.execute_script(\n \"return Math.min({}, document.body.scrollHeight)\".format(current_height + self.scroll_increment))\n if (new_height == current_height):\n break\n self.driver.execute_script(\n \"window.scrollTo(0, {});\".format(new_height))\n current_height = new_height\n # Wait to load page\n time.sleep(self.scroll_pause)",
"def lulz(self):\n self.reset()\n self.scrollproc = threading.Thread(target=self.lulzloop)\n self.killedevent.wait()\n self.scrollproc.start()",
"def scroll_down(driver):\n\n # Get scroll height.\n last_height = driver.execute_script(\n \"return document.body.scrollHeight\")\n\n while True:\n\n # Scroll down to the bottom.\n driver.execute_script(\n \"window.scrollTo(0, document.body.scrollHeight);\")\n\n # Wait to load the page.\n time.sleep(2)\n\n # Calculate new scroll height and compare with last scroll height.\n new_height = driver.execute_script(\n \"return document.body.scrollHeight\")\n\n if new_height == last_height:\n\n break\n\n last_height = new_height"
] | [
"0.5779658",
"0.55728656",
"0.5540163",
"0.55229944",
"0.5462364",
"0.5460338",
"0.5396183",
"0.5335078",
"0.53216785",
"0.527404",
"0.524986",
"0.5238928",
"0.5187637",
"0.51747584",
"0.51492304",
"0.51451564",
"0.51219696",
"0.5107789",
"0.5088162",
"0.5058758",
"0.5033915",
"0.50111085",
"0.5005951",
"0.49822316",
"0.49748462",
"0.4959317",
"0.4937188",
"0.4931412",
"0.4930964",
"0.489043"
] | 0.71188396 | 0 |
Clicks a button in the heading of a related list. Waits for a modal to open after clicking the button. | def click_related_list_button(self, heading, button_title):
self.load_related_list(heading)
locator = lex_locators["record"]["related"]["button"].format(
heading, button_title
)
self._jsclick(locator)
self.wait_until_modal_is_open() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def click_modal_button(self, title):\n locator = lex_locators[\"modal\"][\"button\"].format(title)\n self.selenium.wait_until_page_contains_element(locator)\n self.selenium.wait_until_element_is_enabled(locator)\n self._jsclick(locator)",
"def click_button(self):\n self.q(css='div#fixture button').first.click()",
"def click_button(self):\n self.q(css='div#fixture input').first.click()",
"def click_button(self):\n self.widgets.get('button').click()",
"def click_object_button(self, title):\n locator = lex_locators[\"object\"][\"button\"].format(title)\n self._jsclick(locator)\n self.wait_until_modal_is_open()",
"def click_related_item_popup_link(self, heading, title, link):\n self.load_related_list(heading)\n locator = lex_locators[\"record\"][\"related\"][\"popup_trigger\"].format(\n heading, title\n )\n\n self.selenium.wait_until_page_contains_element(locator)\n self._jsclick(locator)\n locator = lex_locators[\"popup\"][\"link\"].format(link)\n self._jsclick(locator)\n self.wait_until_loading_is_complete()",
"def check_modal(client):\n modal_close_btn_xpath = \"/html/body/div[9]/div[3]/div/button[1]\"\n\n try:\n modal_close_btn = wait(client, 20).until(\n EC.visibility_of_element_located((By.XPATH, modal_close_btn_xpath))\n ).click()\n\n except TimeoutException:\n pass",
"def select_ok_pop_up_item(self):\n if self.driver.wait_for_object(\"retargeting_data_ok_pop_up_btn\", raise_e=False):\n self.driver.click(\"retargeting_data_ok_pop_up_btn\")",
"def pop_up(self):\n sleep(2)\n self.driver.find_element_by_link_text('Got It').click()\n self.get_search_results()",
"def click(self):\n element = self.element\n WebDriverWait(self._browser, TMO).until(\n lambda browser: element.is_displayed())\n time.sleep(0.1) # Just some pacing.\n element.click()",
"def click_request_new_deal_button(self):\n self.click_element(self.request_new_deal_button_locator)",
"def click_add_related_dispute_button(self):\n add_related_dispute_button_element = self.wait().until(EC.element_to_be_clickable(self.add_related_dispute_button_locator), 'add related dispute button locator not found before specified time out')\n add_related_dispute_button_element.click()\n self.wait_for_ajax_spinner_load()",
"def click_request_deal_button(self):\n self.click_element(self.request_deal_button_locator)\n try:\n self.wait().until(EC.visibility_of_element_located(self.confirmation_popup_locator), 'confirmation popup locator not found before specified time out')\n self.click_element(self.ok_button_locator, True)\n except:\n raise",
"def click_on_submit(context):\n submit_for_approval = context.browser.find_elements_by_css_selector(\n \"input[type='button'][value='Submit for Approval']\")\n for item in submit_for_approval:\n item.click()\n time.sleep(10)",
"def collection_delete_confirm_btn(self):\n collection_delete_confirm_btn_sitem = self.locator_finder_by_xpath(self.collection_delete_confirm_btn_id)\n collection_delete_confirm_btn_sitem.click()\n time.sleep(1)",
"def click_submit_button(self):\n self.click(by_locator=self.__ASK_QUESTION_PAGE_ASK_QUESTION_BUTTON)",
"def tool_selection_click_ok_btn(driver, class_name, index):\r\n\r\n proximity_button = driver.find_elements_by_class_name(class_name)\r\n proximity_button[index].click()\r\n time.sleep(2)",
"def click(self, id):\n el = self.wait_n_get(By.ID, id)\n el.click()",
"def click_button(button_to_click):\n try:\n button_to_click.click()\n except:\n print(\"Button not found\")",
"def click_bulk_edit_submit_button(self):\n self.click_element(self.bulk_edit_submit_button_locator)",
"def select_collection_delete_btn(self):\n select_collection_delete_btn_sitem = self.locator_finder_by_id(self.select_collection_delete_btn_id)\n select_collection_delete_btn_sitem.click()\n time.sleep(1)",
"def click_the_edit_button_that_appears(driver):\n driver.find_element_by_xpath(xpaths.users.eric_Edit_Button).click()",
"def _ClickPrimaryActionButton(self):\n self._ExecuteOobeApi('Oobe.clickGaiaPrimaryButtonForTesting')",
"def click_bulk_edit_submit_button(self):\n self.click_element(self.bulk_edit_submit_button_locator, True)",
"def click_bulk_edit_button(self):\n self.click_element(self.bulk_edit_button_locator, True)",
"def doModal(*args):",
"def doModal(*args):",
"def doModal(*args):",
"def doModal(*args):",
"def clickDetails(self):\n self.waitForElement(locator=self._userProfile_detailsBtn, locatorType=\"xpath\")\n element = self.getElementList(locator=self._userProfile_detailsBtn, locatorType=\"xpath\")\n self.elementClick(element=element[0])\n pp.time.sleep(2)"
] | [
"0.7189076",
"0.6988168",
"0.6578444",
"0.656465",
"0.65359074",
"0.6386164",
"0.6233494",
"0.61503845",
"0.61457515",
"0.6088066",
"0.60567385",
"0.5973108",
"0.5930721",
"0.5856589",
"0.5836955",
"0.57845676",
"0.57726026",
"0.575332",
"0.57077295",
"0.56876504",
"0.56873775",
"0.56751335",
"0.5667791",
"0.5656213",
"0.5646364",
"0.5640706",
"0.5640706",
"0.5640706",
"0.5640706",
"0.563048"
] | 0.7815718 | 0 |
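A minimal standalone sketch of the locator-template pattern used by the `click_related_list_button` keyword in the record above; the nested dictionary and XPath template here are illustrative assumptions, not the real `lex_locators` content.

```python
# Hypothetical stand-in for the lex_locators nested dictionary; the real
# cumulusci templates differ -- this only illustrates the format() step.
lex_locators = {
    "record": {
        "related": {
            "button": "//article[.//span[@title='{}']]//a[@title='{}']",
        }
    }
}

def related_list_button_locator(heading, button_title):
    # Render the XPath for a button in the named related list's header.
    template = lex_locators["record"]["related"]["button"]
    return template.format(heading, button_title)

print(related_list_button_locator("Contacts", "New"))
```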
Clicks a link in the related list with the specified heading. This keyword will automatically call Wait until loading is complete. | def click_related_item_link(self, heading, title):
self.load_related_list(heading)
locator = lex_locators["record"]["related"]["link"].format(heading, title)
try:
self._jsclick(locator)
except Exception as e:
self.builtin.log(f"Exception: {e}", "DEBUG")
raise Exception(
f"Unable to find related link under heading '{heading}' with the text '{title}'"
)
self.wait_until_loading_is_complete() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def click_related_item_popup_link(self, heading, title, link):\n self.load_related_list(heading)\n locator = lex_locators[\"record\"][\"related\"][\"popup_trigger\"].format(\n heading, title\n )\n\n self.selenium.wait_until_page_contains_element(locator)\n self._jsclick(locator)\n locator = lex_locators[\"popup\"][\"link\"].format(link)\n self._jsclick(locator)\n self.wait_until_loading_is_complete()",
"def click_related_list_button(self, heading, button_title):\n self.load_related_list(heading)\n locator = lex_locators[\"record\"][\"related\"][\"button\"].format(\n heading, button_title\n )\n self._jsclick(locator)\n self.wait_until_modal_is_open()",
"def load_related_list(self, heading):\n locator = lex_locators[\"record\"][\"related\"][\"card\"].format(heading)\n el = None\n i = 0\n while el is None:\n i += 1\n if i > 50:\n raise AssertionError(\n \"Timed out waiting for {} related list to load.\".format(heading)\n )\n self.selenium.execute_javascript(\"window.scrollBy(0, 100)\")\n self.wait_for_aura()\n try:\n self.selenium.get_webelement(locator)\n break\n except ElementNotFound:\n time.sleep(0.2)\n continue",
"def click(self, wait_load_page = True):\n\t\tif self.__element.tag == 'a':\n\t\t\tself.__browser.load_page(self.get_property('href'))",
"def onHeadlineClick(self, tag, keywords):\n self.handleEvent(\"headclick1\", tag, keywords)",
"def goto(self, n_house):\n el = self.wait_n_get(By.LINK_TEXT, houses[n_house])\n el.click()",
"def click(cls, user, link):\r\n pass",
"def click_header_field_link(self, label):\n locator = lex_locators[\"record\"][\"header\"][\"field_value_link\"].format(label)\n self._jsclick(locator)",
"def open_home_page(self):\n com_util.wait_for(self.driver, element['waitToLoad'])\n com_util.tap_on(self.driver, element['clickOnContinue'])",
"def click(cls, user, link):\n pass",
"def click_account(self):\n try:\n account_link = self.driver.find_element(\n By.XPATH,\n f\"//td[contains(., '{self.account_id}')]/following-sibling::td/a\",\n )\n except NoSuchElementException:\n raise BillingScraperAccountUnavailable(\n f\"Account {self.account_id} not available from account page.\"\n )\n scroll_to(self.driver, account_link)\n self.driver.sleep(0.5)\n account_link.click()",
"async def link_to(self, *args):\n pass",
"def link_click(_):\r\n\r\n tag_name = about_content.tag_names(tkinter.CURRENT)[0]\r\n about_content.tag_config(tag_name, foreground=\"#551A8B\")\r\n if tag_name == 'hyper':\r\n webbrowser.open(\"https://www.facebook.com/nihal.agarwal.14\")\r\n else:\r\n webbrowser.open(\"https://github.com/NihalAgarwal/Windows-Wi-Fi-Manager\")",
"def followlink(self, event):\n webbrowser.open(self.url)",
"def get_captions(link, driver):\n caption_link = 'http://www.diycaptions.com/php/start.php?id='\n \n key = link.split(\"=\")[1]\n driver.get(caption_link + key)\n caption = ''\n i = 0\n time.sleep(4)\n while(True):\n i += 1\n try:\n text = driver.find_element_by_id(str(i)).text\n except selenium.common.exceptions.NoSuchElementException:\n return caption\n caption += text + ' ' \n all_captions.append({'url': link, 'caption': caption})",
"def i_navigate_to_contact_link():\n driver.find_element_by_id(\"contact_link\").click()",
"def _link_clicked(self, href):\n\n self.main_frame.load(href)",
"def spider_thread(link):\n article_soup = bs(requests.get(link).text, features=\"html.parser\")\n text = article_soup.find('h1', class_=\"c-page-title\").getText()\n print(text)",
"def click_show_summary_link(self):\n self.click_element(self.show_summary_link_locator, True, True)",
"def __selectLink(self, links, keyword):\n link = QUrl()\n from .HelpTopicDialog import HelpTopicDialog\n dlg = HelpTopicDialog(self, keyword, links)\n if dlg.exec_() == QDialog.Accepted:\n link = dlg.link()\n return link",
"def goto_guidelines(self):\n\n self.guidelines.click()",
"def goto_faq(self):\n\n self.faq.click()",
"def goto_faq(self):\n\n self.faq.click()",
"def click(self):\n element = self.element\n WebDriverWait(self._browser, TMO).until(\n lambda browser: element.is_displayed())\n time.sleep(0.1) # Just some pacing.\n element.click()",
"def click(self, selector):\n el = self.locate_element(selector)\n el.click()",
"def go_search_results(self, driver, searchlink):\n self.go_and_assert(driver, searchlink, website)",
"def click(self, id):\n el = self.wait_n_get(By.ID, id)\n el.click()",
"def click_component_from_menu(category, component_type, is_advanced):\r\n if is_advanced:\r\n # Sometimes this click does not work if you go too fast.\r\n world.retry_on_exception(_click_advanced,\r\n ignored_exceptions=AssertionError)\r\n\r\n # Retry this in case the list is empty because you tried too fast.\r\n link = world.retry_on_exception(\r\n lambda: _find_matching_link(category, component_type),\r\n ignored_exceptions=AssertionError\r\n )\r\n\r\n # Wait for the link to be clickable. If you go too fast it is not.\r\n world.retry_on_exception(lambda: link.click())",
"def click_link(candidate_urls):\n for url in candidate_urls:\n webbrowser.open(url)",
"def pop_up(self):\n sleep(2)\n self.driver.find_element_by_link_text('Got It').click()\n self.get_search_results()"
] | [
"0.7736881",
"0.7507538",
"0.71692616",
"0.60561264",
"0.57515484",
"0.56041235",
"0.5319994",
"0.53053665",
"0.52839065",
"0.527617",
"0.5163625",
"0.51312894",
"0.51164484",
"0.5106151",
"0.5098942",
"0.504578",
"0.5043064",
"0.49847758",
"0.4957203",
"0.4951363",
"0.493011",
"0.49219683",
"0.49219683",
"0.49024627",
"0.48990977",
"0.48954225",
"0.4869787",
"0.4846678",
"0.48456454",
"0.48402658"
] | 0.82479006 | 0 |
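A sketch of the error-wrapping pattern in the `click_related_item_link` record above: any click failure is re-raised with a message naming the related list and item. The `click` stub and the XPath string are assumptions so the snippet runs without a browser.

```python
# Dummy click function standing in for the Selenium call.
def click(locator):
    raise RuntimeError("element not found: " + locator)

def click_related_item_link(heading, title):
    locator = f"//article[.//span[@title='{heading}']]//a[@title='{title}']"
    try:
        click(locator)
    except Exception as e:
        # Surface a message that names the list and item instead of the raw
        # locator failure.
        raise Exception(
            f"Unable to find related link under heading '{heading}' "
            f"with the text '{title}'"
        ) from e

try:
    click_related_item_link("Opportunities", "Q3 Renewal")
except Exception as e:
    print(e)
```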
Clicks a link in the popup menu for a related list item. heading specifies the name of the list, title specifies the name of the item, and link specifies the name of the link | def click_related_item_popup_link(self, heading, title, link):
self.load_related_list(heading)
locator = lex_locators["record"]["related"]["popup_trigger"].format(
heading, title
)
self.selenium.wait_until_page_contains_element(locator)
self._jsclick(locator)
locator = lex_locators["popup"]["link"].format(link)
self._jsclick(locator)
self.wait_until_loading_is_complete() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def click_related_item_link(self, heading, title):\n self.load_related_list(heading)\n locator = lex_locators[\"record\"][\"related\"][\"link\"].format(heading, title)\n try:\n self._jsclick(locator)\n except Exception as e:\n self.builtin.log(f\"Exception: {e}\", \"DEBUG\")\n raise Exception(\n f\"Unable to find related link under heading '{heading}' with the text '{title}'\"\n )\n self.wait_until_loading_is_complete()",
"def click_related_list_button(self, heading, button_title):\n self.load_related_list(heading)\n locator = lex_locators[\"record\"][\"related\"][\"button\"].format(\n heading, button_title\n )\n self._jsclick(locator)\n self.wait_until_modal_is_open()",
"def menu_python_for_artists(self, event=None):\n self.link('http://spe.pycs.net/stories/6.html')",
"def HandleHyperLink(self, item):\r\n\r\n if self.IsItemHyperText(item):\r\n event = TreeEvent(wxEVT_TREE_ITEM_HYPERLINK, self.GetId())\r\n event._item = item\r\n self.GetEventHandler().ProcessEvent(event)",
"def link_click(_):\r\n\r\n tag_name = about_content.tag_names(tkinter.CURRENT)[0]\r\n about_content.tag_config(tag_name, foreground=\"#551A8B\")\r\n if tag_name == 'hyper':\r\n webbrowser.open(\"https://www.facebook.com/nihal.agarwal.14\")\r\n else:\r\n webbrowser.open(\"https://github.com/NihalAgarwal/Windows-Wi-Fi-Manager\")",
"def click(cls, user, link):\r\n pass",
"def item_link(self, obj):\n if obj.item is None:\n return '\\N{EM DASH}'\n\n return format_html(\n '<a href=\"{}\">{}</a>',\n reverse('admin:mediaplatform_mediaitem_change', args=(obj.item.pk,)),\n obj.item.title if obj.item.title != '' else '[Untitled]'\n )",
"def __selectLink(self, links, keyword):\n link = QUrl()\n from .HelpTopicDialog import HelpTopicDialog\n dlg = HelpTopicDialog(self, keyword, links)\n if dlg.exec_() == QDialog.Accepted:\n link = dlg.link()\n return link",
"def click(cls, user, link):\n pass",
"def _link_items(self):\n pass",
"def menu_forum_spe(self, event=None):\n self.link('http://www.stani.be/python/spe/page_forum')",
"def menu_wxglade_tutorial(self, event=None):\n self.link(\"http://spe.stani.be/manual/wxGlade/tutorial.html\")",
"def toolbar_link(url, title):\n return LazyToolbarItem(\"staff_toolbar.items.Link\", url=url, title=title)",
"async def link(self, msg, item=None, *args):\n if not Guard.has_permission(msg, 'embed_links'):\n await msg.channel.send(**{\n 'content': 'Cannot send links on this channel',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n if not item:\n return\n if args:\n item = f'{item} {\" \".join(args)}'\n title = await Controller.canonical_title(item)\n if title is None:\n await msg.channel.send(**{\n 'content': f'There are no pages matching `{item}`',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n page_url = Controller.link_from_title(title)\n await msg.channel.send(**{\n 'content': page_url,\n 'reference': msg.to_reference(),\n 'mention_author': True,\n })",
"def open_link(self):\n try:\n webbrowser.open(self.url)\n except:\n self.ids.link.text=self.link_message",
"def click_to_hidden(self, title):\n element = self.driver.find_element_by_xpath('//*[@class=\"topmenu__subwrap\"]')\n self.driver.execute_script('arguments[0].style.display=\"block\"', element)\n self.driver.find_element_by_xpath('//a[contains(text(), \"{}\")]'.format(title)).click()",
"def click_component_from_menu(category, component_type, is_advanced):\r\n if is_advanced:\r\n # Sometimes this click does not work if you go too fast.\r\n world.retry_on_exception(_click_advanced,\r\n ignored_exceptions=AssertionError)\r\n\r\n # Retry this in case the list is empty because you tried too fast.\r\n link = world.retry_on_exception(\r\n lambda: _find_matching_link(category, component_type),\r\n ignored_exceptions=AssertionError\r\n )\r\n\r\n # Wait for the link to be clickable. If you go too fast it is not.\r\n world.retry_on_exception(lambda: link.click())",
"def onProductLinkClicked(self, linkId=None):\n self.OpenProductWeb.emit()",
"def open_products_page(catalog_menu):\n catalog_menu.open_products_page()",
"def create_menu(list_recipes):\n\n title = 'PyVegan - List of Recipes'\n menu = CursesMenu(title, 'Select one and press enter')\n msg = 'This search isn\\'t a valid one'\n\n for recipe in list_recipes:\n recipe_title = clean_title(recipe['post_title'])\n\n if 'post_link' in recipe:\n item = FunctionItem(\n recipe_title,\n url_open,\n args=[recipe['post_link']]\n )\n else:\n item = FunctionItem(recipe_title, lambda x: print(x), args=[msg])\n menu.append_item(item)\n\n return menu",
"def link_1_click(self, **event_args):\n store_description = str(self.item['description'])\n if self.item['completed on']:\n store_description = str(self.item['description'])\n store_time = self.item['completed on'].strftime('%b' ' %d' ' %y')\n alert(store_description + '\\n' 'Task completed on:' + store_time)\n else:\n alert(store_description)",
"def menu_forum_elysiun_python(self, event=None):\n self.link('http://blenderartists.org/forum/forumdisplay.php?f=11')",
"def menu_python_homepage(self, event=None):\n self.link('http://www.python.org')",
"def __showContextMenu(self, pos):\n idx = self.__index.indexAt(pos)\n if idx.isValid():\n menu = QMenu()\n curTab = menu.addAction(self.tr(\"Open Link\"))\n newTab = menu.addAction(self.tr(\"Open Link in New Tab\"))\n newBackgroundTab = menu.addAction(\n self.tr(\"Open Link in Background Tab\"))\n newWindow = menu.addAction(self.tr(\"Open Link in New Window\"))\n menu.move(self.__index.mapToGlobal(pos))\n \n act = menu.exec_()\n model = self.__index.model()\n if model is not None:\n keyword = model.data(idx, Qt.DisplayRole)\n links = model.linksForKeyword(keyword)\n if len(links) == 1:\n link = QUrl(links[list(links.keys())[0]])\n else:\n link = self.__selectLink(links, keyword)\n \n if not link.isEmpty() and link.isValid():\n if act == curTab:\n self.openUrl.emit(link)\n elif act == newTab:\n self.newTab.emit(link)\n elif act == newBackgroundTab:\n self.newBackgroundTab.emit(link)\n elif act == newWindow:\n self.newWindow.emit(link)",
"def _open_item(self, *args, **kwargs):\n \"Does nothing\"",
"def menu_spe_homepage(self, event=None):\n self.link('http://pythonide.stani.be')",
"def requestShowLink(self, *args, **kwargs): # real signature unknown\n pass",
"def link(self):\n return f\"[{self.numbered_title}]({self.html_url})\"",
"def add_to_menu ( self, menu_item ):\r\n pass",
"def contextMenuEvent(self, evt):\n point = evt.globalPos()\n \n if self.__browser:\n point = self.__browser.mapFromGlobal(point)\n if not self.__browser.rect().contains(point, True):\n return\n link = QUrl(self.__browser.anchorAt(point))\n else:\n point = self.__result.mapFromGlobal(point)\n link = self.__result.linkAt(point)\n \n if link.isEmpty() or not link.isValid():\n return\n \n menu = QMenu()\n curTab = menu.addAction(self.tr(\"Open Link\"))\n newTab = menu.addAction(self.tr(\"Open Link in New Tab\"))\n newBackgroundTab = menu.addAction(\n self.tr(\"Open Link in Background Tab\"))\n newWindow = menu.addAction(self.tr(\"Open Link in New Window\"))\n menu.move(evt.globalPos())\n act = menu.exec_()\n if act == curTab:\n self.openUrl.emit(link)\n elif act == newTab:\n self.newTab.emit(link)\n elif act == newBackgroundTab:\n self.newBackgroundTab.emit(link)\n elif act == newWindow:\n self.newWindow.emit(link)"
] | [
"0.74758106",
"0.6553461",
"0.6115421",
"0.6080586",
"0.5914096",
"0.5731324",
"0.57025373",
"0.5691324",
"0.5634803",
"0.55465114",
"0.5527079",
"0.5513927",
"0.55018294",
"0.5477213",
"0.5428236",
"0.5418471",
"0.54158795",
"0.5388323",
"0.53676933",
"0.53380686",
"0.5330781",
"0.53266597",
"0.532383",
"0.52970797",
"0.52912503",
"0.5271288",
"0.5268583",
"0.5256222",
"0.52349335",
"0.5202781"
] | 0.82255656 | 0 |
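The popup keyword above renders two locators in sequence: one for the popup trigger of the named item, one for the link inside the popup. The two templates below are invented placeholders that only mirror that two-step composition.

```python
# Placeholder templates; the real lex_locators entries are not shown above.
POPUP_TRIGGER = "//article[.//span[@title='{}']]//tr[.//a[@title='{}']]//button"
POPUP_LINK = "//div[contains(@class,'popup')]//a[@title='{}']"

def popup_link_locators(heading, title, link):
    # First locator opens the row's popup menu, second targets the menu link.
    return POPUP_TRIGGER.format(heading, title), POPUP_LINK.format(link)

trigger, target = popup_link_locators("Contacts", "Jane Doe", "Delete")
print(trigger)
print(target)
```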
Closes the open modal | def close_modal(self):
locator = lex_locators["modal"]["close"]
self._jsclick(locator) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _close(self, event):\n self.EndModal(wx.ID_OK)",
"def onBtnCloseClicked(self):\n self.close()",
"def click_close_modal_content_button(self):\n self._basket.click_close_modal_content_button()",
"def close(self):\n\n\t\tself._window.close()",
"def close(self, **kwargs):\n if self.closed:\n return\n if self.viewer is not None:\n self.viewer.close()\n self.close_extras(**kwargs)\n self.closed = True",
"def close_apply_keyword_modal(self):\n self._basket.close_apply_keyword_modal()",
"def close(event):\n event.widget.destroy()",
"def close_UI(self):",
"def close(self):\n\n self.driver.close_window(self.handle)",
"def close_pop_up_windows(self):\n self.button_click(self.DECLINE_BUTTON)\n self.button_click(self.CLOSE_POPUP_BUTTON)",
"def close(self):\n self.parent.activate()",
"def close(self):\n self.Close()",
"def close_2(self):\n self.pop_up_amount.destroy()",
"def close(self):\n self._command = \"close\"",
"def close(self):\n self.window.destroy()\n self.buttons_window.destroy()",
"def Close(self):",
"def close_1(self):\n self.pop_up_del.destroy()",
"def closeEvent(self, event) -> None:\n global dialog\n dialog = None",
"def close(self):\n self.done = True\n if self.viewer is not None:\n self.viewer.close()\n self.viewer = None",
"def __window_close(self):\n pass",
"def _close_dialog(*args):\n global _dialog\n if _dialog is not None:\n _dialog.destroy()\n _dialog = None",
"def landlord_button_close(self):\n return self.write({'state': 'close'})",
"def close_attr_editing(self) -> None:\n self.attr_editing_window.Close()\n self.attr_editing_window = None",
"def exit(self):\n if self.window:\n self.window.close()",
"def close(self):\n self._isOpen = False",
"def close(self):\n\n if self._state == states['open']:\n self._do_close()",
"def close_attr_req_editing(self) -> None:\n self.attr_req_editing_window.Close()\n self.attr_req_editing_window = None",
"def web_view_close(self):\n self.webWindow.close()\n return",
"def close(self):\n self.tab.close()",
"def close(self) -> None:\n self.done = True\n if self.viewer is not None:\n self.viewer.close()\n self.viewer = None"
] | [
"0.7485135",
"0.71692693",
"0.7112385",
"0.7063925",
"0.67974085",
"0.677635",
"0.670848",
"0.67019016",
"0.66175354",
"0.6605917",
"0.65781903",
"0.6553257",
"0.65518093",
"0.6550994",
"0.65053326",
"0.6490589",
"0.64872533",
"0.6476808",
"0.6416838",
"0.6399266",
"0.6394791",
"0.6393536",
"0.63833624",
"0.6377623",
"0.6375944",
"0.6361546",
"0.63535875",
"0.63376874",
"0.6334453",
"0.6331752"
] | 0.838349 | 0 |
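A toy model of the close-modal step above: look up one fixed locator and "click" it. The locator value and the injected click callable are assumptions; only the dictionary lookup shape mirrors the keyword.

```python
lex_locators = {"modal": {"close": "//button[@title='Close this window']"}}

def close_modal(click):
    # Resolve the static locator for the modal close button and click it.
    click(lex_locators["modal"]["close"])

close_modal(lambda locator: print("clicking", locator))
```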
Deletes records that were created while running this test case. (Only records specifically recorded using the Store Session Record keyword are deleted.) | def delete_session_records(self):
self._session_records.reverse()
self.builtin.log("Deleting {} records".format(len(self._session_records)))
for record in self._session_records[:]:
self.builtin.log(" Deleting {type} {id}".format(**record))
try:
self.salesforce_delete(record["type"], record["id"])
except SalesforceResourceNotFound:
self.builtin.log(" {type} {id} is already deleted".format(**record))
except Exception as e:
self.builtin.log(
" {type} {id} could not be deleted:".format(**record),
level="WARN",
)
self.builtin.log(" {}".format(e), level="WARN") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_delete_records(self):\n pass",
"def delete_record(records):\n delete_record()",
"def delete_test_data(session_maker):\n\n orm_session = session_maker()\n orm_session.query(USERS).filter(USERS.username.like('%test%')).delete(synchronize_session=False)\n orm_session.query(USER_POSTS).filter(USER_POSTS.username.like('%test%')).delete(synchronize_session=False)\n orm_session.query(COMMENTS).filter(COMMENTS.username.like('%test%')).delete(synchronize_session=False)\n orm_session.query(EMERGENCY_CONTACTS).filter(EMERGENCY_CONTACTS.email.like('%test%')).delete(synchronize_session=False)\n\n orm_session.commit()\n orm_session.close()\n print(\"Test Records deleted\")",
"def test_delete_record(self):\n pass",
"def del_all_records():\n delete_alles = Customer.delete().where(Customer.name >= '')\n delete_alles.execute()",
"def tearDown(self) -> None:\n client = boto3.client(\"dynamodb\")\n for id in self.inserted_purchase_record_id:\n client.delete_item(\n Key={\n \"TransactionId\": {\n \"S\": id,\n },\n },\n TableName=self.transaction_table_purchase,\n )\n\n for id in self.inserted_refund_record_id:\n client.delete_item(\n Key={\n \"TransactionId\": {\n \"S\": id,\n },\n },\n TableName=self.transaction_table_refund,\n )\n\n for id in self.inserted_error_record_id:\n client.delete_item(\n Key={\n \"TransactionId\": {\n \"S\": id,\n },\n },\n TableName=self.transaction_table_error,\n )",
"def test_delete():\n store = SessionStore(user_agent=\"TestUA/1.1\", ip=\"127.0.0.1\")\n store.create()\n session_key = store.session_key\n\n session = Session.objects.get(pk=session_key)\n session.delete()\n\n assert not store.exists(session_key)",
"def clear_records(self) -> None:\n for container in self.record_containers:\n container.clear_records()",
"def test_delete(self):\n # Count the number of records before the save\n existing_records_count = Track.objects.all().count()\n resp = self.api_client.delete('/api/metadata/tracks/2/')\n data = json.loads(resp.content)\n new_records_count = Track.objects.all().count()\n\n # Ensure request was successful, and the record is removed from the database.\n # Should return with a success message.\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(existing_records_count-1, new_records_count)\n self.assertEqual(data['detail'], 'Track successfully removed')",
"def delete_records(self, records_to_delete):\n for record in records_to_delete:\n self.records.remove(record)\n self._store_writer.remove_img_file(record)\n\n self._process_change()",
"def deleteAllRecords(self):\n\n with self.getConnection() as conn:\n try:\n cur = conn.cursor()\n cur.execute(\"delete from process_run\")\n\n except sqlite3.Error as error:\n cur.close()\n raise workflowException('Database {0}: {1}'.format(self.wfname, repr(error)))\n return",
"def delete_record(self):\n for record in self.records:\n if self.date_str == record[\"date\"]:\n self.records.remove(record)\n if len(self.records) > 0:\n self.write_json_file(self.records_file, self.records)\n else:\n os.remove(self.records_file)\n return True\n return False",
"def delete(self):\n if not self.isNew:\n #We do not check the hasData property, so we can use this function to delete records\n #without reading them first.\n #TODO: this is stupid and unclean, change it\n try:\n CFG.CX.delete ( CFG.DB.SCHEMA + \".object\", { 'objectid' : self._objectid } )\n self.clearRecord()\n self.raiseEvent ( \"record_deleted\", self )\n except pg.DatabaseError, e:\n raise Record.DataManipulationError ( \"Deleting record {1} of '{0}'\".format(self._table.name, self._objectid),\n \"\",\n e)",
"def Delete(self):\n\n self.db.ExecuteSql('delete from tracks where id=%d;'\n % self.persistant['id'])\n self.db.ExecuteSql('commit;')",
"def delete_All_record():\n\twith jsonlines.open('permanRecords.jsonl', mode='w') as writer:\n\t\tprint(\"All permanent records deleted\")\n\twith jsonlines.open('tempRecords.jsonl', mode='r') as reader:\n\t\tfor obj in reader:\n\t\t\twith jsonlines.open('tempRecords.jsonl', mode='w') as writerOP:\n\t\t\t\twriterOP.write(obj)\n\t\t\tbreak\n\t\t\tprint(\"All trmporary records deleted\")",
"def cli_delete_record(field_list):\n try:\n api.delete_record(field_list)\n except NoRecordsFound as error:\n print \"%(error)s\" % locals()\n return",
"def clean_table_records(self, experiment_id):\n # delete join job records from table\n join_job_records = self.join_db_client.get_all_join_job_records_of_experiment(experiment_id)\n\n if join_job_records:\n self.join_db_client.batch_delete_items(\n experiment_id, [record[\"join_job_id\"] for record in join_job_records]\n )\n\n # delete model records from table\n model_records = self.model_db_client.get_all_model_records_of_experiment(experiment_id)\n\n if model_records:\n self.model_db_client.batch_delete_items(\n experiment_id, [record[\"model_id\"] for record in model_records]\n )\n\n # # exit sync thread\n self.sync_thread.thread_running.clear()\n\n # delete exp record from table\n self.exp_db_client.delete_item(experiment_id)\n\n self._close_existing_containers()",
"def delete_record(self, record_id):\r\n self.record.deleteObject(id=record_id)",
"def deleteAllRecord(collection):\n collection_name = collection\n collection = db[collection_name]\n collection.delete_many({})\n\n print(\"Deleting all records from \" + collection_name)\n print(\"Finished operation. Collection cleared.\")\n print(\"--------- \\n\")",
"def case_delete_records(self, refresh_db_before):\n new_name = \"tag1\"\n\n TagOp.add(new_name)\n\n tag_obj = TagOp.get(name=new_name)\n self.assertTrue(len(tag_obj) is 1)\n self.assertEqual(tag_obj[0].name, new_name)\n\n TagOp.delete(tag_obj[0])\n\n tag_obj = TagOp.get(name=new_name)\n self.assertFalse(tag_obj)",
"def delete():\n\t# Create session\n\ts = Session()\n\ts.query(Lookup).delete(synchronize_session=False)\n\ts.commit()",
"def tearDown(self):\n\n User.objects.all().delete()\n Movie.objects.all().delete()\n Vote.objects.all().delete()",
"def tearDown(self):\n #db.session.remove()\n db.drop_all()",
"def _delete_selected_records(self):\n # Display a confirmation dialog to check that user wants to proceed with deletion\n quit_msg = \"This operation cannot be undone.\\nAre you sure you want to delete these record/s?\"\n reply = QtWidgets.QMessageBox.warning(self, 'Confirm Delete',\n quit_msg, QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)\n\n # If yes, find the appropriate records and delete them\n if reply == QMessageBox.Yes:\n rows = self._table.selectionModel().selectedRows()\n records_to_delete = []\n for row in rows:\n index = row.row()\n record = self._store.get_record(index)\n records_to_delete.append(record)\n\n if self._options.backup.value():\n self._store.backup_records(self._options.get_backup_directory())\n self._store.delete_records(records_to_delete)\n\n self._load_store_records()",
"def tearDown(self):\r\n\r\n User.query.delete()\r\n Article.query.delete()\r\n\r\n db.session.commit()",
"def run(self):\n self.db.table('purchases').delete()\n self.db.table('payments').delete()",
"def test_deletion(self):\n self.assertEqual(self.store.query(BatchManholePowerup).count(), 0)",
"def delete_record(self, key):\n del self._records[key]",
"def tearDown(self):\n Review.query.delete()",
"def clear_db(self):\n self.cursor.execute(\"DELETE FROM TrackPoint\")\n self.cursor.execute(\"DELETE FROM Activity\")\n self.cursor.execute(\"DELETE FROM User\")\n self.db_connection.commit()"
] | [
"0.7481224",
"0.7373201",
"0.7205428",
"0.69691026",
"0.6664302",
"0.65132904",
"0.64798975",
"0.64461666",
"0.64307034",
"0.639961",
"0.63823223",
"0.6344583",
"0.6338384",
"0.633812",
"0.6276203",
"0.62057465",
"0.6177603",
"0.6175238",
"0.6172349",
"0.61712223",
"0.6158834",
"0.61135453",
"0.6106125",
"0.6087103",
"0.6084856",
"0.60495234",
"0.60470897",
"0.6042832",
"0.60357773",
"0.6028861"
] | 0.80059433 | 0 |
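A standalone sketch of the teardown loop in the `delete_session_records` record above: delete the most recently created records first, and keep going when a record is already gone or its deletion fails. The `delete` stub, the `NotFound` class, and the record ids are invented so the loop runs without an org.

```python
class NotFound(Exception):
    pass

def delete(record):
    # Pretend the second record was already deleted elsewhere.
    if record["id"] == "003B0000000002":
        raise NotFound()

records = [
    {"type": "Account", "id": "001B0000000001"},
    {"type": "Contact", "id": "003B0000000002"},
]
records.reverse()  # delete in reverse creation order
for record in list(records):
    try:
        delete(record)
        print("deleted {type} {id}".format(**record))
    except NotFound:
        print("{type} {id} is already deleted".format(**record))
    except Exception as e:
        print("{type} {id} could not be deleted: {err}".format(err=e, **record))
```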
Return the ids of all open browsers | def get_active_browser_ids(self):
# This relies on some private data structures, but presently
# there is no other way. There's been a discussion in the
# robot slack channels about adding a new keyword that does
# what this keyword does. When that happens, we can remove
# this keyword.
driver_ids = []
try:
driver_cache = self.selenium._drivers
except NoOpenBrowser:
return []
for index, driver in enumerate(driver_cache._connections):
if driver not in driver_cache._closed:
# SeleniumLibrary driver ids start at one rather than zero
driver_ids.append(index + 1)
return driver_ids | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_ids(self):\n page = r.get(self.url)\n tree = html.fromstring(page.content)\n ids_elements = tree.xpath(\"//div[@id='selectedcontent']/div/ul/li/a\")\n return [self._e_to_id(e) for e in ids_elements]",
"def getIDs():",
"def getAllWindowHandles(self):\n cmdId = self.executeCommand(Command.GET_WINDOW_HANDLES)\n return cmdId",
"def get_opened_windows_list():\n\n global opened_windows_names\n EnumWindows(EnumWindowsProc(foreach_window), 0)\n return opened_windows_names",
"def getCurrentWindowId(*args):",
"def getSessionId(self) -> List[int]:\n return self.pool.getSessionId()",
"def get_ids(self):\n return self._graphs.keys()",
"def select_host_ids():\n return IMPL.select_host_ids()",
"def get_browser_contexts() -> Generator[dict, dict, list[browser.BrowserContextID]]:\n response = yield {\"method\": \"Target.getBrowserContexts\", \"params\": {}}\n return [browser.BrowserContextID(b) for b in response[\"browserContextIds\"]]",
"def getIDs(self):\n return self.multiengine.getIDs()",
"def get_socket_ids() -> List[int]:\n socket_id_list = []\n for cpu_id in cpu_ids():\n api_file = open('/sys/devices/system/cpu/cpu' + str(cpu_id) + '/topology/physical_package_id')\n socket_id_list.append(int(api_file.readline().strip()))\n return list(set(socket_id_list))",
"def hios_ids(self):\n return self._hios_ids",
"def PIDs():\n from ctypes import windll,c_ulong,byref,sizeof\n PIDs = (c_ulong*512)()\n size_of_PIDs = c_ulong()\n windll.psapi.EnumProcesses(byref(PIDs),sizeof(PIDs),byref(size_of_PIDs))\n nPIDs = size_of_PIDs.value/sizeof(c_ulong())\n pidProcess = sorted([int(i) for i in PIDs][:nPIDs])\n return pidProcess",
"def getLocationsIds():\n with driver.session() as s:\n ids = s.write_transaction(getLocationsId)\n\n lIds = []\n for idEl in ids:\n lIds.append(idEl[\"ID(l)\"])\n\n return lIds",
"def open_new_sessions(self, number_of_sessions=1):\n session_ids = []\n\n for x in range(0, number_of_sessions):\n init_request = self.make_request()\n session_ids.append(init_request['ident'])\n print 'Acquired SessionID #%s: %s' % (\n x, init_request['ident']\n )\n\n return session_ids",
"def get_hwnds(pid):\n def callback(hwnd, hwnds):\n if win32gui.IsWindowVisible(hwnd) and win32gui.IsWindowEnabled(hwnd):\n _, found_pid = win32process.GetWindowThreadProcessId(hwnd)\n if found_pid == pid:\n hwnds.append(hwnd)\n return True\n hwnds = []\n win32gui.EnumWindows(callback, hwnds)\n return (hwnds)",
"def get_session_ids(self):\n with self._sessions_lock:\n session_ids = self.sessions.keys()\n\n return session_ids",
"def get_ids(self) -> List[str]:",
"def remote_get_ids(self):\n return self.smultiengine.get_ids()",
"def get_all_master_ids(self):\r\n return self._handler.get_all_master_ids()",
"def getRefreshIDs(self):\n ids = []\n for bucket in self.router.getLonelyBuckets():\n rid = random.randint(*bucket.range).to_bytes(20, byteorder='big')\n ids.append(rid)\n return ids",
"def readBrowserHistory():\n history_db = os.path.expanduser(\n '~') + \"/Library/Application Support/Google/Chrome/Default/history\"\n # copy history_db to workaround Chrome history permissions\n copy_db = os.path.expanduser('~') + \"/History\"\n copyfile(history_db, copy_db)\n c = sqlite3.connect(copy_db)\n cursor = c.cursor()\n select_statement = \"SELECT urls.url FROM urls, visits WHERE urls.id = visits.url;\"\n cursor.execute(select_statement)\n results = cursor.fetchall()\n c.close()\n sites = set()\n for result in results:\n sites.add(parse(result[0]))\n return sites",
"def GetXIDs():\n return dict([(pwent[0], pwent[2]) for pwent in pwd.getpwall() if pwent[6] == slivermanager.sliver_password_shell])",
"def window_handles():\n handles = {}\n\n def append_window_handle(hwnd, ctx ):\n title = win32gui.GetWindowText(hwnd)\n if title:\n handles[win32gui.GetWindowText(hwnd)] = hwnd\n\n try:\n win32gui.EnumWindows(append_window_handle, None)\n except NameError:\n # Not on windows.\n pass\n\n return handles",
"def get_pids(name=None):\n results = []\n for process in win32com.client.GetObject('winmgmts:').InstancesOf('Win32_Process'):\n if name is None or process.Properties_(\"Name\").Value == name:\n results.append(process.Properties_(\"ProcessID\").Value)\n return results",
"def get_html5_ids(html5_sources):\r\n html5_ids = [x.split('/')[-1].rsplit('.', 1)[0] for x in html5_sources]\r\n return html5_ids",
"def index(self):\n return self._browser.driver.window_handles.index(self.name)",
"def all_env_ids(self) -> np.ndarray:",
"def get_refresh_ids(self):\n ids = []\n for bucket in self.router.lonely_buckets():\n rid = random.randint(*bucket.range).to_bytes(20, byteorder='big')\n ids.append(rid)\n return ids",
"def get_open_port_numbers(host):\n ports_per_host =[]\n for h in host:\n ports = h.findAll(\"port\")\n for port in ports:\n port_id = check_if_unicode(port[\"portid\"])\n ports_per_host.append(port_id)\n return ports_per_host"
] | [
"0.6569217",
"0.628429",
"0.62061906",
"0.6202266",
"0.58726",
"0.5845237",
"0.58393615",
"0.5779246",
"0.5755626",
"0.57022905",
"0.56826526",
"0.56637734",
"0.5652637",
"0.56374764",
"0.5617484",
"0.5593592",
"0.5584265",
"0.5559799",
"0.5544296",
"0.55259955",
"0.5506343",
"0.55042046",
"0.5465739",
"0.5460487",
"0.53798366",
"0.5372378",
"0.537211",
"0.53635806",
"0.5356739",
"0.5346324"
] | 0.71545905 | 0 |
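The id bookkeeping in the record above, reduced to plain lists: browser ids are the 1-based indexes of cached driver connections that have not been closed. The driver names here are placeholders.

```python
connections = ["driver-a", "driver-b", "driver-c"]
closed = {"driver-b"}

# SeleniumLibrary-style ids start at one rather than zero.
driver_ids = [
    index + 1
    for index, driver in enumerate(connections)
    if driver not in closed
]
print(driver_ids)  # [1, 3]
```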
Parses the current url to get the object id of the current record. | def get_current_record_id(self):
url = self.selenium.get_location()
for part in url.split("/"):
oid_match = re.match(OID_REGEX, part)
if oid_match is not None:
return oid_match.group(2)
raise AssertionError("Could not parse record id from url: {}".format(url)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_id(self, url):\n return url.split('/')[-1]",
"def obj_id(self) -> int:\n return int(self.index.split(\"/\")[-1]) if self.index else None",
"def getOID(self, selfURL):\n\n selfURL_path = urlsplit(selfURL).path\n oID = Path(selfURL_path).name\n try:\n r = int(oID)\n except ValueError as err:\n print('no object ID in the selfUrl {0}. Reason: {1}'.format(selfURL, err))\n r = None\n return r",
"def get_id(self, resource):\n try:\n return resource.href.split('/')[-1]\n except AttributeError:\n return resource['href'].split('/')[-1]",
"def get_identifier(self, object):\n try:\n identifier = object[\"uri\"]\n except KeyError:\n identifier = object[\"ref\"]\n return identifier",
"def getid(data):\n return int(data.split('/')[-1])",
"def _id_from_url(url):\n url = re.sub(r'\\?.*', '', url)\n video_id = url.split('/')[-2]\n return video_id",
"def base_object(self, obj, verb=None):\n id, url = super(Facebook, self).base_object(obj)\n if url:\n try:\n parsed = urlparse.urlparse(url)\n if parsed.path == PHOTO_PATH:\n fbids = urlparse.parse_qs(parsed.query).get(PHOTO_ID_PARAM)\n if fbids:\n return fbids[0], url\n elif verb == 'like' and '/posts/' in parsed.path:\n # add user id prefix. https://github.com/snarfed/bridgy/issues/229\n id = '%s_%s' % (parsed.path.split('/posts/')[0][1:], id)\n except BaseException, e:\n logging.error(\n \"Couldn't parse object URL %s : %s. Falling back to default logic.\",\n url, e)\n\n return id, url",
"def getId(self):\n return self.session.request('id/')",
"def getIdLink(self):\n return self.urlLink()",
"def get_id(self):\n return self.get_api_endpoint()",
"def _http_get_current_id(self):\n return self._http_request('').json()['currentplid']",
"def _get_id(self):\n return self.id",
"def id_from_url(url):\n return url.split('-')[-1].split('.html')[0]",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id"
] | [
"0.7274164",
"0.7020472",
"0.6777252",
"0.6688561",
"0.6602918",
"0.6407275",
"0.635892",
"0.63425964",
"0.6307531",
"0.6288161",
"0.6233897",
"0.6204031",
"0.619106",
"0.61868566",
"0.6139848",
"0.6139848",
"0.6139848",
"0.6139848",
"0.6139848",
"0.6139848",
"0.6139848",
"0.6139848",
"0.6139848",
"0.6139848",
"0.6139848",
"0.6139848",
"0.6139848",
"0.6139848",
"0.6139848",
"0.6139848"
] | 0.7598501 | 0 |
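A runnable sketch of the URL parsing in the record above. `OID_REGEX` is not shown in that record, so the pattern below is an assumption that accepts 15- or 18-character Salesforce ids with an optional encoded-slash prefix group; only the split-and-match flow mirrors the keyword.

```python
import re

# Assumed pattern, not necessarily the real OID_REGEX.
OID_REGEX = r"^(%2F)?([a-zA-Z0-9]{15,18})$"

def record_id_from_url(url):
    for part in url.split("/"):
        match = re.match(OID_REGEX, part)
        if match is not None:
            return match.group(2)
    raise AssertionError("Could not parse record id from url: {}".format(url))

print(record_id_from_url(
    "https://example.lightning.force.com/lightning/r/Account/001B000000ABCdeFGH/view"
))
```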
Return the current value of a form field based on the field label | def get_field_value(self, label):
input_element_id = self.selenium.get_element_attribute(
"xpath://label[contains(., '{}')]".format(label), "for"
)
value = self.selenium.get_value(input_element_id)
return value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getValue(self):\n return self.field.currentText()",
"def getValue(self):\n return self.field.text()",
"def field(self):\r\n return self.value",
"def get_field_value(self, field_name):\n if field_name in self.fields.keys():\n return self.fields[field_name]\n else:\n return \"No such field\"",
"def get_field_value(self, name, raw=False):\n field = self.get_field(name)\n if field is None:\n return\n\n if raw:\n return field.value\n\n val = field.show\n if not val:\n val = field.value\n if not val:\n val = field.showname\n return val",
"def label_for_field(instance, arg):\n if isinstance(instance, QuerySet):\n instance = instance.model\n try:\n return instance._meta.get_field(arg).verbose_name\n except FieldDoesNotExist:\n return ''",
"def getValue(self, label, default=None):\n # Allow getValue using the label string\n if isinstance(label, basestring):\n label = str2Label(label)\n return self._labelDict.get(label, default)",
"def get_field(entry, field):\n\n if field.name in entry.field_dict:\n if field.choices:\n return getattr(entry.object, \"get_%s_display\" % field.name)()\n return entry.field_dict[field.name]\n else:\n return settings.TEMPLATE_STRING_IF_INVALID",
"def GetValue(self):\n \n return self.choices[self.selected].GetLabel()",
"def get_field_value(instance, field_name, use_get):\n if use_get:\n field_value = instance.get(field_name)\n else:\n field_value = getattr(instance, field_name, '')\n return field_value",
"def getfield(form, fieldname):\n try:\n return form[fieldname]\n except KeyError:\n return None",
"def getFieldValue (self, fieldname):\n return self._modified_values.get(fieldname, None) or self._original_values[fieldname]",
"def label(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label\")",
"def label(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label\")",
"def label(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label\")",
"def label(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label\")",
"def getValue(self):\n return self.field.value()",
"def getValue(self):\n return self.field.value()",
"def _get_label(self):\n return self.label",
"def field_display(obj, field):\n return get_field_value(obj, field)",
"def get_value(self, field):\n field = self.find_first(field)\n if field is not None:\n return field.value\n return None",
"def first_field_by_label(self, label: str, case_sensitive=True):\n fields = self.fields_by_label(label, case_sensitive=case_sensitive)\n f = fields[0]\n return f",
"def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")",
"def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")",
"def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")",
"def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")",
"def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")",
"def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")",
"def label(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"label\")",
"def getfield(value, arg):\n #import pdb; pdb.set_trace()\n if hasattr(value, \"fields\"):\n fields = getattr(value, \"fields\")\n if str(arg) in fields:\n return str(fields[str(arg)])"
] | [
"0.6556248",
"0.65483963",
"0.6508708",
"0.64794666",
"0.63468665",
"0.6334696",
"0.63046134",
"0.62998545",
"0.629518",
"0.6286994",
"0.62621415",
"0.6250413",
"0.6250221",
"0.6250221",
"0.6250221",
"0.6250221",
"0.6246163",
"0.6246163",
"0.61561424",
"0.6147392",
"0.6099475",
"0.60736054",
"0.5978719",
"0.5978719",
"0.5978719",
"0.5978719",
"0.5978719",
"0.5978719",
"0.5978719",
"0.5951818"
] | 0.80294776 | 0 |
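The two-step lookup in the record above, simulated with plain dictionaries instead of a live DOM: resolve the label's `for` target first, then read the value of that element. The labels, ids, and values are invented.

```python
label_for = {"First Name": "input-42", "Email": "input-43"}
element_values = {"input-42": "Ada", "input-43": "ada@example.com"}

def get_field_value(label):
    # Step 1: the label's `for` attribute names the input element.
    input_element_id = label_for[label]
    # Step 2: read that element's current value.
    return element_values[input_element_id]

print(get_field_value("Email"))
```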
Returns a rendered locator string from the Salesforce lex_locators dictionary. This can be useful if you want to use an element in a different way than the built-in keywords allow. | def get_locator(self, path, *args, **kwargs):
locator = lex_locators
for key in path.split("."):
locator = locator[key]
return locator.format(*args, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_locator(locator_text: str, locator_type: str = \"id\") -> tuple:\n locator = locator_type.upper()\n return getattr(By, locator), locator_text",
"def __str__(self):\n return 'Token({type}, {lexema})'.format(\n type= tokenNames[self.type],\n lexema=self.lexema\n )",
"def __html__(self) -> str:\n location_string = self.location.string if self.location else None\n components = [self.name, self.owner, location_string]\n return ', '.join([component for component in components if component])",
"def lexicon() -> Lexicon:\n return Lexicon()",
"def token_lookup():\n token_list = {\".\": \"||Period||\",\n \",\": \"||Comma||\",\n '\"': \"|Quotation_Mark||\",\n \";\": \"||Semicolon||\",\n \"!\": \"||Exclamation_Mark||\",\n \"?\": \"||Question_Mark||\",\n \"(\": \"||Left_Parentheses||\",\n \")\": \"||Right_Parentheses||\",\n \"--\": \"||Dash||\",\n \"\\n\": \"||Return||\"\n }\n\n return token_list",
"def storelocator():\n\n\treturn render_template(\"storelocator.html\")",
"def documented_names():\n def _get_names(runtime: Dict[str, e.Entity]):\n fn, *args = (\n e.Quoted(e.Name(key)) for key, value in runtime.items()\n if getattr(value, \"_docstring_source\", None) is not None\n )\n return e.Quoted(e.Sexpr(fn, tuple(args)))\n\n def _documented_names():\n return RuntimeDependent(_get_names)\n yield (\"(λ . &[any])\", _documented_names)",
"def getLexicon(self):\n #return self._getOb('Lexicon')\n return self._getOb('LexiconUnicode')",
"def _init_locators(self):\n try:\n version = int(float(self.get_latest_api_version()))\n self.builtin.set_suite_metadata(\"Salesforce API Version\", version)\n locator_module_name = \"locators_{}\".format(version)\n\n except RobotNotRunningError:\n # We aren't part of a running test, likely because we are\n # generating keyword documentation. If that's the case we'll\n # use the latest supported version\n here = os.path.dirname(__file__)\n files = sorted(glob.glob(os.path.join(here, \"locators_*.py\")))\n locator_module_name = os.path.basename(files[-1])[:-3]\n\n self.locators_module = importlib.import_module(\n \"cumulusci.robotframework.\" + locator_module_name\n )\n lex_locators.update(self.locators_module.lex_locators)",
"def token_lookup():\n token_dict = {}\n token_dict['.'] = \"||Period||\"\n token_dict[','] = \"||Comma||\"\n token_dict['\"'] = \"||Quotation_Mark||\"\n token_dict[';'] = \"||Semicolon||\"\n token_dict['!'] = \"||Exclamation_Mark||\"\n token_dict['?'] = \"||Question_Mark||\"\n token_dict['('] = \"||Left_Parentheses||\"\n token_dict[')'] = \"||Right_Parentheses||\"\n token_dict['--'] = \"||Dash||\"\n token_dict['\\n'] = \"||Return||\"\n\n return token_dict",
"def token_lookup():\n Tokenize = {'.': '||Period||',\n '.': '||Period||',\n ',': '||Comma||',\n '\"': '||Quotation_Mark||', \n ';': '||Semicolon||', \n '!': '||Exclamation_mark||', \n '?': '||Question_mark||', \n '(': '||Left_Parentheses||', \n ')': '||Right_Parentheses||', \n '--': '||Dash||',\n '\\n': '||Return||'} \n \n return Tokenize",
"def test_render_path(renderer):\n path = \"foo bar baz\".split()\n renderer.name_ref(User.email, path=path)\n expected = {'ExpressionAttributeNames':\n {'#n0': 'email', '#n3': 'baz', '#n2': 'bar', '#n1': 'foo'}}\n assert renderer.rendered == expected",
"def token_lookup():\n token_dict = {'.':'||Period||', ', ':'||Comma||', '\"':'||Quotation_Mark||', ';':'||Semicolon||',\n '!':'||Exclamation_mark||', '?':'||Question_mark||', '(':'||Left_Parentheses||',\n ')':'||Right_Parentheses||', '--':'||Dash||', '\\n':'||Return||'}\n return token_dict",
"def _get_locators(self):\n return locator.DashboardWidget",
"def anchor_entity_decorator(props):\n return DOM.create_element('span', { \"data-id\": props['id'], \"data-type\": 'anchor' }, props['children'])",
"def get_help(self, caller, cmdset):\n if caller.check_permstring(self.perm_for_switches):\n return self.__doc__\n help_string = \"\"\"\n @emit\n\n Usage :\n @emit <message>\n\n Emits a message to your immediate surroundings. This command is\n used to provide more flexibility than the structure of poses, but\n please remember to indicate your character's name.\n \"\"\"\n return help_string",
"def _get_input_field_locator(self, name):\n try:\n # we need to make sure that if a modal is open, we only find\n # the input element inside the modal. Otherwise it's possible\n # that the xpath could pick the wrong element.\n self.selenium.get_webelement(lex_locators[\"modal\"][\"is_open\"])\n modal_prefix = \"//div[contains(@class, 'modal-container')]\"\n except ElementNotFound:\n modal_prefix = \"\"\n\n locator = modal_prefix + lex_locators[\"object\"][\"field_label\"].format(\n name, name\n )\n input_element_id = self.selenium.get_element_attribute(locator, \"for\")\n return input_element_id",
"def _render_markup(self, per_token_info: List[Dict[str, Any]]) -> str:\n markup = \"\"\n for token in per_token_info:\n entities = sorted(token[\"entities\"], key=lambda d: d[\"render_slot\"])\n # Whitespace tokens disrupt the vertical space (no line height) so that the\n # span indicators get misaligned. We don't render them as individual\n # tokens anyway, so we'll just not display a span indicator either.\n is_whitespace = token[\"text\"].strip() == \"\"\n if entities and not is_whitespace:\n slices = self._get_span_slices(token[\"entities\"])\n starts = self._get_span_starts(token[\"entities\"])\n total_height = (\n self.top_offset\n + self.span_label_offset\n + (self.offset_step * (len(entities) - 1))\n )\n markup += self.span_template.format(\n text=escape_html(token[\"text\"]),\n span_slices=slices,\n span_starts=starts,\n total_height=total_height,\n )\n else:\n markup += escape_html(token[\"text\"] + \" \")\n return markup",
"def __repr__(self):\n return \"%s(%s)\" % (self.__class__.__name__, self.tokens())",
"def loc(y,x):\n return '\\033[%s;%sH' % (str(y),str(x))",
"def _draw_decorators(self):\n return \" \".join(self._decorators)",
"def get_display_symbol(self):\n if not hasattr(self, \"_cached_display_symbol\"):\n legend = self.xymap.legend\n default_symbol = self.symbol if self.display_symbol is None else self.display_symbol\n self._cached_display_symbol = default_symbol\n\n dirtuple = tuple((key, self.directions[key]) for key in sorted(self.directions.keys()))\n\n replacement_symbol = self.display_symbol_aliases.get(dirtuple, default_symbol)\n\n if replacement_symbol != self.symbol:\n node_or_link_class = legend.get(replacement_symbol)\n if node_or_link_class:\n # initiate class in the current location and run get_display_symbol\n # to get what it would show.\n self._cached_display_symbol = node_or_link_class(\n self.x, self.y, self.Z\n ).get_display_symbol()\n return self._cached_display_symbol",
"def spaceLocator(*args, absolute: bool=True, name: AnyStr=\"\", position: Union[List[float, float,\n float], bool]=None, relative: bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[List[AnyStr], Any]:\n pass",
"def lit(s):\n if s in ['record','doc','location']:\n if s =='record':\n return (Parse.word('we').possibly() +\n first_word('record register') +\n Parse.word('identification').possibly() +\n Parse.word('that').possibly())\n if s == 'doc':\n return lit['document'] | lit['end-document']\n if s == 'location':\n return Parse.first([lit_dict['document'],lit_dict['theorem'],lit_dict['axiom']])\n return lit_dict[s]",
"def location(self) -> str:\n return self._search_in_description(REGEXP_ATTR_LOCATION)",
"def parse_locator(locator):\n\n # handle backwards compatibility to support new Locator class\n if isinstance(locator, loc.Locator):\n locator = '{by}={locator}'.format(by=locator.by, locator=locator.locator)\n\n locator_tuple = namedtuple('Locator', 'By value')\n\n if locator.count('=') > 0 and locator.count('css=') < 1:\n by = locator[:locator.find('=')].replace('_', ' ')\n value = locator[locator.find('=')+1:]\n return locator_tuple(by, value)\n else: # assume default is css selector\n value = locator[locator.find('=')+1:]\n return locator_tuple('css selector', value)",
"def preview_locator(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"preview_locator\")",
"def _get_lexicon_generator(self, morphemes):\n delimiter = self.rare_delimiter\n for pos, data in sorted(morphemes.items()):\n foma_regex_name = self._get_valid_foma_regex_name(pos)\n if foma_regex_name:\n yield u'define %s [\\n' % foma_regex_name\n if data:\n if not (self.rich_upper or self.rich_lower):\n data = sorted(set((mb, None) for mb, mg in data))\n for mb, mg in data[:-1]:\n yield u' %s |\\n' % self._get_morpheme_representation(\n mb=mb, mg=mg, pos=pos, delimiter=delimiter)\n yield u' %s \\n' % self._get_morpheme_representation(\n mb=data[-1][0], mg=data[-1][1], pos=pos, delimiter=delimiter)\n yield u'];\\n\\n'",
"def _repr_(slf):\n title = 'ErrorMsgManager'\n func = lambda s, v: ', '.join(getattr(s, v) or [] if hasattr(s, v) else [])\n key_list = func(slf, '_keys')\n comps = func(slf, '_comps')\n comp_str = '<{0} (comp: [{1}]'.format(title, comps)\n key_str = '; keys: [{0}]'.format(key_list)\n if key_list:\n comp_str += key_str\n\n return '{0})>'.format(comp_str)",
"def get_location_name(self):\n mark_name = u'__'.join(self.query_path) + u'___' + six.text_type(self.visit_counter)\n return (mark_name, self.field)"
] | [
"0.4768227",
"0.46713182",
"0.45936868",
"0.44802696",
"0.4458644",
"0.44231966",
"0.43998763",
"0.43943584",
"0.43852997",
"0.43272206",
"0.43175244",
"0.42806837",
"0.42482546",
"0.42070994",
"0.42069843",
"0.4188468",
"0.41546643",
"0.4154236",
"0.4147403",
"0.41430965",
"0.41329145",
"0.41315675",
"0.41289806",
"0.40847704",
"0.4081309",
"0.40544683",
"0.40489036",
"0.40412185",
"0.403564",
"0.4024568"
] | 0.5943993 | 0 |
Returns the Record Type Id for a record type name | def get_record_type_id(self, obj_type, developer_name):
soql = "SELECT Id FROM RecordType WHERE SObjectType='{}' and DeveloperName='{}'".format(
obj_type, developer_name
)
res = self.cumulusci.sf.query_all(soql)
return res["records"][0]["Id"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __get_type_id(record: TNSRecord) -> int:\n return ObjectType.get_or_create(record.type or 'Unknown').id",
"def get_id(type_: Dict[str, str]) -> int:\n return int(type_[f'{type_name}_id'])",
"def _type_str(self):\n try:\n record_name = RECORD_TYPES[self.type]\n return '{0} record'.format(record_name)\n except KeyError:\n return '{0} type 0x{1:04x}'.format(self.__class__.__name__,\n self.type)",
"def getTypeID(self) -> int:\n ...",
"def record_type(values):\n field = basic.lookup(values, name='Record Type (one of %s)' % values)\n\n return field.setResultsName('record_type')",
"def get_record_identifier(self, record):\n raise NotImplementedError()",
"def get_type_id(type_url):\n # TODO\n return type_url",
"def data_type_id(self) -> str:\n return self._data_type_id",
"def get_type_id(self):\n\n raise Exception(\"Not implemented!\"+self.__class__)",
"def type_id(self):\n return self._type_id",
"def get_sensor_type_id(sensor_type_name):\n query = db.session.query(\n TypeClass.id,\n ).filter(TypeClass.sensor_type == sensor_type_name)\n sensor_id = db.session.execute(query).fetchone()\n if isinstance(sensor_id, Iterable):\n sensor_id = sensor_id[0]\n return sensor_id",
"def identifier_type(self):\n return self._identifier_type",
"def get_record_type(rr, offset=0):\n (generic_type, _) = RR.fromData(rr,offset)\n return {\n RR.TYPE_A : RR_A,\n RR.TYPE_AAAA : RR_AAAA,\n RR.TYPE_NS : RR_NS,\n RR.TYPE_CNAME : RR_CNAME\n }[generic_type._type]",
"def identify(self, record):\n if record.record_type in self.record_type_iders:\n ident = self.record_type_iders[record.record_type](record)\n if ident:\n return [record.record_type] + ident\n return [record.record_type, False]\n return False",
"def get_id(self, entity_name, type_name):\n if not self.is_type(type_name):\n raise TypeNameError(type_name, 'Undeclared type')\n try:\n return self._symtab[type_name].get_id(entity_name)\n except KeyError:\n raise EntityNameError(entity_name, type_name, 'No entity mapping')",
"def __get_type_id(tns_response: TNSObjectSearchResult) -> int:\n type_name = tns_response.object_type_name or 'Unknown'\n object_type = ObjectType.get_or_create(type_name)\n return object_type.id",
"def type(self):\n # easy enough\n return self._dataset._pyre_id.type",
"def get_part_type_id(self, parttype_name):\n cond = SQLBinaryExpr(SQLFuncExpr(self.db_func_map[DB_FUNC_NAME_LOWER],\n COL_NAME_PARTTYPES_NAME),\n OP_EQ, SQLLiteral(parttype_name.lower()))\n entries = self.select_generic_data(select_list=[COL_NAME_PARTTYPES_PARTTYPEID],\n table_list=[TABLE_NAME_PARTTYPES],\n where=cond)\n if len(entries) == 1:\n return entries[0][COL_NAME_PARTTYPES_PARTTYPEID]\n elif len(entries) > 1:\n tmp = \"Part type '%s' \" % parttype_name\n tmp += \"cannot be resolved because it is ambiguous. (%s)\" % entries\n raise AdasDBError(tmp)\n\n raise AdasDBError(\"No resolution of '%s'. (%s)\" % (parttype_name, entries))",
"def get_record_id(thing: Union[\"Record\", t.RecordId, UUID, str]) -> t.RecordId:\n if isinstance(thing, UUID):\n return t.RecordId(thing)\n elif isinstance(thing, Record):\n return thing.id\n return t.RecordId(UUID(thing))",
"def redis_type_to_id(key_type):\n if key_type == b'string' or key_type == 'string':\n return REDIS_TYPE_ID_STRING\n elif key_type == b'hash' or key_type == 'hash':\n return REDIS_TYPE_ID_HASH\n elif key_type == b'list' or key_type == 'list':\n return REDIS_TYPE_ID_LIST\n elif key_type == b'set' or key_type == 'set':\n return REDIS_TYPE_ID_SET\n elif key_type == b'zset' or key_type == 'zset':\n return REDIS_TYPE_ID_ZSET\n else:\n return REDIS_TYPE_ID_UNKNOWN",
"def typeid(tt):\n if not isinstance(tt, type):\n tt = type(tt)\n try:\n return _typeids[tt]\n except KeyError:\n tidname = 'typeid_'+str(len(_typeids))\n gbl.gInterpreter.ProcessLine(\n \"namespace _cppyy_internal { auto* %s = &typeid(%s); }\" %\\\n (tidname, _get_name(tt),))\n tid = getattr(gbl._cppyy_internal, tidname)\n _typeids[tt] = tid\n return tid",
"def _type(self):\n return self._id[1]",
"def type_id(self):\n return self._device.type",
"def type_name(self):\n return self._type_name",
"def typeName (self, typecode):\n if typecode == qmf2.SCHEMA_DATA_VOID: return \"void\"\n elif typecode == qmf2.SCHEMA_DATA_BOOL: return \"bool\"\n elif typecode == qmf2.SCHEMA_DATA_INT: return \"int\"\n elif typecode == qmf2.SCHEMA_DATA_FLOAT: return \"float\"\n elif typecode == qmf2.SCHEMA_DATA_STRING: return \"string\"\n elif typecode == qmf2.SCHEMA_DATA_MAP: return \"map\"\n elif typecode == qmf2.SCHEMA_DATA_LIST: return \"list\"\n elif typecode == qmf2.SCHEMA_DATA_UUID: return \"uuid\"\n else:\n raise ValueError (\"Invalid type code: %s\" % str(typecode))",
"def name_to_type(self, name):\n return self.CUSTOM_PREFIX + name",
"def get_typecode(self, name):\n return self.codes['type_codes'][name]",
"def typeID(self) -> int:\n\t\t# pylint: disable=invalid-name\n\t\treturn self._ot",
"def type_name(self):\n return self.TYPE_NAMES[self.type]",
"def get_type(self) -> str:\n return self.row_dict['type']"
] | [
"0.7928146",
"0.7352052",
"0.7027344",
"0.700802",
"0.6797513",
"0.6752316",
"0.67234236",
"0.66540086",
"0.6632991",
"0.6545357",
"0.64484364",
"0.64434844",
"0.64428693",
"0.6408364",
"0.64009094",
"0.63250935",
"0.63191825",
"0.6295935",
"0.62649363",
"0.6245594",
"0.62261295",
"0.61999136",
"0.6129389",
"0.6121775",
"0.61034524",
"0.6094845",
"0.6071735",
"0.60584337",
"0.60572666",
"0.6043692"
] | 0.77214324 | 1 |
Returns the number of items indicated for a related list. | def get_related_list_count(self, heading):
locator = lex_locators["record"]["related"]["count"].format(heading)
count = self.selenium.get_webelement(locator).text
count = count.replace("(", "").replace(")", "")
return int(count) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def count_items(self):\n count = 0\n for o in self.order_lst:\n count += o.count()\n \n return count",
"def get_num_items(self):\r\n return self.num_items",
"def items_num(self):\n\t\treturn len(self.items)",
"def items_num(self):\n\t\treturn len(self.items)",
"def items_num(self):\n return len(self.items)",
"def _items_count(self, queryset: QuerySet) -> int:\n try:\n # forcing to find queryset.count instead of list.count:\n return queryset.all().count()\n except AttributeError:\n return len(queryset)",
"def count(self):\n return len(self.order_lst)",
"def num_items(self):\n num_items = 0\n for line in self.lines.all():\n num_items += line.quantity\n return num_items",
"def count(item):\n return len(item)",
"def count(self, item):\n # type: (Any) -> int\n return list.count(self, self.ref(item))",
"def items_count(self):\n return len(self.items)",
"def count(self):\n return len(self.order_items)",
"def total(my_list, item):\n return my_list.count(item)",
"def listing_count(self) -> int:\n return pulumi.get(self, \"listing_count\")",
"def count(some_list):\n return len(some_list)",
"def get_item_count(self):\n resp = self.app.get('/items')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)",
"def count_item(*, item : Any, list : Union[List[Any], ConduitVariable]) -> List[Any]:\n return list.count(item)",
"def get_total_number_of_items_in_queryset(self):\n if not hasattr(self, '_total_number_of_items_in_queryset'):\n self._total_number_of_items_in_queryset = self.get_queryset().count()\n return self._total_number_of_items_in_queryset",
"def get_number_of_items(self):\n return len(self.__item_map)",
"def length(self):\n # TODO: Count number of items\n return self.list.size",
"def get_list_length(self):\r\n return self._api.get_list_length()",
"def size(self):\n return self.list_size",
"def length(self):\n # TODO: Count number of items\n # print(\"self\", self.list)\n # print(\"type\", type(self.list))\n return len(self.list)",
"def length(self):\n return self.list.length",
"def length(self):\n return self.list.length",
"def length(self):\n return self.list.length()",
"def __len__(self, *args, **kwargs):\n return len(self._list(*args, **kwargs))",
"def size(self) -> int:\n return self.num_items",
"def getItemCount(self, ItemBase):\n Found = 0\n for CurrItem in self.List:\n if CurrItem.Base == ItemBase:\n Found = 1\n break\n\n if not Found: return 0\n else: return CurrItem.Count",
"def getSize(self):\r\n list = self.getList()\r\n return len(list)"
] | [
"0.74805504",
"0.7285734",
"0.69391817",
"0.69391817",
"0.6899637",
"0.6876357",
"0.68688875",
"0.68223625",
"0.68114096",
"0.67848015",
"0.67558473",
"0.6744843",
"0.67436016",
"0.6710305",
"0.66789955",
"0.6653826",
"0.6590316",
"0.6518359",
"0.6511388",
"0.6493154",
"0.64882946",
"0.6479507",
"0.64662004",
"0.64541095",
"0.64541095",
"0.6420698",
"0.64117926",
"0.64042777",
"0.6389271",
"0.6386835"
] | 0.75440687 | 0 |
Navigates to the Home view of a Salesforce Object | def go_to_object_home(self, obj_name):
url = self.cumulusci.org.lightning_base_url
url = "{}/lightning/o/{}/home".format(url, obj_name)
self.selenium.go_to(url)
self.wait_until_loading_is_complete(lex_locators["actions"]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _go_to_page(self):\n self.salesforce.go_to_setup_home()\n self.eda.wait_for_new_window(\"Home | Salesforce\")\n self.selenium.switch_window(\"Home | Salesforce\")\n self.salesforce.wait_until_loading_is_complete()",
"def go_to_record_home(self, obj_id):\n url = self.cumulusci.org.lightning_base_url\n url = \"{}/lightning/r/{}/view\".format(url, obj_id)\n self.selenium.go_to(url)\n self.wait_until_loading_is_complete(lex_locators[\"actions\"])",
"def home(self, *args, **kwargs):\n pass",
"def home(self):\n self.goto(0, 0)",
"def go_home(self):\r\n if self.home_url is not None:\r\n self.set_url(self.home_url)",
"def show_home(self):\n print(self.home.name)",
"def gohome(self):\n raise Exception(\"Not implemented\")",
"def home(self):\n self.goto(0, 0)\n self.setheading(0)",
"def _go_to_page(self, object_id=None, **kwargs):\n\n if kwargs and object_id:\n raise Exception(\"Specify an object id or keyword arguments, but not both\")\n\n if kwargs:\n # note: this will raise an exception if no object is found,\n # or if multiple objects are found.\n object_id = self._get_object(**kwargs)[\"Id\"]\n\n url_template = \"{root}/lightning/r/{object_name}/{object_id}/view\"\n url = url_template.format(\n root=self.cumulusci.org.lightning_base_url,\n object_name=self.object_name,\n object_id=object_id,\n )\n self.selenium.go_to(url)\n self.salesforce.wait_until_loading_is_complete()",
"def go_to_home_application(self):\n self.parent.show()\n self.hide()",
"def _home(self, op, context):\n self.page = \"HOME\"\n return {'FINISHED'}",
"def get(self, request):\n return redirect('start:home')",
"def get(self, request):\n return redirect('start:home')",
"def get(self, request):\n return redirect('start:home')",
"def show_homepage():\n return flask.redirect(\"products.show_product_manager\")",
"def go_home(request):\n\n url = request.route_url('home', _app_url=get_app_url(request))\n return HTTPFound(location=url)",
"def homepage(self):\n print('-=' * 12 + \" Home Page \" + '-=' * 12)\n self._user.list_contacts()\n options = {1: self.add_contact, 2:self.remove_contact ,3: self.view_contact_chat, 4: self.sign_out, 5: self.exit}\n print_out = \"(1) Add new contact \\n (2) Remove Contact \\n (3) View my chats \\n (4) Sign out \\n (5) Exit\"\n return self._take_option(options, print_out)",
"def open_home_page(self):\n com_util.wait_for(self.driver, element['waitToLoad'])\n com_util.tap_on(self.driver, element['clickOnContinue'])",
"def navigate_to(self):\n #self._kernel.navigate_to(route)\n pass",
"def home():\n payload = manager.get_payload()\n return render_template('index.html', payload=payload)",
"def go_to_setup_object_manager(self):\n url = self.cumulusci.org.lightning_base_url\n self.selenium.go_to(url + \"/lightning/setup/ObjectManager/home\")\n self.wait_until_loading_is_complete()",
"def goHome():\n\t#Go to pod home\n\tif screen.lastScreen in screen.protectedScreens:\n\t\tpodScreen.show()\n\telse:\n\t\tsplashScreen.show()",
"def home(request):\n\treturn render(request, \"compta/home.html\")",
"def click_home_button(self):\n return self",
"def home() -> Any:\n return home_page()",
"def go_home(self):\n command = _build_robovac_command(RobovacModes.WORK, RobovacCommands.GO_HOME)\n message = self._build_command_user_data_message(command)\n\n self._send_packet(message, False)",
"def homepage():\n if g.user:\n return redirect(f\"/user/{g.user.id}\")\n else:\n return redirect(\"/landing\")",
"def go_to_home_page():\n driver.get(home_page)",
"def home(request):\n return render_template('core/home.html')",
"def home_page(request):\r\n return render(request, 'ez_main/home_page.html')"
] | [
"0.72055185",
"0.6643557",
"0.65994006",
"0.65149677",
"0.6487298",
"0.6410646",
"0.6332935",
"0.63148344",
"0.6233376",
"0.6232684",
"0.6230519",
"0.61904204",
"0.61904204",
"0.61904204",
"0.6159019",
"0.6148358",
"0.6115426",
"0.6086321",
"0.6041018",
"0.60313743",
"0.60254914",
"0.5956077",
"0.5942687",
"0.59115154",
"0.58750874",
"0.5859975",
"0.58402205",
"0.5827382",
"0.5819887",
"0.58109725"
] | 0.7644369 | 0 |
Navigates to the Home tab of Salesforce Setup | def go_to_setup_home(self):
url = self.cumulusci.org.lightning_base_url
self.selenium.go_to(url + "/lightning/setup/SetupOneHome/home")
self.wait_until_loading_is_complete() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _go_to_page(self):\n self.salesforce.go_to_setup_home()\n self.eda.wait_for_new_window(\"Home | Salesforce\")\n self.selenium.switch_window(\"Home | Salesforce\")\n self.salesforce.wait_until_loading_is_complete()",
"def home(self):\n self.goto(0, 0)",
"def go_home(self):\r\n if self.home_url is not None:\r\n self.set_url(self.home_url)",
"def gohome(self):\n raise Exception(\"Not implemented\")",
"def open_home_page(self):\n com_util.wait_for(self.driver, element['waitToLoad'])\n com_util.tap_on(self.driver, element['clickOnContinue'])",
"def go_to_home_page():\n driver.get(home_page)",
"def home(self):\n self.goto(0, 0)\n self.setheading(0)",
"def go_to_object_home(self, obj_name):\n url = self.cumulusci.org.lightning_base_url\n url = \"{}/lightning/o/{}/home\".format(url, obj_name)\n self.selenium.go_to(url)\n self.wait_until_loading_is_complete(lex_locators[\"actions\"])",
"def goHome():\n\t#Go to pod home\n\tif screen.lastScreen in screen.protectedScreens:\n\t\tpodScreen.show()\n\telse:\n\t\tsplashScreen.show()",
"def go_to_home_application(self):\n self.parent.show()\n self.hide()",
"def home(self, *args, **kwargs):\n pass",
"def show_home(self):\n print(self.home.name)",
"def navigate_to():\n return Navi.navigate_to(\"Site Configuration\")",
"def _home(self, op, context):\n self.page = \"HOME\"\n return {'FINISHED'}",
"def seeHome(self, waitFor=0):\n print (\"seeHome\")\n self.driver.get(self.base_url)\n time.sleep(waitFor)",
"def go_to_setup_object_manager(self):\n url = self.cumulusci.org.lightning_base_url\n self.selenium.go_to(url + \"/lightning/setup/ObjectManager/home\")\n self.wait_until_loading_is_complete()",
"def test_02_go_home_from_compose_fax(self):\n self.compose_fax.click_menu_option_btn(self.compose_fax.MENU_HOME_BTN)\n self.fc.fd[\"home\"].verify_home_tile()",
"def show_homepage():\n return flask.redirect(\"products.show_product_manager\")",
"def home():\n G.DEVICE.home()",
"def homepage(self):\n print('-=' * 12 + \" Home Page \" + '-=' * 12)\n self._user.list_contacts()\n options = {1: self.add_contact, 2:self.remove_contact ,3: self.view_contact_chat, 4: self.sign_out, 5: self.exit}\n print_out = \"(1) Add new contact \\n (2) Remove Contact \\n (3) View my chats \\n (4) Sign out \\n (5) Exit\"\n return self._take_option(options, print_out)",
"def go_home(self):\n command = _build_robovac_command(RobovacModes.WORK, RobovacCommands.GO_HOME)\n message = self._build_command_user_data_message(command)\n\n self._send_packet(message, False)",
"def go_home(request):\n\n url = request.route_url('home', _app_url=get_app_url(request))\n return HTTPFound(location=url)",
"def visit_homepage(self) -> None:\n if self.home_page is not None:\n webbrowser.open(self.home_page)",
"def go_home(self):\n self.set_jpos(self._home_position, wait=True)",
"def click_home_button(self):\n return self",
"def test_home(self):\n self.selenium.get('{}/'.format(self.live_server_url))",
"def reset(self):\n self.home()",
"def get(self, request):\n return redirect('start:home')",
"def get(self, request):\n return redirect('start:home')",
"def get(self, request):\n return redirect('start:home')"
] | [
"0.8230737",
"0.71648955",
"0.70775753",
"0.70246947",
"0.7019923",
"0.7007859",
"0.67817223",
"0.67309695",
"0.6720572",
"0.6715987",
"0.6700127",
"0.6590666",
"0.65675294",
"0.6565556",
"0.65384",
"0.65323144",
"0.630954",
"0.6259082",
"0.62556046",
"0.6235437",
"0.6228942",
"0.62254405",
"0.62206566",
"0.62180173",
"0.6167961",
"0.6146365",
"0.6092391",
"0.6091061",
"0.6091061",
"0.6091061"
] | 0.7788175 | 1 |
Navigates to the Object Manager tab of Salesforce Setup | def go_to_setup_object_manager(self):
url = self.cumulusci.org.lightning_base_url
self.selenium.go_to(url + "/lightning/setup/ObjectManager/home")
self.wait_until_loading_is_complete() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _go_to_page(self):\n self.salesforce.go_to_setup_home()\n self.eda.wait_for_new_window(\"Home | Salesforce\")\n self.selenium.switch_window(\"Home | Salesforce\")\n self.salesforce.wait_until_loading_is_complete()",
"def go_to_object_home(self, obj_name):\n url = self.cumulusci.org.lightning_base_url\n url = \"{}/lightning/o/{}/home\".format(url, obj_name)\n self.selenium.go_to(url)\n self.wait_until_loading_is_complete(lex_locators[\"actions\"])",
"def go_to_record_home(self, obj_id):\n url = self.cumulusci.org.lightning_base_url\n url = \"{}/lightning/r/{}/view\".format(url, obj_id)\n self.selenium.go_to(url)\n self.wait_until_loading_is_complete(lex_locators[\"actions\"])",
"def NavigateGuestLogin(self):\n self._ExecuteOobeApi('Oobe.guestLoginForTesting')",
"def navigate_to(self):\n #self._kernel.navigate_to(route)\n pass",
"def _go_to_page(self, object_id=None, **kwargs):\n\n if kwargs and object_id:\n raise Exception(\"Specify an object id or keyword arguments, but not both\")\n\n if kwargs:\n # note: this will raise an exception if no object is found,\n # or if multiple objects are found.\n object_id = self._get_object(**kwargs)[\"Id\"]\n\n url_template = \"{root}/lightning/r/{object_name}/{object_id}/view\"\n url = url_template.format(\n root=self.cumulusci.org.lightning_base_url,\n object_name=self.object_name,\n object_id=object_id,\n )\n self.selenium.go_to(url)\n self.salesforce.wait_until_loading_is_complete()",
"def checkout(self): \n mtool = getToolByName(self.context, \"portal_membership\")\n ICheckoutManagement(self.context).redirectToNextURL(\"AFTER_START\")",
"def go_to_setup_home(self):\n url = self.cumulusci.org.lightning_base_url\n self.selenium.go_to(url + \"/lightning/setup/SetupOneHome/home\")\n self.wait_until_loading_is_complete()",
"def SetActiveObject(self):",
"def step():\n \n step = models.Step(action=u\"goto\", target=u\"http://www.joesfunerals.com\")",
"def navigate_to():\n return Navi.navigate_to(\"Site Configuration\")",
"def go_to_object_list(self, obj_name, filter_name=None):\n url = self.cumulusci.org.lightning_base_url\n url = \"{}/lightning/o/{}/list\".format(url, obj_name)\n if filter_name:\n url += \"?filterName={}\".format(filter_name)\n self.selenium.go_to(url)\n self.wait_until_loading_is_complete(lex_locators[\"actions\"])",
"def on_OpenExplorerAccount_clicked(self):\n # TODO: not implemented yet\n #raise NotImplementedError\n url = f\"http://kfc.matrix.io/{self.a0_Address}\"\n\n self.browser.openurl(url)\n self.OnlyDisplay(f\"start {url}\")",
"def action_goto(self):\n dialog = GoToDialog(self)\n dialog.exec()\n\n # Re-focus the main window\n self.activateWindow()",
"def go(target=None):\n global SESSION\n if SESSION:\n if target and SESSION.modelview and SESSION.modelview.ready:\n SESSION.modelview.init(target)\n SESSION.start_app()\n else:\n SESSION = Manager()\n SESSION.init_components(target)\n SESSION.start_app()",
"def open_user_page(self):\n self.switch_main_menu(\"Admin\")\n self.wait_unit_el_present(self.user_management_menu)\n self.click_menu(\"User Management\")\n self.click_menu(\"Users\")",
"def _management_portal(self, op, context):\n webbrowser.open(\"https://manage.batchapps.windows.net\", 2, True)\n return {'FINISHED'}",
"def manage_object(object_registry, object_class, object_id, next_url):\n if not object_class in object_registry:\n raise Exception(\"The object '%s' is not auto-managed\" % object_class)\n\n ManagedClass = object_registry[object_class]['class_name']\n managed_obj = ManagedClass()\n\n verb = 'Create'\n if object_id is not None:\n verb = 'Update'\n managed_obj = ManagedClass.query.get(object_id)\n\n ManagedClassForm = object_registry[object_class]['class_form']\n form = ManagedClassForm(obj=managed_obj)\n\n try:\n if form.validate_on_submit():\n form.populate_obj(managed_obj)\n if hasattr(managed_obj, 'form_populate_helper'):\n managed_obj.form_populate_helper()\n db.session.add(managed_obj)\n db.session.commit()\n flash(\"Object: '%s' Saved!\" % managed_obj.get_auto_manage_label(), category=\"success\")\n return redirect(url_for(next_url))\n except Exception as error:\n flash(error, category=\"danger\")\n return render_template(\n ManagedClass.manage_template(),\n title=\"%s %s\" % (verb, managed_obj.get_auto_manage_label()),\n form=form)",
"def loginAsManager(self):\n self.browser.open('http://nohost/plone/')\n self.browser.getLink('Log in').click()\n self.browser.getControl('Login Name').value = 'root'\n self.browser.getControl('Password').value = 'secret'\n self.browser.getControl('Log in').click()",
"def show_homepage():\n return flask.redirect(\"products.show_product_manager\")",
"def setCurrentPage(self):\n pass",
"def open_staff_debug_info(self):\r\n self.q(css='a.instructor-info-action').first.click()\r\n staff_debug_page = StaffDebugPage(self.browser)\r\n staff_debug_page.wait_for_page()\r\n return staff_debug_page",
"async def connect_to_tab(self) -> None:",
"def goToAdmin(request):\n\n\ttemplate = '../admin'\n\treturn HttpResponseRedirect(template)",
"def service_manual(self):\n try:\n webbrowser.open(\"https://openeo.org/documentation/1.0/qgis/#service-management\")\n except:\n pass",
"def OnFindManager(self, event):\r\n \r\n event.SetManager(self._owner_mgr)",
"def navigate(self):\n self.driver.get(self.url)\n self.driver.maximize_window()",
"def start_view(request):\n\n if request.user and Employee.objects.filter(user__pk=request.user.pk).exists():\n if Employee.objects.get(user__pk=request.user.pk).is_manager:\n return HttpResponseRedirect('/dashboard')\n else:\n return HttpResponseRedirect('/employee/show/%d/' % request.user.employee_user.first().pk)\n else:\n return HttpResponseRedirect('/login/')",
"async def begin_object(self):",
"def goto_menu(self, *args):\n self.manager.current = 'Main Menu'\n self.reset()\n self.manager.reset()"
] | [
"0.70036185",
"0.6478363",
"0.5637787",
"0.55917794",
"0.5564666",
"0.5546104",
"0.5476443",
"0.54016584",
"0.5381777",
"0.5281001",
"0.5279266",
"0.52702373",
"0.52061206",
"0.5108897",
"0.50760114",
"0.50358033",
"0.5031218",
"0.5030932",
"0.5012035",
"0.4993091",
"0.49859098",
"0.49678934",
"0.49416405",
"0.49205893",
"0.49175483",
"0.4871867",
"0.48574",
"0.48568258",
"0.48492646",
"0.4841098"
] | 0.79498047 | 0 |